repo_name (stringlengths 6-100) | path (stringlengths 4-294) | copies (stringlengths 1-5) | size (stringlengths 4-6) | content (stringlengths 606-896k) | license (stringclasses 15 values)
---|---|---|---|---|---|
sourcelair/ceryx | ceryx/tests/client/connection.py | 2 | 1957 | from urllib3.connection import HTTPConnection, HTTPSConnection
import os
import socket
DEFAULT_CERYX_HOST = "ceryx" # Set by Docker Compose in tests
CERYX_HOST = os.getenv("CERYX_HOST", DEFAULT_CERYX_HOST)
class CeryxTestsHTTPConnection(HTTPConnection):
"""
Custom-built HTTPConnection for Ceryx tests. Force sets the request's
host to the configured Ceryx host, if the request's original host
ends with `.ceryx.test`.
"""
@property
def host(self):
"""
Behave like the original property; this override exists only so the setter below can be customised.
"""
return self._dns_host.rstrip('.')
@host.setter
def host(self, value):
"""
If the requested host ends with `.ceryx.test`, force-set the actual
host to the configured Ceryx host, so that the corresponding requests
are sent to Ceryx.
"""
self._dns_host = CERYX_HOST if value.endswith(".ceryx.test") else value
class CeryxTestsHTTPSConnection(CeryxTestsHTTPConnection, HTTPSConnection):
def __init__(
self, host, port=None, key_file=None, cert_file=None,
key_password=None, strict=None,
timeout=socket._GLOBAL_DEFAULT_TIMEOUT, ssl_context=None,
server_hostname=None, **kw,
):
# Initialise the HTTPConnection subclass created above.
CeryxTestsHTTPConnection.__init__(
self, host, port, strict=strict, timeout=timeout, **kw,
)
self.key_file = key_file
self.cert_file = cert_file
self.key_password = key_password
self.ssl_context = ssl_context
self.server_hostname = server_hostname
# ------------------------------
# Original comment from upstream
# ------------------------------
#
# Required property for Google AppEngine 1.9.0 which otherwise causes
# HTTPS requests to go out as HTTP. (See Issue #356)
self._protocol = 'https'
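# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# One plausible way these connection classes could be wired into urllib3 pools
# for the test client; the pool subclasses, hostname and port below are
# assumptions, not something the original test suite necessarily does.
from urllib3.connectionpool import HTTPConnectionPool, HTTPSConnectionPool


class CeryxTestsHTTPConnectionPool(HTTPConnectionPool):
    # urllib3 instantiates ConnectionCls for every new connection in the pool.
    ConnectionCls = CeryxTestsHTTPConnection


class CeryxTestsHTTPSConnectionPool(HTTPSConnectionPool):
    ConnectionCls = CeryxTestsHTTPSConnection


if __name__ == "__main__":
    # Any `*.ceryx.test` hostname is transparently re-pointed at CERYX_HOST.
    pool = CeryxTestsHTTPConnectionPool("myapp.ceryx.test", port=80)
    response = pool.request("GET", "/")
    print(response.status)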
| mit |
Ophiuchus1312/enigma2-master | lib/python/Screens/TimerEdit.py | 1 | 20176 | from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.Label import Label
from Components.config import config
from Components.MenuList import MenuList
from Components.TimerList import TimerList
from Components.TimerSanityCheck import TimerSanityCheck
from Components.UsageConfig import preferredTimerPath
from Components.Sources.StaticText import StaticText
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from ServiceReference import ServiceReference
from Screens.TimerEntry import TimerEntry, TimerLog
from Tools.BoundFunction import boundFunction
from Tools.FuzzyDate import FuzzyTime
from Tools.Directories import resolveFilename, SCOPE_HDD
from time import time, localtime
from timer import TimerEntry as RealTimerEntry
from enigma import eServiceCenter
import Tools.CopyFiles
import os
class TimerEditList(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
CLEANUP = 3
DELETE = 4
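# The constants above are used as key_*_choice values: they record which action
# is currently bound to each colour button, so updateState() only rebinds and
# relabels a button when its role actually changes.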
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Timer List"))
self.onChangedEntry = [ ]
list = [ ]
self.list = list
self.fillTimerList()
self["timerlist"] = TimerList(list)
self.key_red_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["key_red"] = Button(" ")
self["key_green"] = Button(_("Add"))
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self["description"] = Label()
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.openEdit,
"cancel": self.leave,
"green": self.addCurrentTimer,
"log": self.showLog,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down
}, -1)
self.setTitle(_("Timer overview"))
self.session.nav.RecordTimer.on_state_change.append(self.onStateChange)
self.onShown.append(self.updateState)
def createSummary(self):
return TimerEditListSummary
def up(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveUp)
self.updateState()
def down(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveDown)
self.updateState()
def left(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageUp)
self.updateState()
def right(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageDown)
self.updateState()
def toggleDisabledState(self):
cur=self["timerlist"].getCurrent()
if cur:
t = cur
if t.disabled:
# print "try to ENABLE timer"
t.enable()
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, cur)
if not timersanitycheck.check():
t.disable()
print "Sanity check failed"
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, simulTimerList)
else:
print "Sanity check passed"
if timersanitycheck.doubleCheck():
t.disable()
else:
if t.isRunning():
if t.repeated:
list = (
(_("Stop current event but not coming events"), "stoponlycurrent"),
(_("Stop current event and disable coming events"), "stopall"),
(_("Don't stop current event but disable coming events"), "stoponlycoming")
)
self.session.openWithCallback(boundFunction(self.runningEventCallback, t), ChoiceBox, title=_("Repeating event currently recording... What do you want to do?"), list = list)
else:
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def runningEventCallback(self, t, result):
if result is not None:
if result[1] == "stoponlycurrent" or result[1] == "stopall":
t.enable()
t.processRepeated(findRunningEvent = False)
self.session.nav.RecordTimer.doActivate(t)
if result[1] == "stoponlycoming" or result[1] == "stopall":
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
cur = self["timerlist"].getCurrent()
if cur:
self["description"].setText(cur.description)
if self.key_red_choice != self.DELETE:
self["actions"].actions.update({"red":self.removeTimerQuestion})
self["key_red"].setText(_("Delete"))
self.key_red_choice = self.DELETE
if cur.disabled and (self.key_yellow_choice != self.ENABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Enable"))
self.key_yellow_choice = self.ENABLE
elif cur.isRunning() and not cur.repeated and (self.key_yellow_choice != self.EMPTY):
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
elif ((not cur.isRunning())or cur.repeated ) and (not cur.disabled) and (self.key_yellow_choice != self.DISABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Disable"))
self.key_yellow_choice = self.DISABLE
else:
if self.key_red_choice != self.EMPTY:
self.removeAction("red")
self["key_red"].setText(" ")
self.key_red_choice = self.EMPTY
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
showCleanup = True
for x in self.list:
if (not x[0].disabled) and (x[1] == True):
break
else:
showCleanup = False
if showCleanup and (self.key_blue_choice != self.CLEANUP):
self["actions"].actions.update({"blue":self.cleanupQuestion})
self["key_blue"].setText(_("Cleanup"))
self.key_blue_choice = self.CLEANUP
elif (not showCleanup) and (self.key_blue_choice != self.EMPTY):
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
if len(self.list) == 0:
return
timer = self['timerlist'].getCurrent()
if timer:
try:
name = str(timer.name)
time = ("%s %s ... %s") % (FuzzyTime(timer.begin)[0], FuzzyTime(timer.begin)[1], FuzzyTime(timer.end)[1])
duration = ("(%d " + _("mins") + ")") % ((timer.end - timer.begin) / 60)
service = str(timer.service_ref.getServiceName())
if timer.state == RealTimerEntry.StateWaiting:
state = _("waiting")
elif timer.state == RealTimerEntry.StatePrepared:
state = _("about to start")
elif timer.state == RealTimerEntry.StateRunning:
if timer.justplay:
state = _("zapped")
else:
state = _("recording...")
elif timer.state == RealTimerEntry.StateEnded:
state = _("done!")
else:
state = _("<unknown>")
except:
name = ""
time = ""
duration = ""
service = ""
state = ""
else:
name = ""
time = ""
duration = ""
service = ""
state = ""
for cb in self.onChangedEntry:
cb(name, time, duration, service, state)
def fillTimerList(self):
#helper function to move finished timers to end of list
def eol_compare(x, y):
if x[0].state != y[0].state and x[0].state == RealTimerEntry.StateEnded or y[0].state == RealTimerEntry.StateEnded:
return cmp(x[0].state, y[0].state)
return cmp(x[0].begin, y[0].begin)
list = self.list
print list
del list[:]
list.extend([(timer, False) for timer in self.session.nav.RecordTimer.timer_list])
list.extend([(timer, True) for timer in self.session.nav.RecordTimer.processed_timers])
if config.usage.timerlist_finished_timer_position.index: #end of list
list.sort(cmp = eol_compare)
else:
list.sort(key = lambda x: x[0].begin)
def showLog(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerLog, cur)
def openEdit(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerEntry, cur)
def cleanupQuestion(self):
self.session.openWithCallback(self.cleanupTimer, MessageBox, _("Really delete done timers?"))
def cleanupTimer(self, delete):
if delete:
self.session.nav.RecordTimer.cleanup()
self.refill()
self.updateState()
def removeTimerQuestion(self):
cur = self["timerlist"].getCurrent()
if not cur:
return
service = str(cur.service_ref.getServiceName())
t = localtime(cur.begin)
f = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + " " + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + " - " + service + " - " + cur.name
f = f.replace(':','_')
f = f.replace(',','_')
f = f.replace('/','_')
onhdd = False
self.moviename = f
path = resolveFilename(SCOPE_HDD)
files = os.listdir(path)
for file in files:
if file.startswith(f):
onhdd = True
break
if onhdd:
message = (_("Do you really want to delete %s?") % (cur.name))
choices = [(_("No"), "no"),
(_("Yes, delete from Timerlist"), "yes"),
(_("Yes, delete from Timerlist and delete recording"), "yesremove")]
self.session.openWithCallback(self.startDelete, ChoiceBox, title=message, list=choices)
else:
self.session.openWithCallback(self.removeTimer, MessageBox, _("Do you really want to delete %s?") % (cur.name), default = False)
def startDelete(self, answer):
if not answer or not answer[1]:
self.close()
return
if answer[1] == 'no':
return
elif answer[1] == 'yes':
self.removeTimer(True)
elif answer[1] == 'yesremove':
if config.EMC.movie_trashcan_enable.getValue():
trashpath = config.EMC.movie_trashcan_path.getValue()
self.MoveToTrash(trashpath)
elif config.usage.movielist_trashcan.getValue():
trashpath = resolveFilename(SCOPE_HDD) + '.Trash'
self.MoveToTrash(trashpath)
else:
self.session.openWithCallback(self.callbackRemoveRecording, MessageBox, _("Do you really want to delete the recording?"), default = False)
def callbackRemoveRecording(self, answer):
if not answer:
return
self.delete()
def removeTimer(self, result):
if not result:
return
list = self["timerlist"]
cur = list.getCurrent()
if cur:
timer = cur
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
self.refill()
self.updateState()
def MoveToTrash(self, trashpath):
self.removeTimer(True)
moviepath = os.path.normpath(resolveFilename(SCOPE_HDD))
movedList =[]
files = os.listdir(moviepath)
for file in files:
if file.startswith(self.moviename):
movedList.append((os.path.join(moviepath, file), os.path.join(trashpath, file)))
Tools.CopyFiles.moveFiles(movedList, None)
def delete(self):
item = self["timerlist"].getCurrent()
if item is None:
return # huh?
name = item.name
service = str(item.service_ref.getServiceName())
t = localtime(item.begin)
f = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + " " + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + " - " + service + " - " + name
f = f.replace(':','_')
f = f.replace(',','_')
f = f.replace('/','_')
path = resolveFilename(SCOPE_HDD)
self.removeTimer(True)
from enigma import eBackgroundFileEraser
files = os.listdir(path)
for file in files:
if file.startswith(f):
eBackgroundFileEraser.getInstance().erase(os.path.realpath(path + file))
def refill(self):
oldsize = len(self.list)
self.fillTimerList()
lst = self["timerlist"]
newsize = len(self.list)
if oldsize and oldsize != newsize:
idx = lst.getCurrentIndex()
lst.entryRemoved(idx)
else:
lst.invalidate()
def addCurrentTimer(self):
event = None
service = self.session.nav.getCurrentService()
if service is not None:
info = service.info()
if info is not None:
event = info.getEvent(0)
# FIXME only works if already playing a service
serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup())
if event is None:
data = (int(time()), int(time() + 60), "", "", None)
else:
data = parseEvent(event, description = False)
self.addTimer(RecordTimerEntry(serviceref, checkOldTimers = True, dirname = preferredTimerPath(), *data))
def addTimer(self, timer):
self.session.openWithCallback(self.finishedAdd, TimerEntry, timer)
def finishedEdit(self, answer):
# print "finished edit"
if answer[0]:
# print "Edited timer"
entry = answer[1]
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, entry)
success = False
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList())
else:
success = True
else:
success = True
if success:
print "Sanity check passed"
self.session.nav.RecordTimer.timeChanged(entry)
self.fillTimerList()
self.updateState()
# else:
# print "Timeredit aborted"
def finishedAdd(self, answer):
# print "finished add"
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
self.fillTimerList()
self.updateState()
# else:
# print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def leave(self):
self.session.nav.RecordTimer.on_state_change.remove(self.onStateChange)
self.close()
def onStateChange(self, entry):
self.refill()
self.updateState()
class TimerSanityConflict(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
EDIT = 3
def __init__(self, session, timer):
Screen.__init__(self, session)
self.timer = timer
print "TimerSanityConflict"
self["timer1"] = TimerList(self.getTimerList(timer[0]))
self.list = []
self.list2 = []
count = 0
for x in timer:
if count != 0:
self.list.append((_("Conflicting timer") + " " + str(count), x))
self.list2.append((timer[count], False))
count += 1
if count == 1:
self.list.append((_("Channel not in services list")))
self["list"] = MenuList(self.list)
self["timer2"] = TimerList(self.list2)
self["key_red"] = Button("Edit")
self["key_green"] = Button(" ")
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self.key_green_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.leave_ok,
"cancel": self.leave_cancel,
"red": self.editTimer1,
"up": self.up,
"down": self.down
}, -1)
self.setTitle(_("Timer sanity error"))
self.onShown.append(self.updateState)
def getTimerList(self, timer):
return [(timer, False)]
def editTimer1(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer1"].getCurrent())
def editTimer2(self):
self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer2"].getCurrent())
def toggleNewTimer(self):
if self.timer[0].disabled:
self.timer[0].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[0])
elif not self.timer[0].isRunning():
self.timer[0].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[0])
self.finishedEdit((True, self.timer[0]))
def toggleTimer(self):
x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
if self.timer[x].disabled:
self.timer[x].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[x])
if not self.timer[0].isRunning():
self.timer[0].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[0])
elif not self.timer[x].isRunning():
self.timer[x].disabled = True
self.session.nav.RecordTimer.timeChanged(self.timer[x])
if self.timer[x].disabled:
self.timer[0].disabled = False
self.session.nav.RecordTimer.timeChanged(self.timer[0])
self.finishedEdit((True, self.timer[0]))
def finishedEdit(self, answer):
self.leave_ok()
def leave_ok(self):
self.close((True, self.timer[0]))
def leave_cancel(self):
self.close((False, self.timer[0]))
def up(self):
self["list"].instance.moveSelection(self["list"].instance.moveUp)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def down(self):
self["list"].instance.moveSelection(self["list"].instance.moveDown)
self["timer2"].moveToIndex(self["list"].getSelectedIndex())
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
if self.timer[0] is not None:
if self.timer[0].disabled and self.key_green_choice != self.ENABLE:
self["actions"].actions.update({"green":self.toggleTimer})
self["key_green"].setText(_("Enable"))
self.key_green_choice = self.ENABLE
elif self.timer[0].isRunning() and not self.timer[0].repeated and self.key_green_choice != self.EMPTY:
self.removeAction("green")
self["key_green"].setText(" ")
self.key_green_choice = self.EMPTY
elif (not self.timer[0].isRunning() or self.timer[0].repeated ) and self.key_green_choice != self.DISABLE:
self["actions"].actions.update({"green":self.toggleNewTimer})
self["key_green"].setText(_("Disable"))
self.key_green_choice = self.DISABLE
if len(self.timer) > 1:
x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
if self.timer[x] is not None:
if self.key_yellow_choice == self.EMPTY:
self["actions"].actions.update({"yellow":self.editTimer2})
self["key_yellow"].setText(_("Edit"))
self.key_yellow_choice = self.EDIT
if self.timer[x].disabled and self.key_blue_choice != self.ENABLE:
self["actions"].actions.update({"blue":self.toggleTimer})
self["key_blue"].setText(_("Enable"))
self.key_blue_choice = self.ENABLE
elif self.timer[x].isRunning() and not self.timer[x].repeated and self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
elif (not self.timer[x].isRunning() or self.timer[x].repeated ) and self.key_blue_choice != self.DISABLE:
self["actions"].actions.update({"blue":self.toggleTimer})
self["key_blue"].setText(_("Disable"))
self.key_blue_choice = self.DISABLE
else:
#FIXME: this doesn't hide the buttons themselves, just the text
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
if self.key_blue_choice != self.EMPTY:
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
class TimerEditListSummary(Screen):
def __init__(self, session, parent):
Screen.__init__(self, session, parent = parent)
self["name"] = StaticText("")
self["service"] = StaticText("")
self["time"] = StaticText("")
self["duration"] = StaticText("")
self["state"] = StaticText("")
self.onShow.append(self.addWatcher)
self.onHide.append(self.removeWatcher)
def addWatcher(self):
self.parent.onChangedEntry.append(self.selectionChanged)
self.parent.updateState()
def removeWatcher(self):
self.parent.onChangedEntry.remove(self.selectionChanged)
def selectionChanged(self, name, time, duration, service, state):
self["name"].text = name
self["service"].text = service
self["time"].text = time
self["duration"].text = duration
self["state"].text = state
| gpl-2.0 |
jianlirong/incubator-hawq | pxf/src/scripts/pxf_manual_failover.py | 12 | 5127 | #!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pxf_manual_failover.py
# This python script will adapt the PXF external tables to the new NameNode in case
# of High Availability manual failover.
# The script receives as input the new namenode host and then goes over each external
# table entry in the catalog table pg_exttable and updates the LOCATION field -
# replaces the old Namenode host with the new one.
import sys
from gppylib.db import dbconn
def wrongUsage():
'Print usage string and leave'
print "usage: pxf_manual_failover <new_namenode_host> <database> [-h <hawq_master_host>] [-p <hawq_master_port>]"
exit()
def getNewHost():
'reads new NameNode from command line - exits if wrong input'
if len(sys.argv) < 2:
wrongUsage()
return sys.argv[1]
def getDatabase():
'reads database from command line - exits if wrong input'
if len(sys.argv) < 3:
wrongUsage()
return sys.argv[2]
def getOptionalInput(flag, default):
"""generic function - retrieves optional parameters from the input
If [flag <value>] is not on the command line, we use default
Explaining the parsing. This is how the command line string that
sys.argv returns, looks like:
['./pxf_manual_failover.py', 'localhost', 'films', '-h', 'isenshackamac.corp.emc.com', '-p', '5432']
"""
input = list(sys.argv)
if input.count(flag) == 0:
return default
flag_idx = input.index(flag)
if len(input) <= flag_idx + 1:  # make sure a value actually follows the flag
wrongUsage()
return input[flag_idx + 1]
def getMasterHost():
'reads hawq_master_host from command line - optional'
return getOptionalInput("-h", "localhost")
def getMasterPort():
'reads hawq_master_port from command line - optional'
return getOptionalInput("-p", 5432)
def isPxfTable(location):
'decide if this is a PXF table by analyzing the LOCATION field for the table entry in pg_exttable'
return cmp(location[1:7],"pxf://") == 0
def makeNewLocation(new_host, location):
'replaces [host] substring in [location] with [new_host]'
start = location.find("//")
end = location.find(":", start)
size = len(location)
new_location = location[:start] + "//" + new_host + location[end:size]
return new_location
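# Worked example (illustrative; host names are assumptions):
#   makeNewLocation("nn-standby.example.com",
#                   "pxf://old-namenode:51200/tmp/data?PROFILE=HdfsTextSimple")
# returns "pxf://nn-standby.example.com:51200/tmp/data?PROFILE=HdfsTextSimple".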
def promptUser(new_host, database, hawq_master_host, hawq_master_port):
'Give user a last chance to change his mind'
print "Will replace the current Namenode hostname with [" + new_host + "] in database [" + database + "]"
print "Hawq master is: [" + hawq_master_host + "] and Hawq port is: [" + str(hawq_master_port) + "]"
reply = raw_input('Do you wish to continue: Yes[Y]/No[N] ?')
reply = reply.lower()
if not(cmp(reply, 'yes') == 0 or cmp(reply, 'y') == 0):
print "User decided to cancel operation. Leaving..."
exit()
def connectToDb(hawq_master_host, hawq_master_port, database):
'connect to database'
url = dbconn.DbURL(hawq_master_host
,port = hawq_master_port
,dbname = database
)
return dbconn.connect(dburl = url)
def updateOneRecord(conn, new_host, row):
'Updates the LOCATION field of one record'
if not(isPxfTable(row[0])):
return
new_location = makeNewLocation(new_host, row[0])
dbconn.execSQL(conn, "UPDATE pg_exttable SET location = '" + new_location + "' WHERE reloid = "
+ str(row[1]))
print "Updated LOCATION for table ", row[2], "oid: ", row[1], \
"\n Old LOCATION: ", row[0], "\n New LOCATION: ", new_location
def updateNnHost(conn, new_host):
'update the LOCATION field for each record in pg_exttable'
dbconn.execSQL(conn, "set allow_system_table_mods = 'DML'")
dbconn.execSQL(conn, "START TRANSACTION")
cursor = dbconn.execSQL(conn, "SELECT location, reloid, relname FROM pg_exttable, pg_class WHERE reloid = relfilenode")
for row in cursor:
updateOneRecord(conn, new_host, row)
conn.commit()
def main():
'The driver function of this module'
new_host = getNewHost()
database = getDatabase()
hawq_master_host = getMasterHost()
hawq_master_port = getMasterPort()
promptUser(new_host, database, hawq_master_host, hawq_master_port)
conn = connectToDb(hawq_master_host, hawq_master_port, database)
updateNnHost(conn, new_host)
conn.close()
if __name__ == "__main__":
main()
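# Example invocation (illustrative; host and database names are assumptions):
#   ./pxf_manual_failover.py new-namenode.example.com mydb -h hawq-master.example.com -p 5432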
| apache-2.0 |
Mirdrack/4chanscrapper | lib/python2.7/site-packages/requests/structures.py | 1160 | 2977 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections
class CaseInsensitiveDict(collections.MutableMapping):
"""
A case-insensitive ``dict``-like object.
Implements all methods and operations of
``collections.MutableMapping`` as well as dict's ``copy``. Also
provides ``lower_items``.
All keys are expected to be strings. The structure remembers the
case of the last key to be set, and ``iter(instance)``,
``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
will contain case-sensitive keys. However, querying and contains
testing is case insensitive::
cid = CaseInsensitiveDict()
cid['Accept'] = 'application/json'
cid['aCCEPT'] == 'application/json' # True
list(cid) == ['Accept'] # True
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header, regardless
of how the header name was originally stored.
If the constructor, ``.update``, or equality comparison
operations are given keys that have equal ``.lower()``s, the
behavior is undefined.
"""
def __init__(self, data=None, **kwargs):
self._store = dict()
if data is None:
data = {}
self.update(data, **kwargs)
def __setitem__(self, key, value):
# Use the lowercased key for lookups, but store the actual
# key alongside the value.
self._store[key.lower()] = (key, value)
def __getitem__(self, key):
return self._store[key.lower()][1]
def __delitem__(self, key):
del self._store[key.lower()]
def __iter__(self):
return (casedkey for casedkey, mappedvalue in self._store.values())
def __len__(self):
return len(self._store)
def lower_items(self):
"""Like iteritems(), but with all lowercase keys."""
return (
(lowerkey, keyval[1])
for (lowerkey, keyval)
in self._store.items()
)
def __eq__(self, other):
if isinstance(other, collections.Mapping):
other = CaseInsensitiveDict(other)
else:
return NotImplemented
# Compare insensitively
return dict(self.lower_items()) == dict(other.lower_items())
# Copy is required
def copy(self):
return CaseInsensitiveDict(self._store.values())
def __repr__(self):
return str(dict(self.items()))
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
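# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# LookupDict resolves keys through the instance __dict__, so attribute writes
# become lookup entries; the names and codes below are assumptions for the demo.
if __name__ == "__main__":
    codes = LookupDict(name="status_codes")
    codes.ok = 200
    codes.not_found = 404
    print(codes["ok"])               # 200 (attribute access and item access agree)
    print(codes.get("teapot", 418))  # 418 (missing keys fall back to the default)
    print(codes)                     # <lookup 'status_codes'>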
| mit |
libcrosswind/libcrosswind | platform/windows/compilers/x64/TDM-GCC-64/gdb64/bin/lib/wsgiref/headers.py | 229 | 5879 | """Manage HTTP Response Headers
Much of this module is red-handedly pilfered from email.message in the stdlib,
so portions are Copyright (C) 2001,2002 Python Software Foundation, and were
written by Barry Warsaw.
"""
from types import ListType, TupleType
# Regular expression that matches `special' characters in parameters, the
# existence of which force quoting of the parameter value.
import re
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
class Headers:
"""Manage a collection of HTTP response headers"""
def __init__(self,headers):
if type(headers) is not ListType:
raise TypeError("Headers must be a list of name/value tuples")
self._headers = headers
def __len__(self):
"""Return the total number of headers, including duplicates."""
return len(self._headers)
def __setitem__(self, name, val):
"""Set the value of a header."""
del self[name]
self._headers.append((name, val))
def __delitem__(self,name):
"""Delete all occurrences of a header, if present.
Does *not* raise an exception if the header is missing.
"""
name = name.lower()
self._headers[:] = [kv for kv in self._headers if kv[0].lower() != name]
def __getitem__(self,name):
"""Get the first header value for 'name'
Return None if the header is missing instead of raising an exception.
Note that if the header appeared multiple times, the first exactly which
occurrance gets returned is undefined. Use getall() to get all
the values matching a header field name.
"""
return self.get(name)
def has_key(self, name):
"""Return true if the message contains the header."""
return self.get(name) is not None
__contains__ = has_key
def get_all(self, name):
"""Return a list of all the values for the named field.
These will be sorted in the order they appeared in the original header
list or were added to this instance, and may contain duplicates. Any
fields deleted and re-inserted are always appended to the header list.
If no fields exist with the given name, returns an empty list.
"""
name = name.lower()
return [kv[1] for kv in self._headers if kv[0].lower()==name]
def get(self,name,default=None):
"""Get the first header value for 'name', or return 'default'"""
name = name.lower()
for k,v in self._headers:
if k.lower()==name:
return v
return default
def keys(self):
"""Return a list of all the header field names.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [k for k, v in self._headers]
def values(self):
"""Return a list of all header values.
These will be sorted in the order they appeared in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return [v for k, v in self._headers]
def items(self):
"""Get all the header fields and values.
These will be sorted in the order they were in the original header
list, or were added to this instance, and may contain duplicates.
Any fields deleted and re-inserted are always appended to the header
list.
"""
return self._headers[:]
def __repr__(self):
return "Headers(%r)" % self._headers
def __str__(self):
"""str() returns the formatted headers, complete with end line,
suitable for direct HTTP transmission."""
return '\r\n'.join(["%s: %s" % kv for kv in self._headers]+['',''])
def setdefault(self,name,value):
"""Return first matching header value for 'name', or 'value'
If there is no header named 'name', add a new header with name 'name'
and value 'value'."""
result = self.get(name)
if result is None:
self._headers.append((name,value))
return value
else:
return result
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in _params.items():
if v is None:
parts.append(k.replace('_', '-'))
else:
parts.append(_formatparam(k.replace('_', '-'), v))
self._headers.append((_name, "; ".join(parts)))
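# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# A small demonstration of the Headers wrapper; the header values are assumptions.
if __name__ == '__main__':
    h = Headers([('Content-Type', 'text/plain')])
    h.add_header('Content-Disposition', 'attachment', filename='report.txt')
    h.setdefault('X-Powered-By', 'wsgiref')
    print(str(h))  # formatted header block: one "Name: value" line each, then a blank line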
| gpl-3.0 |
prark/bitcoinxt | qa/rpc-tests/test_framework/blocktools.py | 93 | 2057 | # blocktools.py - utilities for manipulating blocks and transactions
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from script import CScript, CScriptOp
# Create a block (with regtest difficulty)
def create_block(hashprev, coinbase, nTime=None):
block = CBlock()
if nTime is None:
import time
block.nTime = int(time.time()+600)
else:
block.nTime = nTime
block.hashPrevBlock = hashprev
block.nBits = 0x207fffff # Will break after a difficulty adjustment...
block.vtx.append(coinbase)
block.hashMerkleRoot = block.calc_merkle_root()
block.calc_sha256()
return block
def serialize_script_num(value):
r = bytearray(0)
if value == 0:
return r
neg = value < 0
absvalue = -value if neg else value
while (absvalue):
r.append(chr(absvalue & 0xff))
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return r
counter=1
# Create an anyone-can-spend coinbase transaction, assuming no miner fees
def create_coinbase(heightAdjust = 0):
global counter
coinbase = CTransaction()
coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff),
ser_string(serialize_script_num(counter+heightAdjust)), 0xffffffff))
counter += 1
coinbaseoutput = CTxOut()
coinbaseoutput.nValue = 50*100000000
halvings = int((counter+heightAdjust)/150) # regtest
coinbaseoutput.nValue >>= halvings
coinbaseoutput.scriptPubKey = ""
coinbase.vout = [ coinbaseoutput ]
coinbase.calc_sha256()
return coinbase
# Create a transaction with an anyone-can-spend output, that spends the
# nth output of prevtx.
def create_transaction(prevtx, n, sig, value):
tx = CTransaction()
assert(n < len(prevtx.vout))
tx.vin.append(CTxIn(COutPoint(prevtx.sha256, n), sig, 0xffffffff))
tx.vout.append(CTxOut(value, ""))
tx.calc_sha256()
return tx
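# --- Editor's note: illustrative usage sketch, not part of the original file. ---
# How the helpers above might be composed in a regtest test case; the previous
# block hash and the spend amount are assumptions made only for this example.
if __name__ == '__main__':
    coinbase = create_coinbase()
    prev_hash = 0x7fffffff  # placeholder hash of the current chain tip (assumed)
    block = create_block(prev_hash, coinbase)
    # Spend output 0 of the coinbase with an anyone-can-spend transaction,
    # leaving 1 BTC (in satoshis) as an implicit fee.
    spend = create_transaction(coinbase, 0, "", 49 * 100000000)
    print("block %s spends via tx %s" % (block.hash, spend.hash))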
| mit |
polaris-gslb/polaris-core | tests/test-polaris-pdns.py | 2 | 1937 | #!/usr/bin/env python3
import subprocess
import sys
import time
import json
POLARIS_PDNS_FILE = '/opt/polaris/bin/polaris-pdns'
def pretty_json(s):
d = json.loads(s)
return json.dumps(d, indent=4, separators=(',', ': '))
class TestPolarisPDNS:
def __init__(self, polaris_pdns_file):
self.proc = subprocess.Popen([ polaris_pdns_file ],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
def execute_query(self, query):
query += '\n'
self.proc.stdin.write(query.encode())
self.proc.stdin.flush()
output = self.proc.stdout.readline().decode()
return pretty_json(output)
def prepare_query(self, method, params):
q = {
'method': method,
'parameters': {
'qtype': params['qtype'],
'qname': params['qname'],
'remote': params['remote'],
'local': params['local'],
'real-remote': params['real-remote'],
'zone-id': params['zone-id']
}
}
return json.dumps(q)
if __name__ == '__main__':
t = TestPolarisPDNS(POLARIS_PDNS_FILE)
method = 'lookup'
params = {
'qtype': 'A',
'qname': 'www.example.com',
'remote': '10.1.1.21',
'local': '0.0.0.0',
'real-remote': '10.1.1.21/32',
'zone-id': -1
}
q = t.prepare_query(method, params)
print("query: ", pretty_json(q), "\n")
print("response: ", t.execute_query(q))
method = 'lookup'
params = {
'qtype': 'SOA',
'qname': 'www.example.com',
'remote': '10.1.1.21',
'local': '0.0.0.0',
'real-remote': '10.1.1.21/32',
'zone-id': -1
}
q = t.prepare_query(method, params)
print("query: ", pretty_json(q), "\n")
print("response: ", t.execute_query(q))
| bsd-3-clause |
daniel20162016/my-first | read_xml_all/calcul_matrix_compare_je_good_192matrix.py | 1 | 6357 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 31 15:45:22 2016
@author: wang
"""
#from matplotlib import pylab as plt
#from numpy import fft, fromstring, int16, linspace
#import wave
from read_wav_xml_good_1 import*
from matrix_24_2 import*
from max_matrix_norm import*
import numpy as np
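# Editor's note (summary of what follows): for each of the five occurrences of
# the word, the script builds a 192-element feature vector (8 analysis windows
# of t_step samples, stepped by t_entre_step, times 24 normalised spectral
# values per window) and saves the five vectors to 'je_compare_192_matrix.npz'.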
# open a wave file
filename = 'francois_filon_pure_3.wav'
filename_1 ='francois_filon_pure_3.xml'
word ='je'
wave_signal_float,framerate, word_start_point, word_length_point, word_end_point= read_wav_xml_good_1(filename,filename_1,word)
#print 'word_start_point=',word_start_point
#print 'word_length_point=',word_length_point
#print 'word_end_point=',word_end_point
XJ_1 =wave_signal_float
t_step=1920;
t_entre_step=1440;
t_du_1_1 = int(word_start_point[0]);
t_du_1_2 = int(word_end_point[0]);
t_du_2_1 = int(word_start_point[1]);
t_du_2_2 = int(word_end_point[1]);
t_du_3_1 = int(word_start_point[2]);
t_du_3_2 = int(word_end_point[2]);
t_du_4_1 = int(word_start_point[3]);
t_du_4_2 = int(word_end_point[3]);
t_du_5_1 = int(word_start_point[4]);
t_du_5_2 = int(word_end_point[4]);
fs=framerate
#XJ_du_1 = wave_signal_float[(t_du_1_1-1):t_du_1_2];
#length_XJ_du_1 = int(word_length_point[0]+1);
#x1,y1,z1=matrix_24_2(XJ_du_1,fs)
#x1=max_matrix_norm(x1)
#==============================================================================
# this part is to calcul the first matrix
#==============================================================================
XJ_du_1_2 = XJ_1[(t_du_1_1-1):(t_du_1_1+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_1 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_1[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_du_1_1+t_entre_step*(i)-1):(t_du_1_1+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_1[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the second matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_2_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_2 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_2[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_2[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 3 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_3_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_3 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_3[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_3[24*i+j]=x1_all[j]
#==============================================================================
# this part is to calcul the 4 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_4_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_4 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_4[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_4[24*i+j]=x1_all[j]
#print 'matrix_all_step_4=',matrix_all_step_4
#==============================================================================
# this part is to calcul the 5 matrix
#==============================================================================
for k in range (1,2):
t_start=t_du_5_1
XJ_du_1_2 = XJ_1[(t_start-1):(t_start+t_step)];
x1_1,y1_1,z1_1=matrix_24_2(XJ_du_1_2 ,fs)
x1_1=max_matrix_norm(x1_1)
matrix_all_step_new_5 = np.zeros([192])
for i in range(0,24):
matrix_all_step_new_5[i]=x1_1[i]
#==============================================================================
# the other colonne is the all fft
#==============================================================================
for i in range(1,8):
# print i
XJ_du_1_total = XJ_1[(t_start+t_entre_step*(i)-1):(t_start+t_step+t_entre_step*(i) )];
x1_all,y1_all,z1_all=matrix_24_2(XJ_du_1_total,fs)
x1_all=max_matrix_norm(x1_all)
for j in range(0,24):
matrix_all_step_new_5[24*i+j]=x1_all[j]
#print 'matrix_all_step_5=',matrix_all_step_5
np.savez('je_compare_192_matrix.npz',matrix_all_step_new_1,matrix_all_step_new_2,matrix_all_step_new_3,matrix_all_step_new_4,matrix_all_step_new_5)
| mit |
wholland/env | env.py | 1 | 6119 | #!/usr/bin/python
import argparse
import json
import shutil
import os
def copy_file(src, dest, backup):
success = True
if not backup is None:
(backup_folder, backup_file) = os.path.split(backup)
print("Creating backup file for " + dest + " at " + backup)
try:
if not os.path.exists(backup_folder):
os.makedirs(backup_folder)
shutil.copyfile(dest, backup)
except Exception as e:
print("Backup failed: " + str(e))
success = False
if success:
(dest_folder, dest_file) = os.path.split(dest)
print("Copy file " + src + " to " + dest)
try:
if not os.path.exists(dest_folder):
os.makedirs(dest_folder)
shutil.copyfile(src, dest)
except IOError as e:
print("Copy failed: " + str(e))
def copy_dir(src, dest, backup):
success = True
if not backup is None:
try:
print("Creating backup file for " + dest + " at " + backup)
shutil.rmtree(backup, ignore_errors=True)
shutil.copytree(dest, backup)
except IOError as e:
print("Backup failed: " + str(e))
success = False
if success:
try:
print("Copy directory " + src + " to " + dest)
shutil.copytree(src, dest)
except IOError as e:
print("Copy failed: " + str(e))
def push(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
print("Pushing " + definition["name"]);
src = os.path.expanduser(os.path.join(args.source, definition["source"]))
dest = os.path.expanduser(os.path.join(args.target, definition["target"]))
backup = os.path.expanduser(os.path.join(args.backup, definition["target"]))
if definition["type"].lower() == "f":
# Copy a file
if args.unsafe:
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
else:
if not args.wimp:
copy_file(src, dest, backup)
else:
print("Would copy file. Src:" + src + " Dest:" + dest + " Backup:" + backup);
elif definition["type"].lower() == "d":
# Copy a directory
if args.verbose:
print(definition["name"] + ": Pushing directory from " + src + " to " + dest)
if args.unsafe:
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
else:
if not args.wimp:
copy_dir(src, dest, backup)
else:
print("Would copy dir. Src:" + src + " Dest:" + dest + " Backup:" + backup);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def pull(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
print("Pulling " + definition["name"]);
src = os.path.expanduser(os.path.join(args.target, definition["target"]))
dest = os.path.expanduser(os.path.join(args.source, definition["source"]))
if definition["type"].lower() == "f":
# Copy a file
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
elif definition["type"].lower() == "d":
# Copy a directory
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy directory. Src:" + src + " Dest:" + dest);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def revert(args):
defs = json.load(open(os.path.expanduser(args.file)))
for definition in defs:
if definition["group"] in args.categories:
src = os.path.expanduser(os.path.join(args.backup, definition["target"]))
dest = os.path.expanduser(os.path.join(args.target, definition["target"]))
if definition["type"].lower() == "f":
# Copy a file
if not args.wimp:
copy_file(src, dest, None)
else:
print("Would copy file. Src:" + src + " Dest:" + dest);
elif definition["type"].lower() == "d":
# Copy a directory
if not args.wimp:
copy_dir(src, dest, None)
else:
print("Would copy directory. Src:" + src + " Dest:" + dest);
else:
print(definition["name"] + ": Unknown type \""+definition["type"]+"\"")
def main():
default_defs = "~/env/env.def"
default_source = "~/env/"
default_target = "~/"
default_backup = "~/.backup/env/"
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action="store_true", help="Increase Verbosity")
parser.add_argument("-f", "--file", default=default_defs, help="Definition File to use")
parser.add_argument("-s", "--source", default=default_source, help="Override source root")
parser.add_argument("-t", "--target", default=default_target, help="Override target root")
parser.add_argument("-w", "--wimp", action="store_true", help="Don't actually make any changes (implies -v)")
subparsers = parser.add_subparsers()
parser_push = subparsers.add_parser("push", help="Push configs into environment")
parser_push.add_argument("-u", "--unsafe", action="store_true", help="No backups Created")
parser_push.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_push.add_argument("-b", "--backup", default=default_backup, help="Override backup root")
parser_push.add_argument("categories", nargs=argparse.REMAINDER)
parser_push.set_defaults(func=push)
parser_pull = subparsers.add_parser("pull", help="Pull configs from environment")
parser_pull.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_pull.add_argument("categories", nargs=argparse.REMAINDER)
parser_pull.set_defaults(func=pull)
parser_revert = subparsers.add_parser("revert", help="Revert configs from backups")
parser_revert.add_argument("-c", "--cleanup", action="store_true", help="Cleanup Backups")
parser_revert.add_argument("-a", "--All", action="store_true", help="Cleanup Backups")
parser_revert.add_argument("-b", "--backup", default=default_backup, help="Override backup root")
parser_revert.add_argument("categories", nargs=argparse.REMAINDER)
parser_revert.set_defaults(func=revert)
args = parser.parse_args()
if args.wimp:
args.verbose = True
args.func(args)
if __name__ == "__main__":
main();
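# Example definitions file (illustrative; paths and names are assumptions).
# Each entry maps a file ("f") or directory ("d") under the source root to a
# location under the target root, tagged with a group used on the command line:
# [
#   {"name": "vimrc", "group": "shell", "type": "f", "source": "vim/vimrc", "target": ".vimrc"},
#   {"name": "vim plugins", "group": "shell", "type": "d", "source": "vim/plugins", "target": ".vim"}
# ]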
| mit |
jailuthra/misc | python/quicksort.py | 1 | 1066 | import sys
import random
comparisons = 0
def main():
global comparisons
with open(sys.argv[1], 'r') as f:
arr = [int(x) for x in f.read().split()]
quicksort(arr, 0, len(arr)-1)
# print(arr)
print(comparisons)
def getPivot(arr, l, r):
first = arr[l]
mid = arr[(l+r)//2]
last = arr[r]
if first <= mid <= last or last <= mid <= first:
return (l+r)//2
elif mid <= first <= last or last <= first <= mid:
return l
else:
return r
def partition(arr, l, r):
k = getPivot(arr, l, r)
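# NOTE: the random index chosen on the next line overrides the median-of-three
# pivot computed above, so getPivot's result is effectively discarded here.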
k = random.randint(l, r)
pivot = arr[k]
arr[k], arr[l] = arr[l], arr[k]
i = l+1
for j in range(l+1, r+1):
if arr[j] < pivot:
arr[j], arr[i] = arr[i], arr[j]
i += 1
arr[l], arr[i-1] = arr[i-1], arr[l]
return i-1
def quicksort(arr, l, r):
if r - l < 0:
return
global comparisons
comparisons += r - l
p = partition(arr, l, r)
quicksort(arr, l, p-1)
quicksort(arr, p+1, r)
if __name__ == '__main__':
main()
| mit |
kosz85/django | django/conf/locale/nn/formats.py | 65 | 1743 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
rubenvereecken/pokemongo-api | POGOProtos/Data/Battle/BattleParticipant_pb2.py | 16 | 4760 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Data/Battle/BattleParticipant.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data.Battle import BattlePokemonInfo_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2
from POGOProtos.Data.Player import PlayerPublicProfile_pb2 as POGOProtos_dot_Data_dot_Player_dot_PlayerPublicProfile__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Data/Battle/BattleParticipant.proto',
package='POGOProtos.Data.Battle',
syntax='proto3',
serialized_pb=_b('\n.POGOProtos/Data/Battle/BattleParticipant.proto\x12\x16POGOProtos.Data.Battle\x1a.POGOProtos/Data/Battle/BattlePokemonInfo.proto\x1a\x30POGOProtos/Data/Player/PlayerPublicProfile.proto\"\xac\x02\n\x11\x42\x61ttleParticipant\x12\x41\n\x0e\x61\x63tive_pokemon\x18\x01 \x01(\x0b\x32).POGOProtos.Data.Battle.BattlePokemonInfo\x12K\n\x16trainer_public_profile\x18\x02 \x01(\x0b\x32+.POGOProtos.Data.Player.PlayerPublicProfile\x12\x42\n\x0freverse_pokemon\x18\x03 \x03(\x0b\x32).POGOProtos.Data.Battle.BattlePokemonInfo\x12\x43\n\x10\x64\x65\x66\x65\x61ted_pokemon\x18\x04 \x03(\x0b\x32).POGOProtos.Data.Battle.BattlePokemonInfob\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2.DESCRIPTOR,POGOProtos_dot_Data_dot_Player_dot_PlayerPublicProfile__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_BATTLEPARTICIPANT = _descriptor.Descriptor(
name='BattleParticipant',
full_name='POGOProtos.Data.Battle.BattleParticipant',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='active_pokemon', full_name='POGOProtos.Data.Battle.BattleParticipant.active_pokemon', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trainer_public_profile', full_name='POGOProtos.Data.Battle.BattleParticipant.trainer_public_profile', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='reverse_pokemon', full_name='POGOProtos.Data.Battle.BattleParticipant.reverse_pokemon', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='defeated_pokemon', full_name='POGOProtos.Data.Battle.BattleParticipant.defeated_pokemon', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=173,
serialized_end=473,
)
_BATTLEPARTICIPANT.fields_by_name['active_pokemon'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2._BATTLEPOKEMONINFO
_BATTLEPARTICIPANT.fields_by_name['trainer_public_profile'].message_type = POGOProtos_dot_Data_dot_Player_dot_PlayerPublicProfile__pb2._PLAYERPUBLICPROFILE
_BATTLEPARTICIPANT.fields_by_name['reverse_pokemon'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2._BATTLEPOKEMONINFO
_BATTLEPARTICIPANT.fields_by_name['defeated_pokemon'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2._BATTLEPOKEMONINFO
DESCRIPTOR.message_types_by_name['BattleParticipant'] = _BATTLEPARTICIPANT
BattleParticipant = _reflection.GeneratedProtocolMessageType('BattleParticipant', (_message.Message,), dict(
DESCRIPTOR = _BATTLEPARTICIPANT,
__module__ = 'POGOProtos.Data.Battle.BattleParticipant_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Data.Battle.BattleParticipant)
))
_sym_db.RegisterMessage(BattleParticipant)
# @@protoc_insertion_point(module_scope)
| mit |
sarahfo/oppia | core/domain/dependency_registry_test.py | 29 | 4131 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for JavaScript library dependencies."""
__author__ = 'Sean Lip'
from core.domain import dependency_registry
from core.domain import exp_services
from core.domain import interaction_registry
from core.tests import test_utils
import feconf
class DependencyRegistryTests(test_utils.GenericTestBase):
"""Tests for the dependency registry."""
def test_get_dependency_html(self):
self.assertIn(
'jsrepl',
dependency_registry.Registry.get_dependency_html('jsrepl'))
with self.assertRaises(IOError):
dependency_registry.Registry.get_dependency_html('a')
class DependencyControllerTests(test_utils.GenericTestBase):
"""Tests for dependency loading on user-facing pages."""
def test_no_dependencies_in_non_exploration_pages(self):
response = self.testapp.get(feconf.GALLERY_URL)
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
response = self.testapp.get('/about')
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
def test_dependencies_loaded_in_exploration_editor(self):
exp_services.load_demo('0')
# Register and login as an editor.
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.login(self.EDITOR_EMAIL)
# Verify that the exploration does not have a jsrepl dependency.
exploration = exp_services.get_exploration_by_id('0')
interaction_ids = exploration.get_interaction_ids()
all_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
self.assertNotIn('jsrepl', all_dependency_ids)
# However, jsrepl is loaded in the exploration editor anyway, since
# all dependencies are loaded in the exploration editor.
response = self.testapp.get('/create/0')
self.assertEqual(response.status_int, 200)
response.mustcontain('jsrepl')
self.logout()
def test_dependency_does_not_load_in_exploration_not_containing_it(self):
EXP_ID = '0'
exp_services.load_demo(EXP_ID)
# Verify that exploration 0 does not have a jsrepl dependency.
exploration = exp_services.get_exploration_by_id(EXP_ID)
interaction_ids = exploration.get_interaction_ids()
all_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
self.assertNotIn('jsrepl', all_dependency_ids)
# Thus, jsrepl is not loaded in the exploration reader.
response = self.testapp.get('/explore/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
response.mustcontain(no=['jsrepl'])
def test_dependency_loads_in_exploration_containing_it(self):
EXP_ID = '1'
exp_services.load_demo(EXP_ID)
# Verify that exploration 1 has a jsrepl dependency.
exploration = exp_services.get_exploration_by_id(EXP_ID)
interaction_ids = exploration.get_interaction_ids()
all_dependency_ids = (
interaction_registry.Registry.get_deduplicated_dependency_ids(
interaction_ids))
self.assertIn('jsrepl', all_dependency_ids)
# Thus, jsrepl is loaded in the exploration reader.
response = self.testapp.get('/explore/%s' % EXP_ID)
self.assertEqual(response.status_int, 200)
response.mustcontain('jsrepl')
| apache-2.0 |
jobscore/sync-engine | migrations/env.py | 3 | 2894 | from __future__ import with_statement
from alembic import context
from logging.config import fileConfig
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(context.config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
from inbox.models.base import MailSyncBase
target_metadata = MailSyncBase.metadata
from inbox.config import config
from inbox.ignition import EngineManager
# Alembic configuration is confusing. Here we look for a shard id both as a
# "main option" (where it's programmatically set by bin/create-db), and in the
# "x" argument, which is the primary facility for passing additional
# command-line args to alembic. So you would do e.g.
#
# alembic -x shard_id=1 upgrade +1
#
# to target shard 1 for the migration.
config_shard_id = context.config.get_main_option('shard_id')
x_shard_id = context.get_x_argument(as_dictionary=True).get(
'shard_id')
if config_shard_id is not None:
shard_id = int(config_shard_id)
elif x_shard_id is not None:
shard_id = int(x_shard_id)
else:
raise ValueError('No shard_id is configured for migration; '
'run `alembic -x shard_id=<target shard id> upgrade +1`')
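# Illustrative aside (not part of this repository's code): a wrapper script such
# as bin/create-db can set the "main option" form programmatically through the
# Alembic Config API before invoking a migration, while the `-x` form is what an
# operator types by hand, e.g.:
#
#     from alembic.config import Config
#     from alembic import command
#
#     alembic_cfg = Config('alembic.ini')
#     alembic_cfg.set_main_option('shard_id', str(target_shard))
#     command.upgrade(alembic_cfg, 'head')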
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
engine_manager = EngineManager(config.get_required('DATABASE_HOSTS'),
config.get_required('DATABASE_USERS'),
include_disabled=True)
engine = engine_manager.engines[shard_id]
context.configure(engine=engine, url=engine.url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine_manager = EngineManager(config.get_required('DATABASE_HOSTS'),
config.get_required('DATABASE_USERS'),
include_disabled=True)
engine = engine_manager.engines[shard_id]
connection = engine.connect()
# Set sane lock wait timeout value.
connection.execute('SET @@lock_wait_timeout=15')
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| agpl-3.0 |
nens/threedi-qgis-plugin | tests/test_geo_utils.py | 1 | 1446 | """
Test geo utils.
"""
from qgis.core import QgsCoordinateTransform
from ThreeDiToolbox.tests.utilities import ensure_qgis_app_is_initialized
from ThreeDiToolbox.utils.geo_utils import get_coord_transformation_instance
import pytest
@pytest.fixture
def rdnew_to_wgs84():
ensure_qgis_app_is_initialized()
src_epsg, dest_epsg = 28992, 4326
transformer = get_coord_transformation_instance(src_epsg, dest_epsg)
return transformer
@pytest.fixture
def wgs84_to_rdnew():
ensure_qgis_app_is_initialized()
src_epsg, dest_epsg = 4326, 28992
transformer = get_coord_transformation_instance(src_epsg, dest_epsg)
return transformer
def test_get_coord_transformation_instance(rdnew_to_wgs84, wgs84_to_rdnew):
assert isinstance(rdnew_to_wgs84, QgsCoordinateTransform)
assert isinstance(wgs84_to_rdnew, QgsCoordinateTransform)
def test_get_coord_transformation_epsg(rdnew_to_wgs84):
assert rdnew_to_wgs84.sourceCrs().isValid()
assert rdnew_to_wgs84.sourceCrs().authid() == "EPSG:28992"
assert rdnew_to_wgs84.destinationCrs().isValid()
assert rdnew_to_wgs84.destinationCrs().authid() == "EPSG:4326"
def test_get_coord_transformation_epsg_reverse(wgs84_to_rdnew):
assert wgs84_to_rdnew.sourceCrs().isValid()
assert wgs84_to_rdnew.sourceCrs().authid() == "EPSG:4326"
assert wgs84_to_rdnew.destinationCrs().isValid()
assert wgs84_to_rdnew.destinationCrs().authid() == "EPSG:28992"
| gpl-3.0 |
xfournet/intellij-community | python/lib/Lib/unicodedata.py | 69 | 6437 | from bisect import bisect_left
import operator
import java.lang.Character
# XXX - this is intended as a stopgap measure until 2.5.1, which will have a Java implementation
# requires java 6 for `normalize` function
# only has one version of the database
# does not normalized ideographs
_codepoints = {}
_eaw = {}
_names = {}
_segments = []
_eaw_segments = []
Nonesuch = object()
def get_int(col):
try:
return int(col)
except ValueError:
return None
def get_yn(col):
if col == 'Y': return 1
else: return 0
def get_numeric(col):
try:
return float(col)
except ValueError:
try:
a, b = col.split('/')
return float(a)/float(b)
except:
return None
def init_unicodedata(data):
for row in data:
cols = row.split(';')
codepoint = int(cols[0], 16)
name = cols[1]
if name == '<CJK Ideograph, Last>':
lookup_name = 'CJK UNIFIED IDEOGRAPH'
else:
lookup_name = name
data = (
cols[2],
get_int(cols[3]),
cols[4],
cols[5],
get_int(cols[6]),
get_int(cols[7]),
get_numeric(cols[8]),
get_yn(cols[9]),
lookup_name,
)
if name.find('First') >= 0:
start = codepoint
elif name.find('Last') >= 0:
_segments.append((start, (start, codepoint), data))
else:
_names[name] = unichr(codepoint)
_codepoints[codepoint] = data
def init_east_asian_width(data):
for row in data:
if row.startswith('#'):
continue
row = row.partition('#')[0]
cols = row.split(';')
if len(cols) < 2:
continue
cr = cols[0].split('..')
width = cols[1].rstrip()
if len(cr) == 1:
codepoint = int(cr[0], 16)
_eaw[codepoint] = width
else:
start = int(cr[0], 16)
end = int(cr[1], 16)
_eaw_segments.append((start, (start, end), width))
# xxx - need to normalize the segments, so
# <CJK Ideograph, Last> ==> CJK UNIFIED IDEOGRAPH;
# may need to do some sort of analysis against CPython for the normalization!
def name(unichr, default=None):
codepoint = get_codepoint(unichr, "name")
v = _codepoints.get(codepoint, None)
if v is None:
v = check_segments(codepoint, _segments)
if v is not None:
return "%s-%X" % (v[8], codepoint)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
return v[8]
# xxx - also need to add logic here so that if it's CJK UNIFIED
# IDEOGRAPH-8000, we go against the segment to verify the prefix
def lookup(name):
return _names[name]
def check_segments(codepoint, segments):
i = bisect_left(segments, (codepoint,))
if i < len(segments):
segment = segments[i - 1]
if codepoint <= segment[1][1]:
return segment[2]
return None
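# Worked example for check_segments() (numbers are illustrative): with
#     segments = [(0x3400, (0x3400, 0x4DB5), A),
#                 (0x4E00, (0x4E00, 0x9FBB), B),
#                 (0xF900, (0xF900, 0xFA2D), C)]
# looking up 0x4E01 bisects to i == 2, inspects segments[1], passes the
# 0x4E01 <= 0x9FBB end-of-range test and returns B, while 0x4DC0 (between the
# first two ranges) fails that test against 0x4DB5 and falls through to None.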
def get_codepoint(unichr, fn=None):
if not(isinstance(unichr, unicode)):
raise TypeError(fn, "() argument 1 must be unicode, not " + type(unichr))
if len(unichr) > 1 or len(unichr) == 0:
raise TypeError("need a single Unicode character as parameter")
return ord(unichr)
def get_eaw(unichr, default, fn):
codepoint = get_codepoint(unichr, fn)
v = _eaw.get(codepoint, None)
if v is None:
v = check_segments(codepoint, _eaw_segments)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
return v
def get(unichr, default, fn, getter):
codepoint = get_codepoint(unichr, fn)
data = _codepoints.get(codepoint, None)
if data is None:
data = check_segments(codepoint, _segments)
if data is None:
if default is not Nonesuch:
return default
raise ValueError()
v = getter(data)
if v is None:
if default is not Nonesuch:
return default
raise ValueError()
else:
return v
category_getter = operator.itemgetter(0)
combining_getter = operator.itemgetter(1)
bidirectional_getter = operator.itemgetter(2)
decomposition_getter = operator.itemgetter(3)
decimal_getter = operator.itemgetter(4)
digit_getter = operator.itemgetter(5)
numeric_getter = operator.itemgetter(6)
mirrored_getter = operator.itemgetter(7)
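# The itemgetters above index the per-codepoint tuples built by
# init_unicodedata(); positions 0..8 hold, in order: general category,
# canonical combining class, bidirectional class, decomposition mapping,
# decimal digit value, digit value, numeric value, mirrored flag, and the
# lookup name used for ranged entries such as CJK UNIFIED IDEOGRAPH.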
def decimal(unichr, default=Nonesuch):
return get(unichr, default, 'decimal', decimal_getter)
def decomposition(unichr, default=''):
return get(unichr, default, 'decomposition', decomposition_getter)
def digit(unichr, default=Nonesuch):
return get(unichr, default, 'digit', digit_getter)
def numeric(unichr, default=Nonesuch):
return get(unichr, default, 'numeric', numeric_getter)
def category(unichr):
    return get(unichr, 'Cn', 'category', category_getter)
def bidirectional(unichr):
return get(unichr, '', 'bidirectional', bidirectional_getter)
def combining(unichr):
return get(unichr, 0, 'combining', combining_getter)
def mirrored(unichr):
return get(unichr, 0, 'mirrored', mirrored_getter)
def east_asian_width(unichr):
return get_eaw(unichr, 'N', 'east_asian_width')
def jymirrored(unichr):
return java.lang.Character.isMirrored(get_codepoint(unichr, 'mirrored'))
try:
from java.text import Normalizer
_forms = {
'NFC': Normalizer.Form.NFC,
'NFKC': Normalizer.Form.NFKC,
'NFD': Normalizer.Form.NFD,
'NFKD': Normalizer.Form.NFKD
}
def normalize(form, unistr):
"""
Return the normal form 'form' for the Unicode string unistr. Valid
values for form are 'NFC', 'NFKC', 'NFD', and 'NFKD'.
"""
try:
normalizer_form = _forms[form]
except KeyError:
raise ValueError('invalid normalization form')
return Normalizer.normalize(unistr, normalizer_form)
except ImportError:
pass
def init():
import pkgutil
import os.path
import StringIO
import sys
my_path = os.path.dirname(__file__)
loader = pkgutil.get_loader('unicodedata')
init_unicodedata(StringIO.StringIO(loader.get_data(os.path.join(my_path, 'UnicodeData.txt'))))
init_east_asian_width(StringIO.StringIO(loader.get_data(os.path.join(my_path, 'EastAsianWidth.txt'))))
init()
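# Minimal usage sketch (return values shown come from the standard Unicode data
# files that init() just loaded; nothing else is assumed):
#
#     name(u'/')                      # -> 'SOLIDUS'
#     category(u'A')                  # -> 'Lu'
#     east_asian_width(u'A')          # -> 'Na'
#     lookup('LATIN SMALL LETTER A')  # -> u'a'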
| apache-2.0 |
keithroe/vtkoptix | ThirdParty/Twisted/twisted/test/test_ident.py | 41 | 6029 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for twisted.protocols.ident module.
"""
import struct
from twisted.protocols import ident
from twisted.python import failure
from twisted.internet import error
from twisted.internet import defer
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
class ClassParserTestCase(unittest.TestCase):
"""
Test parsing of ident responses.
"""
def setUp(self):
"""
Create a ident client used in tests.
"""
self.client = ident.IdentClient()
    def test_identError(self):
"""
'UNKNOWN-ERROR' error should map to the L{ident.IdentError} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 123, 456))
self.client.lineReceived('123, 456 : ERROR : UNKNOWN-ERROR')
return self.assertFailure(d, ident.IdentError)
    def test_noUserError(self):
"""
'NO-USER' error should map to the L{ident.NoUser} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 234, 456))
self.client.lineReceived('234, 456 : ERROR : NO-USER')
return self.assertFailure(d, ident.NoUser)
def test_invalidPortError(self):
"""
'INVALID-PORT' error should map to the L{ident.InvalidPort} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 345, 567))
self.client.lineReceived('345, 567 : ERROR : INVALID-PORT')
return self.assertFailure(d, ident.InvalidPort)
def test_hiddenUserError(self):
"""
'HIDDEN-USER' error should map to the L{ident.HiddenUser} exception.
"""
d = defer.Deferred()
self.client.queries.append((d, 567, 789))
self.client.lineReceived('567, 789 : ERROR : HIDDEN-USER')
return self.assertFailure(d, ident.HiddenUser)
def test_lostConnection(self):
"""
A pending query which failed because of a ConnectionLost should
receive an L{ident.IdentError}.
"""
d = defer.Deferred()
self.client.queries.append((d, 765, 432))
self.client.connectionLost(failure.Failure(error.ConnectionLost()))
return self.assertFailure(d, ident.IdentError)
class TestIdentServer(ident.IdentServer):
def lookup(self, serverAddress, clientAddress):
return self.resultValue
class TestErrorIdentServer(ident.IdentServer):
def lookup(self, serverAddress, clientAddress):
raise self.exceptionType()
class NewException(RuntimeError):
pass
class ServerParserTestCase(unittest.TestCase):
def testErrors(self):
p = TestErrorIdentServer()
p.makeConnection(StringTransport())
L = []
p.sendLine = L.append
p.exceptionType = ident.IdentError
p.lineReceived('123, 345')
self.assertEqual(L[0], '123, 345 : ERROR : UNKNOWN-ERROR')
p.exceptionType = ident.NoUser
p.lineReceived('432, 210')
self.assertEqual(L[1], '432, 210 : ERROR : NO-USER')
p.exceptionType = ident.InvalidPort
p.lineReceived('987, 654')
self.assertEqual(L[2], '987, 654 : ERROR : INVALID-PORT')
p.exceptionType = ident.HiddenUser
p.lineReceived('756, 827')
self.assertEqual(L[3], '756, 827 : ERROR : HIDDEN-USER')
p.exceptionType = NewException
p.lineReceived('987, 789')
self.assertEqual(L[4], '987, 789 : ERROR : UNKNOWN-ERROR')
errs = self.flushLoggedErrors(NewException)
self.assertEqual(len(errs), 1)
for port in -1, 0, 65536, 65537:
del L[:]
p.lineReceived('%d, 5' % (port,))
p.lineReceived('5, %d' % (port,))
self.assertEqual(
L, ['%d, 5 : ERROR : INVALID-PORT' % (port,),
'5, %d : ERROR : INVALID-PORT' % (port,)])
def testSuccess(self):
p = TestIdentServer()
p.makeConnection(StringTransport())
L = []
p.sendLine = L.append
p.resultValue = ('SYS', 'USER')
p.lineReceived('123, 456')
self.assertEqual(L[0], '123, 456 : USERID : SYS : USER')
if struct.pack('=L', 1)[0] == '\x01':
_addr1 = '0100007F'
_addr2 = '04030201'
else:
_addr1 = '7F000001'
_addr2 = '01020304'
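# Background for the branch above: /proc/net/tcp prints each IPv4 address as
# eight hex digits of the raw 32-bit value in host byte order, so 127.0.0.1
# shows up as '0100007F' on little-endian machines and '7F000001' on
# big-endian ones; struct.pack('=L', 1) is simply a native-byte-order probe
# used to pick the matching fixture strings for the fake /proc line below.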
class ProcMixinTestCase(unittest.TestCase):
line = ('4: %s:0019 %s:02FA 0A 00000000:00000000 '
'00:00000000 00000000 0 0 10927 1 f72a5b80 '
'3000 0 0 2 -1') % (_addr1, _addr2)
def testDottedQuadFromHexString(self):
p = ident.ProcServerMixin()
self.assertEqual(p.dottedQuadFromHexString(_addr1), '127.0.0.1')
def testUnpackAddress(self):
p = ident.ProcServerMixin()
self.assertEqual(p.unpackAddress(_addr1 + ':0277'),
('127.0.0.1', 631))
def testLineParser(self):
p = ident.ProcServerMixin()
self.assertEqual(
p.parseLine(self.line),
(('127.0.0.1', 25), ('1.2.3.4', 762), 0))
def testExistingAddress(self):
username = []
p = ident.ProcServerMixin()
p.entries = lambda: iter([self.line])
p.getUsername = lambda uid: (username.append(uid), 'root')[1]
self.assertEqual(
p.lookup(('127.0.0.1', 25), ('1.2.3.4', 762)),
(p.SYSTEM_NAME, 'root'))
self.assertEqual(username, [0])
def testNonExistingAddress(self):
p = ident.ProcServerMixin()
p.entries = lambda: iter([self.line])
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 26),
('1.2.3.4', 762))
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 25),
('1.2.3.5', 762))
self.assertRaises(ident.NoUser, p.lookup, ('127.0.0.1', 25),
('1.2.3.4', 763))
| bsd-3-clause |
agconti/Shopify-Django | venv/lib/python2.7/site-packages/django/contrib/gis/db/models/sql/compiler.py | 93 | 13247 | try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from django.utils.six.moves import zip
from django.db.backends.util import truncate_name, typecast_timestamp
from django.db.models.sql import compiler
from django.db.models.sql.constants import MULTI
from django.utils import six
SQLCompiler = compiler.SQLCompiler
class GeoSQLCompiler(compiler.SQLCompiler):
def get_columns(self, with_aliases=False):
"""
Return the list of columns to use in the select statement. If no
columns have been specified, returns all columns relating to fields in
the model.
If 'with_aliases' is true, any column names that are duplicated
(without the table names) are given unique aliases. This is needed in
        some cases to avoid ambiguity with nested queries.
This routine is overridden from Query to handle customized selection of
geometry columns.
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
result = ['(%s) AS %s' % (self.get_extra_select_format(alias) % col[0], qn2(alias))
for alias, col in six.iteritems(self.query.extra_select)]
aliases = set(self.query.extra_select.keys())
if with_aliases:
col_aliases = aliases.copy()
else:
col_aliases = set()
if self.query.select:
only_load = self.deferred_to_columns()
# This loop customized for GeoQuery.
for col, field in zip(self.query.select, self.query.select_fields):
if isinstance(col, (list, tuple)):
alias, column = col
table = self.query.alias_map[alias].table_name
if table in only_load and column not in only_load[table]:
continue
r = self.get_field_select(field, alias, column)
if with_aliases:
if col[1] in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append('%s AS %s' % (r, qn2(col[1])))
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(r)
aliases.add(r)
col_aliases.add(col[1])
else:
result.append(col.as_sql(qn, self.connection))
if hasattr(col, 'alias'):
aliases.add(col.alias)
col_aliases.add(col.alias)
elif self.query.default_cols:
cols, new_aliases = self.get_default_columns(with_aliases,
col_aliases)
result.extend(cols)
aliases.update(new_aliases)
max_name_length = self.connection.ops.max_name_length()
result.extend([
'%s%s' % (
self.get_extra_select_format(alias) % aggregate.as_sql(qn, self.connection),
alias is not None
and ' AS %s' % qn(truncate_name(alias, max_name_length))
or ''
)
for alias, aggregate in self.query.aggregate_select.items()
])
# This loop customized for GeoQuery.
for (table, col), field in zip(self.query.related_select_cols, self.query.related_select_fields):
r = self.get_field_select(field, table, col)
if with_aliases and col in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (r, c_alias))
aliases.add(c_alias)
col_aliases.add(c_alias)
else:
result.append(r)
aliases.add(r)
col_aliases.add(col)
self._select_aliases = aliases
return result
def get_default_columns(self, with_aliases=False, col_aliases=None,
start_alias=None, opts=None, as_pairs=False, local_only=False):
"""
Computes the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
This routine is overridden from Query to handle customized selection of
geometry columns.
"""
result = []
if opts is None:
opts = self.query.model._meta
aliases = set()
only_load = self.deferred_to_columns()
if start_alias:
seen = {None: start_alias}
for field, model in opts.get_fields_with_model():
# For local fields (even if through proxy) the model should
# be None.
if model == opts.concrete_model:
model = None
if local_only and model is not None:
continue
if start_alias:
try:
alias = seen[model]
except KeyError:
link_field = opts.get_ancestor_link(model)
alias = self.query.join((start_alias, model._meta.db_table,
link_field.column, model._meta.pk.column))
seen[model] = alias
else:
# If we're starting from the base model of the queryset, the
# aliases will have already been set up in pre_sql_setup(), so
# we can save time here.
alias = self.query.included_inherited_models[model]
table = self.query.alias_map[alias].table_name
if table in only_load and field.column not in only_load[table]:
continue
if as_pairs:
result.append((alias, field.column))
aliases.add(alias)
continue
# This part of the function is customized for GeoQuery. We
# see if there was any custom selection specified in the
# dictionary, and set up the selection format appropriately.
field_sel = self.get_field_select(field, alias)
if with_aliases and field.column in col_aliases:
c_alias = 'Col%d' % len(col_aliases)
result.append('%s AS %s' % (field_sel, c_alias))
col_aliases.add(c_alias)
aliases.add(c_alias)
else:
r = field_sel
result.append(r)
aliases.add(r)
if with_aliases:
col_aliases.add(field.column)
return result, aliases
def resolve_columns(self, row, fields=()):
"""
This routine is necessary so that distances and geometries returned
from extra selection SQL get resolved appropriately into Python
objects.
"""
values = []
aliases = list(self.query.extra_select)
# Have to set a starting row number offset that is used for
# determining the correct starting row index -- needed for
# doing pagination with Oracle.
rn_offset = 0
if self.connection.ops.oracle:
if self.query.high_mark is not None or self.query.low_mark: rn_offset = 1
index_start = rn_offset + len(aliases)
# Converting any extra selection values (e.g., geometries and
# distance objects added by GeoQuerySet methods).
values = [self.query.convert_values(v,
self.query.extra_select_fields.get(a, None),
self.connection)
for v, a in zip(row[rn_offset:index_start], aliases)]
if self.connection.ops.oracle or getattr(self.query, 'geo_values', False):
# We resolve the rest of the columns if we're on Oracle or if
# the `geo_values` attribute is defined.
for value, field in zip_longest(row[index_start:], fields):
values.append(self.query.convert_values(value, field, self.connection))
else:
values.extend(row[index_start:])
return tuple(values)
#### Routines unique to GeoQuery ####
def get_extra_select_format(self, alias):
sel_fmt = '%s'
if hasattr(self.query, 'custom_select') and alias in self.query.custom_select:
sel_fmt = sel_fmt % self.query.custom_select[alias]
return sel_fmt
def get_field_select(self, field, alias=None, column=None):
"""
Returns the SELECT SQL string for the given field. Figures out
if any custom selection SQL is needed for the column The `alias`
keyword may be used to manually specify the database table where
the column exists, if not in the model associated with this
`GeoQuery`. Similarly, `column` may be used to specify the exact
column name, rather than using the `column` attribute on `field`.
"""
sel_fmt = self.get_select_format(field)
if field in self.query.custom_select:
field_sel = sel_fmt % self.query.custom_select[field]
else:
field_sel = sel_fmt % self._field_column(field, alias, column)
return field_sel
def get_select_format(self, fld):
"""
Returns the selection format string, depending on the requirements
of the spatial backend. For example, Oracle and MySQL require custom
selection formats in order to retrieve geometries in OGC WKT. For all
other fields a simple '%s' format string is returned.
"""
if self.connection.ops.select and hasattr(fld, 'geom_type'):
# This allows operations to be done on fields in the SELECT,
# overriding their values -- used by the Oracle and MySQL
# spatial backends to get database values as WKT, and by the
# `transform` method.
sel_fmt = self.connection.ops.select
# Because WKT doesn't contain spatial reference information,
# the SRID is prefixed to the returned WKT to ensure that the
# transformed geometries have an SRID different than that of the
# field -- this is only used by `transform` for Oracle and
# SpatiaLite backends.
if self.query.transformed_srid and ( self.connection.ops.oracle or
self.connection.ops.spatialite ):
sel_fmt = "'SRID=%d;'||%s" % (self.query.transformed_srid, sel_fmt)
else:
sel_fmt = '%s'
return sel_fmt
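    # Sketch of the mechanics (the AsText() name is illustrative, not asserted
    # for any particular backend): if ops.select were 'AsText(%s)' and a
    # transform to SRID 900913 were requested on SpatiaLite, this method would
    # hand back "'SRID=900913;'||AsText(%s)", which get_field_select() then
    # fills with the quoted "table"."column" reference from _field_column().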
# Private API utilities, subject to change.
def _field_column(self, field, table_alias=None, column=None):
"""
Helper function that returns the database column for the given field.
The table and column are returned (quoted) in the proper format, e.g.,
`"geoapp_city"."point"`. If `table_alias` is not specified, the
database table associated with the model of this `GeoQuery` will be
used. If `column` is specified, it will be used instead of the value
in `field.column`.
"""
if table_alias is None: table_alias = self.query.model._meta.db_table
return "%s.%s" % (self.quote_name_unless_alias(table_alias),
self.connection.ops.quote_name(column or field.column))
class SQLInsertCompiler(compiler.SQLInsertCompiler, GeoSQLCompiler):
pass
class SQLDeleteCompiler(compiler.SQLDeleteCompiler, GeoSQLCompiler):
pass
class SQLUpdateCompiler(compiler.SQLUpdateCompiler, GeoSQLCompiler):
pass
class SQLAggregateCompiler(compiler.SQLAggregateCompiler, GeoSQLCompiler):
pass
class SQLDateCompiler(compiler.SQLDateCompiler, GeoSQLCompiler):
"""
This is overridden for GeoDjango to properly cast date columns, since
`GeoQuery.resolve_columns` is used for spatial values.
See #14648, #16757.
"""
def results_iter(self):
if self.connection.ops.oracle:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.query.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if self.connection.ops.oracle:
date = self.resolve_columns(row, fields)[offset]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
| mit |
ar7z1/ansible | lib/ansible/modules/messaging/rabbitmq_policy.py | 16 | 4535 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, John Dewey <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rabbitmq_policy
short_description: Manage the state of policies in RabbitMQ.
description:
- Manage the state of a policy in RabbitMQ.
version_added: "1.5"
author: "John Dewey (@retr0h)"
options:
name:
description:
- The name of the policy to manage.
required: true
vhost:
description:
- The name of the vhost to apply to.
default: /
apply_to:
description:
- What the policy applies to. Requires RabbitMQ 3.2.0 or later.
default: all
choices: [all, exchanges, queues]
version_added: "2.1"
pattern:
description:
- A regex of queues to apply the policy to.
required: true
tags:
description:
- A dict or string describing the policy.
required: true
priority:
description:
- The priority of the policy.
default: 0
node:
description:
- Erlang node name of the rabbit we wish to configure.
default: rabbit
state:
description:
- The state of the policy.
default: present
choices: [present, absent]
'''
EXAMPLES = '''
- name: ensure the default vhost contains the HA policy via a dict
rabbitmq_policy:
name: HA
pattern: .*
args:
tags:
ha-mode: all
- name: ensure the default vhost contains the HA policy
rabbitmq_policy:
name: HA
pattern: .*
tags:
ha-mode: all
'''
import json
from ansible.module_utils.basic import AnsibleModule
class RabbitMqPolicy(object):
def __init__(self, module, name):
self._module = module
self._name = name
self._vhost = module.params['vhost']
self._pattern = module.params['pattern']
self._apply_to = module.params['apply_to']
self._tags = module.params['tags']
self._priority = module.params['priority']
self._node = module.params['node']
self._rabbitmqctl = module.get_bin_path('rabbitmqctl', True)
def _exec(self, args, run_in_check_mode=False):
if not self._module.check_mode or (self._module.check_mode and run_in_check_mode):
cmd = [self._rabbitmqctl, '-q', '-n', self._node]
args.insert(1, '-p')
args.insert(2, self._vhost)
rc, out, err = self._module.run_command(cmd + args, check_rc=True)
return out.splitlines()
return list()
def list(self):
policies = self._exec(['list_policies'], True)
for policy in policies:
if not policy:
continue
policy_name = policy.split('\t')[1]
if policy_name == self._name:
return True
return False
def set(self):
args = ['set_policy']
args.append(self._name)
args.append(self._pattern)
args.append(json.dumps(self._tags))
args.append('--priority')
args.append(self._priority)
if self._apply_to != 'all':
args.append('--apply-to')
args.append(self._apply_to)
return self._exec(args)
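    # For orientation only: with the values from the module EXAMPLES section and
    # the defaults above, the command assembled by set() plus _exec() amounts to
    # roughly
    #     rabbitmqctl -q -n rabbit set_policy -p / HA '.*' '{"ha-mode": "all"}' --priority 0
    # because _exec() splices '-p <vhost>' in right after the subcommand name.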
def clear(self):
return self._exec(['clear_policy', self._name])
def main():
arg_spec = dict(
name=dict(required=True),
vhost=dict(default='/'),
pattern=dict(required=True),
apply_to=dict(default='all', choices=['all', 'exchanges', 'queues']),
tags=dict(type='dict', required=True),
priority=dict(default='0'),
node=dict(default='rabbit'),
state=dict(default='present', choices=['present', 'absent']),
)
module = AnsibleModule(
argument_spec=arg_spec,
supports_check_mode=True
)
name = module.params['name']
state = module.params['state']
rabbitmq_policy = RabbitMqPolicy(module, name)
result = dict(changed=False, name=name, state=state)
if rabbitmq_policy.list():
if state == 'absent':
rabbitmq_policy.clear()
result['changed'] = True
else:
result['changed'] = False
elif state == 'present':
rabbitmq_policy.set()
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
knoguchi/kenix-scm | server/lib/boto/swf/layer1_decisions.py | 18 | 11934 | """
Helper class for creating decision responses.
"""
class Layer1Decisions(object):
"""
Use this object to build a list of decisions for a decision response.
Each method call will add append a new decision. Retrieve the list
of decisions from the _data attribute.
"""
def __init__(self):
self._data = []
def schedule_activity_task(self,
activity_id,
activity_type_name,
activity_type_version,
task_list=None,
control=None,
heartbeat_timeout=None,
schedule_to_close_timeout=None,
schedule_to_start_timeout=None,
start_to_close_timeout=None,
input=None):
"""
Schedules an activity task.
:type activity_id: string
:param activity_id: The activityId of the type of the activity
being scheduled.
:type activity_type_name: string
:param activity_type_name: The name of the type of the activity
being scheduled.
:type activity_type_version: string
:param activity_type_version: The version of the type of the
activity being scheduled.
:type task_list: string
:param task_list: If set, specifies the name of the task list in
which to schedule the activity task. If not specified, the
defaultTaskList registered with the activity type will be used.
Note: a task list for this activity task must be specified either
as a default for the activity type or through this field. If
neither this field is set nor a default task list was specified
at registration time then a fault will be returned.
"""
o = {}
o['decisionType'] = 'ScheduleActivityTask'
attrs = o['scheduleActivityTaskDecisionAttributes'] = {}
attrs['activityId'] = activity_id
attrs['activityType'] = {
'name': activity_type_name,
'version': activity_type_version,
}
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if control is not None:
attrs['control'] = control
if heartbeat_timeout is not None:
attrs['heartbeatTimeout'] = heartbeat_timeout
if schedule_to_close_timeout is not None:
attrs['scheduleToCloseTimeout'] = schedule_to_close_timeout
if schedule_to_start_timeout is not None:
attrs['scheduleToStartTimeout'] = schedule_to_start_timeout
if start_to_close_timeout is not None:
attrs['startToCloseTimeout'] = start_to_close_timeout
if input is not None:
attrs['input'] = input
self._data.append(o)
def request_cancel_activity_task(self, activity_id):
"""
Attempts to cancel a previously scheduled activity task. If
the activity task was scheduled but has not been assigned to a
worker, then it will be canceled. If the activity task was
already assigned to a worker, then the worker will be informed
that cancellation has been requested in the response to
RecordActivityTaskHeartbeat.
"""
o = {}
o['decisionType'] = 'RequestCancelActivityTask'
attrs = o['requestCancelActivityTaskDecisionAttributes'] = {}
attrs['activityId'] = activity_id
self._data.append(o)
def record_marker(self, marker_name, details=None):
"""
Records a MarkerRecorded event in the history. Markers can be
used for adding custom information in the history for instance
to let deciders know that they do not need to look at the
history beyond the marker event.
"""
o = {}
o['decisionType'] = 'RecordMarker'
attrs = o['recordMarkerDecisionAttributes'] = {}
attrs['markerName'] = marker_name
if details is not None:
attrs['details'] = details
self._data.append(o)
def complete_workflow_execution(self, result=None):
"""
Closes the workflow execution and records a WorkflowExecutionCompleted
event in the history
"""
o = {}
o['decisionType'] = 'CompleteWorkflowExecution'
attrs = o['completeWorkflowExecutionDecisionAttributes'] = {}
if result is not None:
attrs['result'] = result
self._data.append(o)
def fail_workflow_execution(self, reason=None, details=None):
"""
Closes the workflow execution and records a
WorkflowExecutionFailed event in the history.
"""
o = {}
o['decisionType'] = 'FailWorkflowExecution'
attrs = o['failWorkflowExecutionDecisionAttributes'] = {}
if reason is not None:
attrs['reason'] = reason
if details is not None:
attrs['details'] = details
self._data.append(o)
def cancel_workflow_executions(self, details=None):
"""
Closes the workflow execution and records a WorkflowExecutionCanceled
event in the history.
"""
o = {}
o['decisionType'] = 'CancelWorkflowExecution'
        attrs = o['cancelWorkflowExecutionDecisionAttributes'] = {}
if details is not None:
attrs['details'] = details
self._data.append(o)
def continue_as_new_workflow_execution(self,
child_policy=None,
execution_start_to_close_timeout=None,
input=None,
tag_list=None,
task_list=None,
start_to_close_timeout=None,
workflow_type_version=None):
"""
Closes the workflow execution and starts a new workflow execution of
the same type using the same workflow id and a unique run Id. A
WorkflowExecutionContinuedAsNew event is recorded in the history.
"""
o = {}
o['decisionType'] = 'ContinueAsNewWorkflowExecution'
attrs = o['continueAsNewWorkflowExecutionDecisionAttributes'] = {}
if child_policy is not None:
attrs['childPolicy'] = child_policy
if execution_start_to_close_timeout is not None:
attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
if input is not None:
attrs['input'] = input
if tag_list is not None:
attrs['tagList'] = tag_list
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if start_to_close_timeout is not None:
attrs['startToCloseTimeout'] = start_to_close_timeout
if workflow_type_version is not None:
attrs['workflowTypeVersion'] = workflow_type_version
self._data.append(o)
def start_timer(self,
start_to_fire_timeout,
timer_id,
control=None):
"""
Starts a timer for this workflow execution and records a TimerStarted
event in the history. This timer will fire after the specified delay
and record a TimerFired event.
"""
o = {}
o['decisionType'] = 'StartTimer'
attrs = o['startTimerDecisionAttributes'] = {}
attrs['startToFireTimeout'] = start_to_fire_timeout
attrs['timerId'] = timer_id
if control is not None:
attrs['control'] = control
self._data.append(o)
def cancel_timer(self, timer_id):
"""
Cancels a previously started timer and records a TimerCanceled
event in the history.
"""
o = {}
o['decisionType'] = 'CancelTimer'
attrs = o['cancelTimerDecisionAttributes'] = {}
attrs['timerId'] = timer_id
self._data.append(o)
def signal_external_workflow_execution(self,
workflow_id,
signal_name,
run_id=None,
control=None,
input=None):
"""
Requests a signal to be delivered to the specified external workflow
execution and records a SignalExternalWorkflowExecutionInitiated
event in the history.
"""
o = {}
o['decisionType'] = 'SignalExternalWorkflowExecution'
attrs = o['signalExternalWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowId'] = workflow_id
attrs['signalName'] = signal_name
if run_id is not None:
attrs['runId'] = run_id
if control is not None:
attrs['control'] = control
if input is not None:
attrs['input'] = input
self._data.append(o)
def request_cancel_external_workflow_execution(self,
workflow_id,
control=None,
run_id=None):
"""
Requests that a request be made to cancel the specified
external workflow execution and records a
RequestCancelExternalWorkflowExecutionInitiated event in the
history.
"""
o = {}
o['decisionType'] = 'RequestCancelExternalWorkflowExecution'
attrs = o['requestCancelExternalWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowId'] = workflow_id
if control is not None:
attrs['control'] = control
if run_id is not None:
attrs['runId'] = run_id
self._data.append(o)
def start_child_workflow_execution(self,
workflow_type_name,
workflow_type_version,
workflow_id,
child_policy=None,
control=None,
execution_start_to_close_timeout=None,
input=None,
tag_list=None,
task_list=None,
task_start_to_close_timeout=None):
"""
Requests that a child workflow execution be started and
records a StartChildWorkflowExecutionInitiated event in the
history. The child workflow execution is a separate workflow
execution with its own history.
"""
o = {}
o['decisionType'] = 'StartChildWorkflowExecution'
attrs = o['startChildWorkflowExecutionDecisionAttributes'] = {}
attrs['workflowType'] = {
'name': workflow_type_name,
'version': workflow_type_version,
}
attrs['workflowId'] = workflow_id
if child_policy is not None:
attrs['childPolicy'] = child_policy
if control is not None:
attrs['control'] = control
if execution_start_to_close_timeout is not None:
attrs['executionStartToCloseTimeout'] = execution_start_to_close_timeout
if input is not None:
attrs['input'] = input
if tag_list is not None:
attrs['tagList'] = tag_list
if task_list is not None:
attrs['taskList'] = {'name': task_list}
if task_start_to_close_timeout is not None:
attrs['taskStartToCloseTimeout'] = task_start_to_close_timeout
self._data.append(o)
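# Usage sketch -- the activity name, version, task list and input below are
# made-up example values, and the final hand-off to SWF (boto's
# Layer1.respond_decision_task_completed) is only indicated in comments so that
# importing this module stays side-effect free.
if __name__ == '__main__':
    example = Layer1Decisions()
    # Ask SWF to run one activity task...
    example.schedule_activity_task('activity-0001', 'ProcessFile', '1.0',
                                   task_list='default-tasks',
                                   input='s3://example-bucket/key')
    # ...and record a marker in the workflow history.
    example.record_marker('checkpoint', details='after scheduling')
    # example._data now holds the two decision dicts that would be passed as the
    # `decisions` argument of respond_decision_task_completed(task_token, ...).
    print(example._data)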
| apache-2.0 |
JingJunYin/tensorflow | tensorflow/tools/api/generator/create_python_api_test.py | 32 | 2857 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for create_python_api."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import imp
import sys
from tensorflow.python.platform import test
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.api.generator import create_python_api
@tf_export('test_op', 'test_op1')
def test_op():
pass
@tf_export('TestClass', 'NewTestClass')
class TestClass(object):
pass
_TEST_CONSTANT = 5
_MODULE_NAME = 'test.tensorflow.test_module'
class CreatePythonApiTest(test.TestCase):
def setUp(self):
# Add fake op to a module that has 'tensorflow' in the name.
sys.modules[_MODULE_NAME] = imp.new_module(_MODULE_NAME)
setattr(sys.modules[_MODULE_NAME], 'test_op', test_op)
setattr(sys.modules[_MODULE_NAME], 'TestClass', TestClass)
test_op.__module__ = _MODULE_NAME
TestClass.__module__ = _MODULE_NAME
tf_export('consts._TEST_CONSTANT').export_constant(
_MODULE_NAME, '_TEST_CONSTANT')
def tearDown(self):
del sys.modules[_MODULE_NAME]
def testFunctionImportIsAdded(self):
imports = create_python_api.get_api_imports()
expected_import = (
'from test.tensorflow.test_module import test_op as test_op1')
self.assertTrue(
expected_import in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
expected_import = 'from test.tensorflow.test_module import test_op'
self.assertTrue(
expected_import in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
def testClassImportIsAdded(self):
imports = create_python_api.get_api_imports()
expected_import = 'from test.tensorflow.test_module import TestClass'
self.assertTrue(
'TestClass' in str(imports),
msg='%s not in %s' % (expected_import, str(imports)))
def testConstantIsAdded(self):
imports = create_python_api.get_api_imports()
expected = 'from test.tensorflow.test_module import _TEST_CONSTANT'
self.assertTrue(expected in str(imports),
msg='%s not in %s' % (expected, str(imports)))
if __name__ == '__main__':
test.main()
| apache-2.0 |
mhogg/scipy | scipy/sparse/linalg/isolve/setup.py | 108 | 1408 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
from os.path import join
def configuration(parent_package='',top_path=None):
from numpy.distutils.system_info import get_info, NotFoundError
from numpy.distutils.misc_util import Configuration
from scipy._build_utils import get_g77_abi_wrappers
    config = Configuration('isolve', parent_package, top_path)
lapack_opt = get_info('lapack_opt')
if not lapack_opt:
raise NotFoundError('no lapack/blas resources found')
# iterative methods
methods = ['BiCGREVCOM.f.src',
'BiCGSTABREVCOM.f.src',
'CGREVCOM.f.src',
'CGSREVCOM.f.src',
# 'ChebyREVCOM.f.src',
'GMRESREVCOM.f.src',
# 'JacobiREVCOM.f.src',
'QMRREVCOM.f.src',
# 'SORREVCOM.f.src'
]
Util = ['STOPTEST2.f.src','getbreak.f.src']
sources = Util + methods + ['_iterative.pyf.src']
sources = [join('iterative', x) for x in sources]
sources += get_g77_abi_wrappers(lapack_opt)
config.add_extension('_iterative',
sources=sources,
extra_info=lapack_opt)
config.add_data_dir('tests')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
Basis/pip | pip/_vendor/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
def slider(self):
previous1 = previous2 = None
for token in self.source:
if previous1 is not None:
yield previous2, previous1, token
previous2 = previous1
previous1 = token
yield previous2, previous1, None
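    # What slider() yields, with tokens abbreviated to letters: for the source
    # sequence [A, B, C] it produces (None, A, B), (A, B, C), (B, C, None), so
    # __iter__ below always sees each token together with its neighbours, which
    # is exactly what the optional start/end tag rules need.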
def __iter__(self):
for previous, token, next in self.slider():
type = token["type"]
if type == "StartTag":
if (token["data"] or
not self.is_optional_start(token["name"], previous, next)):
yield token
elif type == "EndTag":
if not self.is_optional_end(token["name"], next):
yield token
else:
yield token
def is_optional_start(self, tagname, previous, next):
type = next and next["type"] or None
        if tagname == 'html':
# An html element's start tag may be omitted if the first thing
# inside the html element is not a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname == 'head':
# A head element's start tag may be omitted if the first thing
# inside the head element is an element.
# XXX: we also omit the start tag if the head element is empty
if type in ("StartTag", "EmptyTag"):
return True
elif type == "EndTag":
return next["name"] == "head"
elif tagname == 'body':
# A body element's start tag may be omitted if the first thing
# inside the body element is not a space character or a comment,
# except if the first thing inside the body element is a script
# or style element and the node immediately preceding the body
# element is a head element whose end tag has been omitted.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we do not look at the preceding event, so we never omit
# the body element's start tag if it's followed by a script or
# a style element.
return next["name"] not in ('script', 'style')
else:
return True
elif tagname == 'colgroup':
# A colgroup element's start tag may be omitted if the first thing
# inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
# end tag has been omitted.
if type in ("StartTag", "EmptyTag"):
# XXX: we do not look at the preceding event, so instead we never
# omit the colgroup element's end tag when it is immediately
# followed by another colgroup element. See is_optional_end.
return next["name"] == "col"
else:
return False
elif tagname == 'tbody':
# A tbody element's start tag may be omitted if the first thing
# inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
# whose end tag has been omitted.
if type == "StartTag":
# omit the thead and tfoot elements' end tag when they are
# immediately followed by a tbody element. See is_optional_end.
if previous and previous['type'] == 'EndTag' and \
previous['name'] in ('tbody', 'thead', 'tfoot'):
return False
return next["name"] == 'tr'
else:
return False
return False
def is_optional_end(self, tagname, next):
type = next and next["type"] or None
if tagname in ('html', 'head', 'body'):
# An html element's end tag may be omitted if the html element
# is not immediately followed by a space character or a comment.
return type not in ("Comment", "SpaceCharacters")
elif tagname in ('li', 'optgroup', 'tr'):
# A li element's end tag may be omitted if the li element is
# immediately followed by another li element or if there is
# no more content in the parent element.
# An optgroup element's end tag may be omitted if the optgroup
# element is immediately followed by another optgroup element,
# or if there is no more content in the parent element.
# A tr element's end tag may be omitted if the tr element is
# immediately followed by another tr element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] == tagname
else:
return type == "EndTag" or type is None
elif tagname in ('dt', 'dd'):
# A dt element's end tag may be omitted if the dt element is
# immediately followed by another dt element or a dd element.
# A dd element's end tag may be omitted if the dd element is
# immediately followed by another dd element or a dt element,
# or if there is no more content in the parent element.
if type == "StartTag":
return next["name"] in ('dt', 'dd')
elif tagname == 'dd':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'p':
# A p element's end tag may be omitted if the p element is
# immediately followed by an address, article, aside,
# blockquote, datagrid, dialog, dir, div, dl, fieldset,
# footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
# nav, ol, p, pre, section, table, or ul, element, or if
# there is no more content in the parent element.
if type in ("StartTag", "EmptyTag"):
return next["name"] in ('address', 'article', 'aside',
'blockquote', 'datagrid', 'dialog',
'dir', 'div', 'dl', 'fieldset', 'footer',
'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
'header', 'hr', 'menu', 'nav', 'ol',
'p', 'pre', 'section', 'table', 'ul')
else:
return type == "EndTag" or type is None
elif tagname == 'option':
# An option element's end tag may be omitted if the option
# element is immediately followed by another option element,
# or if it is immediately followed by an <code>optgroup</code>
# element, or if there is no more content in the parent
# element.
if type == "StartTag":
return next["name"] in ('option', 'optgroup')
else:
return type == "EndTag" or type is None
elif tagname in ('rt', 'rp'):
# An rt element's end tag may be omitted if the rt element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
# An rp element's end tag may be omitted if the rp element is
# immediately followed by an rt or rp element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('rt', 'rp')
else:
return type == "EndTag" or type is None
elif tagname == 'colgroup':
# A colgroup element's end tag may be omitted if the colgroup
# element is not immediately followed by a space character or
# a comment.
if type in ("Comment", "SpaceCharacters"):
return False
elif type == "StartTag":
# XXX: we also look for an immediately following colgroup
# element. See is_optional_start.
return next["name"] != 'colgroup'
else:
return True
elif tagname in ('thead', 'tbody'):
# A thead element's end tag may be omitted if the thead element
# is immediately followed by a tbody or tfoot element.
# A tbody element's end tag may be omitted if the tbody element
# is immediately followed by a tbody or tfoot element, or if
# there is no more content in the parent element.
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] in ['tbody', 'tfoot']
elif tagname == 'tbody':
return type == "EndTag" or type is None
else:
return False
elif tagname == 'tfoot':
# A tfoot element's end tag may be omitted if the tfoot element
# is immediately followed by a tbody element, or if there is no
# more content in the parent element.
# XXX: we never omit the end tag when the following element is
# a tbody. See is_optional_start.
if type == "StartTag":
return next["name"] == 'tbody'
else:
return type == "EndTag" or type is None
elif tagname in ('td', 'th'):
# A td element's end tag may be omitted if the td element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
# A th element's end tag may be omitted if the th element is
# immediately followed by a td or th element, or if there is
# no more content in the parent element.
if type == "StartTag":
return next["name"] in ('td', 'th')
else:
return type == "EndTag" or type is None
return False
| mit |
uhlik/blendmaxwell | mxs.py | 2 | 222633 | #!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3
# -*- coding: utf-8 -*-
# The MIT License (MIT)
#
# Copyright (c) 2015 Jakub Uhlík
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is furnished
# to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import os
import platform
import datetime
import struct
import math
import sys
import numpy
from .log import log, LogStyles
from . import utils
s = platform.system()
if(s == 'Darwin'):
pass
elif(s == 'Linux'):
try:
from pymaxwell import *
except ImportError:
mp = os.environ.get("MAXWELL3_ROOT")
if(not mp):
raise OSError("missing MAXWELL3_ROOT environment variable")
pp = os.path.abspath(os.path.join(mp, 'python', 'pymaxwell', 'python3.5'))
if(not os.path.exists(pp)):
raise OSError("pymaxwell for python 3.5 does not exist ({})".format(pp))
sys.path.insert(0, pp)
from pymaxwell import *
elif(s == 'Windows'):
try:
from pymaxwell import *
except ImportError:
mp = os.environ.get("MAXWELL3_ROOT")
if(not mp):
raise OSError("missing MAXWELL3_ROOT environment variable")
pp = os.path.abspath(os.path.join(mp, 'python', 'pymaxwell', 'python3.5'))
if(not os.path.exists(pp)):
raise OSError("pymaxwell for python 3.5 does not exist ({})".format(pp))
sys.path.insert(0, pp)
os.environ['PATH'] = ';'.join([mp, os.environ['PATH']])
from pymaxwell import *
def read_mxm_preview(path):
import numpy
s = Cmaxwell(mwcallback)
m = s.readMaterial(path)
a, _ = m.getPreview()
r = numpy.copy(a)
return r
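# Usage sketch (the path is illustrative): the function returns a plain numpy
# array copied out of pymaxwell's buffer, so it stays valid after the Cmaxwell
# material and scene objects are gone, e.g.
#
#     preview = read_mxm_preview('/tmp/material.mxm')
#     print(preview.shape)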
def material_preview_scene(scene, tmp_dir, quality, ):
s = Cmaxwell(mwcallback)
log('reading scene: {}'.format(scene), 2)
ok = s.readMXS(scene)
if(not ok):
log('error reading scene: {}'.format(scene), 2, LogStyles.ERROR, )
return None
def get_material_names(s):
it = CmaxwellMaterialIterator()
o = it.first(s)
l = []
while not o.isNull():
name = o.getName()
l.append(name)
o = it.next()
return l
names = get_material_names(s)
for n in names:
if(n.lower() == 'preview'):
break
log('swapping material: {}'.format(n), 2)
material = s.getMaterial(n)
p = os.path.join(tmp_dir, 'material.mxm')
material.read(p)
material.forceToWriteIntoScene()
log('setting parameters..', 2)
s.setRenderParameter('ENGINE', bytes(quality, encoding='UTF-8'))
exr = os.path.join(tmp_dir, "render.exr")
s.setPath('RENDER', exr, 32)
s.setRenderParameter('DO NOT SAVE MXI FILE', False)
s.setRenderParameter('DO NOT SAVE IMAGE FILE', False)
src_dir, _ = os.path.split(scene)
ok = s.addSearchingPath(src_dir)
sp = os.path.join(tmp_dir, "scene.mxs")
log('writing scene: {}'.format(sp), 2)
ok = s.writeMXS(sp)
if(not ok):
log('error writing scene: {}'.format(sp), 2, LogStyles.ERROR, )
return None
log('done.', 2)
return sp
def material_preview_mxi(tmp_dir):
mp = os.path.join(tmp_dir, 'render.mxi')
ep = os.path.join(tmp_dir, 'render.exr')
import numpy
a = numpy.zeros((1, 1, 3), dtype=numpy.float, )
if(os.path.exists(mp)):
log('reading mxi: {}'.format(mp), 2)
i = CmaxwellMxi()
i.read(mp)
a, _ = i.getRenderBuffer(32)
elif(os.path.exists(ep)):
log('reading exr: {}'.format(ep), 2)
i = CmaxwellMxi()
i.readImage(ep)
i.write(mp)
a, _ = i.getRenderBuffer(32)
else:
log('image not found..', 2)
return a
def viewport_render_scene(tmp_dir, quality, ):
s = Cmaxwell(mwcallback)
p = os.path.join(tmp_dir, "scene.mxs")
ok = s.readMXS(p)
if(not ok):
return False
s.setRenderParameter('ENGINE', bytes(quality, encoding='UTF-8'))
mxi = os.path.join(tmp_dir, "render.mxi")
s.setRenderParameter('MXI FULLNAME', bytes(mxi, encoding='UTF-8'))
exr = os.path.join(tmp_dir, "render.exr")
s.setPath('RENDER', exr, 32)
s.setRenderParameter('DO NOT SAVE MXI FILE', False)
s.setRenderParameter('DO NOT SAVE IMAGE FILE', False)
# turn off channels
s.setRenderParameter('EMBED CHANNELS', 1)
ls = ['DO ALPHA CHANNEL', 'DO IDOBJECT CHANNEL', 'DO IDMATERIAL CHANNEL', 'DO SHADOW PASS CHANNEL', 'DO MOTION CHANNEL',
'DO ROUGHNESS CHANNEL', 'DO FRESNEL CHANNEL', 'DO NORMALS CHANNEL', 'DO POSITION CHANNEL', 'DO ZBUFFER CHANNEL',
'DO DEEP CHANNEL', 'DO UV CHANNEL', 'DO ALPHA CUSTOM CHANNEL', 'DO REFLECTANCE CHANNEL', ]
for n in ls:
s.setRenderParameter(n, 0)
ok = s.writeMXS(p)
if(not ok):
return False
return True
def viewport_render_mxi(tmp_dir):
ep = os.path.join(tmp_dir, 'render2.exr')
a = numpy.zeros((1, 1, 3), dtype=numpy.float, )
if(os.path.exists(ep)):
log('reading exr: {}'.format(ep), 2)
i = CmaxwellMxi()
i.readImage(ep)
# i.write(mp)
a, _ = i.getRenderBuffer(32)
else:
log('image not found..', 2, LogStyles.ERROR)
return a
class MXSWriter():
def __init__(self, path, append=False, ):
"""Create scene or load existing.
path string (path)
append bool
"""
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell directly in Blender on Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
self.path = path
self.mxs = Cmaxwell(mwcallback)
pid = utils.get_plugin_id()
if(pid != ""):
# write here directly, even though it is also part of scene data, but api change just for this is pointless..
self.mxs.setPluginID(pid)
if(append):
log("appending to existing scene..", 2, prefix="* ", )
self.mxs.readMXS(self.path)
else:
log("creating new scene..", 2, prefix="* ", )
self.mgr = CextensionManager.instance()
self.mgr.loadAllExtensions()
def write(self):
"""Write scene fo file.
(no parameters..)
"""
log("saving scene..", 2)
ok = self.mxs.writeMXS(self.path)
log("done.", 2)
return ok
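    # Hedged usage sketch (the path and object are illustrative assumptions): a typical
    # MXSWriter lifecycle is create -> add objects/materials -> write once at the end.
    #
    #   w = MXSWriter('/tmp/scene.mxs')
    #   w.empty('root', matrix=None, motion=None, )
    #   w.write()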
def erase_unused_materials(self):
self.mxs.eraseUnusedMaterials()
def set_base_and_pivot(self, o, matrix=None, motion=None, ):
"""Convert float tuples to Cbases and set to object.
o CmaxwellObject
base ((3 float), (3 float), (3 float), (3 float)) or None
pivot ((3 float), (3 float), (3 float), (3 float)) or None
"""
if(matrix is None):
matrix = ([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0],
[0.0, 0.0, 0.0])
base = matrix[0]
pivot = matrix[1]
l = matrix[2]
r = matrix[3]
s = matrix[4]
b = Cbase()
b.origin = Cvector(*base[0])
b.xAxis = Cvector(*base[1])
b.yAxis = Cvector(*base[2])
b.zAxis = Cvector(*base[3])
p = Cbase()
p.origin = Cvector(*pivot[0])
p.xAxis = Cvector(*pivot[1])
p.yAxis = Cvector(*pivot[2])
p.zAxis = Cvector(*pivot[3])
o.setBaseAndPivot(b, p)
o.setPivotPosition(Cvector(*l))
o.setPivotRotation(Cvector(*r))
o.setPosition(Cvector(*l))
o.setRotation(Cvector(*r))
o.setScale(Cvector(*s))
if(motion is not None):
for(t, _, b, p) in motion:
bb = Cbase()
bb.origin = Cvector(*b[0])
bb.xAxis = Cvector(*b[1])
bb.yAxis = Cvector(*b[2])
bb.zAxis = Cvector(*b[3])
pp = Cbase()
pp.origin = Cvector(*p[0])
pp.xAxis = Cvector(*p[1])
pp.yAxis = Cvector(*p[2])
pp.zAxis = Cvector(*p[3])
o.setBaseAndPivot(bb, pp, t, )
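    # Hedged example of the expected "matrix" value (an assumption inferred from the
    # unpacking above: base, pivot, location, rotation, scale):
    #
    #   matrix = (((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)),  # base
    #             ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0)),  # pivot
    #             (0.0, 0.0, 0.0), (0.0, 0.0, 0.0), (1.0, 1.0, 1.0), )                   # location, rotation, scale
    #   writer.set_base_and_pivot(o, matrix, motion=None, )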
def set_object_props(self, o, hide=False, opacity=100, cid=(1.0, 1.0, 1.0), hcam=False, hcamsc=False, hgi=False, hrr=False, hzcp=False, blocked_emitters=None, ):
"""Set common object properties.
o CmaxwellObject
hide bool
opacity float
cid (float, float, float) 0.0 - 1.0 rgb
hcam bool
hcamsc bool
hgi bool
hrr bool
hzcp bool
blocked_emitters list of blocked emitter object names
"""
if(hide):
o.setHide(hide)
if(opacity != 100.0):
o.setOpacity(opacity)
c = Crgb()
c.assign(*cid)
o.setColorID(c)
if(hcam):
o.setHideToCamera(True)
if(hcamsc):
o.setHideToCameraInShadowsPass(True)
if(hgi):
o.setHideToGI(True)
if(hrr):
o.setHideToReflectionsRefractions(True)
if(hzcp):
o.excludeOfCutPlanes(True)
if(blocked_emitters):
for n in blocked_emitters:
ok = o.addExcludedLight(n)
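    # Hedged example call (values are illustrative): hide an object from the camera and
    # from GI while keeping it visible in reflections/refractions:
    #
    #   writer.set_object_props(o, hide=False, opacity=100, cid=(0.2, 0.6, 0.9),
    #                           hcam=True, hgi=True, )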
def texture_data_to_mxparams(self, name, data, mxparams, ):
"""Create CtextureMap, fill with parameters and put into mxparams.
name string
data dict {'type': string,
'path': string,
'channel': int,
'use_global_map': bool,
'tile_method_type': [bool, bool],
'tile_method_units': int,
'repeat': [float, float],
'mirror': [bool, bool],
'offset': [float, float],
'rotation': float,
'invert': bool,
'alpha_only': bool,
'interpolation': bool,
'brightness': float,
'contrast': float,
'saturation': float,
'hue': float,
                            'rgb_clamp': [float, float],
                            'normal_mapping_flip_red': bool,
                            'normal_mapping_flip_green': bool,
                            'normal_mapping_full_range_blue': bool, }
mxparams mxparams
"""
d = data
if(d is None):
return
t = CtextureMap()
t.setPath(d['path'])
v = Cvector2D()
v.assign(*d['repeat'])
t.scale = v
v = Cvector2D()
v.assign(*d['offset'])
t.offset = v
t.rotation = d['rotation']
t.uvwChannelID = d['channel']
t.uIsTiled = d['tile_method_type'][0]
t.vIsTiled = d['tile_method_type'][1]
t.uIsMirrored = d['mirror'][0]
t.vIsMirrored = d['mirror'][1]
t.invert = d['invert']
# t.doGammaCorrection = 0
t.useAbsoluteUnits = d['tile_method_units']
t.normalMappingFlipRed = d['normal_mapping_flip_red']
t.normalMappingFlipGreen = d['normal_mapping_flip_green']
t.normalMappingFullRangeBlue = d['normal_mapping_full_range_blue']
t.useAlpha = d['alpha_only']
t.typeInterpolation = d['interpolation']
t.saturation = d['saturation'] / 100
t.contrast = d['contrast'] / 100
t.brightness = d['brightness'] / 100
t.hue = d['hue'] / 180
t.clampMin = d['rgb_clamp'][0] / 255
t.clampMax = d['rgb_clamp'][1] / 255
t.useGlobalMap = d['use_global_map']
# t.cosA = 1.000000
# t.sinA = 0.000000
ok = mxparams.setTextureMap(name, t)
return mxparams
def texture(self, d, ):
"""Create CtextureMap from parameters
d dict
"""
if(d is None):
return
s = self.mxs
t = CtextureMap()
t.setPath(d['path'])
t.uvwChannelID = d['channel']
t.brightness = d['brightness'] / 100
t.contrast = d['contrast'] / 100
t.saturation = d['saturation'] / 100
t.hue = d['hue'] / 180
t.useGlobalMap = d['use_global_map']
t.useAbsoluteUnits = d['tile_method_units']
t.uIsTiled = d['tile_method_type'][0]
t.vIsTiled = d['tile_method_type'][1]
t.uIsMirrored = d['mirror'][0]
t.vIsMirrored = d['mirror'][1]
vec = Cvector2D()
vec.assign(d['offset'][0], d['offset'][1])
t.offset = vec
t.rotation = d['rotation']
t.invert = d['invert']
t.useAlpha = d['alpha_only']
if(d['interpolation']):
t.typeInterpolation = 1
else:
t.typeInterpolation = 0
t.clampMin = d['rgb_clamp'][0] / 255
t.clampMax = d['rgb_clamp'][1] / 255
vec = Cvector2D()
vec.assign(d['repeat'][0], d['repeat'][1])
t.scale = vec
t.normalMappingFlipRed = d['normal_mapping_flip_red']
t.normalMappingFlipGreen = d['normal_mapping_flip_green']
t.normalMappingFullRangeBlue = d['normal_mapping_full_range_blue']
for i, pt in enumerate(d['procedural']):
if(pt['use'] == 'BRICK'):
e = self.mgr.createDefaultTextureExtension('Brick')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setFloat('Brick width', pt['brick_brick_width'])
p.setFloat('Brick height', pt['brick_brick_height'])
p.setInt('Brick offset', pt['brick_brick_offset'])
p.setInt('Random offset', pt['brick_random_offset'])
p.setByte('Double brick', pt['brick_double_brick'])
p.setFloat('Small brick width', pt['brick_small_brick_width'])
p.setByte('Round corners', pt['brick_round_corners'])
p.setFloat('Boundary sharpness U', pt['brick_boundary_sharpness_u'])
p.setFloat('Boundary sharpness V', pt['brick_boundary_sharpness_v'])
p.setInt('Boundary noise detail', pt['brick_boundary_noise_detail'])
p.setFloat('Boundary noise region U', pt['brick_boundary_noise_region_u'])
p.setFloat('Boundary noise region V', pt['brick_boundary_noise_region_v'])
p.setUInt('Seed', pt['brick_seed'])
p.setByte('Random rotation', pt['brick_random_rotation'])
p.setInt('Color variation', pt['brick_color_variation'])
c = Crgb()
c.assign(*pt['brick_brick_color_0'])
p.setRgb('Brick color 0', c)
self.texture_data_to_mxparams('Brick texture 0', pt['brick_brick_texture_0'], p, )
p.setInt('Sampling factor 0', pt['brick_sampling_factor_0'])
p.setInt('Weight 0', pt['brick_weight_0'])
c = Crgb()
c.assign(*pt['brick_brick_color_1'])
p.setRgb('Brick color 1', c)
self.texture_data_to_mxparams('Brick texture 1', pt['brick_brick_texture_1'], p, )
p.setInt('Sampling factor 1', pt['brick_sampling_factor_1'])
p.setInt('Weight 1', pt['brick_weight_1'])
c = Crgb()
c.assign(*pt['brick_brick_color_2'])
p.setRgb('Brick color 2', c)
self.texture_data_to_mxparams('Brick texture 2', pt['brick_brick_texture_2'], p, )
p.setInt('Sampling factor 2', pt['brick_sampling_factor_2'])
p.setInt('Weight 2', pt['brick_weight_2'])
p.setFloat('Mortar thickness', pt['brick_mortar_thickness'])
c = Crgb()
c.assign(*pt['brick_mortar_color'])
p.setRgb('Mortar color', c)
self.texture_data_to_mxparams('Mortar texture', pt['brick_mortar_texture'], p, )
t.addProceduralTexture(p)
elif(pt['use'] == 'CHECKER'):
e = self.mgr.createDefaultTextureExtension('Checker')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['checker_color_0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['checker_color_1'])
p.setRgb('Color1', c)
p.setUInt('Number of elements U', pt['checker_number_of_elements_u'])
p.setUInt('Number of elements V', pt['checker_number_of_elements_v'])
p.setFloat('Transition sharpness', pt['checker_transition_sharpness'])
p.setUInt('Fall-off', pt['checker_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'CIRCLE'):
e = self.mgr.createDefaultTextureExtension('Circle')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['circle_background_color'])
p.setRgb('Background color', c)
c = Crgb()
c.assign(*pt['circle_circle_color'])
p.setRgb('Circle color', c)
p.setFloat('RadiusU', pt['circle_radius_u'])
p.setFloat('RadiusV', pt['circle_radius_v'])
p.setFloat('Transition factor', pt['circle_transition_factor'])
p.setUInt('Fall-off', pt['circle_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRADIENT3'):
e = self.mgr.createDefaultTextureExtension('Gradient3')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setByte('Gradient U', pt['gradient3_gradient_u'])
c = Crgb()
c.assign(*pt['gradient3_color0_u'])
p.setRgb('Color0 U', c)
c = Crgb()
c.assign(*pt['gradient3_color1_u'])
p.setRgb('Color1 U', c)
c = Crgb()
c.assign(*pt['gradient3_color2_u'])
p.setRgb('Color2 U', c)
p.setUInt('Gradient type U', pt['gradient3_gradient_type_u'])
p.setFloat('Color1 U position', pt['gradient3_color1_u_position'])
p.setByte('Gradient V', pt['gradient3_gradient_v'])
c = Crgb()
c.assign(*pt['gradient3_color0_v'])
p.setRgb('Color0 V', c)
c = Crgb()
c.assign(*pt['gradient3_color1_v'])
p.setRgb('Color1 V', c)
c = Crgb()
c.assign(*pt['gradient3_color2_v'])
p.setRgb('Color2 V', c)
p.setUInt('Gradient type V', pt['gradient3_gradient_type_v'])
p.setFloat('Color1 V position', pt['gradient3_color1_v_position'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRADIENT'):
e = self.mgr.createDefaultTextureExtension('Gradient')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setByte('Gradient U', pt['gradient_gradient_u'])
c = Crgb()
c.assign(*pt['gradient_color0_u'])
p.setRgb('Color0 U', c)
c = Crgb()
c.assign(*pt['gradient_color1_u'])
p.setRgb('Color1 U', c)
p.setUInt('Gradient type U', pt['gradient_gradient_type_u'])
p.setFloat('Transition factor U', pt['gradient_transition_factor_u'])
p.setByte('Gradient V', pt['gradient_gradient_v'])
c = Crgb()
c.assign(*pt['gradient_color0_v'])
p.setRgb('Color0 V', c)
c = Crgb()
c.assign(*pt['gradient_color1_v'])
p.setRgb('Color1 V', c)
p.setUInt('Gradient type V', pt['gradient_gradient_type_v'])
p.setFloat('Transition factor V', pt['gradient_transition_factor_v'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRID'):
e = self.mgr.createDefaultTextureExtension('Grid')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['grid_boundary_color'])
p.setRgb('Boundary color', c)
c = Crgb()
c.assign(*pt['grid_cell_color'])
p.setRgb('Cell color', c)
p.setFloat('Cell width', pt['grid_cell_width'])
p.setFloat('Cell height', pt['grid_cell_height'])
if(pt['grid_horizontal_lines']):
p.setFloat('Boundary thickness U', pt['grid_boundary_thickness_u'])
else:
p.setFloat('Boundary thickness U', 0.0)
if(pt['grid_vertical_lines']):
p.setFloat('Boundary thickness V', pt['grid_boundary_thickness_v'])
else:
p.setFloat('Boundary thickness V', 0.0)
p.setFloat('Transition sharpness', pt['grid_transition_sharpness'])
p.setUInt('Fall-off', pt['grid_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'MARBLE'):
e = self.mgr.createDefaultTextureExtension('Marble')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['marble_coordinates_type'])
c = Crgb()
c.assign(*pt['marble_color0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['marble_color1'])
p.setRgb('Color1', c)
c = Crgb()
c.assign(*pt['marble_color2'])
p.setRgb('Color2', c)
p.setFloat('Frequency', pt['marble_frequency'])
p.setFloat('Detail', pt['marble_detail'])
p.setInt('Octaves', pt['marble_octaves'])
p.setUInt('Seed', pt['marble_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'NOISE'):
e = self.mgr.createDefaultTextureExtension('Noise')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['noise_coordinates_type'])
c = Crgb()
c.assign(*pt['noise_noise_color'])
p.setRgb('Noise color', c)
c = Crgb()
c.assign(*pt['noise_background_color'])
p.setRgb('Background color', c)
p.setFloat('Detail', pt['noise_detail'])
p.setFloat('Persistance', pt['noise_persistance'])
p.setInt('Octaves', pt['noise_octaves'])
p.setFloat('Low value', pt['noise_low_value'])
p.setFloat('High value', pt['noise_high_value'])
p.setUInt('Seed', pt['noise_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'VORONOI'):
e = self.mgr.createDefaultTextureExtension('Voronoi')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['voronoi_coordinates_type'])
c = Crgb()
c.assign(*pt['voronoi_color0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['voronoi_color1'])
p.setRgb('Color1', c)
p.setInt('Detail', pt['voronoi_detail'])
p.setUInt('Distance', pt['voronoi_distance'])
p.setUInt('Combination', pt['voronoi_combination'])
p.setFloat('Low value', pt['voronoi_low_value'])
p.setFloat('High value', pt['voronoi_high_value'])
p.setUInt('Seed', pt['voronoi_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'TILED'):
e = self.mgr.createDefaultTextureExtension('TiledTexture')
p = e.getExtensionData()
p.setFloat('Blend factor', pt['blending_factor'])
c = Crgb()
c.assign(*pt['tiled_base_color'])
p.setRgb('Base Color', c)
p.setByte('Use base color', pt['tiled_use_base_color'])
p.setString('Filename_mask', pt['tiled_token_mask'])
p.setString('Filename', pt['tiled_filename'])
# 'Map U tile range' UCHAR
# 'Map V tile range' UCHAR
t.addProceduralTexture(p)
elif(pt['use'] == 'WIREFRAME'):
e = self.mgr.createDefaultTextureExtension('WireframeTexture')
p = e.getExtensionData()
c = Crgb()
c.assign(*pt['wireframe_fill_color'])
p.setRgb('Fill Color', c)
c = Crgb()
c.assign(*pt['wireframe_edge_color'])
p.setRgb('Edge Color', c)
c = Crgb()
c.assign(*pt['wireframe_coplanar_edge_color'])
p.setRgb('Coplanar Edge Color', c)
p.setFloat('Edge Width', pt['wireframe_edge_width'])
p.setFloat('Coplanar Edge Width', pt['wireframe_coplanar_edge_width'])
p.setFloat('Coplanar Threshold', pt['wireframe_coplanar_threshold'])
t.addProceduralTexture(p)
else:
                raise TypeError("{0} is an unknown procedural texture type".format(pt['use']))
return t
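    # Hedged example of a minimal bitmap texture description accepted by texture()
    # (keys mirror the reads above; the path and values are illustrative assumptions):
    #
    #   d = {'path': '/tmp/uv.png', 'channel': 0, 'use_global_map': False,
    #        'tile_method_type': [True, True], 'tile_method_units': 0,
    #        'repeat': [1.0, 1.0], 'mirror': [False, False], 'offset': [0.0, 0.0],
    #        'rotation': 0.0, 'invert': False, 'alpha_only': False, 'interpolation': False,
    #        'brightness': 0.0, 'contrast': 0.0, 'saturation': 0.0, 'hue': 0.0,
    #        'rgb_clamp': [0.0, 255.0],
    #        'normal_mapping_flip_red': False, 'normal_mapping_flip_green': False,
    #        'normal_mapping_full_range_blue': False,
    #        'procedural': [], }
    #   t = writer.texture(d)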
def material_placeholder(self, n=None, ):
        if(n is None):
            n = 'MATERIAL_PLACEHOLDER'
s = self.mxs
m = s.createMaterial(n)
l = m.addLayer()
b = l.addBSDF()
r = b.getReflectance()
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = CtextureMap()
mgr = CextensionManager.instance()
mgr.loadAllExtensions()
e = mgr.createDefaultTextureExtension('Checker')
ch = e.getExtensionData()
ch.setUInt('Number of elements U', 32)
ch.setUInt('Number of elements V', 32)
t.addProceduralTexture(ch)
a.textureMap = t
r.setAttribute('color', a)
return m
def material_default(self, n, ):
s = self.mxs
m = s.createMaterial(n)
l = m.addLayer()
b = l.addBSDF()
return m
def material_external(self, d, ):
s = self.mxs
p = d['path']
t = s.readMaterial(p)
t.setName(d['name'])
m = s.addMaterial(t)
if(not d['embed']):
m.setReference(1, p)
return m
def material_custom(self, d, ):
s = self.mxs
m = s.createMaterial(d['name'])
d = d['data']
def global_props(d, m):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
def displacement(d, m):
if(not d['enabled']):
return
m.enableDisplacement(True)
if(d['map'] is not None):
t = self.texture(d['map'])
m.setDisplacementMap(t)
m.setDisplacementCommonParameters(d['type'], d['subdivision'], int(d['smoothing']), d['offset'], d['subdivision_method'], d['uv_interpolation'], )
m.setHeightMapDisplacementParameters(d['height'], d['height_units'], d['adaptive'], )
v = Cvector(*d['v3d_scale'])
m.setVectorDisplacementParameters(v, d['v3d_transform'], d['v3d_rgb_mapping'], d['v3d_preset'], )
def add_bsdf(d, l):
b = l.addBSDF()
b.setName(d['name'])
bp = d['bsdf_props']
# weight
if(bp['weight_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['weight_map'])
if(t is not None):
a.textureMap = t
a.value = bp['weight']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['weight']
b.setWeight(a)
# enabled
if(not bp['visible']):
b.setState(False)
# ior
r = b.getReflectance()
if(bp['ior'] == 1):
# measured data
r.setActiveIorMode(1)
r.setComplexIor(bp['complex_ior'])
else:
if(bp['reflectance_0_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['reflectance_0_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['reflectance_0'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['reflectance_0'])
r.setAttribute('color', a)
if(bp['reflectance_90_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['reflectance_90_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['reflectance_90'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['reflectance_90'])
r.setAttribute('color.tangential', a)
if(bp['transmittance_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['transmittance_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['transmittance'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['transmittance'])
r.setAttribute('transmittance.color', a)
r.setAbsorptionDistance(bp['attenuation_units'], bp['attenuation'])
r.setIOR(bp['nd'], bp['abbe'])
if(bp['force_fresnel']):
r.enableForceFresnel(True)
r.setConductor(bp['k'])
if(bp['r2_enabled']):
r.setFresnelCustom(bp['r2_falloff_angle'], bp['r2_influence'], True, )
# surface
if(bp['roughness_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['roughness_map'])
if(t is not None):
a.textureMap = t
a.value = bp['roughness']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['roughness']
b.setAttribute('roughness', a)
if(bp['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['bump_map'])
if(t is not None):
a.textureMap = t
if(bp['bump_map_use_normal']):
a.value = bp['bump_normal']
else:
a.value = bp['bump']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
if(bp['bump_map_use_normal']):
a.value = bp['bump_normal']
else:
a.value = bp['bump']
b.setAttribute('bump', a)
b.setNormalMapState(bp['bump_map_use_normal'])
if(bp['anisotropy_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['anisotropy_map'])
if(t is not None):
a.textureMap = t
a.value = bp['anisotropy']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['anisotropy']
b.setAttribute('anisotropy', a)
if(bp['anisotropy_angle_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['anisotropy_angle_map'])
if(t is not None):
a.textureMap = t
a.value = bp['anisotropy_angle']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['anisotropy_angle']
b.setAttribute('angle', a)
# subsurface
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['scattering'])
r.setAttribute('scattering', a)
r.setScatteringParameters(bp['coef'], bp['asymmetry'], bp['single_sided'])
if(bp['single_sided']):
if(bp['single_sided_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['single_sided_map'])
if(t is not None):
a.textureMap = t
a.value = bp['single_sided_value']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['single_sided_value']
r.setScatteringThickness(a)
r.setScatteringThicknessRange(bp['single_sided_min'], bp['single_sided_max'])
# coating
cp = d['coating']
if(cp['enabled']):
c = b.addCoating()
if(cp['thickness_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['thickness_map'])
if(t is not None):
a.textureMap = t
a.value = cp['thickness']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = cp['thickness']
c.setThickness(a)
c.setThicknessRange(cp['thickness_map_min'], cp['thickness_map_max'])
r = c.getReflectance()
if(cp['ior'] == 1):
# measured data
r.setActiveIorMode(1)
r.setComplexIor(cp['complex_ior'])
else:
if(cp['reflectance_0_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['reflectance_0_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*cp['reflectance_0'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*cp['reflectance_0'])
r.setAttribute('color', a)
if(cp['reflectance_90_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['reflectance_90_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*cp['reflectance_90'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*cp['reflectance_90'])
r.setAttribute('color.tangential', a)
r.setIOR(cp['nd'], 1.0, )
if(cp['force_fresnel']):
r.enableForceFresnel(True)
r.setConductor(cp['k'])
if(cp['r2_enabled']):
r.setFresnelCustom(cp['r2_falloff_angle'], 0.0, True, )
def add_emitter(d, l):
e = l.createEmitter()
if(d['type'] == 0):
e.setLobeType(EMISSION_LOBE_DEFAULT)
elif(d['type'] == 1):
e.setLobeType(EMISSION_LOBE_IES)
e.setLobeIES(d['ies_data'])
e.setIESLobeIntensity(d['ies_intensity'])
elif(d['type'] == 2):
e.setLobeType(EMISSION_LOBE_SPOTLIGHT)
if(d['spot_map'] is not None):
t = self.texture(d['spot_map'])
if(t is not None):
e.setLobeImageProjectedMap(d['spot_map_enabled'], t)
e.setSpotConeAngle(d['spot_cone_angle'])
e.setSpotFallOffAngle(d['spot_falloff_angle'])
e.setSpotFallOffType(d['spot_falloff_type'])
e.setSpotBlur(d['spot_blur'])
if(d['emission'] == 0):
e.setActiveEmissionType(EMISSION_TYPE_PAIR)
ep = CemitterPair()
c = Crgb()
c.assign(*d['color'])
ep.rgb.assign(c)
ep.temperature = d['color_black_body']
ep.watts = d['luminance_power']
ep.luminousEfficacy = d['luminance_efficacy']
ep.luminousPower = d['luminance_output']
ep.illuminance = d['luminance_output']
ep.luminousIntensity = d['luminance_output']
ep.luminance = d['luminance_output']
e.setPair(ep)
if(d['luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
if(d['color_black_body_enabled']):
e.setActivePair(EMISSION_COLOR_TEMPERATURE, u)
else:
e.setActivePair(EMISSION_RGB, u)
elif(d['emission'] == 1):
e.setActiveEmissionType(EMISSION_TYPE_TEMPERATURE)
e.setTemperature(d['temperature_value'])
elif(d['emission'] == 2):
e.setActiveEmissionType(EMISSION_TYPE_MXI)
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['hdr_map'])
if(t is not None):
a.textureMap = t
a.value = d['hdr_intensity']
e.setMXI(a)
e.setState(True)
def add_layer(d, m):
l = m.addLayer()
l.setName(d['name'])
lpd = d['layer_props']
if(not lpd['visible']):
l.setEnabled(False)
if(lpd['blending'] == 1):
l.setStackedBlendingMode(1)
if(lpd['opacity_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(lpd['opacity_map'])
if(t is not None):
a.textureMap = t
a.value = lpd['opacity']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = lpd['opacity']
l.setAttribute('weight', a)
epd = d['emitter']
if(epd['enabled']):
add_emitter(epd, l)
for b in d['bsdfs']:
add_bsdf(b, l)
global_props(d['global_props'], m)
displacement(d['displacement'], m)
for layer in d['layers']:
add_layer(layer, m)
return m
def material(self, d, ):
s = self.mxs
if(d['subtype'] == 'EXTERNAL'):
if(d['path'] == ''):
m = self.material_placeholder(d['name'])
else:
m = self.material_external(d)
if(d['override']):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
elif(d['subtype'] == 'EXTENSION'):
if(d['use'] == 'EMITTER'):
m = s.createMaterial(d['name'])
l = m.addLayer()
e = l.createEmitter()
if(d['emitter_type'] == 0):
e.setLobeType(EMISSION_LOBE_DEFAULT)
elif(d['emitter_type'] == 1):
e.setLobeType(EMISSION_LOBE_IES)
e.setLobeIES(d['emitter_ies_data'])
e.setIESLobeIntensity(d['emitter_ies_intensity'])
elif(d['emitter_type'] == 2):
e.setLobeType(EMISSION_LOBE_SPOTLIGHT)
if(d['emitter_spot_map'] is not None):
t = self.texture(d['emitter_spot_map'])
if(t is not None):
e.setLobeImageProjectedMap(d['emitter_spot_map_enabled'], t)
e.setSpotConeAngle(d['emitter_spot_cone_angle'])
e.setSpotFallOffAngle(d['emitter_spot_falloff_angle'])
e.setSpotFallOffType(d['emitter_spot_falloff_type'])
e.setSpotBlur(d['emitter_spot_blur'])
if(d['emitter_emission'] == 0):
e.setActiveEmissionType(EMISSION_TYPE_PAIR)
ep = CemitterPair()
c = Crgb()
c.assign(*d['emitter_color'])
ep.rgb.assign(c)
ep.temperature = d['emitter_color_black_body']
ep.watts = d['emitter_luminance_power']
ep.luminousEfficacy = d['emitter_luminance_efficacy']
ep.luminousPower = d['emitter_luminance_output']
ep.illuminance = d['emitter_luminance_output']
ep.luminousIntensity = d['emitter_luminance_output']
ep.luminance = d['emitter_luminance_output']
e.setPair(ep)
if(d['emitter_luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['emitter_luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['emitter_luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['emitter_luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['emitter_luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
if(d['emitter_color_black_body_enabled']):
e.setActivePair(EMISSION_COLOR_TEMPERATURE, u)
else:
e.setActivePair(EMISSION_RGB, u)
elif(d['emitter_emission'] == 1):
e.setActiveEmissionType(EMISSION_TYPE_TEMPERATURE)
e.setTemperature(d['emitter_temperature_value'])
elif(d['emitter_emission'] == 2):
e.setActiveEmissionType(EMISSION_TYPE_MXI)
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['emitter_hdr_map'])
if(t is not None):
a.textureMap = t
a.value = d['emitter_hdr_intensity']
e.setMXI(a)
e.setState(True)
def global_props(d, m):
# global properties
if(d['override_map']):
                        t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
                        t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
                        t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
global_props(d, m)
else:
m = CextensionManager.instance()
m.loadAllExtensions()
if(d['use'] == 'AGS'):
e = m.createDefaultMaterialModifierExtension('AGS')
p = e.getExtensionData()
c = Crgb()
c.assign(*d['ags_color'])
p.setRgb('Color', c)
p.setFloat('Reflection', d['ags_reflection'])
p.setUInt('Type', d['ags_type'])
elif(d['use'] == 'OPAQUE'):
e = m.createDefaultMaterialModifierExtension('Opaque')
p = e.getExtensionData()
p.setByte('Color Type', d['opaque_color_type'])
c = Crgb()
c.assign(*d['opaque_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['opaque_color_map'], p, )
p.setByte('Shininess Type', d['opaque_shininess_type'])
p.setFloat('Shininess', d['opaque_shininess'])
self.texture_data_to_mxparams('Shininess Map', d['opaque_shininess_map'], p, )
p.setByte('Roughness Type', d['opaque_roughness_type'])
p.setFloat('Roughness', d['opaque_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['opaque_roughness_map'], p, )
p.setByte('Clearcoat', d['opaque_clearcoat'])
elif(d['use'] == 'TRANSPARENT'):
e = m.createDefaultMaterialModifierExtension('Transparent')
p = e.getExtensionData()
p.setByte('Color Type', d['transparent_color_type'])
c = Crgb()
c.assign(*d['transparent_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['transparent_color_map'], p, )
p.setFloat('Ior', d['transparent_ior'])
p.setFloat('Transparency', d['transparent_transparency'])
p.setByte('Roughness Type', d['transparent_roughness_type'])
p.setFloat('Roughness', d['transparent_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['transparent_roughness_map'], p, )
p.setFloat('Specular Tint', d['transparent_specular_tint'])
p.setFloat('Dispersion', d['transparent_dispersion'])
p.setByte('Clearcoat', d['transparent_clearcoat'])
elif(d['use'] == 'METAL'):
e = m.createDefaultMaterialModifierExtension('Metal')
p = e.getExtensionData()
p.setUInt('IOR', d['metal_ior'])
p.setFloat('Tint', d['metal_tint'])
p.setByte('Color Type', d['metal_color_type'])
c = Crgb()
c.assign(*d['metal_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['metal_color_map'], p, )
p.setByte('Roughness Type', d['metal_roughness_type'])
p.setFloat('Roughness', d['metal_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['metal_roughness_map'], p, )
p.setByte('Anisotropy Type', d['metal_anisotropy_type'])
p.setFloat('Anisotropy', d['metal_anisotropy'])
self.texture_data_to_mxparams('Anisotropy Map', d['metal_anisotropy_map'], p, )
p.setByte('Angle Type', d['metal_angle_type'])
p.setFloat('Angle', d['metal_angle'])
self.texture_data_to_mxparams('Angle Map', d['metal_angle_map'], p, )
p.setByte('Dust Type', d['metal_dust_type'])
p.setFloat('Dust', d['metal_dust'])
self.texture_data_to_mxparams('Dust Map', d['metal_dust_map'], p, )
p.setByte('Perforation Enabled', d['metal_perforation_enabled'])
self.texture_data_to_mxparams('Perforation Map', d['metal_perforation_map'], p, )
elif(d['use'] == 'TRANSLUCENT'):
e = m.createDefaultMaterialModifierExtension('Translucent')
p = e.getExtensionData()
p.setFloat('Scale', d['translucent_scale'])
p.setFloat('Ior', d['translucent_ior'])
p.setByte('Color Type', d['translucent_color_type'])
c = Crgb()
c.assign(*d['translucent_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['translucent_color_map'], p, )
p.setFloat('Hue Shift', d['translucent_hue_shift'])
p.setByte('Invert Hue', d['translucent_invert_hue'])
p.setFloat('Vibrance', d['translucent_vibrance'])
p.setFloat('Density', d['translucent_density'])
p.setFloat('Opacity', d['translucent_opacity'])
p.setByte('Roughness Type', d['translucent_roughness_type'])
p.setFloat('Roughness', d['translucent_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['translucent_roughness_map'], p, )
p.setFloat('Specular Tint', d['translucent_specular_tint'])
p.setByte('Clearcoat', d['translucent_clearcoat'])
p.setFloat('Clearcoat Ior', d['translucent_clearcoat_ior'])
elif(d['use'] == 'CARPAINT'):
e = m.createDefaultMaterialModifierExtension('Car Paint')
p = e.getExtensionData()
c = Crgb()
c.assign(*d['carpaint_color'])
p.setRgb('Color', c)
p.setFloat('Metallic', d['carpaint_metallic'])
p.setFloat('Topcoat', d['carpaint_topcoat'])
elif(d['use'] == 'HAIR'):
e = m.createDefaultMaterialModifierExtension('Hair')
p = e.getExtensionData()
p.setByte('Color Type', d['hair_color_type'])
c = Crgb()
c.assign(*d['hair_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['hair_color_map'], p, )
self.texture_data_to_mxparams('Root-Tip Map', d['hair_root_tip_map'], p, )
p.setByte('Root-Tip Weight Type', d['hair_root_tip_weight_type'])
p.setFloat('Root-Tip Weight', d['hair_root_tip_weight'])
self.texture_data_to_mxparams('Root-Tip Weight Map', d['hair_root_tip_weight_map'], p, )
p.setFloat('Primary Highlight Strength', d['hair_primary_highlight_strength'])
p.setFloat('Primary Highlight Spread', d['hair_primary_highlight_spread'])
c = Crgb()
c.assign(*d['hair_primary_highlight_tint'])
p.setRgb('Primary Highlight Tint', c)
p.setFloat('Secondary Highlight Strength', d['hair_secondary_highlight_strength'])
p.setFloat('Secondary Highlight Spread', d['hair_secondary_highlight_spread'])
c = Crgb()
c.assign(*d['hair_secondary_highlight_tint'])
p.setRgb('Secondary Highlight Tint', c)
m = s.createMaterial(d['name'])
m.applyMaterialModifierExtension(p)
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
def displacement(d, m):
if(not d['enabled']):
return
m.enableDisplacement(True)
if(d['map'] is not None):
t = self.texture(d['map'])
m.setDisplacementMap(t)
m.setDisplacementCommonParameters(d['type'], d['subdivision'], int(d['smoothing']), d['offset'], d['subdivision_method'], d['uv_interpolation'], )
m.setHeightMapDisplacementParameters(d['height'], d['height_units'], d['adaptive'], )
v = Cvector(*d['v3d_scale'])
m.setVectorDisplacementParameters(v, d['v3d_transform'], d['v3d_rgb_mapping'], d['v3d_preset'], )
try:
displacement(d['displacement'], m)
except KeyError:
pass
elif(d['subtype'] == 'CUSTOM'):
m = self.material_custom(d)
else:
raise TypeError("Material '{}' {} is unknown type".format(d['name'], d['subtype']))
def get_material(self, n, ):
"""get material by name from scene, if material is missing, create and return placeholder"""
def get_material_names(s):
it = CmaxwellMaterialIterator()
o = it.first(s)
l = []
while not o.isNull():
name = o.getName()
l.append(name)
o = it.next()
return l
s = self.mxs
names = get_material_names(s)
m = None
if(n in names):
m = s.getMaterial(n)
if(m is None):
            # should not happen because material names are no longer changed, but keep it as a safeguard
m = self.material_placeholder()
return m
def camera(self, props, steps, active=False, lens_extra=None, response=None, region=None, custom_bokeh=(1.0, 0.0, False), cut_planes=(0.0, 1e7, False), shift_lens=(0.0, 0.0), ):
"""Create camera.
props (string name, int nSteps, float shutter, float filmWidth, float filmHeight, float iso, int diaphragmType, float angle,
int nBlades, float fps, int xRes, int yRes, float pixelAspect, int lensType, int projectionType)
steps [(int iStep, [3 float] origin, [3 float] focalPoint, [3 float] up, float focalLength, float fStop, bool focalLengthNeedCorrection), ..., ]
active bool
lens_extra float or None
response string or None
region (float x1, float y1, float x2, float y2, string type) or None
custom_bokeh (float ratio, float angle, bool enabled) or None
cut_planes (float near, float far, bool enabled) or None
shift_lens (float x, float y) or None
"""
s = self.mxs
if(props[13] in [6, 7]):
props2 = list(props[:])
props2[13] = TYPE_EXTENSION_LENS
c = s.addCamera(*props2)
else:
c = s.addCamera(*props)
for step in steps:
l = list(step[:])
l[1] = Cvector(*l[1])
l[2] = Cvector(*l[2])
l[3] = Cvector(*l[3])
c.setStep(*l)
# TYPE_THIN_LENS, TYPE_PINHOLE, TYPE_ORTHO
if(lens_extra is not None):
if(props[13] == TYPE_FISHEYE_LENS):
c.setFishLensProperties(lens_extra)
if(props[13] == TYPE_SPHERICAL_LENS):
c.setSphericalLensProperties(lens_extra)
if(props[13] == TYPE_CYLINDRICAL_LENS):
c.setCylindricalLensProperties(lens_extra)
if(props[13] == 6):
p = MXparamList()
p.createString('EXTENSION_NAME', 'Lat-Long Stereo')
p.createUInt('Type', lens_extra[0], 0, 2)
p.createFloat('FOV Vertical', lens_extra[1], 180.0, 0.0)
p.createFloat('FOV Horizontal', lens_extra[2], 360.0, 0.0)
p.createByte('Flip Ray X', lens_extra[3], 0, 1)
p.createByte('Flip Ray Y', lens_extra[4], 0, 1)
p.createFloat('Parallax Distance', lens_extra[5], 0.0, 360.0)
p.createByte('Zenith Mode', lens_extra[6], 0, 1)
p.createFloat('Separation', lens_extra[7], 0.0, 100000.0)
p.createTextureMap('Separation Map', CtextureMap())
self.texture_data_to_mxparams('Separation Map', lens_extra[8], p, )
c.applyCameraLensExtension(p)
if(props[13] == 7):
p = MXparamList()
p.createString('EXTENSION_NAME', 'Fish Stereo')
p.createUInt('Type', lens_extra[0], 0, 2)
p.createFloat('FOV', lens_extra[1], 0.0, 360.0)
p.createFloat('Separation', lens_extra[2], 0.0, 1000000.0)
p.createTextureMap('Separation Map', CtextureMap())
self.texture_data_to_mxparams('Separation Map', lens_extra[3], p, )
p.createByte('Vertical Mode', lens_extra[4], 0, 1)
p.createFloat('Dome Radius', lens_extra[5], 1.0, 1000000.0)
p.createTextureMap('Turn Map', CtextureMap())
self.texture_data_to_mxparams('Turn Map', lens_extra[6], p, )
p.createByte('Dome Tilt Compensation', lens_extra[7], 0, 1)
p.createFloat('Dome Tilt', lens_extra[8], 0.0, 90.0)
p.createTextureMap('Tilt Map', CtextureMap())
self.texture_data_to_mxparams('Tilt Map', lens_extra[9], p, )
c.applyCameraLensExtension(p)
if(response is not None):
c.setCameraResponsePreset(response)
if(custom_bokeh is not None):
c.setCustomBokeh(*custom_bokeh)
if(cut_planes is not None):
c.setCutPlanes(*cut_planes)
if(shift_lens is not None):
c.setShiftLens(*shift_lens)
if(region is not None):
if(region[2] == props[3]):
region[2] -= 1
if(region[3] == props[4]):
region[3] -= 1
c.setScreenRegion(*region)
if(active):
c.setActive()
return c
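    # Hedged example (all values are illustrative assumptions, ordered as in the docstring):
    # a thin-lens camera with a single step.
    #
    #   props = ('Camera', 1, 1 / 250, 0.036, 0.024, 100.0, 'CIRCULAR', 0.0, 6,
    #            25.0, 1920, 1080, 1.0, TYPE_THIN_LENS, 0, )
    #   steps = [(0, (0.0, 0.0, 5.0), (0.0, 0.0, 0.0), (0.0, 1.0, 0.0), 0.035, 5.6, False), ]
    #   writer.camera(props, steps, active=True, )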
def empty(self, name, matrix, motion, object_props=None, ):
"""Create empty object.
name string
matrix (((3 float), (3 float), (3 float), (3 float)), ((3 float), (3 float), (3 float), (3 float)), (3 float), (3 float), (3 float)) - base, pivot, location, rotation, scale
        motion          [(float time, ..., base, pivot), ..., ] or None
        object_props    (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
"""
s = self.mxs
o = s.createMesh(name, 0, 0, 0, 0, )
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
return o
def mesh(self, name, matrix, motion, num_positions, vertices, normals, triangles, triangle_normals, uv_channels, object_props=None, num_materials=0, materials=[], triangle_materials=None, backface_material=None, ):
"""Create mesh object.
name string
        matrix              (base, pivot, location, rotation, scale) - see set_base_and_pivot
        motion              [(float time, ..., base, pivot), ..., ] or None
num_positions int
vertices [[(float x, float y, float z), ..., ], [...], ]
normals [[(float x, float y, float z), ..., ], [...], ]
triangles [(int iv0, int iv1, int iv2, int in0, int in1, int in2, ), ..., ], ] # (3x vertex index, 3x normal index)
triangle_normals [[(float x, float y, float z), ..., ], [...], ]
uv_channels [[(float u1, float v1, float w1, float u2, float v2, float w2, float u3, float v3, float w3, ), ..., ], ..., ] or None # ordered by uv index and ordered by triangle index
num_materials int
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
        materials           [string material_name, ..., ] or None
        triangle_materials  [(int tri_id, int mat_id), ..., ] or None
        backface_material   string material_name or None
"""
s = self.mxs
o = s.createMesh(name, len(vertices[0]), len(normals[0]) + len(triangle_normals[0]), len(triangles), num_positions)
if(uv_channels is not None):
for i in range(len(uv_channels)):
o.addChannelUVW(i)
# an = 0
for ip in range(num_positions):
an = 0
verts = vertices[ip]
norms = normals[ip]
for i, loc in enumerate(verts):
o.setVertex(i, ip, Cvector(*loc), )
o.setNormal(i, ip, Cvector(*norms[i]), )
an += 1
for ip in range(num_positions):
trinorms = triangle_normals[ip]
for i, nor in enumerate(trinorms):
o.setNormal(an + i, ip, Cvector(*nor), )
if(type(triangles) is not list):
            # pymaxwell does not accept numpy arrays here; Cvector handles them, but setTriangle does not..
triangles = triangles.tolist()
for i, tri in enumerate(triangles):
o.setTriangle(i, *tri)
if(uv_channels is not None):
for iuv, uv in enumerate(uv_channels):
for it, t in enumerate(uv):
o.setTriangleUVW(it, iuv, *t)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(materials is not None):
if(num_materials > 1):
# multi material
mats = []
for i in range(num_materials):
try:
n = materials[i]
mat = self.get_material(n)
except:
mat = self.material_placeholder()
mats.append(mat)
# pymaxwell does not like numpy arrays..
if(type(triangle_materials) is not list):
triangle_materials = triangle_materials.tolist()
for tid, mid in triangle_materials:
o.setTriangleMaterial(tid, mats[mid])
else:
# single material
if(len(materials) == 1):
if(materials[0] != ''):
mat = self.get_material(materials[0])
o.setMaterial(mat)
else:
# no material
pass
if(backface_material is not None):
if(backface_material != ''):
# only single backface material
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
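    # Hedged sketch (illustrative data): a single triangle with one motion step, no UVs
    # and the placeholder material. Passing matrix=None falls back to the identity-like
    # default in set_base_and_pivot().
    #
    #   verts = [[(0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0)], ]
    #   norms = [[(0.0, 0.0, 1.0), (0.0, 0.0, 1.0), (0.0, 0.0, 1.0)], ]
    #   tris = [(0, 1, 2, 0, 1, 2), ]
    #   tri_norms = [[(0.0, 0.0, 1.0)], ]
    #   writer.mesh('Tri', None, None, 1, verts, norms, tris, tri_norms, None,
    #               num_materials=1, materials=['MATERIAL_PLACEHOLDER'], )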
def instance(self, name, instanced_name, matrix, motion=None, object_props=None, materials=None, backface_material=None, ):
"""Create instance of mesh object. Instanced object must exist in scene.
name string
instanced_name string
        matrix              (base, pivot, location, rotation, scale) - see set_base_and_pivot
        motion              [(float time, ..., base, pivot), ..., ] or None
        object_props        (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
        materials           [string material_name, ..., ] or None
        backface_material   string material_name or None
"""
s = self.mxs
bo = s.getObject(instanced_name)
o = s.createInstancement(name, bo)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(materials is not None):
if(len(materials) > 1):
# multi material instances inherits material from base object
pass
else:
                # single material; an instance probably cannot have a different material than its base object in Blender
if(len(materials) == 1):
if(materials[0] != ''):
mat = self.get_material(materials[0])
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
def reference(self, name, path, flags, matrix, motion=None, object_props=None, material=None, backface_material=None, ):
"""Create MXS reference object.
name string
path string (path)
flags [bool, bool, bool, bool]
        matrix              (base, pivot, location, rotation, scale) - see set_base_and_pivot
        motion              [(float time, ..., base, pivot), ..., ] or None
        object_props        (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
        material            string material_name or None
        backface_material   string material_name or None
"""
s = self.mxs
o = s.createMesh(name, 0, 0, 0, 0, )
o.setReferencedScenePath(path)
if(flags[0]):
o.setReferencedOverrideFlags(FLAG_OVERRIDE_HIDE)
if(flags[1]):
o.setReferencedOverrideFlags(FLAG_OVERRIDE_HIDE_TO_CAMERA)
if(flags[2]):
o.setReferencedOverrideFlags(FLAG_OVERRIDE_HIDE_TO_REFL_REFR)
if(flags[3]):
o.setReferencedOverrideFlags(FLAG_OVERRIDE_HIDE_TO_GI)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
def hierarchy(self, tree, ):
"""Set hierarchy of all objects at once.
        tree    [(obj_name, parent_name or None, ...), ..., ]   # only the first two items are used here
"""
s = self.mxs
for on, pn, _ in tree:
if(pn is not None):
o = s.getObject(on)
p = s.getObject(pn)
o.setParent(p)
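    # Hedged example of the expected tree structure (object names are illustrative; the
    # third tuple element is carried along but ignored by this method):
    #
    #   writer.hierarchy([('Child', 'Parent', None), ('Parent', None, None), ])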
def environment(self, env_type=None, sky_type=None, sky=None, dome=None, sun_type=None, sun=None, ibl=None, ):
"""Set Environment properties.
env_type string or None PHYSICAL_SKY, IMAGE_BASED, NONE
sky_type string or None PHYSICAL, CONSTANT
sky dict or None {sky_use_preset bool
sky_preset string (path)
sky_intensity float
sky_planet_refl float
sky_ozone float
sky_water float
sky_turbidity_coeff float
sky_wavelength_exp float
sky_reflectance float
sky_asymmetry float}
dome dict or None {dome_intensity float
dome_zenith [float, float, float]
dome_horizon [float, float, float]
dome_mid_point float}
sun_type string or None DISABLED, PHYSICAL, CUSTOM
sun dict or None {sun_power float
sun_radius_factor float
sun_temp float
sun_color [float, float, float]
sun_location_type string LATLONG, ANGLES, DIRECTION
sun_latlong_lat float
sun_latlong_lon float
sun_date string
sun_time string
sun_latlong_gmt int
sun_latlong_gmt_auto bool
sun_latlong_ground_rotation float
sun_angles_zenith float
sun_angles_azimuth float
sun_dir_x float
sun_dir_y float
sun_dir_z float}
ibl dict or None {ibl_intensity float
ibl_interpolation bool
ibl_screen_mapping bool
ibl_bg_type string HDR_IMAGE, ACTIVE_SKY, DISABLED
ibl_bg_map string (path)
ibl_bg_intensity float
ibl_bg_scale_x float
ibl_bg_scale_y float
ibl_bg_offset_x float
ibl_bg_offset_y float
ibl_refl_type string HDR_IMAGE, ACTIVE_SKY, DISABLED
ibl_refl_map string (path)
ibl_refl_intensity float
ibl_refl_scale_x float
ibl_refl_scale_y float
ibl_refl_offset_x float
ibl_refl_offset_y float
ibl_refr_type string HDR_IMAGE, ACTIVE_SKY, DISABLED
ibl_refr_map string (path)
ibl_refr_intensity float
ibl_refr_scale_x float
ibl_refr_scale_y float
ibl_refr_offset_x float
ibl_refr_offset_y float
ibl_illum_type string HDR_IMAGE, ACTIVE_SKY, DISABLED
ibl_illum_map string (path)
ibl_illum_intensity float
ibl_illum_scale_x float
ibl_illum_scale_y float
ibl_illum_offset_x float
ibl_illum_offset_y float}
"""
s = self.mxs
env = s.getEnvironment()
if(env_type == 'PHYSICAL_SKY' or env_type == 'IMAGE_BASED'):
if(sky_type is not None):
env.setActiveSky(sky_type)
if(sky_type == 'PHYSICAL'):
if(not sky["sky_use_preset"]):
env.setPhysicalSkyAtmosphere(sky["sky_intensity"],
sky["sky_ozone"],
sky["sky_water"],
sky["sky_turbidity_coeff"],
sky["sky_wavelength_exp"],
sky["sky_reflectance"],
sky["sky_asymmetry"],
sky["sky_planet_refl"], )
else:
env.loadSkyFromPreset(sky["sky_preset"])
elif(sky_type == 'CONSTANT'):
hc = Crgb()
hc.assign(*dome['dome_horizon'])
zc = Crgb()
zc.assign(*dome['dome_zenith'])
env.setSkyConstant(dome["dome_intensity"], hc, zc, dome['dome_mid_point'])
sc = Crgb()
sc.assign(*sun['sun_color'])
if(sun_type == 'PHYSICAL'):
env.setSunProperties(SUN_PHYSICAL, sun["sun_temp"], sun["sun_power"], sun["sun_radius_factor"], sc)
elif(sun_type == 'CUSTOM'):
env.setSunProperties(SUN_CONSTANT, sun["sun_temp"], sun["sun_power"], sun["sun_radius_factor"], sc)
else:
# sun_type == 'DISABLED' or sun_type == None
env.setSunProperties(SUN_DISABLED, sun["sun_temp"], sun["sun_power"], sun["sun_radius_factor"], sc)
if(sun['sun_location_type'] == 'LATLONG'):
env.setSunPositionType(0)
l = sun["sun_date"].split(".")
date = datetime.date(int(l[2]), int(l[1]), int(l[0]))
day = int(date.timetuple().tm_yday)
l = sun["sun_time"].split(":")
hour = int(l[0])
minute = int(l[1])
time = hour + (minute / 60)
env.setSunLongitudeAndLatitude(sun["sun_latlong_lon"], sun["sun_latlong_lat"], sun["sun_latlong_gmt"], day, time)
env.setSunRotation(sun["sun_latlong_ground_rotation"])
elif(sun['sun_location_type'] == 'ANGLES'):
env.setSunPositionType(1)
env.setSunAngles(sun["sun_angles_zenith"], sun["sun_angles_azimuth"])
elif(sun['sun_location_type'] == 'DIRECTION'):
env.setSunPositionType(2)
env.setSunDirection(Cvector(sun["sun_dir_x"], sun["sun_dir_y"], sun["sun_dir_z"]))
if(env_type == 'IMAGE_BASED'):
env.enableEnvironment(True)
def state(s):
# channel state: 0 = Disabled; 1 = Enabled; 2 = Use active sky instead.
if(s == 'HDR_IMAGE'):
return 1
if(s == 'ACTIVE_SKY'):
return 2
if(s == 'SAME_AS_BG'):
# same as bg, set the same values as in bg layer
return 3
return 0
if(ibl is not None):
env.setEnvironmentWeight(ibl["ibl_intensity"])
s = state(ibl["ibl_bg_type"])
env.setEnvironmentLayer(IBL_LAYER_BACKGROUND, ibl["ibl_bg_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_bg_intensity"], ibl["ibl_bg_scale_x"], ibl["ibl_bg_scale_y"], ibl["ibl_bg_offset_x"], ibl["ibl_bg_offset_y"], )
s = state(ibl["ibl_refl_type"])
if(s == 3):
s = state(ibl["ibl_bg_type"])
env.setEnvironmentLayer(IBL_LAYER_REFLECTION, ibl["ibl_bg_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_bg_intensity"], ibl["ibl_bg_scale_x"], ibl["ibl_bg_scale_y"], ibl["ibl_bg_offset_x"], ibl["ibl_bg_offset_y"], )
else:
env.setEnvironmentLayer(IBL_LAYER_REFLECTION, ibl["ibl_refl_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_refl_intensity"], ibl["ibl_refl_scale_x"], ibl["ibl_refl_scale_y"], ibl["ibl_refl_offset_x"], ibl["ibl_refl_offset_y"], )
s = state(ibl["ibl_refr_type"])
if(s == 3):
s = state(ibl["ibl_bg_type"])
env.setEnvironmentLayer(IBL_LAYER_REFRACTION, ibl["ibl_bg_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_bg_intensity"], ibl["ibl_bg_scale_x"], ibl["ibl_bg_scale_y"], ibl["ibl_bg_offset_x"], ibl["ibl_bg_offset_y"], )
else:
env.setEnvironmentLayer(IBL_LAYER_REFRACTION, ibl["ibl_refr_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_refr_intensity"], ibl["ibl_refr_scale_x"], ibl["ibl_refr_scale_y"], ibl["ibl_refr_offset_x"], ibl["ibl_refr_offset_y"], )
s = state(ibl["ibl_illum_type"])
if(s == 3):
s = state(ibl["ibl_bg_type"])
env.setEnvironmentLayer(IBL_LAYER_ILLUMINATION, ibl["ibl_bg_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_bg_intensity"], ibl["ibl_bg_scale_x"], ibl["ibl_bg_scale_y"], ibl["ibl_bg_offset_x"], ibl["ibl_bg_offset_y"], )
else:
env.setEnvironmentLayer(IBL_LAYER_ILLUMINATION, ibl["ibl_illum_map"], s, not ibl["ibl_screen_mapping"], not ibl["ibl_interpolation"], ibl["ibl_illum_intensity"], ibl["ibl_illum_scale_x"], ibl["ibl_illum_scale_y"], ibl["ibl_illum_offset_x"], ibl["ibl_illum_offset_y"], )
else:
# env_type == 'NONE' or env_type == None
env.setActiveSky('')
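    # Hedged minimal call (all values are illustrative assumptions): a physical sky with
    # the sun disabled and no image-based lighting:
    #
    #   sun = {'sun_power': 1.0, 'sun_radius_factor': 1.0, 'sun_temp': 5776.0,
    #          'sun_color': (1.0, 1.0, 1.0), 'sun_location_type': 'ANGLES',
    #          'sun_angles_zenith': 45.0, 'sun_angles_azimuth': 45.0, }
    #   sky = {'sky_use_preset': False, 'sky_intensity': 1.0, 'sky_planet_refl': 25.0,
    #          'sky_ozone': 0.4, 'sky_water': 2.0, 'sky_turbidity_coeff': 0.04,
    #          'sky_wavelength_exp': 1.2, 'sky_reflectance': 80.0, 'sky_asymmetry': 0.7, }
    #   writer.environment('PHYSICAL_SKY', 'PHYSICAL', sky, None, 'DISABLED', sun, None, )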
def parameters(self, scene, materials=None, generals=None, tone=None, simulens=None, illum_caustics=None, other=None, text_overlay=None, ):
"""Set scene render parameters.
scene dict {cpu_threads int,
multilight int,
multilight_type int,
quality string RS1, RS0
sampling_level float,
time int, },
materials dict {override bool,
override_path string (path),
search_path string (path), } or None
generals dict {diplacement bool,
dispersion bool,
motion_blur bool, } or None
tone dict {burn float,
color_space int,
gamma float,
sharpness bool,
sharpness_value float,
tint float,
whitepoint float, } or None
simulens dict {aperture_map string (path),
devignetting bool,
devignetting_value float,
diffraction bool,
diffraction_value float,
frequency float,
obstacle_map string (path),
scattering bool,
scattering_value float, } or None
illum_caustics dict {illumination int,
refl_caustics int,
refr_caustics int, } or None
        other           dict {protect bool,
                              extra_sampling_enabled bool,
                              extra_sampling_sl float,
                              extra_sampling_mask int,
                              extra_sampling_custom_alpha string,
                              extra_sampling_user_bitmap string (path),
                              extra_sampling_invert bool, } or None
        text_overlay    dict {enabled bool,
                              text string,
                              position int,
                              color (float, float, float),
                              background bool,
                              background_color (float, float, float), } or None
        """
s = self.mxs
# s.setRenderParameter('ENGINE', scene["quality"])
s.setRenderParameter('ENGINE', bytes(scene["quality"], encoding='UTF-8'))
s.setRenderParameter('NUM THREADS', scene["cpu_threads"])
s.setRenderParameter('STOP TIME', scene["time"] * 60)
s.setRenderParameter('SAMPLING LEVEL', scene["sampling_level"])
s.setRenderParameter('USE MULTILIGHT', scene["multilight"])
s.setRenderParameter('SAVE LIGHTS IN SEPARATE FILES', scene["multilight_type"])
if(generals is not None):
s.setRenderParameter('DO MOTION BLUR', generals["motion_blur"])
s.setRenderParameter('DO DISPLACEMENT', generals["diplacement"])
s.setRenderParameter('DO DISPERSION', generals["dispersion"])
if(illum_caustics is not None):
v = illum_caustics['illumination']
if(v == 3):
s.setRenderParameter('DO DIRECT LAYER', 0)
s.setRenderParameter('DO INDIRECT LAYER', 0)
elif(v == 2):
s.setRenderParameter('DO DIRECT LAYER', 0)
s.setRenderParameter('DO INDIRECT LAYER', 1)
elif(v == 1):
s.setRenderParameter('DO DIRECT LAYER', 1)
s.setRenderParameter('DO INDIRECT LAYER', 0)
else:
s.setRenderParameter('DO DIRECT LAYER', 1)
s.setRenderParameter('DO INDIRECT LAYER', 1)
v = illum_caustics['refl_caustics']
if(v == 3):
s.setRenderParameter('DO DIRECT REFLECTION CAUSTIC LAYER', 0)
s.setRenderParameter('DO INDIRECT REFLECTION CAUSTIC LAYER', 0)
elif(v == 2):
s.setRenderParameter('DO DIRECT REFLECTION CAUSTIC LAYER', 0)
s.setRenderParameter('DO INDIRECT REFLECTION CAUSTIC LAYER', 1)
elif(v == 1):
s.setRenderParameter('DO DIRECT REFLECTION CAUSTIC LAYER', 1)
s.setRenderParameter('DO INDIRECT REFLECTION CAUSTIC LAYER', 0)
else:
s.setRenderParameter('DO DIRECT REFLECTION CAUSTIC LAYER', 1)
s.setRenderParameter('DO INDIRECT REFLECTION CAUSTIC LAYER', 1)
v = illum_caustics['refr_caustics']
if(v == 3):
s.setRenderParameter('DO DIRECT REFRACTION CAUSTIC LAYER', 0)
s.setRenderParameter('DO INDIRECT REFRACTION CAUSTIC LAYER', 0)
elif(v == 2):
s.setRenderParameter('DO DIRECT REFRACTION CAUSTIC LAYER', 0)
s.setRenderParameter('DO INDIRECT REFRACTION CAUSTIC LAYER', 1)
elif(v == 1):
s.setRenderParameter('DO DIRECT REFRACTION CAUSTIC LAYER', 1)
s.setRenderParameter('DO INDIRECT REFRACTION CAUSTIC LAYER', 0)
else:
s.setRenderParameter('DO DIRECT REFRACTION CAUSTIC LAYER', 1)
s.setRenderParameter('DO INDIRECT REFRACTION CAUSTIC LAYER', 1)
if(simulens is not None):
s.setRenderParameter('DO DEVIGNETTING', simulens["devignetting"])
s.setRenderParameter('DEVIGNETTING', simulens["devignetting_value"])
s.setRenderParameter('DO SCATTERING_LENS', simulens["scattering"])
s.setRenderParameter('SCATTERING_LENS', simulens["scattering_value"])
if(simulens["diffraction"]):
s.enableDiffraction()
s.setDiffraction(simulens["diffraction_value"], simulens["frequency"], simulens["aperture_map"], simulens["obstacle_map"])
if(tone is not None):
s.setRenderParameter('DO SHARPNESS', tone["sharpness"])
s.setRenderParameter('SHARPNESS', tone["sharpness_value"])
s.setToneMapping(tone["gamma"], tone["burn"])
s.setColorSpace(tone["color_space"])
s.setWhitePoint(tone["whitepoint"], tone["tint"])
if(materials is not None):
if(materials["override"]):
s.setOverrideMaterial(True)
if(materials["override_path"] != ""):
s.setOverrideMaterial(materials["override_path"])
if(materials["search_path"] != ""):
s.addSearchingPath(materials["search_path"])
if(materials["default_material"] != ""):
s.setDefaultMaterial(True)
s.setDefaultMaterial(materials["default_material"])
else:
s.setDefaultMaterial(False)
if(other is not None):
if(other['protect']):
s.enableProtection(True)
else:
s.enableProtection(False)
if(other['extra_sampling_enabled']):
s.setRenderParameter('DO EXTRA SAMPLING', 1)
s.setRenderParameter('EXTRA SAMPLING SL', other['extra_sampling_sl'])
s.setRenderParameter('EXTRA SAMPLING MASK', other['extra_sampling_mask'])
if(platform.system() == 'Linux'):
                    # on Linux these string parameters are passed as UTF-8 encoded bytes
s.setRenderParameter('EXTRA SAMPLING CUSTOM ALPHA', bytes(other['extra_sampling_custom_alpha'], encoding='UTF-8'))
s.setRenderParameter('EXTRA SAMPLING USER BITMAP', bytes(other['extra_sampling_user_bitmap'], encoding='UTF-8'))
else:
s.setRenderParameter('EXTRA SAMPLING CUSTOM ALPHA', other['extra_sampling_custom_alpha'])
s.setRenderParameter('EXTRA SAMPLING USER BITMAP', other['extra_sampling_user_bitmap'])
if(other['extra_sampling_invert']):
s.setRenderParameter('EXTRA SAMPLING INVERT', 1)
if(text_overlay is not None):
if(text_overlay['enabled']):
o = CoverlayTextOptions()
o.enabled_ = 1
o.text_ = Cstring(text_overlay['text'])
o.position_ = text_overlay['position']
c = Crgb()
c.assign(*text_overlay['color'])
o.color_ = c.toRGB8()
o.backgroundEnabled_ = text_overlay['background']
c = Crgb()
c.assign(*text_overlay['background_color'])
o.backgroundColor_ = c.toRGB8()
s.setOverlayTextOptions(o)
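    # Hedged minimal call (illustrative values): set only the required scene block and
    # leave every optional group at its None default:
    #
    #   writer.parameters({'cpu_threads': 0, 'multilight': 0, 'multilight_type': 0,
    #                      'quality': 'RS1', 'sampling_level': 14.0, 'time': 60, })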
def channels(self, base_path, mxi, image, image_depth='RGB8', channels_output_mode=0, channels_render=True, channels_render_type=0, channels=None, ):
"""Set scene render channels.
base_path string (path)
mxi string (path) or None
image string (path) or None
image_depth string RGB8, RGB16, RGB32
channels_output_mode int
channels_render bool
channels_render_type int
channels dict {channels_alpha bool
channels_alpha_file string
channels_alpha_opaque bool
channels_custom_alpha bool
channels_custom_alpha_file string
channels_deep bool
channels_deep_file string
channels_deep_max_samples int
channels_deep_min_dist float
channels_deep_type int
channels_fresnel bool
channels_fresnel_file string
channels_material_id bool
channels_material_id_file string
channels_motion_vector bool
channels_motion_vector_file string
channels_normals bool
channels_normals_file string
channels_normals_space int
channels_object_id bool
channels_object_id_file string
channels_position bool
channels_position_file string
channels_position_space int
channels_reflectance bool
channels_reflectance_file string
channels_roughness bool
channels_roughness_file string
channels_shadow bool
channels_shadow_file string
channels_uv bool
channels_uv_file string
channels_z_buffer bool
channels_z_buffer_far float
channels_z_buffer_file string
channels_z_buffer_near float} or None
"""
# helper: map an image type (and optionally an explicit file extension) to (extension, bit depth)
def get_ext_depth(t, e=None):
if(e is not None):
t = "{}{}".format(e[1:].upper(), int(t[3:]))
if(t == 'RGB8'):
return ('.tif', 8)
elif(t == 'RGB16'):
return ('.tif', 16)
elif(t == 'RGB32'):
return ('.tif', 32)
elif(t == 'PNG8'):
return ('.png', 8)
elif(t == 'PNG16'):
return ('.png', 16)
elif(t == 'TGA'):
return ('.tga', 8)
elif(t == 'TIF8'):
return ('.tif', 8)
elif(t == 'TIF16'):
return ('.tif', 16)
elif(t == 'TIF32'):
return ('.tif', 32)
elif(t == 'EXR16'):
return ('.exr', 16)
elif(t == 'EXR32'):
return ('.exr', 32)
elif(t == 'EXR_DEEP'):
return ('.exr', 32)
elif(t == 'JPG'):
return ('.jpg', 8)
elif(t == 'JP2'):
return ('.jp2', 8)
elif(t == 'HDR'):
return ('.hdr', 32)
elif(t == 'DTEX'):
return ('.dtex', 32)
elif(t == 'PSD8'):
return ('.psd', 8)
elif(t == 'PSD16'):
return ('.psd', 16)
elif(t == 'PSD32'):
return ('.psd', 32)
else:
return ('.tif', 8)
s = self.mxs
s.setRenderParameter('DO NOT SAVE MXI FILE', (mxi is None))
s.setRenderParameter('DO NOT SAVE IMAGE FILE', (image is None))
if(mxi is not None):
# s.setRenderParameter('MXI FULLNAME', mxi)
# s.setRenderParameter('MXI FULLNAME', bytes(mxi, encoding='UTF-8'))
if(platform.system() == 'Linux'):
# the Linux build of pymaxwell expects a byte string here
s.setRenderParameter('MXI FULLNAME', bytes(mxi, encoding='UTF-8'))
else:
# s.setRenderParameter('MXI FULLNAME', mxi)
s.setRenderParameter('MXI FULLNAME', bytes(mxi, encoding='UTF-8'))
if(image is not None):
if(image_depth is None):
image_depth = 'RGB8'
_, depth = get_ext_depth(image_depth, os.path.splitext(os.path.split(image)[1])[1])
s.setPath('RENDER', image, depth)
s.setRenderParameter('DO RENDER CHANNEL', int(channels_render))
s.setRenderParameter('EMBED CHANNELS', channels_output_mode)
s.setRenderParameter('RENDER LAYERS', channels_render_type)
if(channels is not None):
e, depth = get_ext_depth(channels["channels_alpha_file"])
s.setPath('ALPHA', "{}_alpha{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_shadow_file"])
s.setPath('SHADOW', "{}_shadow{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_object_id_file"])
s.setPath('OBJECT', "{}_object_id{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_material_id_file"])
s.setPath('MATERIAL', "{}_material_id{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_motion_vector_file"])
s.setPath('MOTION', "{}_motion_vector{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_z_buffer_file"])
s.setPath('Z', "{}_z_buffer{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_roughness_file"])
s.setPath('ROUGHNESS', "{}_roughness{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_fresnel_file"])
s.setPath('FRESNEL', "{}_fresnel{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_normals_file"])
s.setPath('NORMALS', "{}_normals{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_position_file"])
s.setPath('POSITION', "{}_position{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_deep_file"])
s.setPath('DEEP', "{}_deep{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_uv_file"])
s.setPath('UV', "{}_uv{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_custom_alpha_file"])
s.setPath('ALPHA_CUSTOM', "{}_custom_alpha{}".format(base_path, e), depth)
e, depth = get_ext_depth(channels["channels_reflectance_file"])
s.setPath('REFLECTANCE', "{}_reflectance{}".format(base_path, e), depth)
s.setRenderParameter('DO ALPHA CHANNEL', int(channels["channels_alpha"]))
s.setRenderParameter('OPAQUE ALPHA', int(channels["channels_alpha_opaque"]))
s.setRenderParameter('DO IDOBJECT CHANNEL', int(channels["channels_object_id"]))
s.setRenderParameter('DO IDMATERIAL CHANNEL', int(channels["channels_material_id"]))
s.setRenderParameter('DO SHADOW PASS CHANNEL', int(channels["channels_shadow"]))
s.setRenderParameter('DO MOTION CHANNEL', int(channels["channels_motion_vector"]))
s.setRenderParameter('DO ROUGHNESS CHANNEL', int(channels["channels_roughness"]))
s.setRenderParameter('DO FRESNEL CHANNEL', int(channels["channels_fresnel"]))
s.setRenderParameter('DO NORMALS CHANNEL', int(channels["channels_normals"]))
s.setRenderParameter('NORMALS CHANNEL SPACE', channels["channels_normals_space"])
s.setRenderParameter('POSITION CHANNEL SPACE', channels["channels_position_space"])
s.setRenderParameter('DO POSITION CHANNEL', int(channels["channels_position"]))
s.setRenderParameter('DO ZBUFFER CHANNEL', int(channels["channels_z_buffer"]))
s.setRenderParameter('ZBUFFER RANGE', (channels["channels_z_buffer_near"], channels["channels_z_buffer_far"]))
s.setRenderParameter('DO DEEP CHANNEL', int(channels["channels_deep"]))
s.setRenderParameter('DEEP CHANNEL TYPE', channels["channels_deep_type"])
s.setRenderParameter('DEEP MIN DISTANCE', channels["channels_deep_min_dist"])
s.setRenderParameter('DEEP MAX SAMPLES', channels["channels_deep_max_samples"])
s.setRenderParameter('DO UV CHANNEL', int(channels["channels_uv"]))
# s.setRenderParameter('MOTION CHANNEL TYPE', ?)
s.setRenderParameter('DO ALPHA CUSTOM CHANNEL', int(channels["channels_custom_alpha"]))
s.setRenderParameter('DO REFLECTANCE CHANNEL', int(channels["channels_reflectance"]))
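# Usage sketch, assuming 'w' is an instance of this writer class and the paths are
# hypothetical; note that the 'channels' dict, when given, must provide every key
# listed in the docstring above, since all of them are read unconditionally.
#
# w.channels(base_path='/tmp/render/scene',
#            mxi='/tmp/render/scene.mxi',
#            image='/tmp/render/scene.png',
#            image_depth='RGB16',
#            channels_output_mode=0,
#            channels_render=True,
#            channels=None, )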
def custom_alphas(self, groups, ):
"""Set custom alphas.
groups list of dicts: {'name': string, 'objects': list of strings, 'materials': list of strings, 'opaque': bool, }
"""
s = self.mxs
def get_material_names(s):
it = CmaxwellMaterialIterator()
o = it.first(s)
l = []
while not o.isNull():
name = o.getName()
l.append(name)
o = it.next()
return l
def get_object_names(s):
it = CmaxwellObjectIterator()
o = it.first(s)
l = []
while not o.isNull():
name, _ = o.getName()
l.append(name)
o = it.next()
return l
sobs = get_object_names(s)
smats = get_material_names(s)
for a in groups:
s.createCustomAlphaChannel(a['name'], a['opaque'])
for n in a['objects']:
if(n in sobs):
o = s.getObject(n)
o.addToCustomAlpha(a['name'])
for n in a['materials']:
if(n in smats):
m = s.getMaterial(n)
m.addToCustomAlpha(a['name'])
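# Usage sketch (hypothetical object and material names); each group becomes one
# custom alpha channel containing the listed objects and materials.
#
# w.custom_alphas([
#     {'name': 'glass', 'objects': ['Window', 'Bottle'], 'materials': ['Glass.001'], 'opaque': False},
# ])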
def ext_particles(self, name, properties, matrix, motion=None, object_props=None, material=None, backface_material=None, ):
"""Create particles object.
name string
properties dict
base ((3 float), (3 float), (3 float), (3 float))
pivot ((3 float), (3 float), (3 float), (3 float))
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryProceduralExtension('MaxwellParticles')
p = e.getExtensionData()
d = properties
if(d['embed'] is True):
c = Cbase()
c.origin = Cvector(0.0, 0.0, 0.0)
c.xAxis = Cvector(1.0, 0.0, 0.0)
c.yAxis = Cvector(0.0, 1.0, 0.0)
c.zAxis = Cvector(0.0, 0.0, 1.0)
p.setFloatArray('PARTICLE_POSITIONS', list(d['pdata']['PARTICLE_POSITIONS']), c)
p.setFloatArray('PARTICLE_SPEEDS', list(d['pdata']['PARTICLE_SPEEDS']), c)
p.setFloatArray('PARTICLE_RADII', list(d['pdata']['PARTICLE_RADII']), c)
p.setIntArray('PARTICLE_IDS', list(d['pdata']['PARTICLE_IDS']))
p.setFloatArray('PARTICLE_NORMALS', list(d['pdata']['PARTICLE_NORMALS']), c)
p.setFloatArray('PARTICLE_UVW', list(d['pdata']['PARTICLE_UVW']), c)
else:
p.setString('FileName', d['filename'])
p.setFloat('Radius Factor', d['radius_multiplier'])
p.setFloat('MB Factor', d['motion_blur_multiplier'])
p.setFloat('Shutter 1/', d['shutter_speed'])
p.setFloat('Load particles %', d['load_particles'])
p.setUInt('Axis', d['axis_system'])
p.setInt('Frame#', d['frame_number'])
p.setFloat('fps', d['fps'])
p.setInt('Create N particles per particle', d['extra_create_np_pp'])
p.setFloat('Extra particles dispersion', d['extra_dispersion'])
p.setFloat('Extra particles deformation', d['extra_deformation'])
p.setByte('Load particle Force', d['load_force'])
p.setByte('Load particle Vorticity', d['load_vorticity'])
p.setByte('Load particle Normal', d['load_normal'])
p.setByte('Load particle neighbors no.', d['load_neighbors_num'])
p.setByte('Load particle UV', d['load_uv'])
p.setByte('Load particle Age', d['load_age'])
p.setByte('Load particle Isolation Time', d['load_isolation_time'])
p.setByte('Load particle Viscosity', d['load_viscosity'])
p.setByte('Load particle Density', d['load_density'])
p.setByte('Load particle Pressure', d['load_pressure'])
p.setByte('Load particle Mass', d['load_mass'])
p.setByte('Load particle Temperature', d['load_temperature'])
p.setByte('Load particle ID', d['load_id'])
p.setFloat('Min Force', d['min_force'])
p.setFloat('Max Force', d['max_force'])
p.setFloat('Min Vorticity', d['min_vorticity'])
p.setFloat('Max Vorticity', d['max_vorticity'])
p.setInt('Min Nneighbors', d['min_nneighbors'])
p.setInt('Max Nneighbors', d['max_nneighbors'])
p.setFloat('Min Age', d['min_age'])
p.setFloat('Max Age', d['max_age'])
p.setFloat('Min Isolation Time', d['min_isolation_time'])
p.setFloat('Max Isolation Time', d['max_isolation_time'])
p.setFloat('Min Viscosity', d['min_viscosity'])
p.setFloat('Max Viscosity', d['max_viscosity'])
p.setFloat('Min Density', d['min_density'])
p.setFloat('Max Density', d['max_density'])
p.setFloat('Min Pressure', d['min_pressure'])
p.setFloat('Max Pressure', d['max_pressure'])
p.setFloat('Min Mass', d['min_mass'])
p.setFloat('Max Mass', d['max_mass'])
p.setFloat('Min Temperature', d['min_temperature'])
p.setFloat('Max Temperature', d['max_temperature'])
p.setFloat('Min Velocity', d['min_velocity'])
p.setFloat('Max Velocity', d['max_velocity'])
o = s.createGeometryProceduralObject(name, p)
a, _ = o.addChannelUVW()
o.generateCustomUVW(0, a)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
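# Usage sketch: the 'properties' dict drives either embedded particle arrays
# ('embed': True with a 'pdata' dict of PARTICLE_* arrays) or an external particle file
# ('embed': False with 'filename'); every other key read in the code above
# (radius_multiplier, shutter_speed, the min/max ranges, ...) must be present as well.
# Names and values below are assumptions, not part of the original module.
#
# props = {'embed': False, 'filename': '/tmp/particles.bin', 'radius_multiplier': 1.0, ...}
# w.ext_particles('Particles', props, matrix, motion=None)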
def ext_hair(self, name, extension, matrix, motion, root_radius, tip_radius, data, object_props=None, display_percent=10, display_max=1000, material=None, backface_material=None, ):
"""Create hair/grass object.
name string
extension string ('MaxwellHair', 'MGrassP')
base ((3 float), (3 float), (3 float), (3 float))
pivot ((3 float), (3 float), (3 float), (3 float))
root_radius float
tip_radius float
data dict of extension data
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
display_percent int
display_max int
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryProceduralExtension(extension)
p = e.getExtensionData()
p.setByteArray('HAIR_MAJOR_VER', data['HAIR_MAJOR_VER'])
p.setByteArray('HAIR_MINOR_VER', data['HAIR_MINOR_VER'])
p.setByteArray('HAIR_FLAG_ROOT_UVS', data['HAIR_FLAG_ROOT_UVS'])
m = memoryview(struct.pack("I", data['HAIR_GUIDES_COUNT'][0])).tolist()
p.setByteArray('HAIR_GUIDES_COUNT', m)
m = memoryview(struct.pack("I", data['HAIR_GUIDES_POINT_COUNT'][0])).tolist()
p.setByteArray('HAIR_GUIDES_POINT_COUNT', m)
c = Cbase()
c.origin = Cvector(0.0, 0.0, 0.0)
c.xAxis = Cvector(1.0, 0.0, 0.0)
c.yAxis = Cvector(0.0, 1.0, 0.0)
c.zAxis = Cvector(0.0, 0.0, 1.0)
p.setFloatArray('HAIR_POINTS', list(data['HAIR_POINTS']), c)
p.setFloatArray('HAIR_NORMALS', list(data['HAIR_NORMALS']), c)
if(data['HAIR_FLAG_ROOT_UVS'][0] == 1):
p.setFloatArray('HAIR_ROOT_UVS', list(data['HAIR_ROOT_UVS']), c)
p.setUInt('Display Percent', display_percent)
if(extension == 'MaxwellHair'):
p.setUInt('Display Max. Hairs', display_max)
p.setDouble('Root Radius', root_radius)
p.setDouble('Tip Radius', tip_radius)
if(extension == 'MGrassP'):
p.setUInt('Display Max. Hairs', display_max)
p.setDouble('Root Radius', root_radius)
p.setDouble('Tip Radius', tip_radius)
o = s.createGeometryProceduralObject(name, p)
if(extension == 'MaxwellHair'):
a, _ = o.addChannelUVW()
o.generateCustomUVW(0, a)
b, _ = o.addChannelUVW()
o.generateCustomUVW(1, b)
c, _ = o.addChannelUVW()
o.generateCustomUVW(2, c)
if(extension == 'MGrassP'):
a, _ = o.addChannelUVW()
o.generateCustomUVW(0, a)
b, _ = o.addChannelUVW()
o.generateCustomUVW(1, b)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
def ext_sea(self, name, matrix, motion=None, object_props=None, geometry=None, wind=None, material=None, backface_material=None, ):
"""Create sea extension object.
name string
base ((3 float), (3 float), (3 float), (3 float))
pivot ((3 float), (3 float), (3 float), (3 float))
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
geometry (float reference_time,
int resolution,
float ocean_depth,
float vertical_scale,
float ocean_dim,
int ocean_seed,
bool enable_choppyness,
float choppy_factor,
bool enable_white_caps, )
wind (float ocean_wind_mod,
float ocean_wind_dir,
float ocean_wind_alignment,
float ocean_min_wave_length,
float damp_factor_against_wind, )
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryLoaderExtension('MaxwellSea')
p = e.getExtensionData()
p.setFloat('Reference Time', geometry[0])
p.setUInt('Resolution', geometry[1])
p.setFloat('Ocean Depth', geometry[2])
p.setFloat('Vertical Scale', geometry[3])
p.setFloat('Ocean Dim', geometry[4])
p.setUInt('Ocean Seed', geometry[5])
p.setByte('Enable Choppyness', geometry[6])
p.setFloat('Choppy factor', geometry[7])
p.setByte('Enable White Caps', geometry[8])
p.setFloat('Ocean Wind Mod.', wind[0])
p.setFloat('Ocean Wind Dir.', wind[1])
p.setFloat('Ocean Wind Alignment', wind[2])
p.setFloat('Ocean Min. Wave Length', wind[3])
p.setFloat('Damp Factor Against Wind', wind[4])
o = s.createGeometryLoaderObject(name, p)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
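# Usage sketch (all values are illustrative assumptions); the tuple order matches the
# parameter reads above: geometry = (reference_time, resolution, ocean_depth,
# vertical_scale, ocean_dim, ocean_seed, enable_choppyness, choppy_factor,
# enable_white_caps), wind = (wind_mod, wind_dir, wind_alignment, min_wave_length,
# damp_factor_against_wind).
#
# w.ext_sea('Sea', matrix,
#           geometry=(0.0, 128, 200.0, 0.1, 250.0, 4217, 1, 1.0, 1),
#           wind=(30.0, 45.0, 2.0, 0.1, 0.5))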
def ext_volumetrics(self, name, properties, matrix, motion=None, object_props=None, material=None, backface_material=None, ):
"""Create Volumetrics Extension Object.
name string
properties (int type 1, float density) or (int type 2, float density, int seed, float low, float high, float detail, int octaves, float perssistence)
base ((3 float), (3 float), (3 float), (3 float))
pivot ((3 float), (3 float), (3 float), (3 float))
object_props (bool hide, float opacity, tuple cid=(int, int, int), bool hcam, bool hcamsc, bool hgi, bool hrr, bool hzcp, ) or None
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryProceduralExtension('MaxwellVolumetric')
p = e.getExtensionData()
d = properties
p.setByte('Create Constant Density', d[0])
p.setFloat('ConstantDensity', d[1])
if(d[0] == 2):
p.setUInt('Seed', d[2])
p.setFloat('Low value', d[3])
p.setFloat('High value', d[4])
p.setFloat('Detail', d[5])
p.setInt('Octaves', d[6])
p.setFloat('Persistance', d[7])
o = s.createGeometryProceduralObject(name, p)
self.set_base_and_pivot(o, matrix, motion, )
if(object_props is not None):
self.set_object_props(o, *object_props)
if(material is not None):
if(material != ''):
mat = self.get_material(material)
o.setMaterial(mat)
if(backface_material is not None):
if(backface_material != ''):
mat = self.get_material(backface_material)
o.setBackfaceMaterial(mat)
return o
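# Usage sketch (values are assumptions): type 1 is a constant density field,
# type 2 adds noise parameters (seed, low, high, detail, octaves, persistence).
#
# w.ext_volumetrics('Fog', (1, 0.5), matrix)
# w.ext_volumetrics('Smoke', (2, 0.5, 4217, 0.0, 1.0, 2.0, 4, 0.5), matrix)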
def mod_grass(self, object_name, properties, material=None, backface_material=None, ):
"""Create grass object modifier extension.
object_name string
properties dict of many, many properties, see code..
material (string path, bool embed) or None
backface_material (string path, bool embed) or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryModifierExtension('MaxwellGrass')
p = e.getExtensionData()
if(material is not None):
mat = self.get_material(material)
if(mat is not None):
p.setString('Material', mat.getName())
if(backface_material is not None):
mat = self.get_material(backface_material)
if(mat is not None):
p.setString('Double Sided Material', mat.getName())
p.setUInt('Density', properties['density'])
self.texture_data_to_mxparams('Density Map', properties['density_map'], p, )
p.setFloat('Length', properties['length'])
self.texture_data_to_mxparams('Length Map', properties['length_map'], p, )
p.setFloat('Length Variation', properties['length_variation'])
p.setFloat('Root Width', properties['root_width'])
p.setFloat('Tip Width', properties['tip_width'])
p.setFloat('Direction Type', properties['direction_type'])
p.setFloat('Initial Angle', properties['initial_angle'])
p.setFloat('Initial Angle Variation', properties['initial_angle_variation'])
self.texture_data_to_mxparams('Initial Angle Map', properties['initial_angle_map'], p, )
p.setFloat('Start Bend', properties['start_bend'])
p.setFloat('Start Bend Variation', properties['start_bend_variation'])
self.texture_data_to_mxparams('Start Bend Map', properties['start_bend_map'], p, )
p.setFloat('Bend Radius', properties['bend_radius'])
p.setFloat('Bend Radius Variation', properties['bend_radius_variation'])
self.texture_data_to_mxparams('Bend Radius Map', properties['bend_radius_map'], p, )
p.setFloat('Bend Angle', properties['bend_angle'])
p.setFloat('Bend Angle Variation', properties['bend_angle_variation'])
self.texture_data_to_mxparams('Bend Angle Map', properties['bend_angle_map'], p, )
p.setFloat('Cut Off', properties['cut_off'])
p.setFloat('Cut Off Variation', properties['cut_off_variation'])
self.texture_data_to_mxparams('Cut Off Map', properties['cut_off_map'], p, )
p.setUInt('Points per Blade', properties['points_per_blade'])
p.setUInt('Primitive Type', properties['primitive_type'])
p.setUInt('Seed', properties['seed'])
p.setByte('Enable LOD', properties['lod'])
p.setFloat('LOD Min Distance', properties['lod_min_distance'])
p.setFloat('LOD Max Distance', properties['lod_max_distance'])
p.setFloat('LOD Max Distance Density', properties['lod_max_distance_density'])
p.setUInt('Display Percent', properties['display_percent'])
p.setUInt('Display Max. Blades', properties['display_max_blades'])
o = s.getObject(object_name)
o.applyGeometryModifierExtension(p)
return o
def mod_subdivision(self, object_name, level=2, scheme=0, interpolation=2, crease=0.0, smooth_angle=90.0, quads=None, ):
"""Create subdivision object modifier extension.
object_name string
level int
scheme int (0, "Catmull-Clark"), (1, "Loop")
interpolation int (0, "None"), (1, "Edges"), (2, "Edges And Corners"), (3, "Sharp")
crease float
smooth_angle float
quads [[int, int], ...] or None
"""
s = self.mxs
e = self.mgr.createDefaultGeometryModifierExtension('SubdivisionModifier')
p = e.getExtensionData()
p.setUInt('Subdivision Level', level)
p.setUInt('Subdivision Scheme', scheme)
p.setUInt('Interpolation', interpolation)
p.setFloat('Crease', crease)
p.setFloat('Smooth Angle', smooth_angle)
o = s.getObject(object_name)
if(scheme == 0 and quads is not None):
for t, q in quads:
o.setTriangleQuadBuddy(t, q)
o.applyGeometryModifierExtension(p)
return o
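# Usage sketch (hypothetical object name); 'quads' pairs triangle indices into quad
# buddies and is only applied with the Catmull-Clark scheme (scheme == 0).
#
# w.mod_subdivision('Cube', level=2, scheme=0, interpolation=2,
#                   crease=0.0, smooth_angle=90.0, quads=[[0, 1], [2, 3]])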
def mod_scatter(self, object_name, scatter_object, inherit_objectid=False, remove_overlapped=False, density=None, seed=0, scale=None, rotation=None, lod=None, angle=None, display_percent=10, display_max=1000, ):
"""Create scatter object modifier extension.
object_name string
scatter_object string
inherit_objectid bool
density (float, density_map or None) or None
seed int
scale ((float, float, float), scale_map or None, scale_variation (float, float, float)) or None
rotation ((float, float, float), rotation_map or None, rotation_variation (float, float, float), rotation_direction int (0, "Polygon Normal"), (1, "World Z")) or None
lod (bool, lod_min_distance float, lod_max_distance float, lod_max_distance_density float) or None
display_percent int
display_max int
"""
s = self.mxs
e = self.mgr.createDefaultGeometryModifierExtension('MaxwellScatter')
p = e.getExtensionData()
p.setString('Object', scatter_object)
p.setByte('Inherit ObjectID', inherit_objectid)
if(density is not None):
p.setFloat('Density', density[0])
self.texture_data_to_mxparams('Density Map', density[1], p, )
p.setUInt('Seed', seed)
p.setByte('Remove Overlapped', remove_overlapped)
if(scale is not None):
p.setFloat('Scale X', scale[0])
p.setFloat('Scale Y', scale[1])
p.setFloat('Scale Z', scale[2])
self.texture_data_to_mxparams('Scale Map', scale[3], p, )
p.setFloat('Scale X Variation', scale[4])
p.setFloat('Scale Y Variation', scale[5])
p.setFloat('Scale Z Variation', scale[6])
p.setByte('Uniform Scale', scale[7])
if(rotation is not None):
p.setFloat('Rotation X', rotation[0])
p.setFloat('Rotation Y', rotation[1])
p.setFloat('Rotation Z', rotation[2])
self.texture_data_to_mxparams('Rotation Map', rotation[3], p, )
p.setFloat('Rotation X Variation', rotation[4])
p.setFloat('Rotation Y Variation', rotation[5])
p.setFloat('Rotation Z Variation', rotation[6])
p.setUInt('Direction Type', rotation[7])
if(lod is not None):
p.setByte('Enable LOD', lod[0])
p.setFloat('LOD Min Distance', lod[1])
p.setFloat('LOD Max Distance', lod[2])
p.setFloat('LOD Max Distance Density', lod[3])
if(angle is not None):
p.setFloat('Direction Type', angle[0])
p.setFloat('Initial Angle', angle[1])
p.setFloat('Initial Angle Variation', angle[2])
self.texture_data_to_mxparams('Initial Angle Map', angle[3], p, )
p.setUInt('Display Percent', display_percent)
p.setUInt('Display Max. Blades', display_max)
o = s.getObject(object_name)
o.applyGeometryModifierExtension(p)
return o
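# Usage sketch (hypothetical names and values); see the docstring above for the
# layout of the density/scale/rotation/lod/angle tuples.
#
# w.mod_scatter('Ground', 'Pebble', density=(5000.0, None), seed=1,
#               scale=(1.0, 1.0, 1.0, None, 0.2, 0.2, 0.2, 1),
#               rotation=(0.0, 0.0, 0.0, None, 0.0, 0.0, 180.0, 0))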
def mod_cloner(self, object_name, cloned_object, render_emitter, pdata, radius=1.0, mb_factor=1.0, load_percent=100.0, start_offset=0, ex_npp=0, ex_p_dispersion=0.0, ex_p_deformation=0.0, align_to_velocity=False, scale_with_radius=False, inherit_obj_id=False, frame=1, fps=24.0, display_percent=10, display_max=1000, ):
"""Create cloner object modifier extension.
object_name string
cloned_object string
render_emitter bool
pdata string or dict
radius float
mb_factor float
load_percent float
start_offset int
ex_npp int
ex_p_dispersion float
ex_p_deformation float
align_to_velocity bool
scale_with_radius bool
inherit_obj_id bool
frame int
fps float
display_percent int
display_max int
"""
s = self.mxs
e = self.mgr.createDefaultGeometryModifierExtension('MaxwellCloner')
p = e.getExtensionData()
if(type(pdata) is dict):
c = Cbase()
c.origin = Cvector(0.0, 0.0, 0.0)
c.xAxis = Cvector(1.0, 0.0, 0.0)
c.yAxis = Cvector(0.0, 1.0, 0.0)
c.zAxis = Cvector(0.0, 0.0, 1.0)
p.setFloatArray('PARTICLE_POSITIONS', list(pdata['PARTICLE_POSITIONS']), c)
p.setFloatArray('PARTICLE_SPEEDS', list(pdata['PARTICLE_SPEEDS']), c)
p.setFloatArray('PARTICLE_RADII', list(pdata['PARTICLE_RADII']), c)
p.setIntArray('PARTICLE_IDS', list(pdata['PARTICLE_IDS']))
else:
p.setString('FileName', pdata)
p.setFloat('Radius Factor', radius)
p.setFloat('MB Factor', mb_factor)
p.setFloat('Load particles %', load_percent)
p.setUInt('Start offset', start_offset)
p.setUInt('Create N particles per particle', ex_npp)
p.setFloat('Extra particles dispersion', ex_p_dispersion)
p.setFloat('Extra particles deformation', ex_p_deformation)
p.setByte('Use velocity', align_to_velocity)
p.setByte('Scale with particle radius', scale_with_radius)
p.setByte('Inherit ObjectID', inherit_obj_id)
p.setInt('Frame#', frame)
p.setFloat('fps', fps)
p.setUInt('Display Percent', display_percent)
p.setUInt('Display Max. Particles', display_max)
if(not render_emitter):
o = s.getObject(object_name)
o.setHide(True)
o = s.getObject(cloned_object)
o.applyGeometryModifierExtension(p)
return o
def wireframe_override_object_materials(self, clay_mat_name, wire_base_name, ):
s = self.mxs
it = CmaxwellObjectIterator()
o = it.first(s)
l = []
while not o.isNull():
name, _ = o.getName()
l.append(name)
o = it.next()
for n in l:
o = s.getObject(n)
# do not set material to wire base
if(o.getName()[0] != wire_base_name):
if(o.isInstance()[0] == 1):
instanced = o.getInstanced()
# do not set material to wire base instances
if(instanced.getName()[0] != wire_base_name):
o.setMaterial(clay_mat_name)
else:
o.setMaterial(clay_mat_name)
def wireframe_zero_scale_base(self, wire_base_name):
s = self.mxs
o = s.getObject(wire_base_name)
z = (0.0, 0.0, 0.0)
b = Cbase()
b.origin = Cvector(*z)
b.xAxis = Cvector(*z)
b.yAxis = Cvector(*z)
b.zAxis = Cvector(*z)
p = Cbase()
p.origin = Cvector(*z)
p.xAxis = Cvector(1.0, 0.0, 0.0)
p.yAxis = Cvector(0.0, 1.0, 0.0)
p.zAxis = Cvector(0.0, 0.0, 1.0)
o.setBaseAndPivot(b, p)
o.setScale(Cvector(0, 0, 0))
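# Usage sketch for the wireframe helpers (hypothetical names): override every object
# except the wire base with a clay material, then collapse the base object to zero
# scale so only its instances contribute to the render.
#
# w.wireframe_override_object_materials('clay', 'wire_base')
# w.wireframe_zero_scale_base('wire_base')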
class MXMWriter():
def __init__(self, path, data, ):
"""Create Extension MXM.
path string (path)
data dict
"""
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell for Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
self.path = path
self.mxs = Cmaxwell(mwcallback)
self.mgr = CextensionManager.instance()
self.mgr.loadAllExtensions()
mat = self.material(data)
if(mat is not None):
log("writing to: {}".format(self.path), 2, prefix="* ", )
mat.write(path)
log("done.", 2, prefix="* ", )
else:
raise RuntimeError("Something unexpected happened..")
def texture_data_to_mxparams(self, name, data, mxparams, ):
"""Create CtextureMap, fill with parameters and put into mxparams.
name string
data dict {'type': string,
'path': string,
'channel': int,
'use_global_map': bool,
'tile_method_type': [bool, bool],
'tile_method_units': int,
'repeat': [float, float],
'mirror': [bool, bool],
'offset': [float, float],
'rotation': float,
'invert': bool,
'alpha_only': bool,
'interpolation': bool,
'brightness': float,
'contrast': float,
'saturation': float,
'hue': float,
'rgb_clamp': [float, float],
'normal_mapping_flip_red': bool,
'normal_mapping_flip_green': bool,
'normal_mapping_full_range_blue': bool, }
mxparams mxparams
"""
d = data
if(d is None):
return
t = CtextureMap()
t.setPath(d['path'])
v = Cvector2D()
v.assign(*d['repeat'])
t.scale = v
v = Cvector2D()
v.assign(*d['offset'])
t.offset = v
t.rotation = d['rotation']
t.uvwChannelID = d['channel']
t.uIsTiled = d['tile_method_type'][0]
t.vIsTiled = d['tile_method_type'][1]
t.uIsMirrored = d['mirror'][0]
t.vIsMirrored = d['mirror'][1]
t.invert = d['invert']
# t.doGammaCorrection = 0
t.useAbsoluteUnits = d['tile_method_units']
t.normalMappingFlipRed = d['normal_mapping_flip_red']
t.normalMappingFlipGreen = d['normal_mapping_flip_green']
t.normalMappingFullRangeBlue = d['normal_mapping_full_range_blue']
t.useAlpha = d['alpha_only']
t.typeInterpolation = d['interpolation']
t.saturation = d['saturation'] / 100
t.contrast = d['contrast'] / 100
t.brightness = d['brightness'] / 100
t.hue = d['hue'] / 180
t.clampMin = d['rgb_clamp'][0] / 255
t.clampMax = d['rgb_clamp'][1] / 255
t.useGlobalMap = d['use_global_map']
# t.cosA = 1.000000
# t.sinA = 0.000000
ok = mxparams.setTextureMap(name, t)
return mxparams
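# Sketch of the texture 'data' dict consumed above (and by self.texture() below);
# values are illustrative assumptions. self.texture() additionally expects a
# 'procedural' list (possibly empty) describing procedural texture layers.
#
# tex = {'path': '/tmp/brick.png', 'channel': 0, 'use_global_map': False,
#        'tile_method_type': [True, True], 'tile_method_units': 0,
#        'repeat': [1.0, 1.0], 'mirror': [False, False], 'offset': [0.0, 0.0],
#        'rotation': 0.0, 'invert': False, 'alpha_only': False, 'interpolation': False,
#        'brightness': 0.0, 'contrast': 0.0, 'saturation': 0.0, 'hue': 0.0,
#        'rgb_clamp': [0.0, 255.0], 'normal_mapping_flip_red': False,
#        'normal_mapping_flip_green': False, 'normal_mapping_full_range_blue': False}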
def texture(self, d, ):
"""Create CtextureMap from parameters
d dict
"""
s = self.mxs
t = CtextureMap()
t.setPath(d['path'])
t.uvwChannelID = d['channel']
t.brightness = d['brightness'] / 100
t.contrast = d['contrast'] / 100
t.saturation = d['saturation'] / 100
t.hue = d['hue'] / 180
t.useGlobalMap = d['use_global_map']
t.useAbsoluteUnits = d['tile_method_units']
t.uIsTiled = d['tile_method_type'][0]
t.vIsTiled = d['tile_method_type'][1]
t.uIsMirrored = d['mirror'][0]
t.vIsMirrored = d['mirror'][1]
vec = Cvector2D()
vec.assign(d['offset'][0], d['offset'][1])
t.offset = vec
t.rotation = d['rotation']
t.invert = d['invert']
t.useAlpha = d['alpha_only']
if(d['interpolation']):
t.typeInterpolation = 1
else:
t.typeInterpolation = 0
t.clampMin = d['rgb_clamp'][0] / 255
t.clampMax = d['rgb_clamp'][1] / 255
vec = Cvector2D()
vec.assign(d['repeat'][0], d['repeat'][1])
t.scale = vec
t.normalMappingFlipRed = d['normal_mapping_flip_red']
t.normalMappingFlipGreen = d['normal_mapping_flip_green']
t.normalMappingFullRangeBlue = d['normal_mapping_full_range_blue']
for i, pt in enumerate(d['procedural']):
if(pt['use'] == 'BRICK'):
e = self.mgr.createDefaultTextureExtension('Brick')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setFloat('Brick width', pt['brick_brick_width'])
p.setFloat('Brick height', pt['brick_brick_height'])
p.setInt('Brick offset', pt['brick_brick_offset'])
p.setInt('Random offset', pt['brick_random_offset'])
p.setByte('Double brick', pt['brick_double_brick'])
p.setFloat('Small brick width', pt['brick_small_brick_width'])
p.setByte('Round corners', pt['brick_round_corners'])
p.setFloat('Boundary sharpness U', pt['brick_boundary_sharpness_u'])
p.setFloat('Boundary sharpness V', pt['brick_boundary_sharpness_v'])
p.setInt('Boundary noise detail', pt['brick_boundary_noise_detail'])
p.setFloat('Boundary noise region U', pt['brick_boundary_noise_region_u'])
p.setFloat('Boundary noise region V', pt['brick_boundary_noise_region_v'])
p.setUInt('Seed', pt['brick_seed'])
p.setByte('Random rotation', pt['brick_random_rotation'])
p.setInt('Color variation', pt['brick_color_variation'])
c = Crgb()
c.assign(*pt['brick_brick_color_0'])
p.setRgb('Brick color 0', c)
self.texture_data_to_mxparams('Brick texture 0', pt['brick_brick_texture_0'], p, )
p.setInt('Sampling factor 0', pt['brick_sampling_factor_0'])
p.setInt('Weight 0', pt['brick_weight_0'])
c = Crgb()
c.assign(*pt['brick_brick_color_1'])
p.setRgb('Brick color 1', c)
self.texture_data_to_mxparams('Brick texture 1', pt['brick_brick_texture_1'], p, )
p.setInt('Sampling factor 1', pt['brick_sampling_factor_1'])
p.setInt('Weight 1', pt['brick_weight_1'])
c = Crgb()
c.assign(*pt['brick_brick_color_2'])
p.setRgb('Brick color 2', c)
self.texture_data_to_mxparams('Brick texture 2', pt['brick_brick_texture_2'], p, )
p.setInt('Sampling factor 2', pt['brick_sampling_factor_2'])
p.setInt('Weight 2', pt['brick_weight_2'])
p.setFloat('Mortar thickness', pt['brick_mortar_thickness'])
c = Crgb()
c.assign(*pt['brick_mortar_color'])
p.setRgb('Mortar color', c)
self.texture_data_to_mxparams('Mortar texture', pt['brick_mortar_texture'], p, )
t.addProceduralTexture(p)
elif(pt['use'] == 'CHECKER'):
e = self.mgr.createDefaultTextureExtension('Checker')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['checker_color_0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['checker_color_1'])
p.setRgb('Color1', c)
p.setUInt('Number of elements U', pt['checker_number_of_elements_u'])
p.setUInt('Number of elements V', pt['checker_number_of_elements_v'])
p.setFloat('Transition sharpness', pt['checker_transition_sharpness'])
p.setUInt('Fall-off', pt['checker_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'CIRCLE'):
e = self.mgr.createDefaultTextureExtension('Circle')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['circle_background_color'])
p.setRgb('Background color', c)
c = Crgb()
c.assign(*pt['circle_circle_color'])
p.setRgb('Circle color', c)
p.setFloat('RadiusU', pt['circle_radius_u'])
p.setFloat('RadiusV', pt['circle_radius_v'])
p.setFloat('Transition factor', pt['circle_transition_factor'])
p.setUInt('Fall-off', pt['circle_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRADIENT3'):
e = self.mgr.createDefaultTextureExtension('Gradient3')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setByte('Gradient U', pt['gradient3_gradient_u'])
c = Crgb()
c.assign(*pt['gradient3_color0_u'])
p.setRgb('Color0 U', c)
c = Crgb()
c.assign(*pt['gradient3_color1_u'])
p.setRgb('Color1 U', c)
c = Crgb()
c.assign(*pt['gradient3_color2_u'])
p.setRgb('Color2 U', c)
p.setUInt('Gradient type U', pt['gradient3_gradient_type_u'])
p.setFloat('Color1 U position', pt['gradient3_color1_u_position'])
p.setByte('Gradient V', pt['gradient3_gradient_v'])
c = Crgb()
c.assign(*pt['gradient3_color0_v'])
p.setRgb('Color0 V', c)
c = Crgb()
c.assign(*pt['gradient3_color1_v'])
p.setRgb('Color1 V', c)
c = Crgb()
c.assign(*pt['gradient3_color2_v'])
p.setRgb('Color2 V', c)
p.setUInt('Gradient type V', pt['gradient3_gradient_type_v'])
p.setFloat('Color1 V position', pt['gradient3_color1_v_position'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRADIENT'):
e = self.mgr.createDefaultTextureExtension('Gradient')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setByte('Gradient U', pt['gradient_gradient_u'])
c = Crgb()
c.assign(*pt['gradient_color0_u'])
p.setRgb('Color0 U', c)
c = Crgb()
c.assign(*pt['gradient_color1_u'])
p.setRgb('Color1 U', c)
p.setUInt('Gradient type U', pt['gradient_gradient_type_u'])
p.setFloat('Transition factor U', pt['gradient_transition_factor_u'])
p.setByte('Gradient V', pt['gradient_gradient_v'])
c = Crgb()
c.assign(*pt['gradient_color0_v'])
p.setRgb('Color0 V', c)
c = Crgb()
c.assign(*pt['gradient_color1_v'])
p.setRgb('Color1 V', c)
p.setUInt('Gradient type V', pt['gradient_gradient_type_v'])
p.setFloat('Transition factor V', pt['gradient_transition_factor_v'])
t.addProceduralTexture(p)
elif(pt['use'] == 'GRID'):
e = self.mgr.createDefaultTextureExtension('Grid')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
c = Crgb()
c.assign(*pt['grid_boundary_color'])
p.setRgb('Boundary color', c)
c = Crgb()
c.assign(*pt['grid_cell_color'])
p.setRgb('Cell color', c)
p.setFloat('Cell width', pt['grid_cell_width'])
p.setFloat('Cell height', pt['grid_cell_height'])
if(pt['grid_horizontal_lines']):
p.setFloat('Boundary thickness U', pt['grid_boundary_thickness_u'])
else:
p.setFloat('Boundary thickness U', 0.0)
if(pt['grid_vertical_lines']):
p.setFloat('Boundary thickness V', pt['grid_boundary_thickness_v'])
else:
p.setFloat('Boundary thickness V', 0.0)
p.setFloat('Transition sharpness', pt['grid_transition_sharpness'])
p.setUInt('Fall-off', pt['grid_falloff'])
t.addProceduralTexture(p)
elif(pt['use'] == 'MARBLE'):
e = self.mgr.createDefaultTextureExtension('Marble')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['marble_coordinates_type'])
c = Crgb()
c.assign(*pt['marble_color0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['marble_color1'])
p.setRgb('Color1', c)
c = Crgb()
c.assign(*pt['marble_color2'])
p.setRgb('Color2', c)
p.setFloat('Frequency', pt['marble_frequency'])
p.setFloat('Detail', pt['marble_detail'])
p.setInt('Octaves', pt['marble_octaves'])
p.setUInt('Seed', pt['marble_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'NOISE'):
e = self.mgr.createDefaultTextureExtension('Noise')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['noise_coordinates_type'])
c = Crgb()
c.assign(*pt['noise_noise_color'])
p.setRgb('Noise color', c)
c = Crgb()
c.assign(*pt['noise_background_color'])
p.setRgb('Background color', c)
p.setFloat('Detail', pt['noise_detail'])
p.setFloat('Persistance', pt['noise_persistance'])
p.setInt('Octaves', pt['noise_octaves'])
p.setFloat('Low value', pt['noise_low_value'])
p.setFloat('High value', pt['noise_high_value'])
p.setUInt('Seed', pt['noise_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'VORONOI'):
e = self.mgr.createDefaultTextureExtension('Voronoi')
p = e.getExtensionData()
p.setFloat('Blend procedural', pt['blending_factor'])
p.setUInt('Coordinates type', pt['voronoi_coordinates_type'])
c = Crgb()
c.assign(*pt['voronoi_color0'])
p.setRgb('Color0', c)
c = Crgb()
c.assign(*pt['voronoi_color1'])
p.setRgb('Color1', c)
p.setInt('Detail', pt['voronoi_detail'])
p.setUInt('Distance', pt['voronoi_distance'])
p.setUInt('Combination', pt['voronoi_combination'])
p.setFloat('Low value', pt['voronoi_low_value'])
p.setFloat('High value', pt['voronoi_high_value'])
p.setUInt('Seed', pt['voronoi_seed'])
t.addProceduralTexture(p)
elif(pt['use'] == 'TILED'):
e = self.mgr.createDefaultTextureExtension('TiledTexture')
p = e.getExtensionData()
p.setFloat('Blend factor', pt['blending_factor'])
c = Crgb()
c.assign(*pt['tiled_base_color'])
p.setRgb('Base Color', c)
p.setByte('Use base color', pt['tiled_use_base_color'])
p.setString('Filename_mask', pt['tiled_token_mask'])
p.setString('Filename', pt['tiled_filename'])
# 'Map U tile range' UCHAR
# 'Map V tile range' UCHAR
t.addProceduralTexture(p)
elif(pt['use'] == 'WIREFRAME'):
e = self.mgr.createDefaultTextureExtension('WireframeTexture')
p = e.getExtensionData()
c = Crgb()
c.assign(*pt['wireframe_fill_color'])
p.setRgb('Fill Color', c)
c = Crgb()
c.assign(*pt['wireframe_edge_color'])
p.setRgb('Edge Color', c)
c = Crgb()
c.assign(*pt['wireframe_coplanar_edge_color'])
p.setRgb('Coplanar Edge Color', c)
p.setFloat('Edge Width', pt['wireframe_edge_width'])
p.setFloat('Coplanar Edge Width', pt['wireframe_coplanar_edge_width'])
p.setFloat('Coplanar Threshold', pt['wireframe_coplanar_threshold'])
t.addProceduralTexture(p)
else:
raise TypeError("{0} is unknown procedural texture type".format(pt['use']))
return t
def material_placeholder(self, n=None, ):
if(n is not None):
pass
else:
n = 'MATERIAL_PLACEHOLDER'
s = self.mxs
m = s.createMaterial(n)
l = m.addLayer()
b = l.addBSDF()
r = b.getReflectance()
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = CtextureMap()
mgr = CextensionManager.instance()
mgr.loadAllExtensions()
e = mgr.createDefaultTextureExtension('Checker')
ch = e.getExtensionData()
ch.setUInt('Number of elements U', 32)
ch.setUInt('Number of elements V', 32)
t.addProceduralTexture(ch)
a.textureMap = t
r.setAttribute('color', a)
return m
def material_default(self, n, ):
s = self.mxs
m = s.createMaterial(n)
l = m.addLayer()
b = l.addBSDF()
return m
def material_external(self, d, ):
s = self.mxs
p = d['path']
t = s.readMaterial(p)
t.setName(d['name'])
m = s.addMaterial(t)
if(not d['embed']):
m.setReference(1, p)
return m
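# Usage sketch for external materials (hypothetical path): the dict needs 'path',
# 'name' and 'embed'; with 'embed' False the written material keeps a reference to
# the original MXM file instead of embedding it.
#
# m = writer.material_external({'path': '/tmp/materials/steel.mxm', 'name': 'steel', 'embed': False})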
def material_custom(self, d, ):
s = self.mxs
m = s.createMaterial(d['name'])
d = d['data']
def global_props(d, m):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
def displacement(d, m):
if(not d['enabled']):
return
m.enableDisplacement(True)
if(d['map'] is not None):
t = self.texture(d['map'])
m.setDisplacementMap(t)
m.setDisplacementCommonParameters(d['type'], d['subdivision'], int(d['smoothing']), d['offset'], d['subdivision_method'], d['uv_interpolation'], )
m.setHeightMapDisplacementParameters(d['height'], d['height_units'], d['adaptive'], )
v = Cvector(*d['v3d_scale'])
m.setVectorDisplacementParameters(v, d['v3d_transform'], d['v3d_rgb_mapping'], d['v3d_preset'], )
def add_bsdf(d, l):
b = l.addBSDF()
b.setName(d['name'])
bp = d['bsdf_props']
# weight
if(bp['weight_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['weight_map'])
if(t is not None):
a.textureMap = t
a.value = bp['weight']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['weight']
b.setWeight(a)
# enabled
if(not bp['visible']):
b.setState(False)
# ior
r = b.getReflectance()
if(bp['ior'] == 1):
# measured data
r.setActiveIorMode(1)
r.setComplexIor(bp['complex_ior'])
else:
if(bp['reflectance_0_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['reflectance_0_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['reflectance_0'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['reflectance_0'])
r.setAttribute('color', a)
if(bp['reflectance_90_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['reflectance_90_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['reflectance_90'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['reflectance_90'])
r.setAttribute('color.tangential', a)
if(bp['transmittance_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['transmittance_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*bp['transmittance'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['transmittance'])
r.setAttribute('transmittance.color', a)
r.setAbsorptionDistance(bp['attenuation_units'], bp['attenuation'])
r.setIOR(bp['nd'], bp['abbe'])
if(bp['force_fresnel']):
r.enableForceFresnel(True)
r.setConductor(bp['k'])
if(bp['r2_enabled']):
r.setFresnelCustom(bp['r2_falloff_angle'], bp['r2_influence'], True, )
# surface
if(bp['roughness_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['roughness_map'])
if(t is not None):
a.textureMap = t
a.value = bp['roughness']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['roughness']
b.setAttribute('roughness', a)
if(bp['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['bump_map'])
if(t is not None):
a.textureMap = t
if(bp['bump_map_use_normal']):
a.value = bp['bump_normal']
else:
a.value = bp['bump']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
if(bp['bump_map_use_normal']):
a.value = bp['bump_normal']
else:
a.value = bp['bump']
b.setAttribute('bump', a)
b.setNormalMapState(bp['bump_map_use_normal'])
if(bp['anisotropy_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['anisotropy_map'])
if(t is not None):
a.textureMap = t
a.value = bp['anisotropy']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['anisotropy']
b.setAttribute('anisotropy', a)
if(bp['anisotropy_angle_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['anisotropy_angle_map'])
if(t is not None):
a.textureMap = t
a.value = bp['anisotropy_angle']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['anisotropy_angle']
b.setAttribute('angle', a)
# subsurface
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*bp['scattering'])
r.setAttribute('scattering', a)
r.setScatteringParameters(bp['coef'], bp['asymmetry'], bp['single_sided'])
if(bp['single_sided']):
if(bp['single_sided_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(bp['single_sided_map'])
if(t is not None):
a.textureMap = t
a.value = bp['single_sided_value']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = bp['single_sided_value']
r.setScatteringThickness(a)
r.setScatteringThicknessRange(bp['single_sided_min'], bp['single_sided_max'])
# coating
cp = d['coating']
if(cp['enabled']):
c = b.addCoating()
if(cp['thickness_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['thickness_map'])
if(t is not None):
a.textureMap = t
a.value = cp['thickness']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = cp['thickness']
c.setThickness(a)
c.setThicknessRange(cp['thickness_map_min'], cp['thickness_map_max'])
r = c.getReflectance()
if(cp['ior'] == 1):
# measured data
r.setActiveIorMode(1)
r.setComplexIor(cp['complex_ior'])
else:
if(cp['reflectance_0_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['reflectance_0_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*cp['reflectance_0'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*cp['reflectance_0'])
r.setAttribute('color', a)
if(cp['reflectance_90_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(cp['reflectance_90_map'])
if(t is not None):
a.textureMap = t
a.rgb.assign(*cp['reflectance_90'])
else:
a = Cattribute()
a.activeType = MAP_TYPE_RGB
a.rgb.assign(*cp['reflectance_90'])
r.setAttribute('color.tangential', a)
r.setIOR(cp['nd'], 1.0, )
if(cp['force_fresnel']):
r.enableForceFresnel(True)
r.setConductor(cp['k'])
if(cp['r2_enabled']):
r.setFresnelCustom(cp['r2_falloff_angle'], 0.0, True, )
def add_emitter(d, l):
e = l.createEmitter()
if(d['type'] == 0):
e.setLobeType(EMISSION_LOBE_DEFAULT)
elif(d['type'] == 1):
e.setLobeType(EMISSION_LOBE_IES)
e.setLobeIES(d['ies_data'])
e.setIESLobeIntensity(d['ies_intensity'])
elif(d['type'] == 2):
e.setLobeType(EMISSION_LOBE_SPOTLIGHT)
if(d['spot_map'] is not None):
t = self.texture(d['spot_map'])
if(t is not None):
e.setLobeImageProjectedMap(d['spot_map_enabled'], t)
e.setSpotConeAngle(d['spot_cone_angle'])
e.setSpotFallOffAngle(d['spot_falloff_angle'])
e.setSpotFallOffType(d['spot_falloff_type'])
e.setSpotBlur(d['spot_blur'])
if(d['emission'] == 0):
e.setActiveEmissionType(EMISSION_TYPE_PAIR)
ep = CemitterPair()
c = Crgb()
c.assign(*d['color'])
ep.rgb.assign(c)
ep.temperature = d['color_black_body']
ep.watts = d['luminance_power']
ep.luminousEfficacy = d['luminance_efficacy']
ep.luminousPower = d['luminance_output']
ep.illuminance = d['luminance_output']
ep.luminousIntensity = d['luminance_output']
ep.luminance = d['luminance_output']
e.setPair(ep)
if(d['luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
if(d['color_black_body_enabled']):
e.setActivePair(EMISSION_COLOR_TEMPERATURE, u)
else:
e.setActivePair(EMISSION_RGB, u)
elif(d['emission'] == 1):
e.setActiveEmissionType(EMISSION_TYPE_TEMPERATURE)
e.setTemperature(d['temperature_value'])
elif(d['emission'] == 2):
e.setActiveEmissionType(EMISSION_TYPE_MXI)
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['hdr_map'])
if(t is not None):
a.textureMap = t
a.value = d['hdr_intensity']
e.setMXI(a)
e.setState(True)
def add_layer(d, m):
l = m.addLayer()
l.setName(d['name'])
lpd = d['layer_props']
if(not lpd['visible']):
l.setEnabled(False)
if(lpd['blending'] == 1):
l.setStackedBlendingMode(1)
if(lpd['opacity_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(lpd['opacity_map'])
if(t is not None):
a.textureMap = t
a.value = lpd['opacity']
else:
a = Cattribute()
a.activeType = MAP_TYPE_VALUE
a.value = lpd['opacity']
l.setAttribute('weight', a)
epd = d['emitter']
if(epd['enabled']):
add_emitter(epd, l)
for b in d['bsdfs']:
add_bsdf(b, l)
global_props(d['global_props'], m)
displacement(d['displacement'], m)
for layer in d['layers']:
add_layer(layer, m)
return m
def material(self, d, ):
s = self.mxs
if(d['subtype'] == 'EXTERNAL'):
if(d['path'] == ''):
m = self.material_placeholder(d['name'])
else:
m = self.material_external(d)
if(d['override']):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
elif(d['subtype'] == 'EXTENSION'):
if(d['use'] == 'EMITTER'):
m = s.createMaterial(d['name'])
l = m.addLayer()
e = l.createEmitter()
if(d['emitter_type'] == 0):
e.setLobeType(EMISSION_LOBE_DEFAULT)
elif(d['emitter_type'] == 1):
e.setLobeType(EMISSION_LOBE_IES)
e.setLobeIES(d['emitter_ies_data'])
e.setIESLobeIntensity(d['emitter_ies_intensity'])
elif(d['emitter_type'] == 2):
e.setLobeType(EMISSION_LOBE_SPOTLIGHT)
if(d['emitter_spot_map'] is not None):
t = self.texture(d['emitter_spot_map'])
if(t is not None):
e.setLobeImageProjectedMap(d['emitter_spot_map_enabled'], t)
e.setSpotConeAngle(d['emitter_spot_cone_angle'])
e.setSpotFallOffAngle(d['emitter_spot_falloff_angle'])
e.setSpotFallOffType(d['emitter_spot_falloff_type'])
e.setSpotBlur(d['emitter_spot_blur'])
if(d['emitter_emission'] == 0):
e.setActiveEmissionType(EMISSION_TYPE_PAIR)
ep = CemitterPair()
c = Crgb()
c.assign(*d['emitter_color'])
ep.rgb.assign(c)
ep.temperature = d['emitter_color_black_body']
ep.watts = d['emitter_luminance_power']
ep.luminousEfficacy = d['emitter_luminance_efficacy']
ep.luminousPower = d['emitter_luminance_output']
ep.illuminance = d['emitter_luminance_output']
ep.luminousIntensity = d['emitter_luminance_output']
ep.luminance = d['emitter_luminance_output']
e.setPair(ep)
if(d['emitter_color_black_body_enabled']):
if(d['emitter_luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['emitter_luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['emitter_luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['emitter_luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['emitter_luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
e.setActivePair(EMISSION_COLOR_TEMPERATURE, u)
else:
if(d['emitter_luminance'] == 0):
u = EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY
elif(d['emitter_luminance'] == 1):
u = EMISSION_UNITS_LUMINOUS_POWER
elif(d['emitter_luminance'] == 2):
u = EMISSION_UNITS_ILLUMINANCE
elif(d['emitter_luminance'] == 3):
u = EMISSION_UNITS_LUMINOUS_INTENSITY
elif(d['emitter_luminance'] == 4):
u = EMISSION_UNITS_LUMINANCE
e.setActivePair(EMISSION_RGB, u)
elif(d['emitter_emission'] == 1):
e.setActiveEmissionType(EMISSION_TYPE_TEMPERATURE)
e.setTemperature(d['emitter_temperature_value'])
elif(d['emitter_emission'] == 2):
e.setActiveEmissionType(EMISSION_TYPE_MXI)
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['emitter_hdr_map'])
if(t is not None):
a.textureMap = t
a.value = d['emitter_hdr_intensity']
e.setMXI(a)
e.setState(True)
def global_props(d, m):
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
global_props(d, m)
else:
m = CextensionManager.instance()
m.loadAllExtensions()
if(d['use'] == 'AGS'):
e = m.createDefaultMaterialModifierExtension('AGS')
p = e.getExtensionData()
c = Crgb()
c.assign(*d['ags_color'])
p.setRgb('Color', c)
p.setFloat('Reflection', d['ags_reflection'])
p.setUInt('Type', d['ags_type'])
elif(d['use'] == 'OPAQUE'):
e = m.createDefaultMaterialModifierExtension('Opaque')
p = e.getExtensionData()
p.setByte('Color Type', d['opaque_color_type'])
c = Crgb()
c.assign(*d['opaque_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['opaque_color_map'], p, )
p.setByte('Shininess Type', d['opaque_shininess_type'])
p.setFloat('Shininess', d['opaque_shininess'])
self.texture_data_to_mxparams('Shininess Map', d['opaque_shininess_map'], p, )
p.setByte('Roughness Type', d['opaque_roughness_type'])
p.setFloat('Roughness', d['opaque_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['opaque_roughness_map'], p, )
p.setByte('Clearcoat', d['opaque_clearcoat'])
elif(d['use'] == 'TRANSPARENT'):
e = m.createDefaultMaterialModifierExtension('Transparent')
p = e.getExtensionData()
p.setByte('Color Type', d['transparent_color_type'])
c = Crgb()
c.assign(*d['transparent_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['transparent_color_map'], p, )
p.setFloat('Ior', d['transparent_ior'])
p.setFloat('Transparency', d['transparent_transparency'])
p.setByte('Roughness Type', d['transparent_roughness_type'])
p.setFloat('Roughness', d['transparent_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['transparent_roughness_map'], p, )
p.setFloat('Specular Tint', d['transparent_specular_tint'])
p.setFloat('Dispersion', d['transparent_dispersion'])
p.setByte('Clearcoat', d['transparent_clearcoat'])
elif(d['use'] == 'METAL'):
e = m.createDefaultMaterialModifierExtension('Metal')
p = e.getExtensionData()
p.setUInt('IOR', d['metal_ior'])
p.setFloat('Tint', d['metal_tint'])
p.setByte('Color Type', d['metal_color_type'])
c = Crgb()
c.assign(*d['metal_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['metal_color_map'], p, )
p.setByte('Roughness Type', d['metal_roughness_type'])
p.setFloat('Roughness', d['metal_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['metal_roughness_map'], p, )
p.setByte('Anisotropy Type', d['metal_anisotropy_type'])
p.setFloat('Anisotropy', d['metal_anisotropy'])
self.texture_data_to_mxparams('Anisotropy Map', d['metal_anisotropy_map'], p, )
p.setByte('Angle Type', d['metal_angle_type'])
p.setFloat('Angle', d['metal_angle'])
self.texture_data_to_mxparams('Angle Map', d['metal_angle_map'], p, )
p.setByte('Dust Type', d['metal_dust_type'])
p.setFloat('Dust', d['metal_dust'])
self.texture_data_to_mxparams('Dust Map', d['metal_dust_map'], p, )
p.setByte('Perforation Enabled', d['metal_perforation_enabled'])
self.texture_data_to_mxparams('Perforation Map', d['metal_perforation_map'], p, )
elif(d['use'] == 'TRANSLUCENT'):
e = m.createDefaultMaterialModifierExtension('Translucent')
p = e.getExtensionData()
p.setFloat('Scale', d['translucent_scale'])
p.setFloat('Ior', d['translucent_ior'])
p.setByte('Color Type', d['translucent_color_type'])
c = Crgb()
c.assign(*d['translucent_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['translucent_color_map'], p, )
p.setFloat('Hue Shift', d['translucent_hue_shift'])
p.setByte('Invert Hue', d['translucent_invert_hue'])
p.setFloat('Vibrance', d['translucent_vibrance'])
p.setFloat('Density', d['translucent_density'])
p.setFloat('Opacity', d['translucent_opacity'])
p.setByte('Roughness Type', d['translucent_roughness_type'])
p.setFloat('Roughness', d['translucent_roughness'])
self.texture_data_to_mxparams('Roughness Map', d['translucent_roughness_map'], p, )
p.setFloat('Specular Tint', d['translucent_specular_tint'])
p.setByte('Clearcoat', d['translucent_clearcoat'])
p.setFloat('Clearcoat Ior', d['translucent_clearcoat_ior'])
elif(d['use'] == 'CARPAINT'):
e = m.createDefaultMaterialModifierExtension('Car Paint')
p = e.getExtensionData()
c = Crgb()
c.assign(*d['carpaint_color'])
p.setRgb('Color', c)
p.setFloat('Metallic', d['carpaint_metallic'])
p.setFloat('Topcoat', d['carpaint_topcoat'])
elif(d['use'] == 'HAIR'):
e = m.createDefaultMaterialModifierExtension('Hair')
p = e.getExtensionData()
p.setByte('Color Type', d['hair_color_type'])
c = Crgb()
c.assign(*d['hair_color'])
p.setRgb('Color', c)
self.texture_data_to_mxparams('Color Map', d['hair_color_map'], p, )
self.texture_data_to_mxparams('Root-Tip Map', d['hair_root_tip_map'], p, )
p.setByte('Root-Tip Weight Type', d['hair_root_tip_weight_type'])
p.setFloat('Root-Tip Weight', d['hair_root_tip_weight'])
self.texture_data_to_mxparams('Root-Tip Weight Map', d['hair_root_tip_weight_map'], p, )
p.setFloat('Primary Highlight Strength', d['hair_primary_highlight_strength'])
p.setFloat('Primary Highlight Spread', d['hair_primary_highlight_spread'])
c = Crgb()
c.assign(*d['hair_primary_highlight_tint'])
p.setRgb('Primary Highlight Tint', c)
p.setFloat('Secondary Highlight Strength', d['hair_secondary_highlight_strength'])
p.setFloat('Secondary Highlight Spread', d['hair_secondary_highlight_spread'])
c = Crgb()
c.assign(*d['hair_secondary_highlight_tint'])
p.setRgb('Secondary Highlight Tint', c)
m = s.createMaterial(d['name'])
m.applyMaterialModifierExtension(p)
# global properties
if(d['override_map']):
t = self.texture(d['override_map'])
if(t is not None):
m.setGlobalMap(t)
if(d['bump_map_enabled']):
a = Cattribute()
a.activeType = MAP_TYPE_BITMAP
t = self.texture(d['bump_map'])
if(t is not None):
a.textureMap = t
if(d['bump_map_use_normal']):
a.value = d['bump_normal']
else:
a.value = d['bump']
m.setAttribute('bump', a)
m.setNormalMapState(d['bump_map_use_normal'])
m.setDispersion(d['dispersion'])
m.setMatteShadow(d['shadow'])
m.setMatte(d['matte'])
m.setNestedPriority(d['priority'])
c = Crgb()
c.assign(*d['id'])
m.setColorID(c)
if(d['active_display_map']):
t = self.texture(d['active_display_map'])
m.setActiveDisplayMap(t)
def displacement(d, m):
if(not d['enabled']):
return
m.enableDisplacement(True)
if(d['map'] is not None):
t = self.texture(d['map'])
m.setDisplacementMap(t)
m.setDisplacementCommonParameters(d['type'], d['subdivision'], int(d['smoothing']), d['offset'], d['subdivision_method'], d['uv_interpolation'], )
m.setHeightMapDisplacementParameters(d['height'], d['height_units'], d['adaptive'], )
v = Cvector(*d['v3d_scale'])
m.setVectorDisplacementParameters(v, d['v3d_transform'], d['v3d_rgb_mapping'], d['v3d_preset'], )
try:
displacement(d['displacement'], m)
except KeyError:
pass
elif(d['subtype'] == 'CUSTOM'):
m = self.material_custom(d)
else:
raise TypeError("Material '{}' {} is unknown type".format(d['name'], d['subtype']))
return m
def get_material(self, n, ):
"""get material by name from scene, if material is missing, create and return placeholder"""
def get_material_names(s):
it = CmaxwellMaterialIterator()
o = it.first(s)
l = []
while not o.isNull():
name = o.getName()
l.append(name)
o = it.next()
return l
s = self.mxs
names = get_material_names(s)
m = None
if(n in names):
m = s.getMaterial(n)
if(m is None):
# should not happen because i stopped changing material names.. but i leave it here
m = self.material_placeholder()
return m
class MXMEmitterCheck():
def __init__(self, path, ):
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell for Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
self.path = path
self.mxs = Cmaxwell(mwcallback)
self.emitter = False
m = self.mxs.readMaterial(self.path)
for i in range(m.getNumLayers()[0]):
l = m.getLayer(i)
e = l.getEmitter()
if(e.isNull()):
# no emitter layer
self.emitter = False
return
if(not e.getState()[0]):
# there is, but is disabled
self.emitter = False
return
# is emitter
self.emitter = True
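# A minimal usage sketch for MXMEmitterCheck; the path is a hypothetical example.
# The checker loads one .mxm and exposes a single boolean flag.
#
#   check = MXMEmitterCheck("/tmp/some_material.mxm")
#   if(check.emitter):
#       print("material has an enabled emitter layer")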
class MXSReader():
def __init__(self, path, ):
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell for Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
self.path = path
self.mxs = Cmaxwell(mwcallback)
log("loading {}".format(self.path), 2, prefix="* ", )
self.mxs.readMXS(self.path)
if(self.mxs.isProtectionEnabled()):
raise RuntimeError("Protected MXS")
self._prepare()
def _mxs_get_objects_names(self):
s = self.mxs
it = CmaxwellObjectIterator()
o = it.first(s)
l = []
while not o.isNull():
name, _ = o.getName()
l.append(name)
o = it.next()
return l
def _mxs_object(self, o):
object_name, _ = o.getName()
is_instance, _ = o.isInstance()
is_mesh, _ = o.isMesh()
if(is_instance == 0 and is_mesh == 0):
log("{}: only empties, meshes and instances are supported..".format(object_name), 2, LogStyles.WARNING, )
return None
# skip not posrotscale initialized objects
is_init, _ = o.isPosRotScaleInitialized()
if(not is_init):
# log("{}: object is not initialized, skipping..".format(object_name), 2, LogStyles.WARNING, )
log("{}: object is not initialized..".format(object_name), 2, LogStyles.WARNING, )
# return None
r = {'name': o.getName()[0],
'vertices': [],
'normals': [],
'triangles': [],
'trianglesUVW': [],
'matrix': (),
'parent': None,
'type': '',
'materials': [],
'nmats': 0,
'matnames': [], }
if(is_instance == 1):
io = o.getInstanced()
ion = io.getName()[0]
b, p = self._base_and_pivot(o)
r = {'name': o.getName()[0],
'base': b,
'pivot': p,
'parent': None,
'type': 'INSTANCE',
'instanced': ion, }
# no multi material instances, always one material per instance
m, _ = o.getMaterial()
if(m.isNull() == 1):
r['material'] = None
else:
r['material'] = o.getName()
p, _ = o.getParent()
if(not p.isNull()):
r['parent'] = p.getName()[0]
cid, _ = o.getColorID()
rgb8 = cid.toRGB8()
col = [str(rgb8.r()), str(rgb8.g()), str(rgb8.b())]
r['colorid'] = ", ".join(col)
h = []
if(o.getHideToCamera()):
h.append("C")
if(o.getHideToGI()):
h.append("GI")
if(o.getHideToReflectionsRefractions()):
h.append("RR")
r['hidden'] = ", ".join(h)
r['referenced_mxs'] = False
r['referenced_mxs_path'] = None
rmp = io.getReferencedScenePath()
if(rmp != ""):
r['referenced_mxs'] = True
r['referenced_mxs_path'] = rmp
return r
# counts
nv, _ = o.getVerticesCount()
nn, _ = o.getNormalsCount()
nt, _ = o.getTrianglesCount()
nppv, _ = o.getPositionsPerVertexCount()
ppv = 0
r['referenced_mxs'] = False
r['referenced_mxs_path'] = None
if(nv > 0):
r['type'] = 'MESH'
cid, _ = o.getColorID()
rgb8 = cid.toRGB8()
col = [str(rgb8.r()), str(rgb8.g()), str(rgb8.b())]
r['colorid'] = ", ".join(col)
h = []
if(o.getHideToCamera()):
h.append("C")
if(o.getHideToGI()):
h.append("GI")
if(o.getHideToReflectionsRefractions()):
h.append("RR")
r['hidden'] = ", ".join(h)
else:
r['type'] = 'EMPTY'
rmp = o.getReferencedScenePath()
if(rmp != ""):
r['referenced_mxs'] = True
r['referenced_mxs_path'] = rmp
cid, _ = o.getColorID()
rgb8 = cid.toRGB8()
col = [str(rgb8.r()), str(rgb8.g()), str(rgb8.b())]
r['colorid'] = ", ".join(col)
if(nppv - 1 != ppv and nv != 0):
log("only one position per vertex is supported..", 2, LogStyles.WARNING, )
# vertices
for i in range(nv):
v, _ = o.getVertex(i, ppv)
# (float x, float y, float z)
r['vertices'].append((v.x(), v.y(), v.z()))
# normals
for i in range(nn):
v, _ = o.getNormal(i, ppv)
# (float x, float y, float z)
r['normals'].append((v.x(), v.y(), v.z()))
# triangles
for i in range(nt):
t = o.getTriangle(i)
# (int v1, int v2, int v3, int n1, int n2, int n3)
r['triangles'].append(t)
# materials
mats = []
for i in range(nt):
m, _ = o.getTriangleMaterial(i)
if(m.isNull() == 1):
n = None
else:
n = m.getName()
if(n not in mats):
mats.append(n)
r['materials'].append((i, n))
r['nmats'] = len(mats)
r['matnames'] = mats
# uv channels
ncuv, _ = o.getChannelsUVWCount()
for cuv in range(ncuv):
# uv triangles
r['trianglesUVW'].append([])
for i in range(nt):
t = o.getTriangleUVW(i, cuv)
# float u1, float v1, float w1, float u2, float v2, float w2, float u3, float v3, float w3
r['trianglesUVW'][cuv].append(t)
# base and pivot to matrix
b, p = self._base_and_pivot(o)
r['base'] = b
r['pivot'] = p
# parent
p, _ = o.getParent()
if(not p.isNull()):
r['parent'] = p.getName()[0]
return r
def _mxs_camera(self, c):
v = c.getValues()
v = {'name': v[0],
'nSteps': v[1],
'shutter': v[2],
'filmWidth': v[3],
'filmHeight': v[4],
'iso': v[5],
'pDiaphragmType': v[6],
'angle': v[7],
'nBlades': v[8],
'fps': v[9],
'xRes': v[10],
'yRes': v[11],
'pixelAspect': v[12],
'lensType': v[13], }
s = c.getStep(0)
o = s[0]
f = s[1]
u = s[2]
# skip weird cameras
flc = s[3]
co = s[0]
fp = s[1]
d = Cvector()
d.substract(fp, co)
fd = d.norm()
if(flc == 0.0 or fd == 0.0):
log("{}: impossible camera, skipping..".format(v['name']), 2, LogStyles.WARNING)
return None
r = {'name': v['name'],
'shutter': 1.0 / v['shutter'],
'iso': v['iso'],
'x_res': v['xRes'],
'y_res': v['yRes'],
'pixel_aspect': v['pixelAspect'],
'origin': (o.x(), o.y(), o.z()),
'focal_point': (f.x(), f.y(), f.z()),
'up': (u.x(), u.y(), u.z()),
'focal_length': self._uncorrect_focal_length(s) * 1000.0,
'f_stop': s[4],
'film_width': round(v['filmWidth'] * 1000.0, 3),
'film_height': round(v['filmHeight'] * 1000.0, 3),
'active': False,
'sensor_fit': None,
'shift_x': 0.0,
'shift_y': 0.0,
'zclip': False,
'zclip_near': 0.0,
'zclip_far': 1000000.0,
'type': 'CAMERA', }
if(r['film_width'] > r['film_height']):
r['sensor_fit'] = 'HORIZONTAL'
else:
r['sensor_fit'] = 'VERTICAL'
cp = c.getCutPlanes()
if(cp[2] is True):
r['zclip'] = True
r['zclip_near'] = cp[0]
r['zclip_far'] = cp[1]
sl = c.getShiftLens()
r['shift_x'] = sl[0]
r['shift_y'] = sl[1]
d = c.getDiaphragm()
r['diaphragm_type'] = d[0][0]
r['diaphragm_angle'] = d[1]
r['diaphragm_blades'] = d[2]
return r
def _base_and_pivot(self, o):
b, p, _ = o.getBaseAndPivot()
o = b.origin
x = b.xAxis
y = b.yAxis
z = b.zAxis
rb = [[o.x(), o.y(), o.z()], [x.x(), x.y(), x.z()], [y.x(), y.y(), y.z()], [z.x(), z.y(), z.z()]]
rp = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), )
return rb, rp
def _uncorrect_focal_length(self, step):
flc = step[3]
o = step[0]
fp = step[1]
d = Cvector()
d.substract(fp, o)
fd = d.norm()
fluc = 1.0 / (1.0 / flc - 1 / fd)
return fluc
def _prepare(self):
s = self.mxs
self.object_names = self._mxs_get_objects_names()
def _is_emitter(self, o):
is_instance, _ = o.isInstance()
is_mesh, _ = o.isMesh()
if(not is_mesh and not is_instance):
return False
if(is_mesh):
nt, _ = o.getTrianglesCount()
mats = []
for i in range(nt):
m, _ = o.getTriangleMaterial(i)
if(not m.isNull()):
if(m not in mats):
mats.append(m)
for m in mats:
nl, _ = m.getNumLayers()
for i in range(nl):
l = m.getLayer(i)
e = l.getEmitter()
if(not e.isNull()):
return True
if(is_instance):
m, _ = o.getMaterial()
if(not m.isNull()):
nl, _ = m.getNumLayers()
for i in range(nl):
l = m.getLayer(i)
e = l.getEmitter()
if(not e.isNull()):
return True
return False
def _global_transform(self, o):
cb, _ = o.getWorldTransform()
o = cb.origin
x = cb.xAxis
y = cb.yAxis
z = cb.zAxis
rb = [[o.x(), o.y(), o.z()], [x.x(), x.y(), x.z()], [y.x(), y.y(), y.z()], [z.x(), z.y(), z.z()]]
rp = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), )
return rb, rp
def objects(self, only_emitters=False):
if(only_emitters):
s = self.mxs
data = []
log("converting emitters..", 2)
for n in self.object_names:
d = None
o = s.getObject(n)
if(self._is_emitter(o)):
d = self._mxs_object(o)
if(d is not None):
b, p = self._global_transform(o)
d['base'] = b
d['pivot'] = p
d['parent'] = None
data.append(d)
else:
s = self.mxs
data = []
log("converting empties, meshes and instances..", 2)
for n in self.object_names:
d = None
o = s.getObject(n)
d = self._mxs_object(o)
if(d is not None):
data.append(d)
return data
def cameras(self):
s = self.mxs
data = []
log("converting cameras..", 2)
nms = s.getCameraNames()
cams = []
if(type(nms) == list):
for n in nms:
cams.append(s.getCamera(n))
for c in cams:
d = self._mxs_camera(c)
if(d is not None):
data.append(d)
# set active camera
if(len(cams) > 1):
# if there is just one camera, this behaves badly.
# use it just when there are two or more cameras..
active_cam = s.getActiveCamera()
active_cam_name = active_cam.getName()
for o in data:
if(o['type'] == 'CAMERA'):
if(o['name'] == active_cam_name):
o['active'] = True
else:
for o in data:
if(o['type'] == 'CAMERA'):
o['active'] = True
return data
def sun(self):
s = self.mxs
data = []
env = s.getEnvironment()
if(env.getSunProperties()[0] == 1):
log("converting sun..", 2)
if(env.getSunPositionType() == 2):
v, _ = env.getSunDirection()
else:
v, _ = env.getSunDirectionUsedForRendering()
d = {'name': "The Sun",
'xyz': (v.x(), v.y(), v.z()),
'type': 'SUN', }
data.append(d)
return data
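# A minimal usage sketch for MXSReader; the path is a hypothetical example.
# objects(), cameras() and sun() each return lists of plain dicts as built above.
#
#   r = MXSReader("/tmp/scene.mxs")
#   emitters = r.objects(only_emitters=True)
#   everything = r.objects()
#   cams = r.cameras()
#   suns = r.sun()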
class MXSSceneWrapper():
def __init__(self, load_extensions=True, ):
if(__name__ != "__main__"):
if(platform.system() == 'Darwin'):
raise ImportError("No pymaxwell directly in Blender on Mac OS X..")
log(self.__class__.__name__, 1, LogStyles.MESSAGE, prefix="* ", )
log("creating new scene..", 2, prefix="* ", )
self.mxs = Cmaxwell(mwcallback)
pid = utils.get_plugin_id()
if(pid != ""):
# write here directly, even though it is also part of scene data, but api change just for this is pointless..
self.mxs.setPluginID(pid)
self.mgr = None
if(load_extensions):
log("loadinf extensions..", 2, prefix="* ", )
self.mgr = CextensionManager.instance()
self.mgr.loadAllExtensions()
class MXMReader():
def __init__(self, mxm_path, ):
def texture(t):
if(t is None):
return None
if(t.isEmpty()):
return None
d = {'path': t.getPath(),
'use_global_map': t.useGlobalMap,
'channel': t.uvwChannelID,
'brightness': t.brightness * 100,
'contrast': t.contrast * 100,
'saturation': t.saturation * 100,
'hue': t.hue * 180,
'rotation': t.rotation,
'invert': t.invert,
'interpolation': t.typeInterpolation,
'use_alpha': t.useAlpha,
'repeat': [t.scale.x(), t.scale.y()],
'mirror': [t.uIsMirrored, t.vIsMirrored],
'offset': [t.offset.x(), t.offset.y()],
'clamp': [int(t.clampMin * 255), int(t.clampMax * 255)],
'tiling_units': t.useAbsoluteUnits,
'tiling_method': [t.uIsTiled, t.vIsTiled],
'normal_mapping_flip_red': t.normalMappingFlipRed,
'normal_mapping_flip_green': t.normalMappingFlipGreen,
'normal_mapping_full_range_blue': t.normalMappingFullRangeBlue, }
# t.cosA
# t.doGammaCorrection
# t.sinA
# t.theTextureExtensions
d['procedural'] = []
if(t.hasProceduralTextures()):
n = t.getProceduralTexturesCount()
for i in range(n):
pd = extension(None, None, t, i)
d['procedural'].append(pd)
return d
def material(s, m):
data = {}
if(m.isNull()):
return data
# defaults
bsdfd = {'visible': True, 'weight': 100.0, 'weight_map_enabled': False, 'weight_map': None, 'ior': 0, 'complex_ior': "",
'reflectance_0': (0.6, 0.6, 0.6, ), 'reflectance_0_map_enabled': False, 'reflectance_0_map': None,
'reflectance_90': (1.0, 1.0, 1.0, ), 'reflectance_90_map_enabled': False, 'reflectance_90_map': None,
'transmittance': (0.0, 0.0, 0.0), 'transmittance_map_enabled': False, 'transmittance_map': None,
'attenuation': 1.0, 'attenuation_units': 0, 'nd': 3.0, 'force_fresnel': False, 'k': 0.0, 'abbe': 1.0,
'r2_enabled': False, 'r2_falloff_angle': 75.0, 'r2_influence': 0.0,
'roughness': 100.0, 'roughness_map_enabled': False, 'roughness_map': None,
'bump': 30.0, 'bump_map_enabled': False, 'bump_map': None, 'bump_map_use_normal': False, 'bump_normal': 100.0,
'anisotropy': 0.0, 'anisotropy_map_enabled': False, 'anisotropy_map': None,
'anisotropy_angle': 0.0, 'anisotropy_angle_map_enabled': False, 'anisotropy_angle_map': None,
'scattering': (0.5, 0.5, 0.5, ), 'coef': 0.0, 'asymmetry': 0.0,
'single_sided': False, 'single_sided_value': 1.0, 'single_sided_map_enabled': False, 'single_sided_map': None, 'single_sided_min': 0.001, 'single_sided_max': 10.0, }
coatingd = {'enabled': False,
'thickness': 500.0, 'thickness_map_enabled': False, 'thickness_map': None, 'thickness_map_min': 100.0, 'thickness_map_max': 1000.0,
'ior': 0, 'complex_ior': "",
'reflectance_0': (0.6, 0.6, 0.6, ), 'reflectance_0_map_enabled': False, 'reflectance_0_map': None,
'reflectance_90': (1.0, 1.0, 1.0, ), 'reflectance_90_map_enabled': False, 'reflectance_90_map': None,
'nd': 3.0, 'force_fresnel': False, 'k': 0.0, 'r2_enabled': False, 'r2_falloff_angle': 75.0, }
displacementd = {'enabled': False, 'map': None, 'type': 1, 'subdivision': 5, 'adaptive': False, 'subdivision_method': 0,
'offset': 0.5, 'smoothing': True, 'uv_interpolation': 2, 'height': 2.0, 'height_units': 0,
'v3d_preset': 0, 'v3d_transform': 0, 'v3d_rgb_mapping': 0, 'v3d_scale': (1.0, 1.0, 1.0), }
emitterd = {'enabled': False, 'type': 0, 'ies_data': "", 'ies_intensity': 1.0,
'spot_map_enabled': False, 'spot_map': "", 'spot_cone_angle': 45.0, 'spot_falloff_angle': 10.0, 'spot_falloff_type': 0, 'spot_blur': 1.0,
'emission': 0, 'color': (1.0, 1.0, 1.0, ), 'color_black_body_enabled': False, 'color_black_body': 6500.0,
'luminance': 0, 'luminance_power': 40.0, 'luminance_efficacy': 17.6, 'luminance_output': 100.0, 'temperature_value': 6500.0,
'hdr_map': None, 'hdr_intensity': 1.0, }
layerd = {'visible': True, 'opacity': 100.0, 'opacity_map_enabled': False, 'opacity_map': None, 'blending': 0, }
globald = {'override_map': None, 'bump': 30.0, 'bump_map_enabled': False, 'bump_map': None, 'bump_map_use_normal': False, 'bump_normal': 100.0,
'dispersion': False, 'shadow': False, 'matte': False, 'priority': 0, 'id': (0.0, 0.0, 0.0), 'active_display_map': None, }
# structure
structure = []
nl, _ = m.getNumLayers()
for i in range(nl):
l = m.getLayer(i)
ln, _ = l.getName()
nb, _ = l.getNumBSDFs()
bs = []
for j in range(nb):
b = l.getBSDF(j)
bn = b.getName()
bs.append([bn, b])
ls = [ln, l, bs]
structure.append(ls)
# default data
data['global_props'] = globald.copy()
data['displacement'] = displacementd.copy()
data['layers'] = []
for i, sl in enumerate(structure):
bsdfs = []
for j, sb in enumerate(sl[2]):
bsdfs.append({'name': sb[0],
'bsdf_props': bsdfd.copy(),
'coating': coatingd.copy(), })
layer = {'name': sl[0],
'layer_props': layerd.copy(),
'bsdfs': bsdfs,
'emitter': emitterd.copy(), }
data['layers'].append(layer)
# custom data
def global_props(m, d):
t, _ = m.getGlobalMap()
d['override_map'] = texture(t)
a, _ = m.getAttribute('bump')
if(a.activeType == MAP_TYPE_BITMAP):
d['bump_map_enabled'] = True
d['bump_map'] = texture(a.textureMap)
d['bump_map_use_normal'] = m.getNormalMapState()[0]
if(d['bump_map_use_normal']):
d['bump_normal'] = a.value
else:
d['bump'] = a.value
else:
d['bump_map_enabled'] = False
d['bump_map'] = None
d['bump_map_use_normal'] = m.getNormalMapState()[0]
if(d['bump_map_use_normal']):
d['bump_normal'] = a.value
else:
d['bump'] = a.value
d['dispersion'] = m.getDispersion()[0]
d['shadow'] = m.getMatteShadow()[0]
d['matte'] = m.getMatte()[0]
d['priority'] = m.getNestedPriority()[0]
c, _ = m.getColorID()
d['id'] = [c.r(), c.g(), c.b()]
return d
data['global_props'] = global_props(m, data['global_props'])
def displacement(m, d):
if(not m.isDisplacementEnabled()[0]):
return d
d['enabled'] = True
t, _ = m.getDisplacementMap()
d['map'] = texture(t)
displacementType, subdivisionLevel, smoothness, offset, subdivisionType, interpolationUvType, minLOD, maxLOD, _ = m.getDisplacementCommonParameters()
height, absoluteHeight, adaptive, _ = m.getHeightMapDisplacementParameters()
scale, transformType, mapping, preset, _ = m.getVectorDisplacementParameters()
d['type'] = displacementType
d['subdivision'] = subdivisionLevel
d['adaptive'] = adaptive
d['subdivision_method'] = subdivisionType
d['offset'] = offset
d['smoothing'] = bool(smoothness)
d['uv_interpolation'] = interpolationUvType
d['height'] = height
d['height_units'] = absoluteHeight
d['v3d_preset'] = preset
d['v3d_transform'] = transformType
d['v3d_rgb_mapping'] = mapping
d['v3d_scale'] = (scale.x(), scale.y(), scale.z(), )
return d
data['displacement'] = displacement(m, data['displacement'])
def cattribute_rgb(a):
if(a.activeType == MAP_TYPE_BITMAP):
c = (a.rgb.r(), a.rgb.g(), a.rgb.b())
e = True
m = texture(a.textureMap)
else:
c = (a.rgb.r(), a.rgb.g(), a.rgb.b())
e = False
m = None
return c, e, m
def cattribute_value(a):
if(a.activeType == MAP_TYPE_BITMAP):
v = a.value
e = True
m = texture(a.textureMap)
else:
v = a.value
e = False
m = None
return v, e, m
def layer_props(l, d):
d['visible'] = l.getEnabled()[0]
d['blending'] = l.getStackedBlendingMode()[0]
a, _ = l.getAttribute('weight')
if(a.activeType == MAP_TYPE_BITMAP):
d['opacity'] = a.value
d['opacity_map_enabled'] = True
d['opacity_map'] = texture(a.textureMap)
else:
d['opacity'] = a.value
d['opacity_map_enabled'] = False
d['opacity_map'] = None
return d
def emitter(l, d):
e = l.getEmitter()
if(e.isNull()):
d['enabled'] = False
return d
d['enabled'] = True
d['type'] = e.getLobeType()[0]
d['ies_data'] = e.getLobeIES()
d['ies_intensity'] = e.getIESLobeIntensity()[0]
t, _ = e.getLobeImageProjectedMap()
d['spot_map_enabled'] = (not t.isEmpty())
d['spot_map'] = texture(t)
d['spot_cone_angle'] = e.getSpotConeAngle()[0]
d['spot_falloff_angle'] = e.getSpotFallOffAngle()[0]
d['spot_falloff_type'] = e.getSpotFallOffType()[0]
d['spot_blur'] = e.getSpotBlur()[0]
d['emission'] = e.getActiveEmissionType()[0]
ep, _ = e.getPair()
colorType, units, _ = e.getActivePair()
d['color'] = (ep.rgb.r(), ep.rgb.g(), ep.rgb.b(), )
d['color_black_body'] = ep.temperature
d['luminance'] = units
if(units == EMISSION_UNITS_WATTS_AND_LUMINOUS_EFFICACY):
d['luminance_power'] = ep.watts
d['luminance_efficacy'] = ep.luminousEfficacy
elif(units == EMISSION_UNITS_LUMINOUS_POWER):
d['luminance_output'] = ep.luminousPower
elif(units == EMISSION_UNITS_ILLUMINANCE):
d['luminance_output'] = ep.illuminance
elif(units == EMISSION_UNITS_LUMINOUS_INTENSITY):
d['luminance_output'] = ep.luminousIntensity
elif(units == EMISSION_UNITS_LUMINANCE):
d['luminance_output'] = ep.luminance
if(colorType == EMISSION_COLOR_TEMPERATURE):
d['color_black_body_enabled'] = True
d['temperature_value'] = e.getTemperature()[0]
a, _ = e.getMXI()
if(a.activeType == MAP_TYPE_BITMAP):
d['hdr_map'] = texture(a.textureMap)
d['hdr_intensity'] = a.value
else:
d['hdr_map'] = None
d['hdr_intensity'] = a.value
return d
def bsdf_props(b, d):
d['visible'] = b.getState()[0]
a, _ = b.getWeight()
if(a.activeType == MAP_TYPE_BITMAP):
d['weight_map_enabled'] = True
d['weight'] = a.value
d['weight_map'] = texture(a.textureMap)
else:
d['weight_map_enabled'] = False
d['weight'] = a.value
d['weight_map'] = None
r = b.getReflectance()
d['ior'] = r.getActiveIorMode()[0]
d['complex_ior'] = r.getComplexIor()
d['reflectance_0'], d['reflectance_0_map_enabled'], d['reflectance_0_map'] = cattribute_rgb(r.getAttribute('color')[0])
d['reflectance_90'], d['reflectance_90_map_enabled'], d['reflectance_90_map'] = cattribute_rgb(r.getAttribute('color.tangential')[0])
d['transmittance'], d['transmittance_map_enabled'], d['transmittance_map'] = cattribute_rgb(r.getAttribute('transmittance.color')[0])
d['attenuation_units'], d['attenuation'] = r.getAbsorptionDistance()
d['nd'], d['abbe'], _ = r.getIOR()
d['force_fresnel'], _ = r.getForceFresnel()
d['k'], _ = r.getConductor()
d['r2_falloff_angle'], d['r2_influence'], d['r2_enabled'], _ = r.getFresnelCustom()
d['roughness'], d['roughness_map_enabled'], d['roughness_map'] = cattribute_value(b.getAttribute('roughness')[0])
d['bump_map_use_normal'] = b.getNormalMapState()[0]
if(d['bump_map_use_normal']):
d['bump_normal'], d['bump_map_enabled'], d['bump_map'] = cattribute_value(b.getAttribute('bump')[0])
else:
d['bump'], d['bump_map_enabled'], d['bump_map'] = cattribute_value(b.getAttribute('bump')[0])
d['anisotropy'], d['anisotropy_map_enabled'], d['anisotropy_map'] = cattribute_value(b.getAttribute('anisotropy')[0])
d['anisotropy_angle'], d['anisotropy_angle_map_enabled'], d['anisotropy_angle_map'] = cattribute_value(b.getAttribute('angle')[0])
a, _ = r.getAttribute('scattering')
d['scattering'] = (a.rgb.r(), a.rgb.g(), a.rgb.b(), )
d['coef'], d['asymmetry'], d['single_sided'], _ = r.getScatteringParameters()
d['single_sided_value'], d['single_sided_map_enabled'], d['single_sided_map'] = cattribute_value(r.getScatteringThickness()[0])
d['single_sided_min'], d['single_sided_max'], _ = r.getScatteringThicknessRange()
return d
def coating(b, d):
nc, _ = b.getNumCoatings()
if(nc > 0):
c = b.getCoating(0)
else:
d['enabled'] = False
return d
d['enabled'] = True
d['thickness'], d['thickness_map_enabled'], d['thickness_map'] = cattribute_value(c.getThickness()[0])
d['thickness_map_min'], d['thickness_map_max'], _ = c.getThicknessRange()
r = c.getReflectance()
d['ior'] = r.getActiveIorMode()[0]
d['complex_ior'] = r.getComplexIor()
d['reflectance_0'], d['reflectance_0_map_enabled'], d['reflectance_0_map'] = cattribute_rgb(r.getAttribute('color')[0])
d['reflectance_90'], d['reflectance_90_map_enabled'], d['reflectance_90_map'] = cattribute_rgb(r.getAttribute('color.tangential')[0])
d['nd'], _, _ = r.getIOR()
d['force_fresnel'], _ = r.getForceFresnel()
d['k'], _ = r.getConductor()
d['r2_falloff_angle'], _, d['r2_enabled'], _ = r.getFresnelCustom()
return d
for i, sl in enumerate(structure):
l = sl[1]
data['layers'][i]['layer_props'] = layer_props(l, data['layers'][i]['layer_props'])
data['layers'][i]['emitter'] = emitter(l, data['layers'][i]['emitter'])
for j, bs in enumerate(sl[2]):
b = bs[1]
data['layers'][i]['bsdfs'][j]['bsdf_props'] = bsdf_props(b, data['layers'][i]['bsdfs'][j]['bsdf_props'])
data['layers'][i]['bsdfs'][j]['coating'] = coating(b, data['layers'][i]['bsdfs'][j]['coating'])
return data
def extension(s, m, pt=None, pi=None, ):
def texture(t):
if(t is None):
return None
if(t.isEmpty()):
return None
d = {'path': t.getPath(),
'use_global_map': t.useGlobalMap,
'channel': t.uvwChannelID,
'brightness': t.brightness * 100,
'contrast': t.contrast * 100,
'saturation': t.saturation * 100,
'hue': t.hue * 180,
'rotation': t.rotation,
'invert': t.invert,
'interpolation': t.typeInterpolation,
'use_alpha': t.useAlpha,
'repeat': [t.scale.x(), t.scale.y()],
'mirror': [t.uIsMirrored, t.vIsMirrored],
'offset': [t.offset.x(), t.offset.y()],
'clamp': [int(t.clampMin * 255), int(t.clampMax * 255)],
'tiling_units': t.useAbsoluteUnits,
'tiling_method': [t.uIsTiled, t.vIsTiled],
'normal_mapping_flip_red': t.normalMappingFlipRed,
'normal_mapping_flip_green': t.normalMappingFlipGreen,
'normal_mapping_full_range_blue': t.normalMappingFullRangeBlue, }
return d
def mxparamlistarray(v):
return None
def rgb(v):
return (v.r(), v.g(), v.b())
if(pt is not None and pi is not None):
params = pt.getProceduralTexture(pi)
else:
params, _ = m.getMaterialModifierExtensionParams()
types = [(0, 'UCHAR', params.getByte, ),
(1, 'UINT', params.getUInt, ),
(2, 'INT', params.getInt, ),
(3, 'FLOAT', params.getFloat, ),
(4, 'DOUBLE', params.getDouble, ),
(5, 'STRING', params.getString, ),
(6, 'FLOATARRAY', params.getFloatArray, ),
(7, 'DOUBLEARRAY', params.getDoubleArray, ),
(8, 'BYTEARRAY', params.getByteArray, ),
(9, 'INTARRAY', params.getIntArray, ),
(10, 'MXPARAMLIST', params.getTextureMap, ),
(11, 'MXPARAMLISTARRAY', mxparamlistarray, ),
(12, 'RGB', params.getRgb, ), ]
d = {}
for i in range(params.getNumItems()):
name, data, _, _, data_type, _, data_count, _ = params.getByIndex(i)
_, _, f = types[data_type]
k = name
if(data_type not in [10, 11, 12]):
v, _ = f(name)
else:
if(data_type == 10):
v = texture(f(name)[0])
elif(data_type == 11):
pass
elif(data_type == 12):
v = rgb(f(name)[0])
d[k] = v
return d
log("{0} {1} {0}".format("-" * 30, self.__class__.__name__), 0, LogStyles.MESSAGE, prefix="", )
log("path: {}".format(mxm_path), 1, LogStyles.MESSAGE)
s = Cmaxwell(mwcallback)
m = s.readMaterial(mxm_path)
self.data = material(s, m)
if(m.hasMaterialModifier()):
self.data['extension'] = extension(s, m)
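# A minimal usage sketch for MXMReader; the path is a hypothetical example.
# For a valid (non-null) material, self.data holds the nested dicts built by
# material() above, e.g. data['layers'] and data['global_props'].
#
#   mxm = MXMReader("/tmp/material.mxm")
#   layers = mxm.data['layers']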
class MXSReferenceReader():
def __init__(self, path, ):
log("maxwell meshes to data:", 1)
log("reading mxs scene from: {0}".format(path), 2)
scene = Cmaxwell(mwcallback)
ok = scene.readMXS(path)
if(not ok):
raise RuntimeError("Error during reading scene {}".format(path))
nms = self.get_objects_names(scene)
data = []
log("reading meshes..", 2)
for n in nms:
d = None
o = scene.getObject(n)
if(not o.isNull()):
if(o.isMesh()[0] == 1 and o.isInstance()[0] == 0):
d = self.object(o)
if(d is not None):
data.append(d)
log("reading instances..", 2)
for n in nms:
d = None
o = scene.getObject(n)
if(not o.isNull()):
if(o.isMesh()[0] == 0 and o.isInstance()[0] == 1):
io = o.getInstanced()
ion = io.getName()[0]
for a in data:
if(a['name'] == ion):
b, p = self.global_transform(o)
d = {'name': o.getName()[0],
'base': b,
'pivot': p,
'vertices': a['vertices'][:], }
if(d is not None):
data.append(d)
self.data = data
log("done.", 2)
def get_objects_names(self, scene):
it = CmaxwellObjectIterator()
o = it.first(scene)
l = []
while not o.isNull():
name, _ = o.getName()
l.append(name)
o = it.next()
return l
def object(self, o):
is_instance, _ = o.isInstance()
is_mesh, _ = o.isMesh()
if(is_instance == 0 and is_mesh == 0):
return None
def get_verts(o):
vs = []
nv, _ = o.getVerticesCount()
for i in range(nv):
v, _ = o.getVertex(i, 0)
vs.append((v.x(), v.y(), v.z()))
return vs
b, p = self.global_transform(o)
r = {'name': o.getName()[0],
'base': b,
'pivot': p,
'vertices': [], }
if(is_instance == 1):
io = o.getInstanced()
r['vertices'] = get_verts(io)
else:
r['vertices'] = get_verts(o)
return r
def global_transform(self, o):
cb, _ = o.getWorldTransform()
o = cb.origin
x = cb.xAxis
y = cb.yAxis
z = cb.zAxis
rb = [[o.x(), o.y(), o.z()], [x.x(), x.y(), x.z()], [y.x(), y.y(), y.z()], [z.x(), z.y(), z.z()]]
rp = ((0.0, 0.0, 0.0), (1.0, 0.0, 0.0), (0.0, 1.0, 0.0), (0.0, 0.0, 1.0), )
return rb, rp
| gpl-2.0 |
albertomurillo/ansible | lib/ansible/modules/network/aci/aci_firmware_source.py | 27 | 7594 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Dag Wieers (dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_firmware_source
short_description: Manage firmware image sources (firmware:OSource)
description:
- Manage firmware image sources on Cisco ACI fabrics.
version_added: '2.5'
options:
source:
description:
- The identifying name for the outside source of images, such as an HTTP or SCP server.
type: str
required: yes
aliases: [ name, source_name ]
polling_interval:
description:
- Polling interval in minutes.
type: int
url_protocol:
description:
- The Firmware download protocol.
type: str
choices: [ http, local, scp, usbkey ]
default: scp
aliases: [ url_proto ]
url:
description:
- The firmware URL for the image(s) on the source.
type: str
url_password:
description:
- The Firmware password or key string.
type: str
url_username:
description:
- The username for the source.
type: str
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
seealso:
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(firmware:OSource).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Dag Wieers (@dagwieers)
'''
EXAMPLES = r'''
- name: Add firmware source
aci_firmware_source:
host: apic
username: admin
password: SomeSecretPassword
source: aci-msft-pkg-3.1.1i.zip
url: foo.bar.cisco.com/download/cisco/aci/aci-msft-pkg-3.1.1i.zip
url_protocol: http
state: present
delegate_to: localhost
- name: Remove firmware source
aci_firmware_source:
host: apic
username: admin
password: SomeSecretPassword
source: aci-msft-pkg-3.1.1i.zip
state: absent
delegate_to: localhost
- name: Query a specific firmware source
aci_firmware_source:
host: apic
username: admin
password: SomeSecretPassword
source: aci-msft-pkg-3.1.1i.zip
state: query
delegate_to: localhost
register: query_result
- name: Query all firmware sources
aci_firmware_source:
host: apic
username: admin
password: SomeSecretPassword
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
source=dict(type='str', aliases=['name', 'source_name']), # Not required for querying all objects
polling_interval=dict(type='int'),
url=dict(type='str'),
url_username=dict(type='str'),
url_password=dict(type='str', no_log=True),
url_protocol=dict(type='str', default='scp', choices=['http', 'local', 'scp', 'usbkey'], aliases=['url_proto']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['source']],
['state', 'present', ['url_protocol', 'source', 'url']],
],
)
polling_interval = module.params['polling_interval']
url_protocol = module.params['url_protocol']
state = module.params['state']
source = module.params['source']
url = module.params['url']
url_password = module.params['url_password']
url_username = module.params['url_username']
aci = ACIModule(module)
aci.construct_url(
root_class=dict(
aci_class='firmwareOSource',
aci_rn='fabric/fwrepop',
module_object=source,
target_filter={'name': source},
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='firmwareOSource',
class_config=dict(
name=source,
url=url,
password=url_password,
pollingInterval=polling_interval,
proto=url_protocol,
user=url_username,
),
)
aci.get_diff(aci_class='firmwareOSource')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
mistercrunch/panoramix | superset/views/base_api.py | 2 | 21953 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import functools
import logging
from typing import Any, Callable, cast, Dict, List, Optional, Set, Tuple, Type, Union
from apispec import APISpec
from apispec.exceptions import DuplicateComponentNameError
from flask import Blueprint, g, Response
from flask_appbuilder import AppBuilder, Model, ModelRestApi
from flask_appbuilder.api import expose, protect, rison, safe
from flask_appbuilder.models.filters import BaseFilter, Filters
from flask_appbuilder.models.sqla.filters import FilterStartsWith
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_babel import lazy_gettext as _
from marshmallow import fields, Schema
from sqlalchemy import and_, distinct, func
from sqlalchemy.orm.query import Query
from superset.extensions import db, event_logger, security_manager
from superset.models.core import FavStar
from superset.models.dashboard import Dashboard
from superset.models.slice import Slice
from superset.schemas import error_payload_content
from superset.sql_lab import Query as SqllabQuery
from superset.stats_logger import BaseStatsLogger
from superset.typing import FlaskResponse
from superset.utils.core import time_function
logger = logging.getLogger(__name__)
get_related_schema = {
"type": "object",
"properties": {
"page_size": {"type": "integer"},
"page": {"type": "integer"},
"include_ids": {"type": "array", "items": {"type": "integer"}},
"filter": {"type": "string"},
},
}
class RelatedResultResponseSchema(Schema):
value = fields.Integer(description="The related item identifier")
text = fields.String(description="The related item string representation")
class RelatedResponseSchema(Schema):
count = fields.Integer(description="The total number of related values")
result = fields.List(fields.Nested(RelatedResultResponseSchema))
class DistinctResultResponseSchema(Schema):
text = fields.String(description="The distinct item")
class DistincResponseSchema(Schema):
count = fields.Integer(description="The total number of distinct values")
result = fields.List(fields.Nested(DistinctResultResponseSchema))
def statsd_metrics(f: Callable[..., Any]) -> Callable[..., Any]:
"""
Handle sending all statsd metrics from the REST API
"""
def wraps(self: "BaseSupersetModelRestApi", *args: Any, **kwargs: Any) -> Response:
try:
duration, response = time_function(f, self, *args, **kwargs)
except Exception as ex:
self.incr_stats("error", f.__name__)
raise ex
self.send_stats_metrics(response, f.__name__, duration)
return response
return functools.update_wrapper(wraps, f)
class RelatedFieldFilter:
# data class to specify what filter to use on a /related endpoint
# pylint: disable=too-few-public-methods
def __init__(self, field_name: str, filter_class: Type[BaseFilter]):
self.field_name = field_name
self.filter_class = filter_class
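# A minimal sketch of how a subclass typically wires this up; the "owners" and
# "first_name" names are hypothetical placeholders.
#
#   related_field_filters = {
#       "owners": RelatedFieldFilter("first_name", FilterStartsWith),
#   }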
class BaseFavoriteFilter(BaseFilter): # pylint: disable=too-few-public-methods
"""
Base custom filter for GET list endpoints that filters dashboards, slices
and queries by whether the current user has favorited them
"""
name = _("Is favorite")
arg_name = ""
class_name = ""
""" The FavStar class_name to user """
model: Type[Union[Dashboard, Slice, SqllabQuery]] = Dashboard
""" The SQLAlchemy model """
def apply(self, query: Query, value: Any) -> Query:
# If anonymous user filter nothing
if security_manager.current_user is None:
return query
users_favorite_query = db.session.query(FavStar.obj_id).filter(
and_(
FavStar.user_id == g.user.get_id(),
FavStar.class_name == self.class_name,
)
)
if value:
return query.filter(and_(self.model.id.in_(users_favorite_query)))
return query.filter(and_(~self.model.id.in_(users_favorite_query)))
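# A minimal sketch of a concrete favorite filter; the attribute values below are
# hypothetical placeholders, the pattern is to fill in arg_name, class_name and
# model on a subclass.
#
#   class DashboardFavoriteFilter(BaseFavoriteFilter):
#       arg_name = "dashboard_is_favorite"
#       class_name = "Dashboard"
#       model = Dashboard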
class BaseSupersetModelRestApi(ModelRestApi):
"""
Extends FAB's ModelRestApi to implement Superset-specific generic functionality
"""
csrf_exempt = False
method_permission_name = {
"bulk_delete": "delete",
"data": "list",
"data_from_cache": "list",
"delete": "delete",
"distinct": "list",
"export": "mulexport",
"import_": "add",
"get": "show",
"get_list": "list",
"info": "list",
"post": "add",
"put": "edit",
"refresh": "edit",
"related": "list",
"related_objects": "list",
"schemas": "list",
"select_star": "list",
"table_metadata": "list",
"test_connection": "post",
"thumbnail": "list",
"viz_types": "list",
}
order_rel_fields: Dict[str, Tuple[str, str]] = {}
"""
Impose ordering on related fields query::
order_rel_fields = {
"<RELATED_FIELD>": ("<RELATED_FIELD_FIELD>", "<asc|desc>"),
...
}
""" # pylint: disable=pointless-string-statement
related_field_filters: Dict[str, Union[RelatedFieldFilter, str]] = {}
"""
Declare the filters for related fields::
related_fields = {
"<RELATED_FIELD>": <RelatedFieldFilter>)
}
""" # pylint: disable=pointless-string-statement
filter_rel_fields: Dict[str, BaseFilter] = {}
"""
Declare the related field base filter::
filter_rel_fields_field = {
"<RELATED_FIELD>": "<FILTER>")
}
""" # pylint: disable=pointless-string-statement
allowed_rel_fields: Set[str] = set()
"""
Declare a set of allowed related fields that the `related` endpoint supports
""" # pylint: disable=pointless-string-statement
text_field_rel_fields: Dict[str, str] = {}
"""
Declare an alternative for the human readable representation of the Model object::
text_field_rel_fields = {
"<RELATED_FIELD>": "<RELATED_OBJECT_FIELD>"
}
""" # pylint: disable=pointless-string-statement
allowed_distinct_fields: Set[str] = set()
openapi_spec_component_schemas: Tuple[Type[Schema], ...] = tuple()
"""
Add extra schemas to the OpenAPI component schemas section
""" # pylint: disable=pointless-string-statement
add_columns: List[str]
edit_columns: List[str]
list_columns: List[str]
show_columns: List[str]
responses = {
"400": {"description": "Bad request", "content": error_payload_content},
"401": {"description": "Unauthorized", "content": error_payload_content},
"403": {"description": "Forbidden", "content": error_payload_content},
"404": {"description": "Not found", "content": error_payload_content},
"422": {
"description": "Could not process entity",
"content": error_payload_content,
},
"500": {"description": "Fatal error", "content": error_payload_content},
}
def __init__(self) -> None:
# Setup statsd
self.stats_logger = BaseStatsLogger()
# Add base API spec base query parameter schemas
if self.apispec_parameter_schemas is None: # type: ignore
self.apispec_parameter_schemas = {}
self.apispec_parameter_schemas["get_related_schema"] = get_related_schema
if self.openapi_spec_component_schemas is None:
self.openapi_spec_component_schemas = ()
self.openapi_spec_component_schemas = self.openapi_spec_component_schemas + (
RelatedResponseSchema,
DistincResponseSchema,
)
super().__init__()
def add_apispec_components(self, api_spec: APISpec) -> None:
"""
Adds extra OpenAPI schema spec components; these are declared
on the `openapi_spec_component_schemas` class property
"""
for schema in self.openapi_spec_component_schemas:
try:
api_spec.components.schema(
schema.__name__, schema=schema,
)
except DuplicateComponentNameError:
pass
super().add_apispec_components(api_spec)
def create_blueprint(
self, appbuilder: AppBuilder, *args: Any, **kwargs: Any
) -> Blueprint:
self.stats_logger = self.appbuilder.get_app.config["STATS_LOGGER"]
return super().create_blueprint(appbuilder, *args, **kwargs)
def _init_properties(self) -> None:
model_id = self.datamodel.get_pk_name()
if self.list_columns is None and not self.list_model_schema:
self.list_columns = [model_id]
if self.show_columns is None and not self.show_model_schema:
self.show_columns = [model_id]
if self.edit_columns is None and not self.edit_model_schema:
self.edit_columns = [model_id]
if self.add_columns is None and not self.add_model_schema:
self.add_columns = [model_id]
super()._init_properties()
def _get_related_filter(
self, datamodel: SQLAInterface, column_name: str, value: str
) -> Filters:
filter_field = self.related_field_filters.get(column_name)
if isinstance(filter_field, str):
filter_field = RelatedFieldFilter(cast(str, filter_field), FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = datamodel.get_filters(search_columns)
base_filters = self.filter_rel_fields.get(column_name)
if base_filters:
filters.add_filter_list(base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_distinct_filter(self, column_name: str, value: str) -> Filters:
filter_field = RelatedFieldFilter(column_name, FilterStartsWith)
filter_field = cast(RelatedFieldFilter, filter_field)
search_columns = [filter_field.field_name] if filter_field else None
filters = self.datamodel.get_filters(search_columns)
filters.add_filter_list(self.base_filters)
if value and filter_field:
filters.add_filter(
filter_field.field_name, filter_field.filter_class, value
)
return filters
def _get_text_for_model(self, model: Model, column_name: str) -> str:
if column_name in self.text_field_rel_fields:
model_column_name = self.text_field_rel_fields.get(column_name)
if model_column_name:
return getattr(model, model_column_name)
return str(model)
def _get_result_from_rows(
self, datamodel: SQLAInterface, rows: List[Model], column_name: str
) -> List[Dict[str, Any]]:
return [
{
"value": datamodel.get_pk_value(row),
"text": self._get_text_for_model(row, column_name),
}
for row in rows
]
def _add_extra_ids_to_result(
self,
datamodel: SQLAInterface,
column_name: str,
ids: List[int],
result: List[Dict[str, Any]],
) -> None:
if ids:
# Filter out already present values on the result
values = [row["value"] for row in result]
ids = [id_ for id_ in ids if id_ not in values]
pk_col = datamodel.get_pk()
# Fetch requested values from ids
extra_rows = db.session.query(datamodel.obj).filter(pk_col.in_(ids)).all()
result += self._get_result_from_rows(datamodel, extra_rows, column_name)
def incr_stats(self, action: str, func_name: str) -> None:
"""
Proxy function for statsd.incr to impose a key structure for REST API's
:param action: String with an action name eg: error, success
:param func_name: The function name
"""
self.stats_logger.incr(f"{self.__class__.__name__}.{func_name}.{action}")
def timing_stats(self, action: str, func_name: str, value: float) -> None:
"""
Proxy function for statsd.timing to impose a key structure for REST APIs
:param action: String with an action name eg: error, success
:param func_name: The function name
:param value: A float with the time it took for the endpoint to execute
"""
self.stats_logger.timing(
f"{self.__class__.__name__}.{func_name}.{action}", value
)
def send_stats_metrics(
self, response: Response, key: str, time_delta: Optional[float] = None
) -> None:
"""
Helper function to handle sending statsd metrics
:param response: flask response object, will evaluate if it was an error
:param key: The function name
:param time_delta: Optional time it took for the endpoint to execute
"""
if 200 <= response.status_code < 400:
self.incr_stats("success", key)
else:
self.incr_stats("error", key)
if time_delta:
self.timing_stats("time", key, time_delta)
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.info",
object_ref=False,
log_to_statsd=False,
)
def info_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB _info endpoint
"""
duration, response = time_function(super().info_headless, **kwargs)
self.send_stats_metrics(response, self.info.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get",
object_ref=False,
log_to_statsd=False,
)
def get_headless(self, pk: int, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET endpoint
"""
duration, response = time_function(super().get_headless, pk, **kwargs)
self.send_stats_metrics(response, self.get.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.get_list",
object_ref=False,
log_to_statsd=False,
)
def get_list_headless(self, **kwargs: Any) -> Response:
"""
Add statsd metrics to builtin FAB GET list endpoint
"""
duration, response = time_function(super().get_list_headless, **kwargs)
self.send_stats_metrics(response, self.get_list.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.post",
object_ref=False,
log_to_statsd=False,
)
def post_headless(self) -> Response:
"""
Add statsd metrics to builtin FAB POST endpoint
"""
duration, response = time_function(super().post_headless)
self.send_stats_metrics(response, self.post.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.put",
object_ref=False,
log_to_statsd=False,
)
def put_headless(self, pk: int) -> Response:
"""
Add statsd metrics to builtin FAB PUT endpoint
"""
duration, response = time_function(super().put_headless, pk)
self.send_stats_metrics(response, self.put.__name__, duration)
return response
@event_logger.log_this_with_context(
action=lambda self, *args, **kwargs: f"{self.__class__.__name__}.delete",
object_ref=False,
log_to_statsd=False,
)
def delete_headless(self, pk: int) -> Response:
"""
Add statsd metrics to builtin FAB DELETE endpoint
"""
duration, response = time_function(super().delete_headless, pk)
self.send_stats_metrics(response, self.delete.__name__, duration)
return response
@expose("/related/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def related(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get related fields data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Related column data
content:
application/json:
schema:
$ref: "#/components/schemas/RelatedResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_rel_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._handle_page_args(args)
try:
datamodel = self.datamodel.get_related_interface(column_name)
except KeyError:
return self.response_404()
page, page_size = self._sanitize_page_args(page, page_size)
# handle ordering
order_field = self.order_rel_fields.get(column_name)
if order_field:
order_column, order_direction = order_field
else:
order_column, order_direction = "", ""
# handle filters
filters = self._get_related_filter(datamodel, column_name, args.get("filter"))
# Make the query
_, rows = datamodel.query(
filters, order_column, order_direction, page=page, page_size=page_size
)
# produce response
result = self._get_result_from_rows(datamodel, rows, column_name)
# If ids are specified make sure we fetch and include them on the response
ids = args.get("include_ids")
self._add_extra_ids_to_result(datamodel, column_name, ids, result)
return self.response(200, count=len(result), result=result)
@expose("/distinct/<column_name>", methods=["GET"])
@protect()
@safe
@statsd_metrics
@rison(get_related_schema)
def distinct(self, column_name: str, **kwargs: Any) -> FlaskResponse:
"""Get distinct values from field data
---
get:
parameters:
- in: path
schema:
type: string
name: column_name
- in: query
name: q
content:
application/json:
schema:
$ref: '#/components/schemas/get_related_schema'
responses:
200:
description: Distinct field data
content:
application/json:
schema:
$ref: "#/components/schemas/DistincResponseSchema"
400:
$ref: '#/components/responses/400'
401:
$ref: '#/components/responses/401'
404:
$ref: '#/components/responses/404'
500:
$ref: '#/components/responses/500'
"""
if column_name not in self.allowed_distinct_fields:
self.incr_stats("error", self.related.__name__)
return self.response_404()
args = kwargs.get("rison", {})
# handle pagination
page, page_size = self._sanitize_page_args(*self._handle_page_args(args))
# Create generic base filters with added request filter
filters = self._get_distinct_filter(column_name, args.get("filter"))
# Make the query
query_count = self.appbuilder.get_session.query(
func.count(distinct(getattr(self.datamodel.obj, column_name)))
)
count = self.datamodel.apply_filters(query_count, filters).scalar()
if count == 0:
return self.response(200, count=count, result=[])
query = self.appbuilder.get_session.query(
distinct(getattr(self.datamodel.obj, column_name))
)
# Apply generic base filters with added request filter
query = self.datamodel.apply_filters(query, filters)
# Apply sort
query = self.datamodel.apply_order_by(query, column_name, "asc")
# Apply pagination
result = self.datamodel.apply_pagination(query, page, page_size).all()
# produce response
result = [
{"text": item[0], "value": item[0]}
for item in result
if item[0] is not None
]
return self.response(200, count=count, result=result)
| apache-2.0 |
kustodian/ansible | test/units/modules/network/f5/test_bigip_smtp.py | 22 | 4984 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_smtp import ApiParameters
from library.modules.bigip_smtp import ModuleParameters
from library.modules.bigip_smtp import ModuleManager
from library.modules.bigip_smtp import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_smtp import ApiParameters
from ansible.modules.network.f5.bigip_smtp import ModuleParameters
from ansible.modules.network.f5.bigip_smtp import ModuleManager
from ansible.modules.network.f5.bigip_smtp import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
name='foo',
smtp_server='1.1.1.1',
smtp_server_port='25',
smtp_server_username='admin',
smtp_server_password='password',
local_host_name='smtp.mydomain.com',
encryption='tls',
update_password='always',
from_address='[email protected]',
authentication=True,
)
p = ModuleParameters(params=args)
assert p.name == 'foo'
assert p.smtp_server == '1.1.1.1'
assert p.smtp_server_port == 25
assert p.smtp_server_username == 'admin'
assert p.smtp_server_password == 'password'
assert p.local_host_name == 'smtp.mydomain.com'
assert p.encryption == 'tls'
assert p.update_password == 'always'
assert p.from_address == '[email protected]'
assert p.authentication_disabled is None
assert p.authentication_enabled is True
def test_api_parameters(self):
p = ApiParameters(params=load_fixture('load_sys_smtp_server.json'))
assert p.name == 'foo'
assert p.smtp_server == 'mail.foo.bar'
assert p.smtp_server_port == 465
assert p.smtp_server_username == 'admin'
assert p.smtp_server_password == '$M$Ch$this-is-encrypted=='
assert p.local_host_name == 'mail-host.foo.bar'
assert p.encryption == 'ssl'
assert p.from_address == '[email protected]'
assert p.authentication_disabled is None
assert p.authentication_enabled is True
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_monitor(self, *args):
set_module_args(dict(
name='foo',
smtp_server='1.1.1.1',
smtp_server_port='25',
smtp_server_username='admin',
smtp_server_password='password',
local_host_name='smtp.mydomain.com',
encryption='tls',
update_password='always',
from_address='[email protected]',
authentication=True,
partition='Common',
provider=dict(
server='localhost',
password='password',
user='admin'
)
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['encryption'] == 'tls'
assert results['smtp_server'] == '1.1.1.1'
assert results['smtp_server_port'] == 25
assert results['local_host_name'] == 'smtp.mydomain.com'
assert results['authentication'] is True
assert results['from_address'] == '[email protected]'
assert 'smtp_server_username' not in results
assert 'smtp_server_password' not in results
| gpl-3.0 |
nzavagli/UnrealPy | UnrealPyEmbed/Development/Python/2015.08.07-Python2710-x64-Source-vs2015/Python27/Source/Jinja2-2.7.3/docs/jinjaext.py | 17 | 6953 | # -*- coding: utf-8 -*-
"""
Jinja Documentation Extensions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Support for automatically documenting filters and tests.
:copyright: Copyright 2008 by Armin Ronacher.
:license: BSD.
"""
import collections
import os
import re
import inspect
import jinja2
from itertools import islice
from types import BuiltinFunctionType
from docutils import nodes
from docutils.statemachine import ViewList
from sphinx.ext.autodoc import prepare_docstring
from sphinx.application import TemplateBridge
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic
from jinja2 import Environment, FileSystemLoader
from jinja2.utils import next
def parse_rst(state, content_offset, doc):
node = nodes.section()
# hack around title style bookkeeping
surrounding_title_styles = state.memo.title_styles
surrounding_section_level = state.memo.section_level
state.memo.title_styles = []
state.memo.section_level = 0
state.nested_parse(doc, content_offset, node, match_titles=1)
state.memo.title_styles = surrounding_title_styles
state.memo.section_level = surrounding_section_level
return node.children
class JinjaStyle(Style):
title = 'Jinja Style'
default_style = ""
styles = {
Comment: 'italic #aaaaaa',
Comment.Preproc: 'noitalic #B11414',
Comment.Special: 'italic #505050',
Keyword: 'bold #B80000',
Keyword.Type: '#808080',
Operator.Word: 'bold #B80000',
Name.Builtin: '#333333',
Name.Function: '#333333',
Name.Class: 'bold #333333',
Name.Namespace: 'bold #333333',
Name.Entity: 'bold #363636',
Name.Attribute: '#686868',
Name.Tag: 'bold #686868',
Name.Decorator: '#686868',
String: '#AA891C',
Number: '#444444',
Generic.Heading: 'bold #000080',
Generic.Subheading: 'bold #800080',
Generic.Deleted: '#aa0000',
Generic.Inserted: '#00aa00',
Generic.Error: '#aa0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#555555',
Generic.Output: '#888888',
Generic.Traceback: '#aa0000',
Error: '#F00 bg:#FAA'
}
_sig_re = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*(\(.*?\))')
def format_function(name, aliases, func):
lines = inspect.getdoc(func).splitlines()
signature = '()'
if isinstance(func, BuiltinFunctionType):
match = _sig_re.match(lines[0])
if match is not None:
del lines[:1 + bool(lines and not lines[0])]
signature = match.group(1)
else:
try:
argspec = inspect.getargspec(func)
if getattr(func, 'environmentfilter', False) or \
getattr(func, 'contextfilter', False) or \
getattr(func, 'evalcontextfilter', False):
del argspec[0][0]
signature = inspect.formatargspec(*argspec)
except:
pass
result = ['.. function:: %s%s' % (name, signature), '']
result.extend(' ' + line for line in lines)
if aliases:
result.extend(('', ' :aliases: %s' % ', '.join(
'``%s``' % x for x in sorted(aliases))))
return result
def dump_functions(mapping):
def directive(dirname, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
reverse_mapping = {}
for name, func in mapping.items():
reverse_mapping.setdefault(func, []).append(name)
filters = []
for func, names in reverse_mapping.items():
aliases = sorted(names, key=lambda x: len(x))
name = aliases.pop()
filters.append((name, aliases, func))
filters.sort()
result = ViewList()
for name, aliases, func in filters:
for item in format_function(name, aliases, func):
result.append(item, '<jinjaext>')
node = nodes.paragraph()
state.nested_parse(result, content_offset, node)
return node.children
return directive
from jinja2.defaults import DEFAULT_FILTERS, DEFAULT_TESTS
jinja_filters = dump_functions(DEFAULT_FILTERS)
jinja_tests = dump_functions(DEFAULT_TESTS)
def jinja_nodes(dirname, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
from jinja2.nodes import Node
doc = ViewList()
def walk(node, indent):
p = ' ' * indent
sig = ', '.join(node.fields)
doc.append(p + '.. autoclass:: %s(%s)' % (node.__name__, sig), '')
if node.abstract:
members = []
for key, name in node.__dict__.items():
if not key.startswith('_') and \
not hasattr(node.__base__, key) and isinstance(name, collections.Callable):
members.append(key)
if members:
members.sort()
doc.append('%s :members: %s' % (p, ', '.join(members)), '')
if node.__base__ != object:
doc.append('', '')
doc.append('%s :Node type: :class:`%s`' %
(p, node.__base__.__name__), '')
doc.append('', '')
children = node.__subclasses__()
children.sort(key=lambda x: x.__name__.lower())
for child in children:
walk(child, indent)
walk(Node, 0)
return parse_rst(state, content_offset, doc)
def inject_toc(app, doctree, docname):
titleiter = iter(doctree.traverse(nodes.title))
try:
# skip first title, we are not interested in that one
next(titleiter)
title = next(titleiter)
# and check if there is at least another title
next(titleiter)
except StopIteration:
return
tocnode = nodes.section('')
tocnode['classes'].append('toc')
toctitle = nodes.section('')
toctitle['classes'].append('toctitle')
toctitle.append(nodes.title(text='Table Of Contents'))
tocnode.append(toctitle)
tocnode += doctree.document.settings.env.get_toc_for(docname)[0][1]
title.parent.insert(title.parent.children.index(title), tocnode)
def setup(app):
app.add_directive('jinjafilters', jinja_filters, 0, (0, 0, 0))
app.add_directive('jinjatests', jinja_tests, 0, (0, 0, 0))
app.add_directive('jinjanodes', jinja_nodes, 0, (0, 0, 0))
# uncomment for inline toc. links are broken unfortunately
##app.connect('doctree-resolved', inject_toc)
| mit |
catherinemoresco/feedme | venv/lib/python2.7/site-packages/gunicorn/app/base.py | 24 | 4153 | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
import os
import sys
import traceback
from gunicorn import util
from gunicorn.arbiter import Arbiter
from gunicorn.config import Config, get_default_config_file
from gunicorn import debug
from gunicorn.six import execfile_
class Application(object):
"""\
An application interface for configuring and loading
the various necessities for any given web framework.
"""
def __init__(self, usage=None, prog=None):
self.usage = usage
self.cfg = None
self.callable = None
self.prog = prog
self.logger = None
self.do_load_config()
def do_load_config(self):
try:
self.load_config()
except Exception as e:
sys.stderr.write("\nError: %s\n" % str(e))
sys.stderr.flush()
sys.exit(1)
def load_config_from_file(self, filename):
if not os.path.exists(filename):
raise RuntimeError("%r doesn't exist" % filename)
cfg = {
"__builtins__": __builtins__,
"__name__": "__config__",
"__file__": filename,
"__doc__": None,
"__package__": None
}
try:
execfile_(filename, cfg, cfg)
except Exception:
print("Failed to read config file: %s" % filename)
traceback.print_exc()
sys.exit(1)
for k, v in cfg.items():
# Ignore unknown names
if k not in self.cfg.settings:
continue
try:
self.cfg.set(k.lower(), v)
except:
sys.stderr.write("Invalid value for %s: %s\n\n" % (k, v))
raise
return cfg
def load_config(self):
# init configuration
self.cfg = Config(self.usage, prog=self.prog)
# parse console args
parser = self.cfg.parser()
args = parser.parse_args()
# optional settings from apps
cfg = self.init(parser, args, args.args)
# Load up the any app specific configuration
if cfg and cfg is not None:
for k, v in cfg.items():
self.cfg.set(k.lower(), v)
if args.config:
self.load_config_from_file(args.config)
else:
default_config = get_default_config_file()
if default_config is not None:
self.load_config_from_file(default_config)
# Lastly, update the configuration with any command line
# settings.
for k, v in args.__dict__.items():
if v is None:
continue
if k == "args":
continue
self.cfg.set(k.lower(), v)
def init(self, parser, opts, args):
raise NotImplementedError
def load(self):
raise NotImplementedError
def reload(self):
self.do_load_config()
if self.cfg.spew:
debug.spew()
def wsgi(self):
if self.callable is None:
self.callable = self.load()
return self.callable
def run(self):
if self.cfg.check_config:
try:
self.load()
except:
sys.stderr.write("\nError while loading the application:\n\n")
traceback.print_exc()
sys.stderr.flush()
sys.exit(1)
sys.exit(0)
if self.cfg.spew:
debug.spew()
if self.cfg.daemon:
util.daemonize(self.cfg.enable_stdio_inheritance)
# set python paths
if self.cfg.pythonpath and self.cfg.pythonpath is not None:
paths = self.cfg.pythonpath.split(",")
for path in paths:
pythonpath = os.path.abspath(path)
if pythonpath not in sys.path:
sys.path.insert(0, pythonpath)
try:
Arbiter(self).run()
except RuntimeError as e:
sys.stderr.write("\nError: %s\n\n" % e)
sys.stderr.flush()
sys.exit(1)
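# A hedged sketch of the subclassing contract described in the class docstring:
# implement init() for extra settings and load() for the WSGI callable. The
# dotted path 'myproject.wsgi' and the 'app' callable are assumptions used only
# for illustration; the class below is never instantiated in this module.
class _SketchApplication(Application):
    def init(self, parser, opts, args):
        # Optional dict of settings, applied before config file and CLI overrides.
        return {"bind": "127.0.0.1:8000"}
    def load(self):
        # Imported lazily so the assumed project package is only needed at run time.
        from myproject.wsgi import app
        return app
# A typical entry point would be: _SketchApplication("%(prog)s [OPTIONS]").run()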
| gpl-2.0 |
snasoft/QtCreatorPluginsPack | Bin/3rdParty/vera/bin/lib/ast.py | 255 | 11805 | # -*- coding: utf-8 -*-
"""
ast
~~~
The `ast` module helps Python applications to process trees of the Python
abstract syntax grammar. The abstract syntax itself might change with
each Python release; this module helps to find out programmatically what
the current grammar looks like and allows modifications of it.
An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
a flag to the `compile()` builtin function or by using the `parse()`
function from this module. The result will be a tree of objects whose
classes all inherit from `ast.AST`.
A modified abstract syntax tree can be compiled into a Python code object
using the built-in `compile()` function.
Additionally various helper functions are provided that make working with
the trees simpler. The main intention of the helper functions and this
module in general is to provide an easy to use interface for libraries
that work tightly with the python syntax (template engines for example).
:copyright: Copyright 2008 by Armin Ronacher.
:license: Python License.
"""
from _ast import *
from _ast import __version__
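# A short, hedged sketch of the parse -> inspect -> compile workflow described
# in the module docstring; it is defined but never called, so importing this
# module stays side-effect free.
def _parse_compile_sketch(source="answer = 40 + 2"):
    tree = parse(source)                      # source -> AST (mode='exec')
    printable = dump(tree)                    # e.g. "Module(body=[Assign(...)])"
    code = compile(tree, '<sketch>', 'exec')  # AST -> code object
    namespace = {}
    exec code in namespace                    # Python 2 exec statement
    return printable, namespace['answer']     # ('Module(...)', 42)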
def parse(source, filename='<unknown>', mode='exec'):
"""
Parse the source into an AST node.
Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
"""
return compile(source, filename, mode, PyCF_ONLY_AST)
def literal_eval(node_or_string):
"""
Safely evaluate an expression node or a string containing a Python
expression. The string or node provided may only consist of the following
Python literal structures: strings, numbers, tuples, lists, dicts, booleans,
and None.
"""
_safe_names = {'None': None, 'True': True, 'False': False}
if isinstance(node_or_string, basestring):
node_or_string = parse(node_or_string, mode='eval')
if isinstance(node_or_string, Expression):
node_or_string = node_or_string.body
def _convert(node):
if isinstance(node, Str):
return node.s
elif isinstance(node, Num):
return node.n
elif isinstance(node, Tuple):
return tuple(map(_convert, node.elts))
elif isinstance(node, List):
return list(map(_convert, node.elts))
elif isinstance(node, Dict):
return dict((_convert(k), _convert(v)) for k, v
in zip(node.keys, node.values))
elif isinstance(node, Name):
if node.id in _safe_names:
return _safe_names[node.id]
elif isinstance(node, BinOp) and \
isinstance(node.op, (Add, Sub)) and \
isinstance(node.right, Num) and \
isinstance(node.right.n, complex) and \
isinstance(node.left, Num) and \
isinstance(node.left.n, (int, long, float)):
left = node.left.n
right = node.right.n
if isinstance(node.op, Add):
return left + right
else:
return left - right
raise ValueError('malformed string')
return _convert(node_or_string)
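# Small, hedged examples of what the evaluator above accepts and rejects;
# defined only for illustration and never invoked.
def _literal_eval_sketch():
    assert literal_eval("{'a': 1, 'b': (2.5, True)}") == {'a': 1, 'b': (2.5, True)}
    assert literal_eval("3 + 4j") == 3 + 4j  # Num +/- complex Num is allowed
    try:
        literal_eval("2 ** 8")               # arbitrary expressions are rejected
    except ValueError:
        pass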
def dump(node, annotate_fields=True, include_attributes=False):
"""
Return a formatted dump of the tree in *node*. This is mainly useful for
debugging purposes. The returned string will show the names and the values
for fields. This makes the code impossible to evaluate, so if evaluation is
wanted *annotate_fields* must be set to False. Attributes such as line
numbers and column offsets are not dumped by default. If this is wanted,
*include_attributes* can be set to True.
"""
def _format(node):
if isinstance(node, AST):
fields = [(a, _format(b)) for a, b in iter_fields(node)]
rv = '%s(%s' % (node.__class__.__name__, ', '.join(
('%s=%s' % field for field in fields)
if annotate_fields else
(b for a, b in fields)
))
if include_attributes and node._attributes:
rv += fields and ', ' or ' '
rv += ', '.join('%s=%s' % (a, _format(getattr(node, a)))
for a in node._attributes)
return rv + ')'
elif isinstance(node, list):
return '[%s]' % ', '.join(_format(x) for x in node)
return repr(node)
if not isinstance(node, AST):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
return _format(node)
def copy_location(new_node, old_node):
"""
Copy source location (`lineno` and `col_offset` attributes) from
*old_node* to *new_node* if possible, and return *new_node*.
"""
for attr in 'lineno', 'col_offset':
if attr in old_node._attributes and attr in new_node._attributes \
and hasattr(old_node, attr):
setattr(new_node, attr, getattr(old_node, attr))
return new_node
def fix_missing_locations(node):
"""
When you compile a node tree with compile(), the compiler expects lineno and
col_offset attributes for every node that supports them. This is rather
tedious to fill in for generated nodes, so this helper adds these attributes
recursively where not already set, by setting them to the values of the
parent node. It works recursively starting at *node*.
"""
def _fix(node, lineno, col_offset):
if 'lineno' in node._attributes:
if not hasattr(node, 'lineno'):
node.lineno = lineno
else:
lineno = node.lineno
if 'col_offset' in node._attributes:
if not hasattr(node, 'col_offset'):
node.col_offset = col_offset
else:
col_offset = node.col_offset
for child in iter_child_nodes(node):
_fix(child, lineno, col_offset)
_fix(node, 1, 0)
return node
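# A hedged sketch of the pattern the docstring describes: splice a hand-built
# node into a parsed tree, then fill in locations before compiling. Defined
# for illustration only and never called.
def _fix_missing_locations_sketch():
    tree = parse("x = 1")
    tree.body.append(Expr(value=Num(n=2)))  # new node lacks lineno/col_offset
    fix_missing_locations(tree)             # fills them in from the parent
    return compile(tree, '<sketch>', 'exec')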
def increment_lineno(node, n=1):
"""
Increment the line number of each node in the tree starting at *node* by *n*.
This is useful to "move code" to a different location in a file.
"""
for child in walk(node):
if 'lineno' in child._attributes:
child.lineno = getattr(child, 'lineno', 0) + n
return node
def iter_fields(node):
"""
Yield a tuple of ``(fieldname, value)`` for each field in ``node._fields``
that is present on *node*.
"""
for field in node._fields:
try:
yield field, getattr(node, field)
except AttributeError:
pass
def iter_child_nodes(node):
"""
Yield all direct child nodes of *node*, that is, all fields that are nodes
and all items of fields that are lists of nodes.
"""
for name, field in iter_fields(node):
if isinstance(field, AST):
yield field
elif isinstance(field, list):
for item in field:
if isinstance(item, AST):
yield item
def get_docstring(node, clean=True):
"""
Return the docstring for the given node or None if no docstring can
be found. If the node provided does not have docstrings a TypeError
will be raised.
"""
if not isinstance(node, (FunctionDef, ClassDef, Module)):
raise TypeError("%r can't have docstrings" % node.__class__.__name__)
if node.body and isinstance(node.body[0], Expr) and \
isinstance(node.body[0].value, Str):
if clean:
import inspect
return inspect.cleandoc(node.body[0].value.s)
return node.body[0].value.s
def walk(node):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
todo = deque([node])
while todo:
node = todo.popleft()
todo.extend(iter_child_nodes(node))
yield node
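# Illustrative use of walk(): gather every Name node in a snippet. Defined for
# illustration only and never called.
def _walk_sketch():
    tree = parse("x = y + z")
    names = [n.id for n in walk(tree) if isinstance(n, Name)]
    return sorted(names)  # ['x', 'y', 'z']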
class NodeVisitor(object):
"""
A node visitor base class that walks the abstract syntax tree and calls a
visitor function for every node found. This function may return a value
which is forwarded by the `visit` method.
This class is meant to be subclassed, with the subclass adding visitor
methods.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `visit` method. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
Don't use the `NodeVisitor` if you want to apply changes to nodes during
traversing. For this a special visitor exists (`NodeTransformer`) that
allows modifications.
"""
def visit(self, node):
"""Visit a node."""
method = 'visit_' + node.__class__.__name__
visitor = getattr(self, method, self.generic_visit)
return visitor(node)
def generic_visit(self, node):
"""Called if no explicit visitor function exists for a node."""
for field, value in iter_fields(node):
if isinstance(value, list):
for item in value:
if isinstance(item, AST):
self.visit(item)
elif isinstance(value, AST):
self.visit(value)
class NodeTransformer(NodeVisitor):
"""
A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
allows modification of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
visitor methods to replace or remove the old node. If the return value of
the visitor method is ``None``, the node will be removed from its location,
otherwise it is replaced with the return value. The return value may be the
original node in which case no replacement takes place.
Here is an example transformer that rewrites all occurrences of name lookups
(``foo``) to ``data['foo']``::
class RewriteName(NodeTransformer):
def visit_Name(self, node):
return copy_location(Subscript(
value=Name(id='data', ctx=Load()),
slice=Index(value=Str(s=node.id)),
ctx=node.ctx
), node)
Keep in mind that if the node you're operating on has child nodes you must
either transform the child nodes yourself or call the :meth:`generic_visit`
method for the node first.
For nodes that were part of a collection of statements (that applies to all
statement nodes), the visitor may also return a list of nodes rather than
just a single node.
Usually you use the transformer like this::
node = YourTransformer().visit(node)
"""
def generic_visit(self, node):
for field, old_value in iter_fields(node):
old_value = getattr(node, field, None)
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, AST):
value = self.visit(value)
if value is None:
continue
elif not isinstance(value, AST):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, AST):
new_node = self.visit(old_value)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
| lgpl-3.0 |
mezz64/home-assistant | homeassistant/components/xiaomi/device_tracker.py | 12 | 5680 | """Support for Xiaomi Mi routers."""
import logging
import requests
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME, HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default="admin"): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a Xiaomi Device Scanner."""
scanner = XiaomiDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class XiaomiDeviceScanner(DeviceScanner):
"""This class queries a Xiaomi Mi router.
Adapted from Luci scanner.
"""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.last_results = {}
self.token = _get_token(self.host, self.username, self.password)
self.mac2name = None
self.success_init = self.token is not None
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return self.last_results
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if self.mac2name is None:
result = self._retrieve_list_with_retry()
if result:
hosts = [x for x in result if "mac" in x and "name" in x]
mac2name_list = [(x["mac"].upper(), x["name"]) for x in hosts]
self.mac2name = dict(mac2name_list)
else:
# Error, handled in the _retrieve_list_with_retry
return
return self.mac2name.get(device.upper(), None)
def _update_info(self):
"""Ensure the information from the router are up to date.
Returns true if scanning successful.
"""
if not self.success_init:
return False
result = self._retrieve_list_with_retry()
if result:
self._store_result(result)
return True
return False
def _retrieve_list_with_retry(self):
"""Retrieve the device list with a retry if token is invalid.
Return the list if successful.
"""
_LOGGER.info("Refreshing device list")
result = _retrieve_list(self.host, self.token)
if result:
return result
_LOGGER.info("Refreshing token and retrying device list refresh")
self.token = _get_token(self.host, self.username, self.password)
return _retrieve_list(self.host, self.token)
def _store_result(self, result):
"""Extract and store the device list in self.last_results."""
self.last_results = []
for device_entry in result:
# Check if the device is marked as connected
if int(device_entry["online"]) == 1:
self.last_results.append(device_entry["mac"])
def _retrieve_list(host, token, **kwargs):
"""Get device list for the given host."""
url = "http://{}/cgi-bin/luci/;stok={}/api/misystem/devicelist"
url = url.format(host, token)
try:
res = requests.get(url, timeout=5, **kwargs)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out at URL %s", url)
return
if res.status_code != HTTP_OK:
_LOGGER.exception("Connection failed with http code %s", res.status_code)
return
try:
result = res.json()
except ValueError:
# If json decoder could not parse the response
_LOGGER.exception("Failed to parse response from mi router")
return
try:
xiaomi_code = result["code"]
except KeyError:
_LOGGER.exception("No field code in response from mi router. %s", result)
return
if xiaomi_code == 0:
try:
return result["list"]
except KeyError:
_LOGGER.exception("No list in response from mi router. %s", result)
return
else:
_LOGGER.info(
"Receive wrong Xiaomi code %s, expected 0 in response %s",
xiaomi_code,
result,
)
return
def _get_token(host, username, password):
"""Get authentication token for the given host+username+password."""
url = f"http://{host}/cgi-bin/luci/api/xqsystem/login"
data = {"username": username, "password": password}
try:
res = requests.post(url, data=data, timeout=5)
except requests.exceptions.Timeout:
_LOGGER.exception("Connection to the router timed out")
return
if res.status_code == HTTP_OK:
try:
result = res.json()
except ValueError:
# If JSON decoder could not parse the response
_LOGGER.exception("Failed to parse response from mi router")
return
try:
return result["token"]
except KeyError:
error_message = (
"Xiaomi token cannot be refreshed, response from "
+ "url: [%s] \nwith parameter: [%s] \nwas: [%s]"
)
_LOGGER.exception(error_message, url, data, result)
return
else:
_LOGGER.error(
"Invalid response: [%s] at url: [%s] with data [%s]", res, url, data
)
| apache-2.0 |
xiaolonginfo/decode-Django | Django-1.5.1/tests/regressiontests/expressions_regress/tests.py | 46 | 15966 | """
Spanning tests for all the operations that F() expressions can perform.
"""
from __future__ import absolute_import
import datetime
from django.db import connection
from django.db.models import F
from django.test import TestCase, Approximate, skipUnlessDBFeature
from .models import Number, Experiment
class ExpressionsRegressTests(TestCase):
def setUp(self):
Number(integer=-1).save()
Number(integer=42).save()
Number(integer=1337).save()
self.assertEqual(Number.objects.update(float=F('integer')), 3)
def test_fill_with_value_from_same_object(self):
"""
We can fill a value in all objects with an other value of the
same object.
"""
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 42, 42.000>',
'<Number: 1337, 1337.000>'
]
)
def test_increment_value(self):
"""
        We can increment a value on all objects in a queryset.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.all(),
[
'<Number: -1, -1.000>',
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
]
)
def test_filter_not_equals_other_field(self):
"""
        We can filter for objects where a value does not equal the value
        of another field.
"""
self.assertEqual(
Number.objects.filter(integer__gt=0)
.update(integer=F('integer') + 1),
2)
self.assertQuerysetEqual(
Number.objects.exclude(float=F('integer')),
[
'<Number: 43, 42.000>',
'<Number: 1338, 1337.000>'
]
)
def test_complex_expressions(self):
"""
Complex expressions of different connection types are possible.
"""
n = Number.objects.create(integer=10, float=123.45)
self.assertEqual(Number.objects.filter(pk=n.pk)
.update(float=F('integer') + F('float') * 2),
1)
self.assertEqual(Number.objects.get(pk=n.pk).integer, 10)
self.assertEqual(Number.objects.get(pk=n.pk).float, Approximate(256.900, places=3))
class ExpressionOperatorTests(TestCase):
def setUp(self):
self.n = Number.objects.create(integer=42, float=15.5)
def test_lefthand_addition(self):
# LH Addition of floats and integers
Number.objects.filter(pk=self.n.pk).update(
integer=F('integer') + 15,
float=F('float') + 42.7
)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_lefthand_subtraction(self):
# LH Subtraction of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') - 15,
float=F('float') - 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(-27.200, places=3))
def test_lefthand_multiplication(self):
# Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') * 15,
float=F('float') * 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_lefthand_division(self):
# LH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') / 2,
float=F('float') / 42.7)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 21)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(0.363, places=3))
def test_lefthand_modulo(self):
# LH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer') % 20)
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 2)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_lefthand_bitwise_and(self):
# LH Bitwise ands on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitand(56))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 40)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
@skipUnlessDBFeature('supports_bitwise_or')
def test_lefthand_bitwise_or(self):
# LH Bitwise or on integers
Number.objects.filter(pk=self.n.pk).update(integer=F('integer').bitor(48))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 58)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
def test_right_hand_addition(self):
# Right hand operators
Number.objects.filter(pk=self.n.pk).update(integer=15 + F('integer'),
float=42.7 + F('float'))
# RH Addition of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 57)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(58.200, places=3))
def test_right_hand_subtraction(self):
Number.objects.filter(pk=self.n.pk).update(integer=15 - F('integer'),
float=42.7 - F('float'))
# RH Subtraction of floats and integers
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, -27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(27.200, places=3))
def test_right_hand_multiplication(self):
# RH Multiplication of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=15 * F('integer'),
float=42.7 * F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 630)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(661.850, places=3))
def test_right_hand_division(self):
# RH Division of floats and integers
Number.objects.filter(pk=self.n.pk).update(integer=640 / F('integer'),
float=42.7 / F('float'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 15)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(2.755, places=3))
def test_right_hand_modulo(self):
# RH Modulo arithmetic on integers
Number.objects.filter(pk=self.n.pk).update(integer=69 % F('integer'))
self.assertEqual(Number.objects.get(pk=self.n.pk).integer, 27)
self.assertEqual(Number.objects.get(pk=self.n.pk).float, Approximate(15.500, places=3))
class FTimeDeltaTests(TestCase):
def setUp(self):
sday = datetime.date(2010, 6, 25)
stime = datetime.datetime(2010, 6, 25, 12, 15, 30, 747000)
midnight = datetime.time(0)
delta0 = datetime.timedelta(0)
delta1 = datetime.timedelta(microseconds=253000)
delta2 = datetime.timedelta(seconds=44)
delta3 = datetime.timedelta(hours=21, minutes=8)
delta4 = datetime.timedelta(days=10)
# Test data is set so that deltas and delays will be
# strictly increasing.
self.deltas = []
self.delays = []
self.days_long = []
# e0: started same day as assigned, zero duration
end = stime+delta0
e0 = Experiment.objects.create(name='e0', assigned=sday, start=stime,
end=end, completed=end.date())
self.deltas.append(delta0)
self.delays.append(e0.start-
datetime.datetime.combine(e0.assigned, midnight))
self.days_long.append(e0.completed-e0.assigned)
# e1: started one day after assigned, tiny duration, data
# set so that end time has no fractional seconds, which
# tests an edge case on sqlite. This Experiment is only
# included in the test data when the DB supports microsecond
# precision.
if connection.features.supports_microsecond_precision:
delay = datetime.timedelta(1)
end = stime + delay + delta1
e1 = Experiment.objects.create(name='e1', assigned=sday,
start=stime+delay, end=end, completed=end.date())
self.deltas.append(delta1)
self.delays.append(e1.start-
datetime.datetime.combine(e1.assigned, midnight))
self.days_long.append(e1.completed-e1.assigned)
# e2: started three days after assigned, small duration
end = stime+delta2
e2 = Experiment.objects.create(name='e2',
assigned=sday-datetime.timedelta(3), start=stime, end=end,
completed=end.date())
self.deltas.append(delta2)
self.delays.append(e2.start-
datetime.datetime.combine(e2.assigned, midnight))
self.days_long.append(e2.completed-e2.assigned)
# e3: started four days after assigned, medium duration
delay = datetime.timedelta(4)
end = stime + delay + delta3
e3 = Experiment.objects.create(name='e3',
assigned=sday, start=stime+delay, end=end, completed=end.date())
self.deltas.append(delta3)
self.delays.append(e3.start-
datetime.datetime.combine(e3.assigned, midnight))
self.days_long.append(e3.completed-e3.assigned)
# e4: started 10 days after assignment, long duration
end = stime + delta4
e4 = Experiment.objects.create(name='e4',
assigned=sday-datetime.timedelta(10), start=stime, end=end,
completed=end.date())
self.deltas.append(delta4)
self.delays.append(e4.start-
datetime.datetime.combine(e4.assigned, midnight))
self.days_long.append(e4.completed-e4.assigned)
self.expnames = [e.name for e in Experiment.objects.all()]
def test_delta_add(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(end__lt=F('start')+delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(end__lte=F('start')+delta)]
self.assertEqual(test_set, self.expnames[:i+1])
def test_delta_subtract(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.filter(start__gt=F('end')-delta)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__gte=F('end')-delta)]
self.assertEqual(test_set, self.expnames[:i+1])
def test_exclude(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
test_set = [e.name for e in
Experiment.objects.exclude(end__lt=F('start')+delta)]
self.assertEqual(test_set, self.expnames[i:])
test_set = [e.name for e in
Experiment.objects.exclude(end__lte=F('start')+delta)]
self.assertEqual(test_set, self.expnames[i+1:])
def test_date_comparison(self):
for i in range(len(self.days_long)):
days = self.days_long[i]
test_set = [e.name for e in
Experiment.objects.filter(completed__lt=F('assigned')+days)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(completed__lte=F('assigned')+days)]
self.assertEqual(test_set, self.expnames[:i+1])
@skipUnlessDBFeature("supports_mixed_date_datetime_comparisons")
def test_mixed_comparisons1(self):
for i in range(len(self.delays)):
delay = self.delays[i]
if not connection.features.supports_microsecond_precision:
delay = datetime.timedelta(delay.days, delay.seconds)
test_set = [e.name for e in
Experiment.objects.filter(assigned__gt=F('start')-delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(assigned__gte=F('start')-delay)]
self.assertEqual(test_set, self.expnames[:i+1])
def test_mixed_comparisons2(self):
delays = [datetime.timedelta(delay.days) for delay in self.delays]
for i in range(len(delays)):
delay = delays[i]
test_set = [e.name for e in
Experiment.objects.filter(start__lt=F('assigned')+delay)]
self.assertEqual(test_set, self.expnames[:i])
test_set = [e.name for e in
Experiment.objects.filter(start__lte=F('assigned')+delay+
datetime.timedelta(1))]
self.assertEqual(test_set, self.expnames[:i+1])
def test_delta_update(self):
for i in range(len(self.deltas)):
delta = self.deltas[i]
exps = Experiment.objects.all()
expected_durations = [e.duration() for e in exps]
expected_starts = [e.start+delta for e in exps]
expected_ends = [e.end+delta for e in exps]
Experiment.objects.update(start=F('start')+delta, end=F('end')+delta)
exps = Experiment.objects.all()
new_starts = [e.start for e in exps]
new_ends = [e.end for e in exps]
new_durations = [e.duration() for e in exps]
self.assertEqual(expected_starts, new_starts)
self.assertEqual(expected_ends, new_ends)
self.assertEqual(expected_durations, new_durations)
def test_delta_invalid_op_mult(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start')*self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to multiply datetime by timedelta.")
def test_delta_invalid_op_div(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start')/self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to divide datetime by timedelta.")
def test_delta_invalid_op_mod(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start')%self.deltas[0]))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to modulo divide datetime by timedelta.")
def test_delta_invalid_op_and(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start').bitand(self.deltas[0])))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to binary and a datetime with a timedelta.")
def test_delta_invalid_op_or(self):
raised = False
try:
r = repr(Experiment.objects.filter(end__lt=F('start').bitor(self.deltas[0])))
except TypeError:
raised = True
self.assertTrue(raised, "TypeError not raised on attempt to binary or a datetime with a timedelta.")
| gpl-2.0 |
yewang15215/django | tests/auth_tests/test_basic.py | 12 | 7419 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.contrib.auth import get_user, get_user_model
from django.contrib.auth.models import AnonymousUser, User
from django.core.exceptions import ImproperlyConfigured
from django.db import IntegrityError
from django.http import HttpRequest
from django.test import TestCase, override_settings
from django.utils import translation
from .models import CustomUser
class BasicTestCase(TestCase):
def test_user(self):
"Users can be created and can set their password"
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.assertTrue(u.has_usable_password())
self.assertFalse(u.check_password('bad'))
self.assertTrue(u.check_password('testpw'))
# Check we can manually set an unusable password
u.set_unusable_password()
u.save()
self.assertFalse(u.check_password('testpw'))
self.assertFalse(u.has_usable_password())
u.set_password('testpw')
self.assertTrue(u.check_password('testpw'))
u.set_password(None)
self.assertFalse(u.has_usable_password())
# Check username getter
self.assertEqual(u.get_username(), 'testuser')
# Check authentication/permissions
self.assertFalse(u.is_anonymous)
self.assertTrue(u.is_authenticated)
self.assertFalse(u.is_staff)
self.assertTrue(u.is_active)
self.assertFalse(u.is_superuser)
# Check API-based user creation with no password
u2 = User.objects.create_user('testuser2', '[email protected]')
self.assertFalse(u2.has_usable_password())
def test_unicode_username(self):
User.objects.create_user('jörg')
User.objects.create_user('Григорий')
# Two equivalent unicode normalized usernames should be duplicates
omega_username = 'iamtheΩ' # U+03A9 GREEK CAPITAL LETTER OMEGA
ohm_username = 'iamtheΩ' # U+2126 OHM SIGN
User.objects.create_user(ohm_username)
with self.assertRaises(IntegrityError):
User.objects.create_user(omega_username)
def test_is_anonymous_authenticated_method_deprecation(self):
deprecation_message = (
'Using user.is_authenticated() and user.is_anonymous() as a '
'method is deprecated. Remove the parentheses to use it as an '
'attribute.'
)
u = User.objects.create_user('testuser', '[email protected]', 'testpw')
# Backwards-compatibility callables
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertFalse(u.is_anonymous())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always')
self.assertTrue(u.is_authenticated())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
def test_user_no_email(self):
"Users can be created without an email"
u = User.objects.create_user('testuser1')
self.assertEqual(u.email, '')
u2 = User.objects.create_user('testuser2', email='')
self.assertEqual(u2.email, '')
u3 = User.objects.create_user('testuser3', email=None)
self.assertEqual(u3.email, '')
def test_anonymous_user(self):
"Check the properties of the anonymous user"
a = AnonymousUser()
self.assertIsNone(a.pk)
self.assertEqual(a.username, '')
self.assertEqual(a.get_username(), '')
self.assertTrue(a.is_anonymous)
self.assertFalse(a.is_authenticated)
self.assertFalse(a.is_staff)
self.assertFalse(a.is_active)
self.assertFalse(a.is_superuser)
self.assertEqual(a.groups.all().count(), 0)
self.assertEqual(a.user_permissions.all().count(), 0)
def test_anonymous_user_is_anonymous_authenticated_method_deprecation(self):
a = AnonymousUser()
deprecation_message = (
'Using user.is_authenticated() and user.is_anonymous() as a '
'method is deprecated. Remove the parentheses to use it as an '
'attribute.'
)
# Backwards-compatibility callables
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
self.assertTrue(a.is_anonymous())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter('always') # prevent warnings from appearing as errors
self.assertFalse(a.is_authenticated())
self.assertEqual(len(warns), 1)
self.assertEqual(str(warns[0].message), deprecation_message)
def test_superuser(self):
"Check the creation and properties of a superuser"
super = User.objects.create_superuser('super', '[email protected]', 'super')
self.assertTrue(super.is_superuser)
self.assertTrue(super.is_active)
self.assertTrue(super.is_staff)
def test_get_user_model(self):
"The current user model can be retrieved"
self.assertEqual(get_user_model(), User)
@override_settings(AUTH_USER_MODEL='auth_tests.CustomUser')
def test_swappable_user(self):
"The current user model can be swapped out for another"
self.assertEqual(get_user_model(), CustomUser)
with self.assertRaises(AttributeError):
User.objects.all()
@override_settings(AUTH_USER_MODEL='badsetting')
def test_swappable_user_bad_setting(self):
"The alternate user setting must point to something in the format app.model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
@override_settings(AUTH_USER_MODEL='thismodel.doesntexist')
def test_swappable_user_nonexistent_model(self):
"The current user model must point to an installed model"
with self.assertRaises(ImproperlyConfigured):
get_user_model()
def test_user_verbose_names_translatable(self):
"Default User model verbose names are translatable (#19945)"
with translation.override('en'):
self.assertEqual(User._meta.verbose_name, 'user')
self.assertEqual(User._meta.verbose_name_plural, 'users')
with translation.override('es'):
self.assertEqual(User._meta.verbose_name, 'usuario')
self.assertEqual(User._meta.verbose_name_plural, 'usuarios')
class TestGetUser(TestCase):
def test_get_user_anonymous(self):
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, AnonymousUser)
def test_get_user(self):
created_user = User.objects.create_user('testuser', '[email protected]', 'testpw')
self.client.login(username='testuser', password='testpw')
request = HttpRequest()
request.session = self.client.session
user = get_user(request)
self.assertIsInstance(user, User)
self.assertEqual(user.username, created_user.username)
| bsd-3-clause |
vdemeester/docker-py | tests/integration/regression_test.py | 4 | 2232 | import io
import random
import docker
import six
from .base import BaseAPIIntegrationTest, TEST_IMG
import pytest
class TestRegressions(BaseAPIIntegrationTest):
def test_443_handle_nonchunked_response_in_stream(self):
dfile = io.BytesIO()
with pytest.raises(docker.errors.APIError) as exc:
for line in self.client.build(fileobj=dfile, tag="a/b/c"):
pass
assert exc.value.is_error()
dfile.close()
def test_542_truncate_ids_client_side(self):
self.client.start(
self.client.create_container(TEST_IMG, ['true'])
)
result = self.client.containers(all=True, trunc=True)
assert len(result[0]['Id']) == 12
def test_647_support_doubleslash_in_image_names(self):
with pytest.raises(docker.errors.APIError):
self.client.inspect_image('gensokyo.jp//kirisame')
def test_649_handle_timeout_value_none(self):
self.client.timeout = None
ctnr = self.client.create_container(TEST_IMG, ['sleep', '2'])
self.client.start(ctnr)
self.client.stop(ctnr)
def test_715_handle_user_param_as_int_value(self):
ctnr = self.client.create_container(TEST_IMG, ['id', '-u'], user=1000)
self.client.start(ctnr)
self.client.wait(ctnr)
logs = self.client.logs(ctnr)
if six.PY3:
logs = logs.decode('utf-8')
assert logs == '1000\n'
def test_792_explicit_port_protocol(self):
tcp_port, udp_port = random.sample(range(9999, 32000), 2)
ctnr = self.client.create_container(
TEST_IMG, ['sleep', '9999'], ports=[2000, (2000, 'udp')],
host_config=self.client.create_host_config(
port_bindings={'2000/tcp': tcp_port, '2000/udp': udp_port}
)
)
self.tmp_containers.append(ctnr)
self.client.start(ctnr)
assert self.client.port(
ctnr, 2000
)[0]['HostPort'] == six.text_type(tcp_port)
assert self.client.port(
ctnr, '2000/tcp'
)[0]['HostPort'] == six.text_type(tcp_port)
assert self.client.port(
ctnr, '2000/udp'
)[0]['HostPort'] == six.text_type(udp_port)
| apache-2.0 |
Workday/OpenFrame | tools/telemetry/catapult_base/refactor/offset_token.py | 16 | 3155 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import itertools
import token
import tokenize
def _Pairwise(iterable):
"""s -> (None, s0), (s0, s1), (s1, s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
a = itertools.chain((None,), a)
return itertools.izip(a, b)
class OffsetToken(object):
"""A Python token with a relative position.
A token is represented by a type defined in Python's token module, a string
representing the content, and an offset. Using relative positions makes it
easy to insert and remove tokens.
"""
def __init__(self, token_type, string, offset):
self._type = token_type
self._string = string
self._offset = offset
@property
def type(self):
return self._type
@property
def type_name(self):
return token.tok_name[self._type]
@property
def string(self):
return self._string
@string.setter
def string(self, value):
self._string = value
@property
def offset(self):
return self._offset
def __str__(self):
return str((self.type_name, self.string, self.offset))
def Tokenize(f):
"""Read tokens from a file-like object.
Args:
f: Any object that has a readline method.
Returns:
A collections.deque containing OffsetTokens. Deques are cheaper and easier
to manipulate sequentially than lists.
"""
f.seek(0)
tokenize_tokens = tokenize.generate_tokens(f.readline)
offset_tokens = collections.deque()
for prev_token, next_token in _Pairwise(tokenize_tokens):
token_type, string, (srow, scol), _, _ = next_token
if not prev_token:
offset_tokens.append(OffsetToken(token_type, string, (0, 0)))
else:
erow, ecol = prev_token[3]
if erow == srow:
offset_tokens.append(OffsetToken(token_type, string, (0, scol-ecol)))
else:
offset_tokens.append(OffsetToken(token_type, string, (srow-erow, scol)))
return offset_tokens
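# A hedged round-trip sketch: StringIO provides the readline/seek interface
# Tokenize() expects, and Untokenize() below turns the deque back into source.
# Defined for illustration only and never called.
def _roundtrip_sketch():
    import StringIO
    tokens = Tokenize(StringIO.StringIO('x = 1\ny = 2\n'))
    return Untokenize(tokens)  # expected to reproduce the original snippet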
def Untokenize(offset_tokens):
"""Return the string representation of an iterable of OffsetTokens."""
# Make a copy. Don't modify the original.
offset_tokens = collections.deque(offset_tokens)
# Strip leading NL tokens.
while offset_tokens[0].type == tokenize.NL:
offset_tokens.popleft()
# Strip leading vertical whitespace.
first_token = offset_tokens.popleft()
# Take care not to modify the existing token. Create a new one in its place.
first_token = OffsetToken(first_token.type, first_token.string,
(0, first_token.offset[1]))
offset_tokens.appendleft(first_token)
# Convert OffsetTokens to tokenize tokens.
tokenize_tokens = []
row = 1
col = 0
for t in offset_tokens:
offset_row, offset_col = t.offset
if offset_row == 0:
col += offset_col
else:
row += offset_row
col = offset_col
tokenize_tokens.append((t.type, t.string, (row, col), (row, col), None))
# tokenize can't handle whitespace before line continuations.
# So add a space.
return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n')
| bsd-3-clause |
sippy/voiptests | test_cases/reinv_brkn2.py | 1 | 2000 | # Copyright (c) 2016 Sippy Software, Inc. All rights reserved.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from test_cases.reinv_fail import a_test_reinv_fail
from test_cases.reinvite import b_test_reinvite
class a_test_reinv_brkn2(a_test_reinv_fail):
cld = 'bob_reinv_brkn2'
cli = 'alice_reinv_brkn2'
def reinvite(self, ua):
if not self.connect_done or self.disconnect_done:
return
sdp_body_bak = ua.lSDP
ua.lSDP = sdp_body_bak.getCopy()
for sect in ua.lSDP.content.sections:
sect.c_header = None
rval = a_test_reinv_fail.reinvite(self, ua)
ua.lSDP = sdp_body_bak
return rval
class b_test_reinv_brkn2(b_test_reinvite):
cli = 'bob_reinv_brkn2'
| bsd-2-clause |
akosyakov/intellij-community | python/lib/Lib/stat.py | 145 | 1667 | """Constants/functions for interpreting results of os.stat() and os.lstat().
Suggested usage: from stat import *
"""
# XXX Strictly spoken, this module may have to be adapted for each POSIX
# implementation; in practice, however, the numeric constants used by
# stat() are almost universal (even for stat() emulations on non-UNIX
# systems like MS-DOS).
# Indices for stat struct members in tuple returned by os.stat()
ST_MODE = 0
ST_INO = 1
ST_DEV = 2
ST_NLINK = 3
ST_UID = 4
ST_GID = 5
ST_SIZE = 6
ST_ATIME = 7
ST_MTIME = 8
ST_CTIME = 9
# Extract bits from the mode
def S_IMODE(mode):
return mode & 07777
def S_IFMT(mode):
return mode & 0170000
# Constants used as S_IFMT() for various file types
# (not all are implemented on all systems)
S_IFDIR = 0040000
S_IFCHR = 0020000
S_IFBLK = 0060000
S_IFREG = 0100000
S_IFIFO = 0010000
S_IFLNK = 0120000
S_IFSOCK = 0140000
# Functions to test for each file type
def S_ISDIR(mode):
return S_IFMT(mode) == S_IFDIR
def S_ISCHR(mode):
return S_IFMT(mode) == S_IFCHR
def S_ISBLK(mode):
return S_IFMT(mode) == S_IFBLK
def S_ISREG(mode):
return S_IFMT(mode) == S_IFREG
def S_ISFIFO(mode):
return S_IFMT(mode) == S_IFIFO
def S_ISLNK(mode):
return S_IFMT(mode) == S_IFLNK
def S_ISSOCK(mode):
return S_IFMT(mode) == S_IFSOCK
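# Usage sketch tying os.stat() to the helpers above; the path is illustrative.
# Defined only as an example and never called.
def _stat_usage_sketch(path='/tmp'):
    import os
    mode = os.stat(path)[ST_MODE]
    return S_ISDIR(mode), oct(S_IMODE(mode))  # e.g. (True, '01777') for /tmp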
# Names for permission bits
S_ISUID = 04000
S_ISGID = 02000
S_ENFMT = S_ISGID
S_ISVTX = 01000
S_IREAD = 00400
S_IWRITE = 00200
S_IEXEC = 00100
S_IRWXU = 00700
S_IRUSR = 00400
S_IWUSR = 00200
S_IXUSR = 00100
S_IRWXG = 00070
S_IRGRP = 00040
S_IWGRP = 00020
S_IXGRP = 00010
S_IRWXO = 00007
S_IROTH = 00004
S_IWOTH = 00002
S_IXOTH = 00001
| apache-2.0 |
gameduell/duell | bin/win/python2.7.9/Lib/site-packages/pip/_vendor/requests/packages/chardet/langbulgarianmodel.py | 2965 | 12784 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Character Mapping Table:
# this table is modified base on win1251BulgarianCharToOrderMap, so
# only number <64 is sure valid
Latin5_BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209, # 80
210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225, # 90
81,226,227,228,229,230,105,231,232,233,234,235,236, 45,237,238, # a0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # b0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,239, 67,240, 60, 56, # c0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # d0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,241, 42, 16, # e0
62,242,243,244, 58,245, 98,246,247,248,249,250,251, 91,252,253, # f0
)
win1251BulgarianCharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 77, 90, 99,100, 72,109,107,101, 79,185, 81,102, 76, 94, 82, # 40
110,186,108, 91, 74,119, 84, 96,111,187,115,253,253,253,253,253, # 50
253, 65, 69, 70, 66, 63, 68,112,103, 92,194,104, 95, 86, 87, 71, # 60
116,195, 85, 93, 97,113,196,197,198,199,200,253,253,253,253,253, # 70
206,207,208,209,210,211,212,213,120,214,215,216,217,218,219,220, # 80
221, 78, 64, 83,121, 98,117,105,222,223,224,225,226,227,228,229, # 90
88,230,231,232,233,122, 89,106,234,235,236,237,238, 45,239,240, # a0
73, 80,118,114,241,242,243,244,245, 62, 58,246,247,248,249,250, # b0
31, 32, 35, 43, 37, 44, 55, 47, 40, 59, 33, 46, 38, 36, 41, 30, # c0
39, 28, 34, 51, 48, 49, 53, 50, 54, 57, 61,251, 67,252, 60, 56, # d0
1, 18, 9, 20, 11, 3, 23, 15, 2, 26, 12, 10, 14, 6, 4, 13, # e0
7, 8, 5, 19, 29, 25, 22, 21, 27, 24, 17, 75, 52,253, 42, 16, # f0
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 96.9392%
# first 1024 sequences:3.0618%
# rest sequences: 0.2992%
# negative sequences: 0.0020%
BulgarianLangModel = (
0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,2,3,3,3,3,3,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,2,2,1,2,2,
3,1,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,0,1,
0,0,0,0,0,0,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,3,3,0,3,1,0,
0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,1,3,2,3,3,3,3,3,3,3,3,0,3,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,2,2,1,3,3,3,3,2,2,2,1,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,2,2,3,3,1,1,2,3,3,2,3,3,3,3,2,1,2,0,2,0,3,0,0,
0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,1,3,3,3,3,3,2,3,2,3,3,3,3,3,2,3,3,1,3,0,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,1,3,3,2,3,3,3,1,3,3,2,3,2,2,2,0,0,2,0,2,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,0,3,3,3,2,2,3,3,3,1,2,2,3,2,1,1,2,0,2,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,2,3,3,1,2,3,2,2,2,3,3,3,3,3,2,2,3,1,2,0,2,1,2,0,0,
0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,3,3,3,3,2,3,3,3,2,3,3,2,3,2,2,2,3,1,2,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,1,1,1,2,2,1,3,1,3,2,2,3,0,0,1,0,1,0,1,0,0,
0,0,0,1,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,2,3,2,2,3,1,2,1,1,1,2,3,1,3,1,2,2,0,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,1,3,2,2,3,3,1,2,3,1,1,3,3,3,3,1,2,2,1,1,1,0,2,0,2,0,1,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,2,2,3,3,3,2,2,1,1,2,0,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,0,1,2,1,3,3,2,3,3,3,3,3,2,3,2,1,0,3,1,2,1,2,1,2,3,2,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,2,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,1,3,3,2,3,3,2,2,2,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,0,3,3,3,3,3,2,1,1,2,1,3,3,0,3,1,1,1,1,3,2,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,2,2,3,3,3,3,3,3,3,3,3,3,3,1,1,3,1,3,3,2,3,2,2,2,3,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,2,3,2,1,1,1,1,1,3,1,3,1,1,0,0,0,1,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,2,0,3,2,0,3,0,2,0,0,2,1,3,1,0,0,1,0,0,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,1,1,1,2,1,1,2,1,1,1,2,2,1,2,1,1,1,0,1,1,0,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,2,1,3,1,1,2,1,3,2,1,1,0,1,2,3,2,1,1,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,3,3,3,2,2,1,0,1,0,0,1,0,0,0,2,1,0,3,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,2,3,2,3,3,1,3,2,1,1,1,2,1,1,2,1,3,0,1,0,0,0,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,2,2,3,3,2,3,2,2,2,3,1,2,2,1,1,2,1,1,2,2,0,1,1,0,1,0,2,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,1,0,2,2,1,3,2,1,0,0,2,0,2,0,1,0,0,0,0,0,0,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,3,1,2,0,2,3,1,2,3,2,0,1,3,1,2,1,1,1,0,0,1,0,0,2,2,2,3,
2,2,2,2,1,2,1,1,2,2,1,1,2,0,1,1,1,0,0,1,1,0,0,1,1,0,0,0,1,1,0,1,
3,3,3,3,3,2,1,2,2,1,2,0,2,0,1,0,1,2,1,2,1,1,0,0,0,1,0,1,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,
3,3,2,3,3,1,1,3,1,0,3,2,1,0,0,0,1,2,0,2,0,1,0,0,0,1,0,1,2,1,2,2,
1,1,1,1,1,1,1,2,2,2,1,1,1,1,1,1,1,0,1,2,1,1,1,0,0,0,0,0,1,1,0,0,
3,1,0,1,0,2,3,2,2,2,3,2,2,2,2,2,1,0,2,1,2,1,1,1,0,1,2,1,2,2,2,1,
1,1,2,2,2,2,1,2,1,1,0,1,2,1,2,2,2,1,1,1,0,1,1,1,1,2,0,1,0,0,0,0,
2,3,2,3,3,0,0,2,1,0,2,1,0,0,0,0,2,3,0,2,0,0,0,0,0,1,0,0,2,0,1,2,
2,1,2,1,2,2,1,1,1,2,1,1,1,0,1,2,2,1,1,1,1,1,0,1,1,1,0,0,1,2,0,0,
3,3,2,2,3,0,2,3,1,1,2,0,0,0,1,0,0,2,0,2,0,0,0,1,0,1,0,1,2,0,2,2,
1,1,1,1,2,1,0,1,2,2,2,1,1,1,1,1,1,1,0,1,1,1,0,0,0,0,0,0,1,1,0,0,
2,3,2,3,3,0,0,3,0,1,1,0,1,0,0,0,2,2,1,2,0,0,0,0,0,0,0,0,2,0,1,2,
2,2,1,1,1,1,1,2,2,2,1,0,2,0,1,0,1,0,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
3,3,3,3,2,2,2,2,2,0,2,1,1,1,1,2,1,2,1,1,0,2,0,1,0,1,0,0,2,0,1,2,
1,1,1,1,1,1,1,2,2,1,1,0,2,0,1,0,2,0,0,1,1,1,0,0,2,0,0,0,1,1,0,0,
2,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,0,0,0,1,2,0,1,2,
2,2,2,1,1,2,1,1,2,2,2,1,2,0,1,1,1,1,1,1,0,1,1,1,1,0,0,1,1,1,0,0,
2,3,3,3,3,0,2,2,0,2,1,0,0,0,1,1,1,2,0,2,0,0,0,3,0,0,0,0,2,0,2,2,
1,1,1,2,1,2,1,1,2,2,2,1,2,0,1,1,1,0,1,1,1,1,0,2,1,0,0,0,1,1,0,0,
2,3,3,3,3,0,2,1,0,0,2,0,0,0,0,0,1,2,0,2,0,0,0,0,0,0,0,0,2,0,1,2,
1,1,1,2,1,1,1,1,2,2,2,0,1,0,1,1,1,0,0,1,1,1,0,0,1,0,0,0,0,1,0,0,
3,3,2,2,3,0,1,0,1,0,0,0,0,0,0,0,1,1,0,3,0,0,0,0,0,0,0,0,1,0,2,2,
1,1,1,1,1,2,1,1,2,2,1,2,2,1,0,1,1,1,1,1,0,1,0,0,1,0,0,0,1,1,0,0,
3,1,0,1,0,2,2,2,2,3,2,1,1,1,2,3,0,0,1,0,2,1,1,0,1,1,1,1,2,1,1,1,
1,2,2,1,2,1,2,2,1,1,0,1,2,1,2,2,1,1,1,0,0,1,1,1,2,1,0,1,0,0,0,0,
2,1,0,1,0,3,1,2,2,2,2,1,2,2,1,1,1,0,2,1,2,2,1,1,2,1,1,0,2,1,1,1,
1,2,2,2,2,2,2,2,1,2,0,1,1,0,2,1,1,1,1,1,0,0,1,1,1,1,0,1,0,0,0,0,
2,1,1,1,1,2,2,2,2,1,2,2,2,1,2,2,1,1,2,1,2,3,2,2,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,1,2,0,1,2,1,1,0,1,0,1,2,1,2,0,0,0,1,1,0,0,0,1,0,0,2,
1,1,0,0,1,1,0,1,1,1,1,0,2,0,1,1,1,0,0,1,1,0,0,0,0,1,0,0,0,1,0,0,
2,0,0,0,0,1,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,2,1,1,1,
1,2,2,2,2,1,1,2,1,2,1,1,1,0,2,1,2,1,1,1,0,2,1,1,1,1,0,1,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,0,1,0,1,1,1,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,3,2,0,0,0,0,1,0,0,0,0,0,0,1,1,0,2,0,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,1,1,0,0,2,2,2,2,2,0,1,1,0,1,1,1,1,1,0,0,1,0,0,0,1,1,0,1,
2,3,1,2,1,0,1,1,0,2,2,2,0,0,1,0,0,1,1,1,1,0,0,0,0,0,0,0,1,0,1,2,
1,1,1,1,2,1,1,1,1,1,1,1,1,0,1,1,0,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,
2,2,2,2,2,0,0,2,0,0,2,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,0,2,2,
1,1,1,1,1,0,0,1,2,1,1,0,1,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,0,2,0,1,1,0,0,0,1,0,0,2,0,2,0,0,0,0,0,0,0,0,0,0,1,1,
0,0,0,1,1,1,1,1,1,1,1,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,3,2,0,0,1,0,0,1,0,0,0,0,0,0,1,0,2,0,0,0,1,0,0,0,0,0,0,0,2,
1,1,0,0,1,0,0,0,1,1,0,0,1,0,1,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,2,2,1,2,1,2,2,1,1,2,1,1,1,0,1,1,1,1,2,0,1,0,1,1,1,1,0,1,1,
1,1,2,1,1,1,1,1,1,0,0,1,2,1,1,1,1,1,1,0,0,1,1,1,0,0,0,0,0,0,0,0,
1,0,0,1,3,1,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,1,0,0,1,0,2,0,0,0,0,0,1,1,1,0,1,0,0,0,0,0,0,0,0,2,0,0,1,
0,2,0,1,0,0,1,1,2,0,1,0,1,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,2,2,2,0,1,1,0,2,1,0,1,1,1,0,0,1,0,2,0,1,0,0,0,0,0,0,0,0,0,1,
0,1,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,0,0,1,0,0,0,1,0,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,1,0,1,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,0,1,2,1,1,1,1,1,1,2,2,1,0,0,1,0,1,0,0,0,0,1,1,1,1,0,0,0,
1,1,2,1,1,1,1,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,1,2,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
0,1,1,0,1,1,1,0,0,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
1,0,1,0,0,1,1,1,1,1,1,1,1,1,1,1,0,0,1,0,2,0,0,2,0,1,0,0,1,0,0,1,
1,1,0,0,1,1,0,1,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,1,0,
1,1,1,1,1,1,1,2,0,0,0,0,0,0,2,1,0,1,1,0,0,1,1,1,0,1,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,1,1,0,1,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
)
Latin5BulgarianModel = {
'charToOrderMap': Latin5_BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "ISO-8859-5"
}
Win1251BulgarianModel = {
'charToOrderMap': win1251BulgarianCharToOrderMap,
'precedenceMatrix': BulgarianLangModel,
'mTypicalPositiveRatio': 0.969392,
'keepEnglishLetter': False,
'charsetName': "windows-1251"
}
# flake8: noqa
| bsd-2-clause |
mringel/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
    trim = max(1, n//10)
return np.mean(sorted(t)[trim:n-trim])
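# A rough illustration of the trimming: for a Series of ten values, trim is 1,
# so the smallest and largest values are dropped before averaging, e.g.
#   tmean(pandas.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 100]))  # 5.5, vs. 14.5 untrimmed
# while three or fewer values fall back to the plain mean.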
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
    """Compute the correlation matrix between prices for different qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
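# A minimal usage sketch for FillMissing (assuming dailies comes from
# GroupByQualityAndDay above):
#
#   filled = FillMissing(dailies['high'], span=30)
#   filled.ppg.isnull().sum()    # should be 0 once the gaps are filled
#   filled[['ewma', 'resid']]    # columns used later for serial correlations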
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
spacewalkproject/spacewalk | java/scripts/api/managechannel.py | 16 | 2371 | #!/usr/bin/python
"""
Script to:
- create unique channels for the given users
- push content to each user's channel
"""
import os
import xmlrpclib
# Setup
SATELLITE_HOST = "test10-64.rhndev.redhat.com"
SATELLITE_URL = "http://%s/rpc/api" % SATELLITE_HOST
SATELLITE_LOGIN_HASH ={'prad03':'redhat', 'prad02' : 'redhat'}
SUFFIX_HASH = {'prad03' : '03', 'prad02' : '02'}
CHANNEL_INFO = {'label' : 'channel-',
'name' : 'channel-',
'summary' : 'dummy channel',
'archLabel' : 'channel-ia32',
'parentLabel' : ''}
PKG_CONTENT_DIR = '/tmp/upload/'
client = xmlrpclib.Server(SATELLITE_URL, verbose=0)
def getKeys(users):
"""
Generate session key for each user
"""
keylist = {}
for login,password in users.items():
sessionkey = client.auth.login(login, password)
keylist[login] = sessionkey
return keylist
def createChannels(keylist, info):
"""
Create unique channels per user
"""
channel_list = {}
for login,key in keylist.items():
# create channel under each org
# Channel label,name should be unique
label = info['label'] + SUFFIX_HASH[login]
name = info['name'] + SUFFIX_HASH[login]
try:
print "Creating Channel: ",label
client.channel.software.create(key, label, name, \
info['summary'], info['archLabel'], \
info['parentLabel'])
except xmlrpclib.Fault, e:
print e
channel_list[login] = label
return channel_list
def pushContent(users, channels):
"""
Invoke rhnpush to push packages to channels
"""
for login,password in users.items():
print "Pushing Content to %s" % channels[login]
push_cmd = 'rhnpush --server=%s/APP --username=%s --password=%s \
--dir=%s --channel=%s -vvvv --tolerant --nosig' % \
(SATELLITE_HOST, login, password, PKG_CONTENT_DIR, \
channels[login])
os.system(push_cmd)
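# For the configuration above, the command generated for user 'prad03' looks
# roughly like:
#   rhnpush --server=test10-64.rhndev.redhat.com/APP --username=prad03 \
#       --password=redhat --dir=/tmp/upload/ --channel=channel-03 \
#       -vvvv --tolerant --nosig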
def main():
# Create Session keys
keys = getKeys(SATELLITE_LOGIN_HASH)
# Create channels
channel_list = createChannels(keys, CHANNEL_INFO)
# push content to channels
pushContent(SATELLITE_LOGIN_HASH, channel_list)
if __name__ == '__main__':
main()
| gpl-2.0 |
moreati/django | django/utils/jslex.py | 335 | 7778 | """JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
import re
class Tok(object):
"""
A specification for a token class.
"""
num = 0
def __init__(self, name, regex, next=None):
self.id = Tok.num
Tok.num += 1
self.name = name
self.regex = regex
self.next = next
def literals(choices, prefix="", suffix=""):
"""
Create a regex from a space-separated list of literal `choices`.
If provided, `prefix` and `suffix` will be attached to each choice
individually.
"""
return "|".join(prefix + re.escape(c) + suffix for c in choices.split())
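# For example, literals("if else", suffix=r"\b") builds the pattern
# r"if\b|else\b"; each choice is escaped with re.escape before joining.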
class Lexer(object):
"""
A generic multi-state regex-based lexer.
"""
def __init__(self, states, first):
self.regexes = {}
self.toks = {}
for state, rules in states.items():
parts = []
for tok in rules:
groupid = "t%d" % tok.id
self.toks[groupid] = tok
parts.append("(?P<%s>%s)" % (groupid, tok.regex))
self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
self.state = first
def lex(self, text):
"""
Lexically analyze `text`.
Yields pairs (`name`, `tokentext`).
"""
end = len(text)
state = self.state
regexes = self.regexes
toks = self.toks
start = 0
while start < end:
for match in regexes[state].finditer(text, start):
name = match.lastgroup
tok = toks[name]
toktext = match.group(name)
start += len(toktext)
yield (tok.name, toktext)
if tok.next:
state = tok.next
break
self.state = state
class JsLexer(Lexer):
"""
A Javascript lexer
>>> lexer = JsLexer()
>>> list(lexer.lex("a = 1"))
[('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
This doesn't properly handle non-ASCII characters in the Javascript source.
"""
# Because these tokens are matched as alternatives in a regex, longer
# possibilities must appear in the list before shorter ones, for example,
# '>>' before '>'.
#
# Note that we don't have to detect malformed Javascript, only properly
# lex correct Javascript, so much of this is simplified.
# Details of Javascript lexical structure are taken from
# http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
# A useful explanation of automatic semicolon insertion is at
# http://inimino.org/~inimino/blog/javascript_semicolons
both_before = [
Tok("comment", r"/\*(.|\n)*?\*/"),
Tok("linecomment", r"//.*?$"),
Tok("ws", r"\s+"),
Tok("keyword", literals("""
break case catch class const continue debugger
default delete do else enum export extends
finally for function if import in instanceof
new return super switch this throw try typeof
var void while with
""", suffix=r"\b"), next='reg'),
Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
Tok("id", r"""
            ([a-zA-Z_$ ]|\\u[0-9a-fA-F]{4}) # first char
([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
""", next='div'),
Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
Tok("onum", r"0[0-7]+"),
Tok("dnum", r"""
( (0|[1-9][0-9]*) # DecimalIntegerLiteral
\. # dot
[0-9]* # DecimalDigits-opt
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
\. # dot
[0-9]+ # DecimalDigits
([eE][-+]?[0-9]+)? # ExponentPart-opt
|
(0|[1-9][0-9]*) # DecimalIntegerLiteral
([eE][-+]?[0-9]+)? # ExponentPart-opt
)
""", next='div'),
Tok("punct", literals("""
>>>= === !== >>> <<= >>= <= >= == != << >> &&
|| += -= *= %= &= |= ^=
"""), next="reg"),
Tok("punct", literals("++ -- ) ]"), next='div'),
Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
]
both_after = [
Tok("other", r"."),
]
states = {
# slash will mean division
'div': both_before + [
Tok("punct", literals("/= /"), next='reg'),
] + both_after,
# slash will mean regex
'reg': both_before + [
Tok("regex",
r"""
/ # opening slash
# First character is..
( [^*\\/[] # anything but * \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)
# Following characters are same, except for excluding a star
( [^\\/[] # anything but \ / or [
| \\. # or an escape sequence
| \[ # or a class, which has
( [^\]\\] # anything but \ or ]
| \\. # or an escape sequence
)* # many times
\]
)* # many times
/ # closing slash
[a-zA-Z0-9]* # trailing flags
""", next='div'),
] + both_after,
}
def __init__(self):
super(JsLexer, self).__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
"""
Convert the Javascript source `js` into something resembling C for
xgettext.
What actually happens is that all the regex literals are replaced with
"REGEX".
"""
def escape_quotes(m):
"""Used in a regex to properly escape double quotes."""
s = m.group(0)
if s == '"':
return r'\"'
else:
return s
lexer = JsLexer()
c = []
for name, tok in lexer.lex(js):
if name == 'regex':
# C doesn't grok regexes, and they aren't needed for gettext,
# so just output a string instead.
tok = '"REGEX"'
elif name == 'string':
# C doesn't have single-quoted strings, so make all strings
# double-quoted.
if tok.startswith("'"):
guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
tok = '"' + guts + '"'
elif name == 'id':
# C can't deal with Unicode escapes in identifiers. We don't
# need them for gettext anyway, so replace them with something
# innocuous
tok = tok.replace("\\", "U")
c.append(tok)
return ''.join(c)
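# A small illustration of the transformation (regex literals collapse to a
# plain string and single-quoted strings become double-quoted):
#
#   prepare_js_for_gettext("x = /ab+c/; s = 'hi'")
#   # -> 'x = "REGEX"; s = "hi"'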
| bsd-3-clause |
puremourning/YouCompleteMe | python/ycm/client/completer_available_request.py | 7 | 1464 | # Copyright (C) 2013 Google Inc.
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe. If not, see <http://www.gnu.org/licenses/>.
from ycm.client.base_request import BaseRequest, BuildRequestData
class CompleterAvailableRequest( BaseRequest ):
def __init__( self, filetypes ):
super( CompleterAvailableRequest, self ).__init__()
self.filetypes = filetypes
self._response = None
def Start( self ):
request_data = BuildRequestData()
request_data.update( { 'filetypes': self.filetypes } )
self._response = self.PostDataToHandler( request_data,
'semantic_completion_available' )
def Response( self ):
return self._response
def SendCompleterAvailableRequest( filetypes ):
request = CompleterAvailableRequest( filetypes )
# This is a blocking call.
request.Start()
return request.Response()
| gpl-3.0 |
rizar/attention-lvcsr | libs/blocks/tests/monitoring/test_monitored_quantity.py | 2 | 1638 | import numpy
import theano
from fuel.datasets import IterableDataset
from blocks.monitoring.evaluators import DatasetEvaluator
from blocks.monitoring.aggregation import MonitoredQuantity
from blocks.bricks.cost import CategoricalCrossEntropy
class CrossEntropy(MonitoredQuantity):
def __init__(self, **kwargs):
super(CrossEntropy, self).__init__(**kwargs)
def initialize(self):
self.total_cross_entropy, self.examples_seen = 0.0, 0
def accumulate(self, target, predicted):
import numpy
self.total_cross_entropy += -(target * numpy.log(predicted)).sum()
self.examples_seen += 1
def readout(self):
res = self.total_cross_entropy / self.examples_seen
return res
def test_dataset_evaluators():
X = theano.tensor.vector('X')
Y = theano.tensor.vector('Y')
data = [numpy.arange(1, 7, dtype=theano.config.floatX).reshape(3, 2),
numpy.arange(11, 17, dtype=theano.config.floatX).reshape(3, 2)]
data_stream = IterableDataset(dict(X=data[0],
Y=data[1])).get_example_stream()
validator = DatasetEvaluator([
CrossEntropy(requires=[X, Y],
name="monitored_cross_entropy0"),
# to test two same quantities and make sure that state will be reset
CrossEntropy(requires=[X, Y],
name="monitored_cross_entropy1"),
CategoricalCrossEntropy().apply(X, Y), ])
values = validator.evaluate(data_stream)
numpy.testing.assert_allclose(
values['monitored_cross_entropy1'],
values['categoricalcrossentropy_apply_cost'])
| mit |
CatsAndDogsbvba/odoo | openerp/report/render/html2html/html2html.py | 443 | 4238 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.report.render.rml2pdf import utils
import copy
import base64
import cStringIO
import re
from reportlab.lib.utils import ImageReader
_regex = re.compile('\[\[(.+?)\]\]')
utils._regex = re.compile('\[\[\s*(.+?)\s*\]\]',re.DOTALL)
class html2html(object):
def __init__(self, html, localcontext):
self.localcontext = localcontext
self.etree = html
self._node = None
def render(self):
def process_text(node,new_node):
if new_node.tag in ['story','tr','section']:
new_node.attrib.clear()
for child in utils._child_get(node, self):
new_child = copy.deepcopy(child)
new_node.append(new_child)
if len(child):
for n in new_child:
new_child.text = utils._process_text(self, child.text)
new_child.tail = utils._process_text(self, child.tail)
new_child.remove(n)
process_text(child, new_child)
else:
if new_child.tag=='img' and new_child.get('name'):
if _regex.findall(new_child.get('name')) :
src = utils._process_text(self, new_child.get('name'))
if src :
new_child.set('src','data:image/gif;base64,%s'%src)
output = cStringIO.StringIO(base64.decodestring(src))
img = ImageReader(output)
(width,height) = img.getSize()
if not new_child.get('width'):
new_child.set('width',str(width))
if not new_child.get('height') :
new_child.set('height',str(height))
else :
new_child.getparent().remove(new_child)
new_child.text = utils._process_text(self, child.text)
new_child.tail = utils._process_text(self, child.tail)
self._node = copy.deepcopy(self.etree)
for n in self._node:
self._node.remove(n)
process_text(self.etree, self._node)
return self._node
def url_modify(self,root):
for n in root:
if (n.text.find('<a ')>=0 or n.text.find('<a')>=0) and n.text.find('href')>=0 and n.text.find('style')<=0 :
node = (n.tag=='span' and n.getparent().tag=='u') and n.getparent().getparent() or ((n.tag=='span') and n.getparent()) or n
style = node.get('color') and "style='color:%s; text-decoration: none;'"%node.get('color') or ''
if n.text.find('<a')>=0:
t = '<a '
else :
t = '<a '
href = n.text.split(t)[-1]
n.text = ' '.join([t,style,href])
self.url_modify(n)
return root
def parseString(node, localcontext = {}):
r = html2html(node, localcontext)
root = r.render()
root = r.url_modify(root)
return root
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
cnrat/fail2ban | fail2ban/tests/action_d/test_badips.py | 19 | 2735 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
# vi: set ft=python sts=4 ts=4 sw=4 noet :
# This file is part of Fail2Ban.
#
# Fail2Ban is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Fail2Ban is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Fail2Ban; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import os
import unittest
import sys
from ..dummyjail import DummyJail
from ..utils import CONFIG_DIR
if sys.version_info >= (2,7):
class BadIPsActionTest(unittest.TestCase):
def setUp(self):
"""Call before every test case."""
self.jail = DummyJail()
self.jail.actions.add("test")
pythonModule = os.path.join(CONFIG_DIR, "action.d", "badips.py")
self.jail.actions.add("badips", pythonModule, initOpts={
'category': "ssh",
'banaction': "test",
})
self.action = self.jail.actions["badips"]
def tearDown(self):
"""Call after every test case."""
# Must cancel timer!
if self.action._timer:
self.action._timer.cancel()
def testCategory(self):
categories = self.action.getCategories()
self.assertTrue("ssh" in categories)
self.assertTrue(len(categories) >= 10)
self.assertRaises(
ValueError, setattr, self.action, "category",
"invalid-category")
# Not valid for reporting category...
self.assertRaises(
ValueError, setattr, self.action, "category", "mail")
# but valid for blacklisting.
self.action.bancategory = "mail"
def testScore(self):
self.assertRaises(ValueError, setattr, self.action, "score", -5)
self.action.score = 5
self.action.score = "5"
def testBanaction(self):
self.assertRaises(
ValueError, setattr, self.action, "banaction",
"invalid-action")
self.action.banaction = "test"
def testUpdateperiod(self):
self.assertRaises(
ValueError, setattr, self.action, "updateperiod", -50)
self.assertRaises(
ValueError, setattr, self.action, "updateperiod", 0)
self.action.updateperiod = 900
self.action.updateperiod = "900"
def testStart(self):
self.action.start()
self.assertTrue(len(self.action._bannedips) > 10)
def testStop(self):
self.testStart()
self.action.stop()
self.assertTrue(len(self.action._bannedips) == 0)
| gpl-2.0 |
nemesisdesign/django | tests/null_queries/tests.py | 55 | 2973 | from __future__ import unicode_literals
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Choice, Inner, OuterA, OuterB, Poll
class NullQueriesTests(TestCase):
def test_none_as_null(self):
"""
Regression test for the use of None as a query value.
None is interpreted as an SQL NULL, but only in __exact and __iexact
queries.
Set up some initial polls and choices
"""
p1 = Poll(question='Why?')
p1.save()
c1 = Choice(poll=p1, choice='Because.')
c1.save()
c2 = Choice(poll=p1, choice='Why Not?')
c2.save()
# Exact query with value None returns nothing ("is NULL" in sql,
# but every 'id' field has a value).
self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])
# The same behavior for iexact query.
self.assertQuerysetEqual(Choice.objects.filter(choice__iexact=None), [])
# Excluding the previous result returns everything.
self.assertQuerysetEqual(
Choice.objects.exclude(choice=None).order_by('id'),
[
'<Choice: Choice: Because. in poll Q: Why? >',
'<Choice: Choice: Why Not? in poll Q: Why? >'
]
)
# Valid query, but fails because foo isn't a keyword
with self.assertRaises(FieldError):
Choice.objects.filter(foo__exact=None)
# Can't use None on anything other than __exact and __iexact
with self.assertRaises(ValueError):
Choice.objects.filter(id__gt=None)
# Related managers use __exact=None implicitly if the object hasn't been saved.
p2 = Poll(question="How?")
self.assertEqual(repr(p2.choice_set.all()), '<QuerySet []>')
def test_reverse_relations(self):
"""
Querying across reverse relations and then another relation should
insert outer joins correctly so as not to exclude results.
"""
obj = OuterA.objects.create()
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third=None),
['<OuterA: OuterA object>']
)
self.assertQuerysetEqual(
OuterA.objects.filter(inner__third__data=None),
['<OuterA: OuterA object>']
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
Inner.objects.filter(first__inner__third=None),
['<Inner: Inner object>']
)
# Ticket #13815: check if <reverse>_isnull=False does not produce
# faulty empty lists
OuterB.objects.create(data="reverse")
self.assertQuerysetEqual(
OuterB.objects.filter(inner__isnull=False),
[]
)
Inner.objects.create(first=obj)
self.assertQuerysetEqual(
OuterB.objects.exclude(inner__isnull=False),
['<OuterB: OuterB object>']
)
| bsd-3-clause |
renesugar/arrow | python/pyarrow/tests/test_jvm.py | 5 | 13848 | # -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
import os
import pyarrow as pa
import pyarrow.jvm as pa_jvm
import pytest
import six
import sys
import xml.etree.ElementTree as ET
jpype = pytest.importorskip("jpype")
@pytest.fixture(scope="session")
def root_allocator():
# This test requires Arrow Java to be built in the same source tree
pom_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'java', 'pom.xml')
tree = ET.parse(pom_path)
version = tree.getroot().find(
'POM:version',
namespaces={
'POM': 'http://maven.apache.org/POM/4.0.0'
}).text
jar_path = os.path.join(
os.path.dirname(__file__), '..', '..', '..',
'java', 'tools', 'target',
'arrow-tools-{}-jar-with-dependencies.jar'.format(version))
jar_path = os.getenv("ARROW_TOOLS_JAR", jar_path)
jpype.startJVM(jpype.getDefaultJVMPath(), "-Djava.class.path=" + jar_path)
return jpype.JPackage("org").apache.arrow.memory.RootAllocator(sys.maxsize)
def test_jvm_buffer(root_allocator):
# Create a buffer
jvm_buffer = root_allocator.buffer(8)
for i in range(8):
jvm_buffer.setByte(i, 8 - i)
# Convert to Python
buf = pa_jvm.jvm_buffer(jvm_buffer)
# Check its content
assert buf.to_pybytes() == b'\x08\x07\x06\x05\x04\x03\x02\x01'
def _jvm_field(jvm_spec):
om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
pojo_Field = jpype.JClass('org.apache.arrow.vector.types.pojo.Field')
return om.readValue(jvm_spec, pojo_Field)
def _jvm_schema(jvm_spec, metadata=None):
field = _jvm_field(jvm_spec)
schema_cls = jpype.JClass('org.apache.arrow.vector.types.pojo.Schema')
fields = jpype.JClass('java.util.ArrayList')()
fields.add(field)
if metadata:
dct = jpype.JClass('java.util.HashMap')()
for k, v in six.iteritems(metadata):
dct.put(k, v)
return schema_cls(fields, dct)
else:
return schema_cls(fields)
# In the following, we use the JSON serialization of the Field objects in Java.
# This ensures that we neither rely on the exact mechanics on how to construct
# them using Java code as well as enables us to define them as parameters
# without to invoke the JVM.
#
# The specifications were created using:
#
# om = jpype.JClass('com.fasterxml.jackson.databind.ObjectMapper')()
# field = … # Code to instantiate the field
# jvm_spec = om.writeValueAsString(field)
@pytest.mark.parametrize('pa_type,jvm_spec', [
(pa.null(), '{"name":"null"}'),
(pa.bool_(), '{"name":"bool"}'),
(pa.int8(), '{"name":"int","bitWidth":8,"isSigned":true}'),
(pa.int16(), '{"name":"int","bitWidth":16,"isSigned":true}'),
(pa.int32(), '{"name":"int","bitWidth":32,"isSigned":true}'),
(pa.int64(), '{"name":"int","bitWidth":64,"isSigned":true}'),
(pa.uint8(), '{"name":"int","bitWidth":8,"isSigned":false}'),
(pa.uint16(), '{"name":"int","bitWidth":16,"isSigned":false}'),
(pa.uint32(), '{"name":"int","bitWidth":32,"isSigned":false}'),
(pa.uint64(), '{"name":"int","bitWidth":64,"isSigned":false}'),
(pa.float16(), '{"name":"floatingpoint","precision":"HALF"}'),
(pa.float32(), '{"name":"floatingpoint","precision":"SINGLE"}'),
(pa.float64(), '{"name":"floatingpoint","precision":"DOUBLE"}'),
(pa.time32('s'), '{"name":"time","unit":"SECOND","bitWidth":32}'),
(pa.time32('ms'), '{"name":"time","unit":"MILLISECOND","bitWidth":32}'),
(pa.time64('us'), '{"name":"time","unit":"MICROSECOND","bitWidth":64}'),
(pa.time64('ns'), '{"name":"time","unit":"NANOSECOND","bitWidth":64}'),
(pa.timestamp('s'), '{"name":"timestamp","unit":"SECOND",'
'"timezone":null}'),
(pa.timestamp('ms'), '{"name":"timestamp","unit":"MILLISECOND",'
'"timezone":null}'),
(pa.timestamp('us'), '{"name":"timestamp","unit":"MICROSECOND",'
'"timezone":null}'),
(pa.timestamp('ns'), '{"name":"timestamp","unit":"NANOSECOND",'
'"timezone":null}'),
(pa.timestamp('ns', tz='UTC'), '{"name":"timestamp","unit":"NANOSECOND"'
',"timezone":"UTC"}'),
(pa.timestamp('ns', tz='Europe/Paris'), '{"name":"timestamp",'
'"unit":"NANOSECOND","timezone":"Europe/Paris"}'),
(pa.date32(), '{"name":"date","unit":"DAY"}'),
(pa.date64(), '{"name":"date","unit":"MILLISECOND"}'),
(pa.decimal128(19, 4), '{"name":"decimal","precision":19,"scale":4}'),
(pa.string(), '{"name":"utf8"}'),
(pa.binary(), '{"name":"binary"}'),
(pa.binary(10), '{"name":"fixedsizebinary","byteWidth":10}'),
# TODO(ARROW-2609): complex types that have children
# pa.list_(pa.int32()),
# pa.struct([pa.field('a', pa.int32()),
# pa.field('b', pa.int8()),
# pa.field('c', pa.string())]),
# pa.union([pa.field('a', pa.binary(10)),
# pa.field('b', pa.string())], mode=pa.lib.UnionMode_DENSE),
# pa.union([pa.field('a', pa.binary(10)),
# pa.field('b', pa.string())], mode=pa.lib.UnionMode_SPARSE),
# TODO: DictionaryType requires a vector in the type
# pa.dictionary(pa.int32(), pa.array(['a', 'b', 'c'])),
])
@pytest.mark.parametrize('nullable', [True, False])
def test_jvm_types(root_allocator, pa_type, jvm_spec, nullable):
spec = {
'name': 'field_name',
'nullable': nullable,
'type': json.loads(jvm_spec),
# TODO: This needs to be set for complex types
'children': []
}
jvm_field = _jvm_field(json.dumps(spec))
result = pa_jvm.field(jvm_field)
expected_field = pa.field('field_name', pa_type, nullable=nullable)
assert result == expected_field
jvm_schema = _jvm_schema(json.dumps(spec))
result = pa_jvm.schema(jvm_schema)
assert result == pa.schema([expected_field])
# Schema with custom metadata
jvm_schema = _jvm_schema(json.dumps(spec), {'meta': 'data'})
result = pa_jvm.schema(jvm_schema)
assert result == pa.schema([expected_field], {'meta': 'data'})
# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type', [
(pa.bool_(), [True, False, True, True], 'BitVector'),
(pa.uint8(), list(range(128)), 'UInt1Vector'),
(pa.uint16(), list(range(128)), 'UInt2Vector'),
(pa.int32(), list(range(128)), 'IntVector'),
(pa.int64(), list(range(128)), 'BigIntVector'),
(pa.float32(), list(range(128)), 'Float4Vector'),
(pa.float64(), list(range(128)), 'Float8Vector'),
(pa.timestamp('s'), list(range(128)), 'TimeStampSecVector'),
(pa.timestamp('ms'), list(range(128)), 'TimeStampMilliVector'),
(pa.timestamp('us'), list(range(128)), 'TimeStampMicroVector'),
(pa.timestamp('ns'), list(range(128)), 'TimeStampNanoVector'),
# TODO(ARROW-2605): These types miss a conversion from pure Python objects
# * pa.time32('s')
# * pa.time32('ms')
# * pa.time64('us')
# * pa.time64('ns')
(pa.date32(), list(range(128)), 'DateDayVector'),
(pa.date64(), list(range(128)), 'DateMilliVector'),
# TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_array(root_allocator, pa_type, py_data, jvm_type):
# Create vector
cls = "org.apache.arrow.vector.{}".format(jvm_type)
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew(len(py_data))
for i, val in enumerate(py_data):
jvm_vector.setSafe(i, val)
jvm_vector.setValueCount(len(py_data))
py_array = pa.array(py_data, type=pa_type)
jvm_array = pa_jvm.array(jvm_vector)
assert py_array.equals(jvm_array)
# These test parameters mostly use an integer range as an input as this is
# often the only type that is understood by both Python and Java
# implementations of Arrow.
@pytest.mark.parametrize('pa_type,py_data,jvm_type,jvm_spec', [
# TODO: null
(pa.bool_(), [True, False, True, True], 'BitVector', '{"name":"bool"}'),
(
pa.uint8(),
list(range(128)),
'UInt1Vector',
'{"name":"int","bitWidth":8,"isSigned":false}'
),
(
pa.uint16(),
list(range(128)),
'UInt2Vector',
'{"name":"int","bitWidth":16,"isSigned":false}'
),
(
pa.uint32(),
list(range(128)),
'UInt4Vector',
'{"name":"int","bitWidth":32,"isSigned":false}'
),
(
pa.uint64(),
list(range(128)),
'UInt8Vector',
'{"name":"int","bitWidth":64,"isSigned":false}'
),
(
pa.int8(),
list(range(128)),
'TinyIntVector',
'{"name":"int","bitWidth":8,"isSigned":true}'
),
(
pa.int16(),
list(range(128)),
'SmallIntVector',
'{"name":"int","bitWidth":16,"isSigned":true}'
),
(
pa.int32(),
list(range(128)),
'IntVector',
'{"name":"int","bitWidth":32,"isSigned":true}'
),
(
pa.int64(),
list(range(128)),
'BigIntVector',
'{"name":"int","bitWidth":64,"isSigned":true}'
),
# TODO: float16
(
pa.float32(),
list(range(128)),
'Float4Vector',
'{"name":"floatingpoint","precision":"SINGLE"}'
),
(
pa.float64(),
list(range(128)),
'Float8Vector',
'{"name":"floatingpoint","precision":"DOUBLE"}'
),
(
pa.timestamp('s'),
list(range(128)),
'TimeStampSecVector',
'{"name":"timestamp","unit":"SECOND","timezone":null}'
),
(
pa.timestamp('ms'),
list(range(128)),
'TimeStampMilliVector',
'{"name":"timestamp","unit":"MILLISECOND","timezone":null}'
),
(
pa.timestamp('us'),
list(range(128)),
'TimeStampMicroVector',
'{"name":"timestamp","unit":"MICROSECOND","timezone":null}'
),
(
pa.timestamp('ns'),
list(range(128)),
'TimeStampNanoVector',
'{"name":"timestamp","unit":"NANOSECOND","timezone":null}'
),
# TODO(ARROW-2605): These types miss a conversion from pure Python objects
# * pa.time32('s')
# * pa.time32('ms')
# * pa.time64('us')
# * pa.time64('ns')
(
pa.date32(),
list(range(128)),
'DateDayVector',
'{"name":"date","unit":"DAY"}'
),
(
pa.date64(),
list(range(128)),
'DateMilliVector',
'{"name":"date","unit":"MILLISECOND"}'
),
# TODO(ARROW-2606): pa.decimal128(19, 4)
])
def test_jvm_record_batch(root_allocator, pa_type, py_data, jvm_type,
jvm_spec):
# Create vector
cls = "org.apache.arrow.vector.{}".format(jvm_type)
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew(len(py_data))
for i, val in enumerate(py_data):
jvm_vector.setSafe(i, val)
jvm_vector.setValueCount(len(py_data))
# Create field
spec = {
'name': 'field_name',
'nullable': False,
'type': json.loads(jvm_spec),
# TODO: This needs to be set for complex types
'children': []
}
jvm_field = _jvm_field(json.dumps(spec))
# Create VectorSchemaRoot
jvm_fields = jpype.JClass('java.util.ArrayList')()
jvm_fields.add(jvm_field)
jvm_vectors = jpype.JClass('java.util.ArrayList')()
jvm_vectors.add(jvm_vector)
jvm_vsr = jpype.JClass('org.apache.arrow.vector.VectorSchemaRoot')
jvm_vsr = jvm_vsr(jvm_fields, jvm_vectors, len(py_data))
py_record_batch = pa.RecordBatch.from_arrays(
[pa.array(py_data, type=pa_type)],
['col']
)
jvm_record_batch = pa_jvm.record_batch(jvm_vsr)
assert py_record_batch.equals(jvm_record_batch)
def _string_to_varchar_holder(ra, string):
nvch_cls = "org.apache.arrow.vector.holders.NullableVarCharHolder"
holder = jpype.JClass(nvch_cls)()
if string is None:
holder.isSet = 0
else:
holder.isSet = 1
        value = jpype.JClass("java.lang.String")(string)
std_charsets = jpype.JClass("java.nio.charset.StandardCharsets")
bytes_ = value.getBytes(std_charsets.UTF_8)
holder.buffer = ra.buffer(len(bytes_))
holder.buffer.setBytes(0, bytes_, 0, len(bytes_))
holder.start = 0
holder.end = len(bytes_)
return holder
# TODO(ARROW-2607)
@pytest.mark.xfail(reason="from_buffers is only supported for "
"primitive arrays yet")
def test_jvm_string_array(root_allocator):
data = [u"string", None, u"töst"]
cls = "org.apache.arrow.vector.VarCharVector"
jvm_vector = jpype.JClass(cls)("vector", root_allocator)
jvm_vector.allocateNew()
for i, string in enumerate(data):
        holder = _string_to_varchar_holder(root_allocator, string)
jvm_vector.setSafe(i, holder)
jvm_vector.setValueCount(i + 1)
py_array = pa.array(data, type=pa.string())
jvm_array = pa_jvm.array(jvm_vector)
assert py_array.equals(jvm_array)
| apache-2.0 |
ol-loginov/intellij-community | python/lib/Lib/distutils/command/install_egg_info.py | 438 | 2587 | """distutils.command.install_egg_info
Implements the Distutils 'install_egg_info' command, for installing
a package's PKG-INFO metadata."""
from distutils.cmd import Command
from distutils import log, dir_util
import os, sys, re
class install_egg_info(Command):
"""Install an .egg-info file for the package"""
description = "Install package's PKG-INFO metadata as an .egg-info file"
user_options = [
('install-dir=', 'd', "directory to install to"),
]
def initialize_options(self):
self.install_dir = None
def finalize_options(self):
self.set_undefined_options('install_lib',('install_dir','install_dir'))
basename = "%s-%s-py%s.egg-info" % (
to_filename(safe_name(self.distribution.get_name())),
to_filename(safe_version(self.distribution.get_version())),
sys.version[:3]
)
self.target = os.path.join(self.install_dir, basename)
self.outputs = [self.target]
def run(self):
target = self.target
if os.path.isdir(target) and not os.path.islink(target):
dir_util.remove_tree(target, dry_run=self.dry_run)
elif os.path.exists(target):
self.execute(os.unlink,(self.target,),"Removing "+target)
elif not os.path.isdir(self.install_dir):
self.execute(os.makedirs, (self.install_dir,),
"Creating "+self.install_dir)
log.info("Writing %s", target)
if not self.dry_run:
f = open(target, 'w')
self.distribution.metadata.write_pkg_file(f)
f.close()
def get_outputs(self):
return self.outputs
# The following routines are taken from setuptools' pkg_resources module and
# can be replaced by importing them from pkg_resources once it is included
# in the stdlib.
def safe_name(name):
"""Convert an arbitrary string to a standard distribution name
Any runs of non-alphanumeric/. characters are replaced with a single '-'.
"""
return re.sub('[^A-Za-z0-9.]+', '-', name)
def safe_version(version):
"""Convert an arbitrary string to a standard version string
Spaces become dots, and all other non-alphanumeric characters become
dashes, with runs of multiple dashes condensed to a single dash.
"""
version = version.replace(' ','.')
return re.sub('[^A-Za-z0-9.]+', '-', version)
def to_filename(name):
"""Convert a project or version name to its filename-escaped form
Any '-' characters are currently replaced with '_'.
"""
return name.replace('-','_')
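# A few illustrative conversions:
#   safe_name("my package")      -> 'my-package'
#   safe_version("1.0 beta 2")   -> '1.0.beta.2'
#   to_filename("my-package")    -> 'my_package'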
| apache-2.0 |
pycroscopy/pycroscopy | tests/io/test_hdf_writer.py | 1 | 36224 | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 3 15:07:16 2017
@author: Suhas Somnath
"""
from __future__ import division, print_function, unicode_literals, absolute_import
import unittest
import os
import h5py
import numpy as np
import sys
sys.path.append("../../../pycroscopy/")
from pycroscopy.io.virtual_data import VirtualGroup, VirtualDataset
from pycroscopy.io.hdf_writer import HDFwriter
from pyUSID.io.hdf_utils import get_attr, get_h5_obj_refs # Until an elegant solution presents itself
class TestHDFWriter(unittest.TestCase):
@staticmethod
def __delete_existing_file(file_path):
if os.path.exists(file_path):
os.remove(file_path)
def test_init_invalid_input(self):
with self.assertRaises(TypeError):
_ = HDFwriter(4)
def test_init_path_non_existant_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_path_existing_file_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file
writer = HDFwriter(file_path)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_r_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r')
# hdf handle but of mode r
with self.assertRaises(TypeError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_init_h5_handle_r_plus_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='r+')
# open h5 file handle or mode r+
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_w_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
h5_f = h5py.File(file_path, mode='w')
# open h5 file handle or mode w
writer = HDFwriter(h5_f)
self.assertIsInstance(writer, HDFwriter, "writer should be an HDFwriter")
writer.close()
os.remove(file_path)
def test_init_h5_handle_closed(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
h5_f = h5py.File(file_path)
h5_f.close()
# Existing h5 file but closed
with self.assertRaises(ValueError):
_ = HDFwriter(h5_f)
os.remove(file_path)
def test_simple_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dtype = np.uint16
dset_name = 'test'
data = np.random.randint(0, high=15, size=5, dtype=dtype)
microdset = VirtualDataset(dset_name, data)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
os.remove(file_path)
def test_simple_dset_write_success_more_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = data.dtype
compression = 'gzip'
            chunking = (1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertTrue(np.allclose(h5_d[()], data))
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
os.remove(file_path)
def test_simple_dset_write_success_more_options_03(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
data = np.random.rand(16, 1024)
dtype = np.float16
compression = 'gzip'
            chunking = (1, 1024)
microdset = VirtualDataset(dset_name, data, dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_simple_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertTrue(np.all(h5_d[()] - data < 1E-3))
os.remove(file_path)
def test_empty_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
microdset = VirtualDataset(dset_name, None, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
# dtype is assigned automatically by h5py. Not to be tested here
os.remove(file_path)
def test_empty_dset_write_success_w_options_02(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (16, 1024)
chunking = (1, 1024)
compression = 'gzip'
dtype = np.float16
microdset = VirtualDataset(dset_name, None, maxshape=maxshape,
dtype=dtype, compression=compression, chunking=chunking)
writer = HDFwriter(h5_f)
h5_d = writer._create_empty_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.dtype, dtype)
self.assertEqual(h5_d.compression, compression)
self.assertEqual(h5_d.chunks, chunking)
self.assertEqual(h5_d.shape, maxshape)
self.assertEqual(h5_d.maxshape, maxshape)
os.remove(file_path)
def test_expandable_dset_write_success_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
dset_name = 'test'
maxshape = (None, 1024)
data = np.random.rand(1, 1024)
microdset = VirtualDataset(dset_name, data, maxshape=maxshape)
writer = HDFwriter(h5_f)
h5_d = writer._create_resizeable_dset(h5_f, microdset)
self.assertIsInstance(h5_d, h5py.Dataset)
self.assertEqual(h5_d.parent, h5_f)
self.assertEqual(h5_d.name, '/' + dset_name)
self.assertEqual(h5_d.shape, data.shape)
self.assertEqual(h5_d.maxshape, maxshape)
self.assertTrue(np.allclose(h5_d[()], data))
# Now test to make sure that the dataset can be expanded:
# TODO: add this to the example!
expansion_axis = 0
h5_d.resize(h5_d.shape[expansion_axis] + 1, axis=expansion_axis)
self.assertEqual(h5_d.shape, (data.shape[0]+1, data.shape[1]))
self.assertEqual(h5_d.maxshape, maxshape)
# Finally try checking to see if this new data is also present in the file
new_data = np.random.rand(1024)
h5_d[1] = new_data
data = np.vstack((np.squeeze(data), new_data))
self.assertTrue(np.allclose(h5_d[()], data))
os.remove(file_path)
# TODO: will have to check to see if the parent is correctly declared for the group
def test_group_create_non_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name)
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_indexed_simple_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = 'test_'
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
h5_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_grp, h5py.Group)
self.assertEqual(h5_grp.parent, h5_f)
self.assertEqual(h5_grp.name, '/' + grp_name + '000')
# self.assertEqual(len(h5_grp.items), 0)
os.remove(file_path)
def test_group_create_root_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
grp_name = ''
micro_group = VirtualGroup(grp_name)
writer = HDFwriter(h5_f)
with self.assertRaises(ValueError):
_ = writer._create_group(h5_f, micro_group)
os.remove(file_path)
def test_group_create_indexed_nested_01(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
outer_grp_name = 'outer_'
micro_group = VirtualGroup(outer_grp_name)
writer = HDFwriter(h5_f)
h5_outer_grp = writer._create_group(h5_f, micro_group)
self.assertIsInstance(h5_outer_grp, h5py.Group)
self.assertEqual(h5_outer_grp.parent, h5_f)
self.assertEqual(h5_outer_grp.name, '/' + outer_grp_name + '000')
inner_grp_name = 'inner_'
micro_group = VirtualGroup(inner_grp_name)
h5_inner_grp = writer._create_group(h5_outer_grp, micro_group)
self.assertIsInstance(h5_inner_grp, h5py.Group)
self.assertEqual(h5_inner_grp.parent, h5_outer_grp)
self.assertEqual(h5_inner_grp.name, h5_outer_grp.name + '/' + inner_grp_name + '000')
os.remove(file_path)
def test_write_legal_reg_ref_multi_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_multi_dim_data_2nd_dim(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 3)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(None), slice(0, None, 2)),
'odd_rows': (slice(None), slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:, 0:None:2], data[:, 1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_legal_reg_ref_one_dim_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_generate_and_write_reg_ref_legal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': ['row_1', 'row_2']}
if sys.version_info.major == 3:
with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, attrs.copy())
else:
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels']) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[0], data[1]]
written_data = [h5_dset[h5_dset.attrs['row_1']], h5_dset[h5_dset.attrs['row_2']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(np.squeeze(exp), np.squeeze(act)))
os.remove(file_path)
def test_generate_and_write_reg_ref_illegal(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(3, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
writer._write_dset_attributes(h5_dset, {'labels': ['row_1', 'row_2']})
self.assertEqual(len(h5_dset.attrs), 0)
h5_f.flush()
os.remove(file_path)
    def test_generate_and_write_reg_ref_illegal_type(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(2, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
# with self.assertWarns(UserWarning):
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, {'labels': [1, np.arange(3)]})
os.remove(file_path)
def test_write_illegal_reg_ref_too_many_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), slice(None), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None), slice(None))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_illegal_reg_ref_too_few_slices(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2)),
'odd_rows': (slice(1, None, 2))}}
with self.assertRaises(ValueError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_reg_ref_slice_dim_larger_than_data(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, 15, 2), slice(None)),
'odd_rows': (slice(1, 15, 2), slice(None))}}
writer._write_dset_attributes(h5_dset, attrs.copy())
h5_f.flush()
# two atts point to region references. one for labels
self.assertEqual(len(h5_dset.attrs), 1 + len(attrs['labels']))
# check if the labels attribute was written:
self.assertTrue(np.all([x in list(attrs['labels'].keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_illegal_reg_ref_not_slice_objs(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'labels': {'even_rows': (slice(0, None, 2), 15),
'odd_rows': (slice(1, None, 2), 'hello')}}
with self.assertRaises(TypeError):
writer._write_dset_attributes(h5_dset, attrs.copy())
os.remove(file_path)
def test_write_simple_atts_reg_ref_to_dset(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
h5_dset = writer._create_simple_dset(h5_f, VirtualDataset('test', data))
self.assertIsInstance(h5_dset, h5py.Dataset)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
writer._write_dset_attributes(h5_dset, attrs.copy())
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_invalid_input(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(TypeError):
_ = writer.write(np.arange(5))
def test_write_dset_under_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data)
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_existing_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
h5_g = writer._create_group(h5_f, VirtualGroup('test_group'))
self.assertIsInstance(h5_g, h5py.Group)
data = np.random.rand(5, 7)
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
micro_dset = VirtualDataset('test', data, parent='/test_group')
micro_dset.attrs = attrs.copy()
[h5_dset] = writer.write(micro_dset)
self.assertIsInstance(h5_dset, h5py.Dataset)
self.assertEqual(h5_dset.parent, h5_g)
reg_ref = attrs.pop('labels')
self.assertEqual(len(h5_dset.attrs), len(attrs) + 1 + len(reg_ref))
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_dset, 'labels')]))
expected_data = [data[:None:2], data[1:None:2]]
written_data = [h5_dset[h5_dset.attrs['even_rows']], h5_dset[h5_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
os.remove(file_path)
def test_write_dset_under_invalid_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
with self.assertRaises(KeyError):
_ = writer.write(VirtualDataset('test', np.random.rand(5, 7), parent='/does_not_exist'))
os.remove(file_path)
def test_write_root(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[ret_val] = writer.write(micro_group)
self.assertIsInstance(ret_val, h5py.File)
self.assertEqual(h5_f, ret_val)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_f, key) == expected_val))
os.remove(file_path)
def test_write_single_group(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
micro_group = VirtualGroup('Test_')
micro_group.attrs = attrs
writer = HDFwriter(h5_f)
[h5_group] = writer.write(micro_group)
for key, expected_val in attrs.items():
self.assertTrue(np.all(get_attr(h5_group, key) == expected_val))
os.remove(file_path)
def test_group_indexing_sequential(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
writer = HDFwriter(h5_f)
micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
[h5_group_0] = writer.write(micro_group_0)
_ = writer.write(VirtualGroup('blah'))
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
[h5_group_1] = writer.write(micro_group_1)
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_group_indexing_simultaneous(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
            micro_group_0 = VirtualGroup('Test_', attrs={'att_1': 'string_val', 'att_2': 1.2345})
micro_group_1 = VirtualGroup('Test_', attrs={'att_3': [1, 2, 3, 4], 'att_4': ['str_1', 'str_2', 'str_3']})
root_group = VirtualGroup('', children=[VirtualGroup('blah'), micro_group_0,
VirtualGroup('meh'), micro_group_1])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(root_group)
[h5_group_1] = get_h5_obj_refs(['Test_001'], h5_refs_list)
[h5_group_0] = get_h5_obj_refs(['Test_000'], h5_refs_list)
self.assertIsInstance(h5_group_0, h5py.Group)
self.assertEqual(h5_group_0.name, '/Test_000')
for key, expected_val in micro_group_0.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_0, key) == expected_val))
self.assertIsInstance(h5_group_1, h5py.Group)
self.assertEqual(h5_group_1.name, '/Test_001')
for key, expected_val in micro_group_1.attrs.items():
self.assertTrue(np.all(get_attr(h5_group_1, key) == expected_val))
os.remove(file_path)
def test_write_simple_tree(self):
file_path = 'test.h5'
self.__delete_existing_file(file_path)
with h5py.File(file_path) as h5_f:
inner_dset_data = np.random.rand(5, 7)
inner_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
inner_dset = VirtualDataset('inner_dset', inner_dset_data)
inner_dset.attrs = inner_dset_attrs.copy()
attrs_inner_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
inner_group = VirtualGroup('indexed_inner_group_')
inner_group.attrs = attrs_inner_grp
inner_group.add_children(inner_dset)
outer_dset_data = np.random.rand(5, 7)
outer_dset_attrs = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3'],
'labels': {'even_rows': (slice(0, None, 2), slice(None)),
'odd_rows': (slice(1, None, 2), slice(None))}
}
outer_dset = VirtualDataset('test', outer_dset_data, parent='/test_group')
outer_dset.attrs = outer_dset_attrs.copy()
attrs_outer_grp = {'att_1': 'string_val',
'att_2': 1.2345,
'att_3': [1, 2, 3, 4],
'att_4': ['str_1', 'str_2', 'str_3']}
outer_group = VirtualGroup('unindexed_outer_group')
outer_group.attrs = attrs_outer_grp
outer_group.add_children([inner_group, outer_dset])
writer = HDFwriter(h5_f)
h5_refs_list = writer.write(outer_group)
# I don't know of a more elegant way to do this:
[h5_outer_dset] = get_h5_obj_refs([outer_dset.name], h5_refs_list)
[h5_inner_dset] = get_h5_obj_refs([inner_dset.name], h5_refs_list)
[h5_outer_group] = get_h5_obj_refs([outer_group.name], h5_refs_list)
[h5_inner_group] = get_h5_obj_refs(['indexed_inner_group_000'], h5_refs_list)
self.assertIsInstance(h5_outer_dset, h5py.Dataset)
self.assertIsInstance(h5_inner_dset, h5py.Dataset)
self.assertIsInstance(h5_outer_group, h5py.Group)
self.assertIsInstance(h5_inner_group, h5py.Group)
# check assertions for the inner dataset first
self.assertEqual(h5_inner_dset.parent, h5_inner_group)
reg_ref = inner_dset_attrs.pop('labels')
self.assertEqual(len(h5_inner_dset.attrs), len(inner_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in inner_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_inner_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_inner_dset, 'labels')]))
expected_data = [inner_dset_data[:None:2], inner_dset_data[1:None:2]]
written_data = [h5_inner_dset[h5_inner_dset.attrs['even_rows']], h5_inner_dset[h5_inner_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# check assertions for the inner data group next:
self.assertEqual(h5_inner_group.parent, h5_outer_group)
for key, expected_val in attrs_inner_grp.items():
self.assertTrue(np.all(get_attr(h5_inner_group, key) == expected_val))
# check the outer dataset next:
self.assertEqual(h5_outer_dset.parent, h5_outer_group)
reg_ref = outer_dset_attrs.pop('labels')
self.assertEqual(len(h5_outer_dset.attrs), len(outer_dset_attrs) + 1 + len(reg_ref))
for key, expected_val in outer_dset_attrs.items():
self.assertTrue(np.all(get_attr(h5_outer_dset, key) == expected_val))
self.assertTrue(np.all([x in list(reg_ref.keys()) for x in get_attr(h5_outer_dset, 'labels')]))
expected_data = [outer_dset_data[:None:2], outer_dset_data[1:None:2]]
written_data = [h5_outer_dset[h5_outer_dset.attrs['even_rows']],
h5_outer_dset[h5_outer_dset.attrs['odd_rows']]]
for exp, act in zip(expected_data, written_data):
self.assertTrue(np.allclose(exp, act))
# Finally check the outer group:
self.assertEqual(h5_outer_group.parent, h5_f)
for key, expected_val in attrs_outer_grp.items():
self.assertTrue(np.all(get_attr(h5_outer_group, key) == expected_val))
os.remove(file_path)
if __name__ == '__main__':
unittest.main()
| mit |
gmalmquist/pants | src/python/pants/backend/python/interpreter_cache.py | 5 | 8302 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pex.interpreter import PythonIdentity, PythonInterpreter
from pex.package import EggPackage, Package, SourcePackage
from pex.resolver import resolve
from pants.util.dirutil import safe_concurrent_creation, safe_mkdir
# TODO(wickman) Create a safer version of this and add to twitter.common.dirutil
def _safe_link(src, dst):
try:
os.unlink(dst)
except OSError:
pass
os.symlink(src, dst)
class PythonInterpreterCache(object):
@staticmethod
def _matches(interpreter, filters):
return any(interpreter.identity.matches(filt) for filt in filters)
@classmethod
def _matching(cls, interpreters, filters):
for interpreter in interpreters:
if cls._matches(interpreter, filters):
yield interpreter
@classmethod
def select_interpreter(cls, compatibilities, allow_multiple=False):
"""Given a set of interpreters, either return them all if ``allow_multiple`` is ``True``;
otherwise, return the lowest compatible interpreter.
"""
if allow_multiple:
return compatibilities
return [min(compatibilities)] if compatibilities else []
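  # Illustrative note (not from the original source): given compatible CPython 2.7
  # and 3.6 interpreters, select_interpreter([py27, py36]) returns [py27] by
  # default, and both interpreters when allow_multiple=True.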
def __init__(self, python_setup, python_repos, logger=None):
self._python_setup = python_setup
self._python_repos = python_repos
self._cache_dir = python_setup.interpreter_cache_dir
safe_mkdir(self._cache_dir)
self._interpreters = set()
self._logger = logger or (lambda msg: True)
self._default_filters = (python_setup.interpreter_requirement or b'',)
@property
def interpreters(self):
"""Returns the set of cached interpreters."""
return self._interpreters
def _interpreter_from_path(self, path, filters):
interpreter_dir = os.path.basename(path)
identity = PythonIdentity.from_path(interpreter_dir)
try:
executable = os.readlink(os.path.join(path, 'python'))
except OSError:
return None
interpreter = PythonInterpreter(executable, identity)
if self._matches(interpreter, filters):
return self._resolve(interpreter)
return None
def _setup_interpreter(self, interpreter, cache_target_path):
with safe_concurrent_creation(cache_target_path) as safe_path:
os.mkdir(safe_path) # Parent will already have been created by safe_concurrent_creation.
os.symlink(interpreter.binary, os.path.join(safe_path, 'python'))
return self._resolve(interpreter, safe_path)
def _setup_cached(self, filters):
"""Find all currently-cached interpreters."""
for interpreter_dir in os.listdir(self._cache_dir):
path = os.path.join(self._cache_dir, interpreter_dir)
pi = self._interpreter_from_path(path, filters)
if pi:
self._logger('Detected interpreter {}: {}'.format(pi.binary, str(pi.identity)))
self._interpreters.add(pi)
def _setup_paths(self, paths, filters):
"""Find interpreters under paths, and cache them."""
for interpreter in self._matching(PythonInterpreter.all(paths), filters):
identity_str = str(interpreter.identity)
cache_path = os.path.join(self._cache_dir, identity_str)
pi = self._interpreter_from_path(cache_path, filters)
if pi is None:
self._setup_interpreter(interpreter, cache_path)
pi = self._interpreter_from_path(cache_path, filters)
if pi is None:
continue
self._interpreters.add(pi)
def matched_interpreters(self, filters):
"""Given some filters, yield any interpreter that matches at least one of them.
:param filters: A sequence of strings that constrain the interpreter compatibility for this
      cache, using the Requirement-style format, e.g. ``'CPython>=3'``, or just ``['>=2.7','<3']``
      for requirements agnostic to interpreter class.
"""
for match in self._matching(self._interpreters, filters):
yield match
def setup(self, paths=(), force=False, filters=(b'',)):
"""Sets up a cache of python interpreters.
NB: Must be called prior to accessing the ``interpreters`` property or the ``matches`` method.
:param paths: The paths to search for a python interpreter; the system ``PATH`` by default.
:param bool force: When ``True`` the interpreter cache is always re-built.
:param filters: A sequence of strings that constrain the interpreter compatibility for this
      cache, using the Requirement-style format, e.g. ``'CPython>=3'``, or just ``['>=2.7','<3']``
      for requirements agnostic to interpreter class.
"""
filters = self._default_filters if not any(filters) else filters
setup_paths = paths or os.getenv('PATH').split(os.pathsep)
self._setup_cached(filters)
def unsatisfied_filters():
return filter(lambda filt: len(list(self._matching(self._interpreters, [filt]))) == 0, filters)
if force or len(unsatisfied_filters()) > 0:
self._setup_paths(setup_paths, filters)
for filt in unsatisfied_filters():
self._logger('No valid interpreters found for {}!'.format(filt))
matches = list(self.matched_interpreters(filters))
if len(matches) == 0:
self._logger('Found no valid interpreters!')
return matches
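  # Minimal usage sketch (an assumption, not from the original source): the
  # `python_setup`/`python_repos` objects are the subsystem instances Pants
  # normally supplies, and the filter string below is hypothetical.
  #
  #   cache = PythonInterpreterCache(python_setup, python_repos, logger=print)
  #   interpreters = cache.setup(filters=['CPython>=2.7,<3'])
  #   # `interpreters` now holds every cached interpreter matching the filter.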
def _resolve(self, interpreter, interpreter_dir=None):
"""Resolve and cache an interpreter with a setuptools and wheel capability."""
interpreter = self._resolve_interpreter(interpreter, interpreter_dir,
self._python_setup.setuptools_requirement())
if interpreter:
return self._resolve_interpreter(interpreter, interpreter_dir,
self._python_setup.wheel_requirement())
def _resolve_interpreter(self, interpreter, interpreter_dir, requirement):
"""Given a :class:`PythonInterpreter` and a requirement, return an interpreter with the
capability of resolving that requirement or ``None`` if it's not possible to install a
suitable requirement.
If interpreter_dir is unspecified, operates on the default location.
"""
if interpreter.satisfies([requirement]):
return interpreter
if not interpreter_dir:
interpreter_dir = os.path.join(self._cache_dir, str(interpreter.identity))
target_link = os.path.join(interpreter_dir, requirement.key)
bdist = self._resolve_and_link(interpreter, requirement, target_link)
if bdist:
return interpreter.with_extra(bdist.name, bdist.raw_version, bdist.path)
else:
self._logger('Failed to resolve requirement {} for {}'.format(requirement, interpreter))
def _resolve_and_link(self, interpreter, requirement, target_link):
# Short-circuit if there is a local copy.
if os.path.exists(target_link) and os.path.exists(os.path.realpath(target_link)):
bdist = Package.from_href(os.path.realpath(target_link))
if bdist.satisfies(requirement):
return bdist
# Since we're resolving to bootstrap a bare interpreter, we won't have wheel available.
# Explicitly set the precedence to avoid resolution of wheels or distillation of sdists into
# wheels.
precedence = (EggPackage, SourcePackage)
distributions = resolve(requirements=[requirement],
fetchers=self._python_repos.get_fetchers(),
interpreter=interpreter,
context=self._python_repos.get_network_context(),
precedence=precedence)
if not distributions:
return None
assert len(distributions) == 1, ('Expected exactly 1 distribution to be resolved for {}, '
'found:\n\t{}'.format(requirement,
'\n\t'.join(map(str, distributions))))
dist_location = distributions[0].location
target_location = os.path.join(os.path.dirname(target_link), os.path.basename(dist_location))
shutil.move(dist_location, target_location)
_safe_link(target_location, target_link)
self._logger(' installed {}'.format(target_location))
return Package.from_href(target_location)
| apache-2.0 |
Vauxoo/maintainer-tools | tools/set_repo_labels.py | 13 | 2539 | # -*- coding: utf-8 -*-
"""
Create and modify labels on GitHub so that all repositories share the same
labels and the same colors.
"""
from .github_login import login
REPO_TO_IGNORE = [
'odoo-community.org',
'community-data-files',
'contribute-md-template',
'website',
]
# here is the list of labels we need in each repo
all_labels = {
'7.0': '000000',
'8.0': '000000',
'bug': 'fc2929',
'duplicate': 'cccccc',
'enhancement': '84b6eb',
'help wanted': '159818',
'invalid': 'e6e6e6',
'question': 'cc317c',
'needs fixing': 'eb6420',
'needs review': 'fbca04',
'work in progress': '0052cc',
'wontfix': 'ffffff',
}
def main():
gh = login()
all_repos = gh.iter_user_repos('OCA')
for repo in all_repos:
if repo.name in REPO_TO_IGNORE:
continue
labels = repo.iter_labels()
existing_labels = dict((l.name, l.color) for l in labels)
to_create = []
to_change_color = []
for needed_label in all_labels:
if needed_label not in existing_labels.keys():
to_create.append(needed_label)
elif existing_labels[needed_label] != all_labels[needed_label]:
to_change_color.append(needed_label)
extra_labels = [l for l in existing_labels if l not in all_labels]
if to_create:
print ('Repo %s - Create %s missing labels'
% (repo.name, len(to_create)))
for label_name in to_create:
success = repo.create_label(label_name, all_labels[label_name])
if not success:
print ("Failed to create a label on '%s'!"
" Please check you access right to this repository."
% repo.name)
if to_change_color:
print ('Repo %s - Update %s labels with wrong color'
% (repo.name, len(to_change_color)))
for label_name in to_change_color:
success = repo.update_label(label_name, all_labels[label_name])
if not success:
print ("Failed to update a label on '%s'!"
" Please check you access right to this repository."
% repo.name)
if extra_labels:
print ('Repo %s - Found %s extra labels'
% (repo.name, len(extra_labels)))
for label_name in extra_labels:
                print(label_name)
if __name__ == '__main__':
main()
| agpl-3.0 |
ppizarror/Hero-of-Antair | data/images/pil/ImageChops.py | 2 | 7410 | #
# The Python Imaging Library.
# $Id$
#
# standard channel operations
#
# History:
# 1996-03-24 fl Created
# 1996-08-13 fl Added logical operations (for "1" images)
# 2000-10-12 fl Added offset method (from Image.py)
#
# Copyright (c) 1997-2000 by Secret Labs AB
# Copyright (c) 1996-2000 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
import Image
##
# The <b>ImageChops</b> module contains a number of arithmetical image
# operations, called <i>channel operations</i> ("chops"). These can be
# used for various purposes, including special effects, image
# compositions, algorithmic painting, and more.
# <p>
# At this time, channel operations are only implemented for 8-bit
# images (e.g. "L" and "RGB").
# <p>
# Most channel operations take one or two image arguments and return
# a new image. Unless otherwise noted, the result of a channel
# operation is always clipped to the range 0 to MAX (which is 255 for
# all modes supported by the operations in this module).
##
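#
# A small usage sketch (not part of the original module); the image file names
# below are hypothetical:
#
#     import Image, ImageChops
#     im1 = Image.open("a.png").convert("L")
#     im2 = Image.open("b.png").convert("L")
#     diff = ImageChops.difference(im1, im2)  # abs(im1 - im2)
#     dark = ImageChops.darker(im1, im2)      # per-pixel minimum
#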
##
# Return an image with the same size as the given image, but filled
# with the given pixel value.
#
# @param image Reference image.
# @param value Pixel value.
# @return An image object.
def constant(image, value):
"Fill a channel with a given grey level"
return Image.new("L", image.size, value)
##
# Copy image.
#
# @param image Source image.
# @return A copy of the source image.
def duplicate(image):
"Create a copy of a channel"
return image.copy()
##
# Inverts an image
# (MAX - image).
#
# @param image Source image.
# @return An image object.
def invert(image):
"Invert a channel"
image.load()
return image._new(image.im.chop_invert())
##
# Compare images, and return lighter pixel value
# (max(image1, image2)).
# <p>
# Compares the two images, pixel by pixel, and returns a new image
# containing the lighter values.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def lighter(image1, image2):
"Select the lighter pixels from each image"
image1.load()
image2.load()
return image1._new(image1.im.chop_lighter(image2.im))
##
# Compare images, and return darker pixel value
# (min(image1, image2)).
# <p>
# Compares the two images, pixel by pixel, and returns a new image
# containing the darker values.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def darker(image1, image2):
"Select the darker pixels from each image"
image1.load()
image2.load()
return image1._new(image1.im.chop_darker(image2.im))
##
# Calculate absolute difference
# (abs(image1 - image2)).
# <p>
# Returns the absolute value of the difference between the two images.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def difference(image1, image2):
"Subtract one image from another"
image1.load()
image2.load()
return image1._new(image1.im.chop_difference(image2.im))
##
# Superimpose positive images
# (image1 * image2 / MAX).
# <p>
# Superimposes two images on top of each other. If you multiply an
# image with a solid black image, the result is black. If you multiply
# with a solid white image, the image is unaffected.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def multiply(image1, image2):
"Superimpose two positive images"
image1.load()
image2.load()
return image1._new(image1.im.chop_multiply(image2.im))
##
# Superimpose negative images
# (MAX - ((MAX - image1) * (MAX - image2) / MAX)).
# <p>
# Superimposes two inverted images on top of each other.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def screen(image1, image2):
"Superimpose two negative images"
image1.load()
image2.load()
return image1._new(image1.im.chop_screen(image2.im))
##
# Add images
# ((image1 + image2) / scale + offset).
# <p>
# Adds two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def add(image1, image2, scale=1.0, offset=0):
"Add two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_add(image2.im, scale, offset))
##
# Subtract images
# ((image1 - image2) / scale + offset).
# <p>
# Subtracts two images, dividing the result by scale and adding the
# offset. If omitted, scale defaults to 1.0, and offset to 0.0.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def subtract(image1, image2, scale=1.0, offset=0):
"Subtract two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract(image2.im, scale, offset))
##
# Add images without clipping
# ((image1 + image2) % MAX).
# <p>
# Adds two images, without clipping the result.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def add_modulo(image1, image2):
"Add two images without clipping"
image1.load()
image2.load()
return image1._new(image1.im.chop_add_modulo(image2.im))
##
# Subtract images without clipping
# ((image1 - image2) % MAX).
# <p>
# Subtracts two images, without clipping the result.
#
# @param image1 First image.
# @param image2 Second image.
# @return An image object.
def subtract_modulo(image1, image2):
"Subtract two images without clipping"
image1.load()
image2.load()
return image1._new(image1.im.chop_subtract_modulo(image2.im))
##
# Logical AND
# (image1 and image2).
def logical_and(image1, image2):
"Logical and between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_and(image2.im))
##
# Logical OR
# (image1 or image2).
def logical_or(image1, image2):
"Logical or between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_or(image2.im))
##
# Logical XOR
# (image1 xor image2).
def logical_xor(image1, image2):
"Logical xor between two images"
image1.load()
image2.load()
return image1._new(image1.im.chop_xor(image2.im))
##
# Blend images using constant transparency weight.
# <p>
# Same as the <b>blend</b> function in the <b>Image</b> module.
def blend(image1, image2, alpha):
"Blend two images using a constant transparency weight"
return Image.blend(image1, image2, alpha)
##
# Create composite using transparency mask.
# <p>
# Same as the <b>composite</b> function in the <b>Image</b> module.
def composite(image1, image2, mask):
"Create composite image by blending images using a transparency mask"
return Image.composite(image1, image2, mask)
##
# Offset image data.
# <p>
# Returns a copy of the image where data has been offset by the given
# distances. Data wraps around the edges. If yoffset is omitted, it
# is assumed to be equal to xoffset.
#
# @param image Source image.
# @param xoffset The horizontal distance.
# @param yoffset The vertical distance. If omitted, both
# distances are set to the same value.
# @return An Image object.
def offset(image, xoffset, yoffset=None):
"Offset image in horizontal and/or vertical direction"
if yoffset is None:
yoffset = xoffset
image.load()
return image._new(image.im.offset(xoffset, yoffset))
| gpl-2.0 |
BMJHayward/numpy | numpy/polynomial/hermite_e.py | 49 | 57120 | """
Objects for dealing with Hermite_e series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite_e series, including a `HermiteE` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermedomain` -- Hermite_e series default domain, [-1,1].
- `hermezero` -- Hermite_e series that evaluates identically to 0.
- `hermeone` -- Hermite_e series that evaluates identically to 1.
- `hermex` -- Hermite_e series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermemulx` -- multiply a Hermite_e series in ``P_i(x)`` by ``x``.
- `hermeadd` -- add two Hermite_e series.
- `hermesub` -- subtract one Hermite_e series from another.
- `hermemul` -- multiply two Hermite_e series.
- `hermediv` -- divide one Hermite_e series by another.
- `hermeval` -- evaluate a Hermite_e series at given points.
- `hermeval2d` -- evaluate a 2D Hermite_e series at given points.
- `hermeval3d` -- evaluate a 3D Hermite_e series at given points.
- `hermegrid2d` -- evaluate a 2D Hermite_e series on a Cartesian product.
- `hermegrid3d` -- evaluate a 3D Hermite_e series on a Cartesian product.
Calculus
--------
- `hermeder` -- differentiate a Hermite_e series.
- `hermeint` -- integrate a Hermite_e series.
Misc Functions
--------------
- `hermefromroots` -- create a Hermite_e series with specified roots.
- `hermeroots` -- find the roots of a Hermite_e series.
- `hermevander` -- Vandermonde-like matrix for Hermite_e polynomials.
- `hermevander2d` -- Vandermonde-like matrix for 2D power series.
- `hermevander3d` -- Vandermonde-like matrix for 3D power series.
- `hermegauss` -- Gauss-Hermite_e quadrature, points and weights.
- `hermeweight` -- Hermite_e weight function.
- `hermecompanion` -- symmetrized companion matrix in Hermite_e form.
- `hermefit` -- least-squares fit returning a Hermite_e series.
- `hermetrim` -- trim leading coefficients from a Hermite_e series.
- `hermeline` -- Hermite_e series of given straight line.
- `herme2poly` -- convert a Hermite_e series to a polynomial.
- `poly2herme` -- convert a polynomial to a Hermite_e series.
Classes
-------
- `HermiteE` -- A Hermite_e series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
import numpy.linalg as la
from . import polyutils as pu
from ._polybase import ABCPolyBase
__all__ = [
'hermezero', 'hermeone', 'hermex', 'hermedomain', 'hermeline',
'hermeadd', 'hermesub', 'hermemulx', 'hermemul', 'hermediv',
'hermepow', 'hermeval', 'hermeder', 'hermeint', 'herme2poly',
'poly2herme', 'hermefromroots', 'hermevander', 'hermefit', 'hermetrim',
'hermeroots', 'HermiteE', 'hermeval2d', 'hermeval3d', 'hermegrid2d',
'hermegrid3d', 'hermevander2d', 'hermevander3d', 'hermecompanion',
'hermegauss', 'hermeweight']
hermetrim = pu.trimcoef
def poly2herme(pol):
"""
poly2herme(pol)
Convert a polynomial to a Hermite series.
Convert an array representing the coefficients of a polynomial (relative
to the "standard" basis) ordered from lowest degree to highest, to an
array of the coefficients of the equivalent Hermite series, ordered
from lowest to highest degree.
Parameters
----------
pol : array_like
1-D array containing the polynomial coefficients
Returns
-------
c : ndarray
1-D array containing the coefficients of the equivalent Hermite
series.
See Also
--------
herme2poly
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import poly2herme
>>> poly2herme(np.arange(4))
array([ 2., 10., 2., 3.])
"""
[pol] = pu.as_series([pol])
deg = len(pol) - 1
res = 0
for i in range(deg, -1, -1):
res = hermeadd(hermemulx(res), pol[i])
return res
def herme2poly(c):
"""
Convert a Hermite series to a polynomial.
Convert an array representing the coefficients of a Hermite series,
ordered from lowest degree to highest, to an array of the coefficients
of the equivalent polynomial (relative to the "standard" basis) ordered
from lowest to highest degree.
Parameters
----------
c : array_like
1-D array containing the Hermite series coefficients, ordered
from lowest order term to highest.
Returns
-------
pol : ndarray
1-D array containing the coefficients of the equivalent polynomial
(relative to the "standard" basis) ordered from lowest order term
to highest.
See Also
--------
poly2herme
Notes
-----
The easy way to do conversions between polynomial basis sets
is to use the convert method of a class instance.
Examples
--------
>>> from numpy.polynomial.hermite_e import herme2poly
>>> herme2poly([ 2., 10., 2., 3.])
array([ 0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
[c] = pu.as_series([c])
n = len(c)
if n == 1:
return c
if n == 2:
return c
else:
c0 = c[-2]
c1 = c[-1]
# i is the current degree of c1
for i in range(n - 1, 1, -1):
tmp = c0
c0 = polysub(c[i - 2], c1*(i - 1))
c1 = polyadd(tmp, polymulx(c1))
return polyadd(c0, polymulx(c1))
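# Illustrative note (not part of the original source): the "convert method of a
# class instance" mentioned in the Notes sections looks roughly like
#
#   >>> from numpy.polynomial import HermiteE, Polynomial
#   >>> HermiteE([2., 10., 2., 3.]).convert(kind=Polynomial)
#   Polynomial([ 0., 1., 2., 3.], ...)
#
# which mirrors herme2poly([2., 10., 2., 3.]) above.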
#
# These constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermedomain = np.array([-1, 1])
# Hermite coefficients representing zero.
hermezero = np.array([0])
# Hermite coefficients representing one.
hermeone = np.array([1])
# Hermite coefficients representing the identity x.
hermex = np.array([0, 1])
def hermeline(off, scl):
"""
Hermite series whose graph is a straight line.
Parameters
----------
off, scl : scalars
The specified line is given by ``off + scl*x``.
Returns
-------
y : ndarray
This module's representation of the Hermite series for
``off + scl*x``.
See Also
--------
polyline, chebline
Examples
--------
    >>> from numpy.polynomial.hermite_e import hermeline, hermeval
>>> hermeval(0,hermeline(3, 2))
3.0
>>> hermeval(1,hermeline(3, 2))
5.0
"""
if scl != 0:
return np.array([off, scl])
else:
return np.array([off])
def hermefromroots(roots):
"""
Generate a HermiteE series with given roots.
The function returns the coefficients of the polynomial
.. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),
in HermiteE form, where the `r_n` are the roots specified in `roots`.
If a zero has multiplicity n, then it must appear in `roots` n times.
For instance, if 2 is a root of multiplicity three and 3 is a root of
multiplicity 2, then `roots` looks something like [2, 2, 2, 3, 3]. The
roots can appear in any order.
If the returned coefficients are `c`, then
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x)
The coefficient of the last term is not generally 1 for monic
polynomials in HermiteE form.
Parameters
----------
roots : array_like
Sequence containing the roots.
Returns
-------
out : ndarray
1-D array of coefficients. If all roots are real then `out` is a
real array, if some of the roots are complex, then `out` is complex
even if all the coefficients in the result are real (see Examples
below).
See Also
--------
polyfromroots, legfromroots, lagfromroots, hermfromroots,
chebfromroots.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
>>> coef = hermefromroots((-1, 0, 1))
>>> hermeval((-1, 0, 1), coef)
array([ 0., 0., 0.])
>>> coef = hermefromroots((-1j, 1j))
>>> hermeval((-1j, 1j), coef)
array([ 0.+0.j, 0.+0.j])
"""
if len(roots) == 0:
return np.ones(1)
else:
[roots] = pu.as_series([roots], trim=False)
roots.sort()
p = [hermeline(-r, 1) for r in roots]
n = len(p)
while n > 1:
m, r = divmod(n, 2)
tmp = [hermemul(p[i], p[i+m]) for i in range(m)]
if r:
tmp[0] = hermemul(tmp[0], p[-1])
p = tmp
n = m
return p[0]
def hermeadd(c1, c2):
"""
Add one Hermite series to another.
Returns the sum of two Hermite series `c1` + `c2`. The arguments
are sequences of coefficients ordered from lowest order term to
highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the Hermite series of their sum.
See Also
--------
hermesub, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the sum of two Hermite series
is a Hermite series (without having to "reproject" the result onto
the basis set) so addition, just like that of "standard" polynomials,
is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeadd
>>> hermeadd([1, 2, 3], [1, 2, 3, 4])
array([ 2., 4., 6., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] += c2
ret = c1
else:
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermesub(c1, c2):
"""
Subtract one Hermite series from another.
Returns the difference of two Hermite series `c1` - `c2`. The
sequences of coefficients are from lowest order term to highest, i.e.,
[1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their difference.
See Also
--------
hermeadd, hermemul, hermediv, hermepow
Notes
-----
Unlike multiplication, division, etc., the difference of two Hermite
series is a Hermite series (without having to "reproject" the result
onto the basis set) so subtraction, just like that of "standard"
polynomials, is simply "component-wise."
Examples
--------
>>> from numpy.polynomial.hermite_e import hermesub
>>> hermesub([1, 2, 3, 4], [1, 2, 3])
array([ 0., 0., 0., 4.])
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c1[:c2.size] -= c2
ret = c1
else:
c2 = -c2
c2[:c1.size] += c1
ret = c2
return pu.trimseq(ret)
def hermemulx(c):
"""Multiply a Hermite series by x.
Multiply the Hermite series `c` by x, where x is the independent
variable.
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Array representing the result of the multiplication.
Notes
-----
The multiplication uses the recursion relationship for Hermite
polynomials in the form
.. math::
        xP_i(x) = P_{i + 1}(x) + iP_{i - 1}(x)
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemulx
>>> hermemulx([1, 2, 3])
array([ 2., 7., 2., 3.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
# The zero series needs special treatment
if len(c) == 1 and c[0] == 0:
return c
prd = np.empty(len(c) + 1, dtype=c.dtype)
prd[0] = c[0]*0
prd[1] = c[0]
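    # Apply the recursion x*He_i(x) = He_{i+1}(x) + i*He_{i-1}(x) term by term.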
for i in range(1, len(c)):
prd[i + 1] = c[i]
prd[i - 1] += c[i]*i
return prd
def hermemul(c1, c2):
"""
Multiply one Hermite series by another.
Returns the product of two Hermite series `c1` * `c2`. The arguments
are sequences of coefficients, from lowest order "term" to highest,
e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
out : ndarray
Of Hermite series coefficients representing their product.
See Also
--------
hermeadd, hermesub, hermediv, hermepow
Notes
-----
    In general, the (polynomial) product of two Hermite series results in terms
that are not in the Hermite polynomial basis set. Thus, to express
the product as a Hermite series, it is necessary to "reproject" the
product onto said basis set, which may produce "unintuitive" (but
correct) results; see Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermemul
>>> hermemul([1, 2, 3], [0, 1, 2])
array([ 14., 15., 28., 7., 6.])
"""
# s1, s2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if len(c1) > len(c2):
c = c2
xs = c1
else:
c = c1
xs = c2
if len(c) == 1:
c0 = c[0]*xs
c1 = 0
elif len(c) == 2:
c0 = c[0]*xs
c1 = c[1]*xs
else:
nd = len(c)
c0 = c[-2]*xs
c1 = c[-1]*xs
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = hermesub(c[-i]*xs, c1*(nd - 1))
c1 = hermeadd(tmp, hermemulx(c1))
return hermeadd(c0, hermemulx(c1))
def hermediv(c1, c2):
"""
Divide one Hermite series by another.
Returns the quotient-with-remainder of two Hermite series
`c1` / `c2`. The arguments are sequences of coefficients from lowest
order "term" to highest, e.g., [1,2,3] represents the series
``P_0 + 2*P_1 + 3*P_2``.
Parameters
----------
c1, c2 : array_like
1-D arrays of Hermite series coefficients ordered from low to
high.
Returns
-------
[quo, rem] : ndarrays
Of Hermite series coefficients representing the quotient and
remainder.
See Also
--------
hermeadd, hermesub, hermemul, hermepow
Notes
-----
In general, the (polynomial) division of one Hermite series by another
results in quotient and remainder terms that are not in the Hermite
polynomial basis set. Thus, to express these results as a Hermite
series, it is necessary to "reproject" the results onto the Hermite
basis set, which may produce "unintuitive" (but correct) results; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermediv
>>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 0.]))
>>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])
(array([ 1., 2., 3.]), array([ 1., 2.]))
"""
# c1, c2 are trimmed copies
[c1, c2] = pu.as_series([c1, c2])
if c2[-1] == 0:
raise ZeroDivisionError()
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
return c1[:1]*0, c1
elif lc2 == 1:
return c1/c2[-1], c1[:1]*0
else:
quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
rem = c1
for i in range(lc1 - lc2, - 1, -1):
p = hermemul([0]*i + [1], c2)
q = rem[-1]/p[-1]
rem = rem[:-1] - q*p[:-1]
quo[i] = q
return quo, pu.trimseq(rem)
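# Editor's illustrative sketch (comment only, not part of the upstream module):
# the quotient and remainder reconstruct the dividend, i.e. c1 == quo*c2 + rem
# in the HermiteE basis, e.g. for the docstring example:
#     quo, rem = hermediv([14., 15., 28., 7., 6.], [0., 1., 2.])
#     np.allclose(hermeadd(hermemul(quo, [0., 1., 2.]), rem),
#                 [14., 15., 28., 7., 6.])   # -> True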
def hermepow(c, pow, maxpower=16):
"""Raise a Hermite series to a power.
Returns the Hermite series `c` raised to the power `pow`. The
argument `c` is a sequence of coefficients ordered from low to high.
i.e., [1,2,3] is the series ``P_0 + 2*P_1 + 3*P_2.``
Parameters
----------
c : array_like
1-D array of Hermite series coefficients ordered from low to
high.
pow : integer
Power to which the series will be raised
maxpower : integer, optional
Maximum power allowed. This is mainly to limit growth of the series
to unmanageable size. Default is 16
Returns
-------
coef : ndarray
Hermite series of power.
See Also
--------
hermeadd, hermesub, hermemul, hermediv
Examples
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
array([ 23., 28., 46., 12., 9.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
if power != pow or power < 0:
raise ValueError("Power must be a non-negative integer.")
elif maxpower is not None and power > maxpower:
raise ValueError("Power is too large")
elif power == 0:
return np.array([1], dtype=c.dtype)
elif power == 1:
return c
else:
# This can be made more efficient by using powers of two
# in the usual way.
prd = c
for i in range(2, power + 1):
prd = hermemul(prd, c)
return prd
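# Editor's illustrative sketch (comment only, not part of the upstream module):
# squaring via hermepow is the same as an explicit self-multiplication:
#     np.allclose(hermepow([1., 2., 3.], 2),
#                 hermemul([1., 2., 3.], [1., 2., 3.]))   # -> True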
def hermeder(c, m=1, scl=1, axis=0):
"""
Differentiate a Hermite_e series.
Returns the series coefficients `c` differentiated `m` times along
`axis`. At each iteration the result is multiplied by `scl` (the
scaling factor is for use in a linear change of variable). The argument
`c` is an array of coefficients from low to high degree along each
axis, e.g., [1,2,3] represents the series ``1*He_0 + 2*He_1 + 3*He_2``
while [[1,2],[1,2]] represents ``1*He_0(x)*He_0(y) + 1*He_1(x)*He_0(y)
+ 2*He_0(x)*He_1(y) + 2*He_1(x)*He_1(y)`` if axis=0 is ``x`` and axis=1
is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If `c` is multidimensional
        the different axes correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Number of derivatives taken, must be non-negative. (Default: 1)
scl : scalar, optional
Each differentiation is multiplied by `scl`. The end result is
multiplication by ``scl**m``. This is for use in a linear change of
variable. (Default: 1)
axis : int, optional
Axis over which the derivative is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
der : ndarray
Hermite series of the derivative.
See Also
--------
hermeint
Notes
-----
In general, the result of differentiating a Hermite series does not
resemble the same operation on a power series. Thus the result of this
function may be "unintuitive," albeit correct; see Examples section
below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeder
>>> hermeder([ 1., 1., 1., 1.])
array([ 1., 2., 3.])
>>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)
array([ 1., 2., 3.])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of derivation must be integer")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
n = len(c)
if cnt >= n:
return c[:1]*0
else:
for i in range(cnt):
n = n - 1
c *= scl
der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
for j in range(n, 0, -1):
der[j - 1] = j*c[j]
c = der
c = np.rollaxis(c, 0, iaxis + 1)
return c
def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
"""
Integrate a Hermite_e series.
Returns the Hermite_e series coefficients `c` integrated `m` times from
`lbnd` along `axis`. At each iteration the resulting series is
**multiplied** by `scl` and an integration constant, `k`, is added.
The scaling factor is for use in a linear change of variable. ("Buyer
beware": note that, depending on what one is doing, one may want `scl`
to be the reciprocal of what one might expect; for more information,
see the Notes section below.) The argument `c` is an array of
coefficients from low to high degree along each axis, e.g., [1,2,3]
represents the series ``H_0 + 2*H_1 + 3*H_2`` while [[1,2],[1,2]]
represents ``1*H_0(x)*H_0(y) + 1*H_1(x)*H_0(y) + 2*H_0(x)*H_1(y) +
2*H_1(x)*H_1(y)`` if axis=0 is ``x`` and axis=1 is ``y``.
Parameters
----------
c : array_like
Array of Hermite_e series coefficients. If c is multidimensional
        the different axes correspond to different variables with the
degree in each axis given by the corresponding index.
m : int, optional
Order of integration, must be positive. (Default: 1)
k : {[], list, scalar}, optional
Integration constant(s). The value of the first integral at
``lbnd`` is the first value in the list, the value of the second
integral at ``lbnd`` is the second value, etc. If ``k == []`` (the
default), all constants are set to zero. If ``m == 1``, a single
scalar can be given instead of a list.
lbnd : scalar, optional
The lower bound of the integral. (Default: 0)
scl : scalar, optional
Following each integration the result is *multiplied* by `scl`
before the integration constant is added. (Default: 1)
axis : int, optional
Axis over which the integral is taken. (Default: 0).
.. versionadded:: 1.7.0
Returns
-------
S : ndarray
Hermite_e series coefficients of the integral.
Raises
------
ValueError
If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
``np.isscalar(scl) == False``.
See Also
--------
hermeder
Notes
-----
Note that the result of each integration is *multiplied* by `scl`.
Why is this important to note? Say one is making a linear change of
variable :math:`u = ax + b` in an integral relative to `x`. Then
    :math:`dx = du/a`, so one will need to set `scl` equal to
:math:`1/a` - perhaps not what one would have first thought.
Also note that, in general, the result of integrating a C-series needs
to be "reprojected" onto the C-series basis set. Thus, typically,
the result of this function is "unintuitive," albeit correct; see
Examples section below.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
array([ 1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
array([ 2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
"""
c = np.array(c, ndmin=1, copy=1)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
cnt, iaxis = [int(t) for t in [m, axis]]
if cnt != m:
raise ValueError("The order of integration must be integer")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
raise ValueError("Too many integration constants")
if iaxis != axis:
raise ValueError("The axis must be integer")
if not -c.ndim <= iaxis < c.ndim:
raise ValueError("The axis is out of range")
if iaxis < 0:
iaxis += c.ndim
if cnt == 0:
return c
c = np.rollaxis(c, iaxis)
k = list(k) + [0]*(cnt - len(k))
for i in range(cnt):
n = len(c)
c *= scl
if n == 1 and np.all(c[0] == 0):
c[0] += k[i]
else:
tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
tmp[0] = c[0]*0
tmp[1] = c[0]
for j in range(1, n):
tmp[j + 1] = c[j]/(j + 1)
tmp[0] += k[i] - hermeval(lbnd, tmp)
c = tmp
c = np.rollaxis(c, 0, iaxis + 1)
return c
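# Editor's illustrative sketch (comment only, not part of the upstream module):
# with the default integration constant, hermeder undoes hermeint (the
# converse only holds up to the constant term lost in differentiation):
#     c = [1., 2., 3.]
#     np.allclose(hermeder(hermeint(c)), c)   # -> True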
def hermeval(x, c, tensor=True):
"""
Evaluate an HermiteE series at points x.
If `c` is of length `n + 1`, this function returns the value:
.. math:: p(x) = c_0 * He_0(x) + c_1 * He_1(x) + ... + c_n * He_n(x)
The parameter `x` is converted to an array only if it is a tuple or a
list, otherwise it is treated as a scalar. In either case, either `x`
or its elements must support multiplication and addition both with
themselves and with the elements of `c`.
If `c` is a 1-D array, then `p(x)` will have the same shape as `x`. If
`c` is multidimensional, then the shape of the result depends on the
value of `tensor`. If `tensor` is true the shape will be c.shape[1:] +
x.shape. If `tensor` is false the shape will be c.shape[1:]. Note that
scalars have shape (,).
Trailing zeros in the coefficients will be used in the evaluation, so
they should be avoided if efficiency is a concern.
Parameters
----------
x : array_like, compatible object
If `x` is a list or tuple, it is converted to an ndarray, otherwise
it is left unchanged and treated as a scalar. In either case, `x`
or its elements must support addition and multiplication with
        themselves and with the elements of `c`.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree n are contained in c[n]. If `c` is multidimensional the
remaining indices enumerate multiple polynomials. In the two
dimensional case the coefficients may be thought of as stored in
the columns of `c`.
tensor : boolean, optional
If True, the shape of the coefficient array is extended with ones
on the right, one for each dimension of `x`. Scalars have dimension 0
for this action. The result is that every column of coefficients in
`c` is evaluated for every element of `x`. If False, `x` is broadcast
over the columns of `c` for the evaluation. This keyword is useful
when `c` is multidimensional. The default value is True.
.. versionadded:: 1.7.0
Returns
-------
values : ndarray, algebra_like
The shape of the return value is described above.
See Also
--------
hermeval2d, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
The evaluation uses Clenshaw recursion, aka synthetic division.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeval
>>> coef = [1,2,3]
>>> hermeval(1, coef)
3.0
>>> hermeval([[1,2],[3,4]], coef)
array([[ 3., 14.],
[ 31., 54.]])
"""
c = np.array(c, ndmin=1, copy=0)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
x = np.asarray(x)
if isinstance(x, np.ndarray) and tensor:
c = c.reshape(c.shape + (1,)*x.ndim)
if len(c) == 1:
c0 = c[0]
c1 = 0
elif len(c) == 2:
c0 = c[0]
c1 = c[1]
else:
nd = len(c)
c0 = c[-2]
c1 = c[-1]
for i in range(3, len(c) + 1):
tmp = c0
nd = nd - 1
c0 = c[-i] - c1*(nd - 1)
c1 = tmp + c1*x
return c0 + c1*x
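# Editor's illustrative sketch (comment only, not part of the upstream module):
# these are the probabilists' Hermite polynomials, so the coefficient vector
# of He_2 alone evaluates to x**2 - 1:
#     x = np.linspace(-3., 3., 13)
#     np.allclose(hermeval(x, [0., 0., 1.]), x**2 - 1)   # -> True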
def hermeval2d(x, y, c):
"""
Evaluate a 2-D HermiteE series at points (x, y).
This function returns the values:
.. math:: p(x,y) = \\sum_{i,j} c_{i,j} * He_i(x) * He_j(y)
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars and they
must have the same shape after conversion. In either case, either `x`
and `y` or their elements must support multiplication and addition both
with themselves and with the elements of `c`.
If `c` is a 1-D array a one is implicitly appended to its shape to make
it 2-D. The shape of the result will be c.shape[2:] + x.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points `(x, y)`,
where `x` and `y` must have the same shape. If `x` or `y` is a list
or tuple, it is first converted to an ndarray, otherwise it is left
unchanged and if it isn't an ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term
of multi-degree i,j is contained in ``c[i,j]``. If `c` has
dimension greater than two the remaining indices enumerate multiple
sets of coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points formed with
pairs of corresponding values from `x` and `y`.
See Also
--------
hermeval, hermegrid2d, hermeval3d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y = np.array((x, y), copy=0)
except:
raise ValueError('x, y are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
return c
def hermegrid2d(x, y, c):
"""
Evaluate a 2-D HermiteE series on the Cartesian product of x and y.
This function returns the values:
.. math:: p(a,b) = \sum_{i,j} c_{i,j} * H_i(a) * H_j(b)
where the points `(a, b)` consist of all pairs formed by taking
`a` from `x` and `b` from `y`. The resulting points form a grid with
`x` in the first dimension and `y` in the second.
The parameters `x` and `y` are converted to arrays only if they are
    tuples or lists, otherwise they are treated as scalars. In either
case, either `x` and `y` or their elements must support multiplication
and addition both with themselves and with the elements of `c`.
If `c` has fewer than two dimensions, ones are implicitly appended to
    its shape to make it 2-D. The shape of the result will be c.shape[2:] +
    x.shape + y.shape.
Parameters
----------
x, y : array_like, compatible objects
The two dimensional series is evaluated at the points in the
Cartesian product of `x` and `y`. If `x` or `y` is a list or
tuple, it is first converted to an ndarray, otherwise it is left
unchanged and, if it isn't an ndarray, it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficients for terms of
degree i,j are contained in ``c[i,j]``. If `c` has dimension
greater than two the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the two dimensional polynomial at points in the Cartesian
product of `x` and `y`.
See Also
--------
hermeval, hermeval2d, hermeval3d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermeval(x, c)
c = hermeval(y, c)
return c
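# Editor's illustrative sketch (comment only, not part of the upstream module):
# hermegrid2d evaluates on the Cartesian product, so entry [i, j] equals the
# paired-point evaluation from hermeval2d at (x[i], y[j]):
#     x, y = np.array([0., 1.]), np.array([-1., 0., 2.])
#     c = np.arange(6.).reshape(2, 3)
#     np.allclose(hermegrid2d(x, y, c)[1, 2],
#                 hermeval2d(x[1], y[2], c))   # -> True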
def hermeval3d(x, y, z, c):
"""
Evaluate a 3-D Hermite_e series at points (x, y, z).
This function returns the values:
.. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * He_i(x) * He_j(y) * He_k(z)
The parameters `x`, `y`, and `z` are converted to arrays only if
    they are tuples or lists, otherwise they are treated as scalars and
they must have the same shape after conversion. In either case, either
`x`, `y`, and `z` or their elements must support multiplication and
addition both with themselves and with the elements of `c`.
If `c` has fewer than 3 dimensions, ones are implicitly appended to its
shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape.
Parameters
----------
x, y, z : array_like, compatible object
The three dimensional series is evaluated at the points
`(x, y, z)`, where `x`, `y`, and `z` must have the same shape. If
any of `x`, `y`, or `z` is a list or tuple, it is first converted
to an ndarray, otherwise it is left unchanged and if it isn't an
ndarray it is treated as a scalar.
c : array_like
Array of coefficients ordered so that the coefficient of the term of
multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has dimension
greater than 3 the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
The values of the multidimensional polynomial on points formed with
triples of corresponding values from `x`, `y`, and `z`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermegrid3d
Notes
-----
.. versionadded::1.7.0
"""
try:
x, y, z = np.array((x, y, z), copy=0)
except:
raise ValueError('x, y, z are incompatible')
c = hermeval(x, c)
c = hermeval(y, c, tensor=False)
c = hermeval(z, c, tensor=False)
return c
def hermegrid3d(x, y, z, c):
"""
Evaluate a 3-D HermiteE series on the Cartesian product of x, y, and z.
This function returns the values:
.. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * He_i(a) * He_j(b) * He_k(c)
where the points `(a, b, c)` consist of all triples formed by taking
`a` from `x`, `b` from `y`, and `c` from `z`. The resulting points form
a grid with `x` in the first dimension, `y` in the second, and `z` in
the third.
The parameters `x`, `y`, and `z` are converted to arrays only if they
    are tuples or lists, otherwise they are treated as scalars. In
either case, either `x`, `y`, and `z` or their elements must support
multiplication and addition both with themselves and with the elements
of `c`.
If `c` has fewer than three dimensions, ones are implicitly appended to
its shape to make it 3-D. The shape of the result will be c.shape[3:] +
x.shape + y.shape + z.shape.
Parameters
----------
x, y, z : array_like, compatible objects
The three dimensional series is evaluated at the points in the
Cartesian product of `x`, `y`, and `z`. If `x`,`y`, or `z` is a
list or tuple, it is first converted to an ndarray, otherwise it is
left unchanged and, if it isn't an ndarray, it is treated as a
scalar.
c : array_like
        Array of coefficients ordered so that the coefficients for terms of
        degree i,j,k are contained in ``c[i,j,k]``. If `c` has dimension
        greater than three the remaining indices enumerate multiple sets of
coefficients.
Returns
-------
values : ndarray, compatible object
        The values of the three dimensional polynomial at points in the
        Cartesian product of `x`, `y`, and `z`.
See Also
--------
hermeval, hermeval2d, hermegrid2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
c = hermeval(x, c)
c = hermeval(y, c)
c = hermeval(z, c)
return c
def hermevander(x, deg):
"""Pseudo-Vandermonde matrix of given degree.
Returns the pseudo-Vandermonde matrix of degree `deg` and sample points
`x`. The pseudo-Vandermonde matrix is defined by
.. math:: V[..., i] = He_i(x),
where `0 <= i <= deg`. The leading indices of `V` index the elements of
`x` and the last index is the degree of the HermiteE polynomial.
If `c` is a 1-D array of coefficients of length `n + 1` and `V` is the
array ``V = hermevander(x, n)``, then ``np.dot(V, c)`` and
``hermeval(x, c)`` are the same up to roundoff. This equivalence is
useful both for least squares fitting and for the evaluation of a large
number of HermiteE series of the same degree and sample points.
Parameters
----------
x : array_like
Array of points. The dtype is converted to float64 or complex128
depending on whether any of the elements are complex. If `x` is
scalar it is converted to a 1-D array.
deg : int
Degree of the resulting matrix.
Returns
-------
vander : ndarray
The pseudo-Vandermonde matrix. The shape of the returned matrix is
        ``x.shape + (deg + 1,)``, where the last index is the degree of the
corresponding HermiteE polynomial. The dtype will be the same as
the converted `x`.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermevander
>>> x = np.array([-1, 0, 1])
>>> hermevander(x, 3)
array([[ 1., -1., 0., 2.],
[ 1., 0., -1., -0.],
[ 1., 1., 0., -2.]])
"""
ideg = int(deg)
if ideg != deg:
raise ValueError("deg must be integer")
if ideg < 0:
raise ValueError("deg must be non-negative")
x = np.array(x, copy=0, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
v[0] = x*0 + 1
if ideg > 0:
v[1] = x
for i in range(2, ideg + 1):
v[i] = (v[i-1]*x - v[i-2]*(i - 1))
return np.rollaxis(v, 0, v.ndim)
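# Editor's illustrative sketch (comment only, not part of the upstream module):
# as stated in the docstring, the pseudo-Vandermonde matrix applied to a
# coefficient vector reproduces hermeval up to roundoff:
#     x = np.linspace(-1., 1., 5)
#     c = [1., 2., 3., 4.]
#     np.allclose(np.dot(hermevander(x, 3), c), hermeval(x, c))   # -> True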
def hermevander2d(x, y, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y)`. The pseudo-Vandermonde matrix is defined by
    .. math:: V[..., (deg[1] + 1)*i + j] = He_i(x) * He_j(y),
where `0 <= i <= deg[0]` and `0 <= j <= deg[1]`. The leading indices of
`V` index the points `(x, y)` and the last index encodes the degrees of
the HermiteE polynomials.
If ``V = hermevander2d(x, y, [xdeg, ydeg])``, then the columns of `V`
correspond to the elements of a 2-D coefficient array `c` of shape
(xdeg + 1, ydeg + 1) in the order
.. math:: c_{00}, c_{01}, c_{02} ... , c_{10}, c_{11}, c_{12} ...
and ``np.dot(V, c.flat)`` and ``hermeval2d(x, y, c)`` will be the same
up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 2-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y : array_like
Arrays of point coordinates, all of the same shape. The dtypes
will be converted to either float64 or complex128 depending on
whether any of the elements are complex. Scalars are converted to
1-D arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg].
Returns
-------
vander2d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
as the converted `x` and `y`.
See Also
--------
    hermevander, hermevander3d, hermeval2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy = ideg
x, y = np.array((x, y), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
v = vx[..., None]*vy[..., None,:]
return v.reshape(v.shape[:-2] + (-1,))
def hermevander3d(x, y, z, deg):
"""Pseudo-Vandermonde matrix of given degrees.
Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
points `(x, y, z)`. If `l, m, n` are the given degrees in `x, y, z`,
    then the pseudo-Vandermonde matrix is defined by
.. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = He_i(x)*He_j(y)*He_k(z),
    where `0 <= i <= l`, `0 <= j <= m`, and `0 <= k <= n`. The leading
indices of `V` index the points `(x, y, z)` and the last index encodes
the degrees of the HermiteE polynomials.
If ``V = hermevander3d(x, y, z, [xdeg, ydeg, zdeg])``, then the columns
of `V` correspond to the elements of a 3-D coefficient array `c` of
shape (xdeg + 1, ydeg + 1, zdeg + 1) in the order
.. math:: c_{000}, c_{001}, c_{002},... , c_{010}, c_{011}, c_{012},...
and ``np.dot(V, c.flat)`` and ``hermeval3d(x, y, z, c)`` will be the
same up to roundoff. This equivalence is useful both for least squares
fitting and for the evaluation of a large number of 3-D HermiteE
series of the same degrees and sample points.
Parameters
----------
x, y, z : array_like
Arrays of point coordinates, all of the same shape. The dtypes will
be converted to either float64 or complex128 depending on whether
any of the elements are complex. Scalars are converted to 1-D
arrays.
deg : list of ints
List of maximum degrees of the form [x_deg, y_deg, z_deg].
Returns
-------
vander3d : ndarray
The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
be the same as the converted `x`, `y`, and `z`.
See Also
--------
    hermevander, hermevander2d, hermeval2d, hermeval3d
Notes
-----
.. versionadded::1.7.0
"""
ideg = [int(d) for d in deg]
is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
if is_valid != [1, 1, 1]:
raise ValueError("degrees must be non-negative integers")
degx, degy, degz = ideg
x, y, z = np.array((x, y, z), copy=0) + 0.0
vx = hermevander(x, degx)
vy = hermevander(y, degy)
vz = hermevander(z, degz)
v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
return v.reshape(v.shape[:-3] + (-1,))
def hermefit(x, y, deg, rcond=None, full=False, w=None):
"""
Least squares fit of Hermite series to data.
Return the coefficients of a HermiteE series of degree `deg` that is
the least squares fit to the data values `y` given at points `x`. If
`y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
multiple fits are done, one for each column of `y`, and the resulting
coefficients are stored in the corresponding columns of a 2-D return.
The fitted polynomial(s) are in the form
.. math:: p(x) = c_0 + c_1 * He_1(x) + ... + c_n * He_n(x),
where `n` is `deg`.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (`M`,), optional
Weights. If not None, the contribution of each point
``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
weights are chosen so that the errors of the products ``w[i]*y[i]``
all have the same variance. The default value is None.
Returns
-------
    coef : ndarray, shape (deg + 1,) or (deg + 1, K)
Hermite coefficients ordered from low to high. If `y` was 2-D,
the coefficients for the data in column k of `y` are in column
`k`.
[residuals, rank, singular_values, rcond] : list
These values are only returned if `full` = True
resid -- sum of squared residuals of the least squares fit
rank -- the numerical rank of the scaled Vandermonde matrix
sv -- singular values of the scaled Vandermonde matrix
rcond -- value of `rcond`.
For more details, see `linalg.lstsq`.
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False. The
warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', RankWarning)
See Also
--------
    chebfit, legfit, polyfit, hermfit
hermeval : Evaluates a Hermite series.
hermevander : pseudo Vandermonde matrix of Hermite series.
hermeweight : HermiteE weight function.
linalg.lstsq : Computes a least-squares fit from the matrix.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution is the coefficients of the HermiteE series `p` that
minimizes the sum of the weighted squared errors
.. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2,
where the :math:`w_j` are the weights. This problem is solved by
setting up the (typically) overdetermined matrix equation
.. math:: V(x) * c = w * y,
where `V` is the pseudo Vandermonde matrix of `x`, the elements of `c`
are the coefficients to be solved for, and the elements of `y` are the
observed values. This equation is then solved using the singular value
decomposition of `V`.
If some of the singular values of `V` are so small that they are
neglected, then a `RankWarning` will be issued. This means that the
coefficient values may be poorly determined. Using a lower order fit
will usually get rid of the warning. The `rcond` parameter can also be
set to a value smaller than its default, but the resulting fit may be
spurious and have large contributions from roundoff error.
Fits using HermiteE series are probably most useful when the data can
be approximated by ``sqrt(w(x)) * p(x)``, where `w(x)` is the HermiteE
    weight. In that case the weight ``sqrt(w(x[i]))`` should be used
    together with data values ``y[i]/sqrt(w(x[i]))``. The weight function is
available as `hermeweight`.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
Examples
--------
    >>> from numpy.polynomial.hermite_e import hermefit, hermeval
>>> x = np.linspace(-10, 10)
>>> err = np.random.randn(len(x))/10
>>> y = hermeval(x, [1, 2, 3]) + err
>>> hermefit(x, y, 2)
array([ 1.01690445, 1.99951418, 2.99948696])
"""
order = int(deg) + 1
x = np.asarray(x) + 0.0
y = np.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if len(x) != len(y):
raise TypeError("expected x and y to have same length")
# set up the least squares matrices in transposed form
lhs = hermevander(x, deg).T
rhs = y.T
if w is not None:
w = np.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected 1D vector for w")
if len(x) != len(w):
raise TypeError("expected x and w to have same length")
# apply weights. Don't use inplace operations as they
# can cause problems with NA.
lhs = lhs * w
rhs = rhs * w
# set rcond
if rcond is None:
rcond = len(x)*np.finfo(x.dtype).eps
# Determine the norms of the design matrix columns.
if issubclass(lhs.dtype.type, np.complexfloating):
scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
else:
scl = np.sqrt(np.square(lhs).sum(1))
scl[scl == 0] = 1
# Solve the least squares problem.
c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
c = (c.T/scl).T
# warn on rank reduction
if rank != order and not full:
msg = "The fit may be poorly conditioned"
warnings.warn(msg, pu.RankWarning)
if full:
return c, [resids, rank, s, rcond]
else:
return c
def hermecompanion(c):
"""
Return the scaled companion matrix of c.
The basis polynomials are scaled so that the companion matrix is
symmetric when `c` is an HermiteE basis polynomial. This provides
better eigenvalue estimates than the unscaled case and for basis
polynomials the eigenvalues are guaranteed to be real if
`numpy.linalg.eigvalsh` is used to obtain them.
Parameters
----------
c : array_like
1-D array of HermiteE series coefficients ordered from low to high
degree.
Returns
-------
mat : ndarray
Scaled companion matrix of dimensions (deg, deg).
Notes
-----
.. versionadded::1.7.0
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) < 2:
raise ValueError('Series must have maximum degree of at least 1.')
if len(c) == 2:
return np.array([[-c[0]/c[1]]])
n = len(c) - 1
mat = np.zeros((n, n), dtype=c.dtype)
scl = np.hstack((1., 1./np.sqrt(np.arange(n - 1, 0, -1))))
scl = np.multiply.accumulate(scl)[::-1]
top = mat.reshape(-1)[1::n+1]
bot = mat.reshape(-1)[n::n+1]
top[...] = np.sqrt(np.arange(1, n))
bot[...] = top
mat[:, -1] -= scl*c[:-1]/c[-1]
return mat
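# Editor's illustrative sketch (comment only, not part of the upstream module):
# the eigenvalues of the scaled companion matrix are the series roots, which
# is exactly how hermeroots (defined below) computes them:
#     c = [0., 2., 0., 1.]                      # hermefromroots([-1, 0, 1])
#     np.sort(la.eigvals(hermecompanion(c)))    # approximately [-1., 0., 1.]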
def hermeroots(c):
"""
Compute the roots of a HermiteE series.
Return the roots (a.k.a. "zeros") of the polynomial
.. math:: p(x) = \\sum_i c[i] * He_i(x).
Parameters
----------
c : 1-D array_like
1-D array of coefficients.
Returns
-------
out : ndarray
Array of the roots of the series. If all the roots are real,
then `out` is also real, otherwise it is complex.
See Also
--------
polyroots, legroots, lagroots, hermroots, chebroots
Notes
-----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin of the complex plane may have large
errors due to the numerical instability of the series for such
values. Roots with multiplicity greater than 1 will also show larger
errors as the value of the series near such points is relatively
insensitive to errors in the roots. Isolated roots near the origin can
be improved by a few iterations of Newton's method.
The HermiteE series basis polynomials aren't powers of `x` so the
results of this function may seem unintuitive.
Examples
--------
>>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
>>> coef = hermefromroots([-1, 0, 1])
>>> coef
array([ 0., 2., 0., 1.])
>>> hermeroots(coef)
array([-1., 0., 1.])
"""
# c is a trimmed copy
[c] = pu.as_series([c])
if len(c) <= 1:
return np.array([], dtype=c.dtype)
if len(c) == 2:
return np.array([-c[0]/c[1]])
m = hermecompanion(c)
r = la.eigvals(m)
r.sort()
return r
def _normed_hermite_e_n(x, n):
"""
Evaluate a normalized HermiteE polynomial.
Compute the value of the normalized HermiteE polynomial of degree ``n``
at the points ``x``.
Parameters
----------
x : ndarray of double.
Points at which to evaluate the function
n : int
Degree of the normalized HermiteE function to be evaluated.
Returns
-------
values : ndarray
The shape of the return value is described above.
Notes
-----
.. versionadded:: 1.10.0
This function is needed for finding the Gauss points and integration
weights for high degrees. The values of the standard HermiteE functions
overflow when n >= 207.
"""
if n == 0:
return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(2*np.pi))
nd = float(n)
for i in range(n - 1):
tmp = c0
c0 = -c1*np.sqrt((nd - 1.)/nd)
c1 = tmp + c1*x*np.sqrt(1./nd)
nd = nd - 1.0
return c0 + c1*x
def hermegauss(deg):
"""
Gauss-HermiteE quadrature.
Computes the sample points and weights for Gauss-HermiteE quadrature.
These sample points and weights will correctly integrate polynomials of
    degree :math:`2*deg - 1` or less over the interval :math:`[-\infty, \infty]`
with the weight function :math:`f(x) = \exp(-x^2/2)`.
Parameters
----------
deg : int
Number of sample points and weights. It must be >= 1.
Returns
-------
x : ndarray
1-D ndarray containing the sample points.
    w : ndarray
1-D ndarray containing the weights.
Notes
-----
.. versionadded::1.7.0
    The results have only been tested up to degree 100; higher degrees may
be problematic. The weights are determined by using the fact that
.. math:: w_k = c / (He'_n(x_k) * He_{n-1}(x_k))
where :math:`c` is a constant independent of :math:`k` and :math:`x_k`
is the k'th root of :math:`He_n`, and then scaling the results to get
the right value when integrating 1.
"""
ideg = int(deg)
if ideg != deg or ideg < 1:
        raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
c = np.array([0]*deg + [1])
m = hermecompanion(c)
x = la.eigvalsh(m)
x.sort()
# improve roots by one application of Newton
dy = _normed_hermite_e_n(x, ideg)
df = _normed_hermite_e_n(x, ideg - 1) * np.sqrt(ideg)
x -= dy/df
# compute the weights. We scale the factor to avoid possible numerical
# overflow.
fm = _normed_hermite_e_n(x, ideg - 1)
fm /= np.abs(fm).max()
w = 1/(fm * fm)
# for Hermite_e we can also symmetrize
w = (w + w[::-1])/2
x = (x - x[::-1])/2
# scale w to get the right value
w *= np.sqrt(2*np.pi) / w.sum()
return x, w
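# Editor's illustrative sketch (comment only, not part of the upstream module):
# the rule integrates polynomials of degree <= 2*deg - 1 exactly against the
# weight exp(-x**2/2); e.g. the second moment of the weight is sqrt(2*pi):
#     x, w = hermegauss(3)
#     np.allclose(np.dot(w, x**2), np.sqrt(2*np.pi))   # -> True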
def hermeweight(x):
"""Weight function of the Hermite_e polynomials.
The weight function is :math:`\exp(-x^2/2)` and the interval of
    integration is :math:`[-\infty, \infty]`. The HermiteE polynomials are
orthogonal, but not normalized, with respect to this weight function.
Parameters
----------
x : array_like
Values at which the weight function will be computed.
Returns
-------
w : ndarray
The weight function at `x`.
Notes
-----
.. versionadded::1.7.0
"""
w = np.exp(-.5*x**2)
return w
#
# HermiteE series class
#
class HermiteE(ABCPolyBase):
"""An HermiteE series class.
The HermiteE class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed in the `ABCPolyBase` documentation.
Parameters
----------
coef : array_like
        HermiteE coefficients in order of increasing degree, i.e.,
        ``(1, 2, 3)`` gives ``1*He_0(x) + 2*He_1(x) + 3*He_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1, 1].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1, 1].
.. versionadded:: 1.6.0
"""
# Virtual Functions
_add = staticmethod(hermeadd)
_sub = staticmethod(hermesub)
_mul = staticmethod(hermemul)
_div = staticmethod(hermediv)
_pow = staticmethod(hermepow)
_val = staticmethod(hermeval)
_int = staticmethod(hermeint)
_der = staticmethod(hermeder)
_fit = staticmethod(hermefit)
_line = staticmethod(hermeline)
_roots = staticmethod(hermeroots)
_fromroots = staticmethod(hermefromroots)
# Virtual properties
nickname = 'herme'
domain = np.array(hermedomain)
window = np.array(hermedomain)
| bsd-3-clause |
entomb/CouchPotatoServer | libs/xmpp/commands.py | 200 | 16116 | ## $Id: commands.py,v 1.17 2007/08/28 09:54:15 normanr Exp $
## Ad-Hoc Command manager
## Mike Albon (c) 5th January 2005
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
"""This module is an ad-hoc command processor for xmpppy. It uses the plug-in mechanism like most of the core library. It depends on a DISCO browser manager.
There are 3 classes here: a command processor (Commands, modelled on the Browser), a command template plugin (Command_Handler_Prototype), and an example command (TestCommand).
To use this module:
Instantiate the module with the parent transport and disco browser manager as parameters.
'Plug in' commands using the command template.
The command feature must be added to existing disco replies where necessary.
What it supplies:
Automatic command registration with the disco browser manager.
Automatic listing of commands in the public command list.
A means of handling requests, by redirection through the command manager.
"""
from protocol import *
from client import PlugIn
class Commands(PlugIn):
"""Commands is an ancestor of PlugIn and can be attached to any session.
    The commands class provides a lookup and browse mechanism. It follows the same principle as the Browser class: for Service Discovery to provide the list of commands, it adds the 'list' disco type to your existing disco handler function.
How it works:
    The commands are added into the existing Browser on the correct nodes. When the command list is built, the supplied discovery handler function needs to have a 'list' option in type. This then gets enumerated; all results returned as None are ignored.
    The command executed is then called using its Execute method. All session management is handled by the command itself.
"""
def __init__(self, browser):
"""Initialises class and sets up local variables"""
PlugIn.__init__(self)
DBG_LINE='commands'
self._exported_methods=[]
self._handlers={'':{}}
self._browser = browser
def plugin(self, owner):
"""Makes handlers within the session"""
# Plug into the session and the disco manager
# We only need get and set, results are not needed by a service provider, only a service user.
owner.RegisterHandler('iq',self._CommandHandler,typ='set',ns=NS_COMMANDS)
owner.RegisterHandler('iq',self._CommandHandler,typ='get',ns=NS_COMMANDS)
self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid='')
def plugout(self):
"""Removes handlers from the session"""
# unPlug from the session and the disco manager
self._owner.UnregisterHandler('iq',self._CommandHandler,ns=NS_COMMANDS)
for jid in self._handlers:
self._browser.delDiscoHandler(self._DiscoHandler,node=NS_COMMANDS)
def _CommandHandler(self,conn,request):
"""The internal method to process the routing of command execution requests"""
# This is the command handler itself.
# We must:
# Pass on command execution to command handler
# (Do we need to keep session details here, or can that be done in the command?)
jid = str(request.getTo())
try:
node = request.getTagAttr('command','node')
except:
conn.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
if self._handlers.has_key(jid):
if self._handlers[jid].has_key(node):
self._handlers[jid][node]['execute'](conn,request)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
elif self._handlers[''].has_key(node):
self._handlers[''][node]['execute'](conn,request)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
def _DiscoHandler(self,conn,request,typ):
"""The internal method to process service discovery requests"""
# This is the disco manager handler.
if typ == 'items':
# We must:
# Generate a list of commands and return the list
# * This handler does not handle individual commands disco requests.
# Pseudo:
# Enumerate the 'item' disco of each command for the specified jid
            # Build response and send
            # To make this code easy to write we add a 'list' disco type; it returns a tuple or 'none' if not advertised
list = []
items = []
jid = str(request.getTo())
# Get specific jid based results
if self._handlers.has_key(jid):
for each in self._handlers[jid].keys():
items.append((jid,each))
else:
# Get generic results
for each in self._handlers[''].keys():
items.append(('',each))
if items != []:
for each in items:
i = self._handlers[each[0]][each[1]]['disco'](conn,request,'list')
if i != None:
list.append(Node(tag='item',attrs={'jid':i[0],'node':i[1],'name':i[2]}))
iq = request.buildReply('result')
if request.getQuerynode(): iq.setQuerynode(request.getQuerynode())
iq.setQueryPayload(list)
conn.send(iq)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
elif typ == 'info':
return {'ids':[{'category':'automation','type':'command-list'}],'features':[]}
    def addCommand(self,name,cmddisco,cmdexecute,jid=''):
        """The method to call if adding a new command to the session, the required parameters of cmddisco and cmdexecute are the methods to enable that command to be executed"""
# This command takes a command object and the name of the command for registration
# We must:
# Add item into disco
# Add item into command list
if not self._handlers.has_key(jid):
self._handlers[jid]={}
self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid=jid)
if self._handlers[jid].has_key(name):
raise NameError,'Command Exists'
else:
self._handlers[jid][name]={'disco':cmddisco,'execute':cmdexecute}
# Need to add disco stuff here
self._browser.setDiscoHandler(cmddisco,node=name,jid=jid)
def delCommand(self,name,jid=''):
"""Removed command from the session"""
# This command takes a command object and the name used for registration
# We must:
# Remove item from disco
# Remove item from command list
if not self._handlers.has_key(jid):
raise NameError,'Jid not found'
if not self._handlers[jid].has_key(name):
raise NameError, 'Command not found'
else:
#Do disco removal here
command = self.getCommand(name,jid)['disco']
del self._handlers[jid][name]
self._browser.delDiscoHandler(command,node=name,jid=jid)
def getCommand(self,name,jid=''):
"""Returns the command tuple"""
# This gets the command object with name
# We must:
# Return item that matches this name
if not self._handlers.has_key(jid):
raise NameError,'Jid not found'
elif not self._handlers[jid].has_key(name):
raise NameError,'Command not found'
else:
return self._handlers[jid][name]
class Command_Handler_Prototype(PlugIn):
    """This is a prototype command handler. As each command uses a disco method
    and an execute method, you can implement it any way you like; however, this
    is my first attempt at making a generic handler that you can hang process
    stages onto. There is an example command below.
The parameters are as follows:
name : the name of the command within the jabber environment
description : the natural language description
discofeatures : the features supported by the command
    initial : the initial command in the form of {'execute':commandname}
All stages set the 'actions' dictionary for each session to represent the possible options available.
"""
name = 'examplecommand'
count = 0
description = 'an example command'
discofeatures = [NS_COMMANDS,NS_DATA]
# This is the command template
def __init__(self,jid=''):
"""Set up the class"""
PlugIn.__init__(self)
DBG_LINE='command'
self.sessioncount = 0
self.sessions = {}
# Disco information for command list pre-formatted as a tuple
self.discoinfo = {'ids':[{'category':'automation','type':'command-node','name':self.description}],'features': self.discofeatures}
self._jid = jid
def plugin(self,owner):
"""Plug command into the commands class"""
# The owner in this instance is the Command Processor
self._commands = owner
self._owner = owner._owner
self._commands.addCommand(self.name,self._DiscoHandler,self.Execute,jid=self._jid)
def plugout(self):
"""Remove command from the commands class"""
self._commands.delCommand(self.name,self._jid)
def getSessionID(self):
"""Returns an id for the command session"""
self.count = self.count+1
return 'cmd-%s-%d'%(self.name,self.count)
def Execute(self,conn,request):
"""The method that handles all the commands, and routes them to the correct method for that stage."""
# New request or old?
try:
session = request.getTagAttr('command','sessionid')
except:
session = None
try:
action = request.getTagAttr('command','action')
except:
action = None
if action == None: action = 'execute'
# Check session is in session list
if self.sessions.has_key(session):
if self.sessions[session]['jid']==request.getFrom():
                # Check action is valid
if self.sessions[session]['actions'].has_key(action):
# Execute next action
self.sessions[session]['actions'][action](conn,request)
else:
# Stage not presented as an option
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
else:
# Jid and session don't match. Go away imposter
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
elif session != None:
# Not on this sessionid you won't.
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
else:
# New session
self.initial[action](conn,request)
def _DiscoHandler(self,conn,request,type):
"""The handler for discovery events"""
if type == 'list':
return (request.getTo(),self.name,self.description)
elif type == 'items':
return []
elif type == 'info':
return self.discoinfo
class TestCommand(Command_Handler_Prototype):
    """ Example class. You should read the source if you wish to understand how it works.
    Generally, it presents a "master" that guides the user through calculating something.
"""
name = 'testcommand'
description = 'a noddy example command'
def __init__(self,jid=''):
""" Init internal constants. """
Command_Handler_Prototype.__init__(self,jid)
self.initial = {'execute':self.cmdFirstStage}
    def cmdFirstStage(self,conn,request):
        """ Determine the session and send the first stage form """
# This is the only place this should be repeated as all other stages should have SessionIDs
try:
session = request.getTagAttr('command','sessionid')
except:
session = None
if session == None:
session = self.getSessionID()
self.sessions[session]={'jid':request.getFrom(),'actions':{'cancel':self.cmdCancel,'next':self.cmdSecondStage,'execute':self.cmdSecondStage},'data':{'type':None}}
# As this is the first stage we only send a form
reply = request.buildReply('result')
form = DataForm(title='Select type of operation',data=['Use the combobox to select the type of calculation you would like to do, then click Next',DataField(name='calctype',desc='Calculation Type',value=self.sessions[session]['data']['type'],options=[['circlediameter','Calculate the Diameter of a circle'],['circlearea','Calculate the area of a circle']],typ='list-single',required=1)])
replypayload = [Node('actions',attrs={'execute':'next'},payload=[Node('next')]),form]
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':session,'status':'executing'},payload=replypayload)
self._owner.send(reply)
raise NodeProcessed
def cmdSecondStage(self,conn,request):
form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
self.sessions[request.getTagAttr('command','sessionid')]['data']['type']=form.getField('calctype').getValue()
self.sessions[request.getTagAttr('command','sessionid')]['actions']={'cancel':self.cmdCancel,None:self.cmdThirdStage,'previous':self.cmdFirstStage,'execute':self.cmdThirdStage,'next':self.cmdThirdStage}
# The form generation is split out to another method as it may be called by cmdThirdStage
self.cmdSecondStageReply(conn,request)
def cmdSecondStageReply(self,conn,request):
reply = request.buildReply('result')
form = DataForm(title = 'Enter the radius', data=['Enter the radius of the circle (numbers only)',DataField(desc='Radius',name='radius',typ='text-single')])
replypayload = [Node('actions',attrs={'execute':'complete'},payload=[Node('complete'),Node('prev')]),form]
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'executing'},payload=replypayload)
self._owner.send(reply)
raise NodeProcessed
def cmdThirdStage(self,conn,request):
form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
try:
num = float(form.getField('radius').getValue())
except:
self.cmdSecondStageReply(conn,request)
from math import pi
if self.sessions[request.getTagAttr('command','sessionid')]['data']['type'] == 'circlearea':
result = (num**2)*pi
else:
result = num*2*pi
reply = request.buildReply('result')
form = DataForm(typ='result',data=[DataField(desc='result',name='result',value=result)])
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'completed'},payload=[form])
self._owner.send(reply)
raise NodeProcessed
def cmdCancel(self,conn,request):
reply = request.buildReply('result')
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'cancelled'})
self._owner.send(reply)
del self.sessions[request.getTagAttr('command','sessionid')]
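# Editor's illustrative wiring sketch (comment only, not part of the original
# module). The client/browser calls below follow the usual xmpppy plug-in
# pattern and are assumptions for illustration (they presume ``import xmpp``
# in the calling script), not an authoritative recipe:
#     cl = xmpp.Client('example.com')
#     cl.connect()
#     cl.auth('user', 'password', 'bot')
#     br = xmpp.browser.Browser()
#     br.PlugIn(cl)
#     cmds = Commands(br)
#     cmds.PlugIn(cl)
#     TestCommand().plugin(cmds)    # registers 'testcommand' with disco
#     while 1: cl.Process(1)        # typical event loop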
| gpl-3.0 |
yl565/statsmodels | statsmodels/stats/contingency_tables.py | 4 | 43623 | """
Methods for analyzing two-way contingency tables (i.e. frequency
tables for observations that are cross-classified with respect to two
categorical variables).
The main classes are:
* Table : implements methods that can be applied to any two-way
contingency table.
* SquareTable : implements methods that can be applied to a square
two-way contingency table.
* Table2x2 : implements methods that can be applied to a 2x2
contingency table.
* StratifiedTable : implements methods that can be applied to a
collection of contingency tables.
Also contains functions for conducting Mcnemar's test and Cochran's q
test.
Note that the inference procedures may depend on how the data were
sampled. In general the observed units are independent and
identically distributed.
"""
from __future__ import division
from statsmodels.tools.decorators import cache_readonly, resettable_cache
import numpy as np
from scipy import stats
import pandas as pd
from statsmodels import iolib
from statsmodels.tools.sm_exceptions import SingularMatrixWarning
def _make_df_square(table):
"""
Reindex a pandas DataFrame so that it becomes square, meaning that
the row and column indices contain the same values, in the same
order. The row and column index are extended to achieve this.
"""
if not isinstance(table, pd.DataFrame):
return table
# If the table is not square, make it square
if table.shape[0] != table.shape[1]:
ix = list(set(table.index) | set(table.columns))
table = table.reindex(ix, axis=0)
table = table.reindex(ix, axis=1)
# Ensures that the rows and columns are in the same order.
table = table.reindex(table.columns)
return table
class _Bunch(object):
def __repr__(self):
return "<bunch object containing statsmodels results>"
class Table(object):
"""
Analyses that can be performed on a two-way contingency table.
Parameters
----------
table : array-like
A contingency table.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Attributes
----------
table_orig : array-like
The original table is cached as `table_orig`.
marginal_probabilities : tuple of two ndarrays
The estimated row and column marginal distributions.
independence_probabilities : ndarray
Estimated cell probabilities under row/column independence.
fittedvalues : ndarray
Fitted values under independence.
resid_pearson : ndarray
The Pearson residuals under row/column independence.
standardized_resids : ndarray
Residuals for the independent row/column model with approximate
unit variance.
chi2_contribs : ndarray
The contribution of each cell to the chi^2 statistic.
local_logodds_ratios : ndarray
The local log odds ratios are calculated for each 2x2 subtable
formed from adjacent rows and columns.
local_oddsratios : ndarray
The local odds ratios are calculated from each 2x2 subtable
formed from adjacent rows and columns.
cumulative_log_oddsratios : ndarray
The cumulative log odds ratio at a given pair of thresholds is
calculated by reducing the table to a 2x2 table based on
dichotomizing the rows and columns at the given thresholds.
The table of cumulative log odds ratios presents all possible
cumulative log odds ratios that can be formed from a given
table.
cumulative_oddsratios : ndarray
The cumulative odds ratios are calculated by reducing the
table to a 2x2 table based on cutting the rows and columns at
a given point. The table of cumulative odds ratios presents
all possible cumulative odds ratios that can be formed from a
given table.
See also
--------
statsmodels.graphics.mosaicplot.mosaic
scipy.stats.chi2_contingency
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
References
----------
Definitions of residuals:
https://onlinecourses.science.psu.edu/stat504/node/86
"""
def __init__(self, table, shift_zeros=True):
self.table_orig = table
self.table = np.asarray(table, dtype=np.float64)
if shift_zeros and (self.table.min() == 0):
self.table = self.table + 0.5
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, from which a contingency table is constructed
using the first two columns.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
Returns
-------
A Table instance.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
def test_nominal_association(self):
"""
Assess independence for nominal factors.
Assessment of independence between rows and columns using
chi^2 testing. The rows and columns are treated as nominal
(unordered) categorical variables.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
df : integer
The degrees of freedom of the reference distribution
pvalue : float
The p-value for the test.
"""
statistic = np.asarray(self.chi2_contribs).sum()
df = np.prod(np.asarray(self.table.shape) - 1)
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.df = df
b.pvalue = pvalue
return b
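    # Editor's illustrative sketch (comment only; the 2x3 counts below are
    # made up for demonstration):
    #     tab = np.asarray([[35, 21, 44], [25, 38, 27]])
    #     rslt = Table(tab).test_nominal_association()
    #     rslt.statistic, rslt.df, rslt.pvalue   # chi^2 statistic, df = 2, p-value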
def test_ordinal_association(self, row_scores=None, col_scores=None):
"""
Assess independence between two ordinal variables.
This is the 'linear by linear' association test, which uses
weights or scores to target the test to have more power
against ordered alternatives.
Parameters
----------
row_scores : array-like
An array of numeric row scores
col_scores : array-like
An array of numeric column scores
Returns
-------
A bunch with the following attributes:
statistic : float
The test statistic.
null_mean : float
The expected value of the test statistic under the null
hypothesis.
null_sd : float
The standard deviation of the test statistic under the
null hypothesis.
zscore : float
The Z-score for the test statistic.
pvalue : float
The p-value for the test.
Notes
-----
The scores define the trend to which the test is most sensitive.
Using the default row and column scores gives the
Cochran-Armitage trend test.
"""
if row_scores is None:
row_scores = np.arange(self.table.shape[0])
if col_scores is None:
col_scores = np.arange(self.table.shape[1])
if len(row_scores) != self.table.shape[0]:
raise ValueError("The length of `row_scores` must match the first dimension of `table`.")
if len(col_scores) != self.table.shape[1]:
raise ValueError("The length of `col_scores` must match the second dimension of `table`.")
# The test statistic
statistic = np.dot(row_scores, np.dot(self.table, col_scores))
# Some needed quantities
n_obs = self.table.sum()
rtot = self.table.sum(1)
um = np.dot(row_scores, rtot)
u2m = np.dot(row_scores**2, rtot)
ctot = self.table.sum(0)
vn = np.dot(col_scores, ctot)
v2n = np.dot(col_scores**2, ctot)
# The null mean and variance of the test statistic
e_stat = um * vn / n_obs
v_stat = (u2m - um**2 / n_obs) * (v2n - vn**2 / n_obs) / (n_obs - 1)
sd_stat = np.sqrt(v_stat)
zscore = (statistic - e_stat) / sd_stat
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
b = _Bunch()
b.statistic = statistic
b.null_mean = e_stat
b.null_sd = sd_stat
b.zscore = zscore
b.pvalue = pvalue
return b
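    # Editor's illustrative sketch (comment only; counts made up): with the
    # default equally spaced scores this is the linear-by-linear trend test:
    #     tab = np.asarray([[20, 10, 5], [10, 15, 20]])
    #     rslt = Table(tab).test_ordinal_association()
    #     rslt.zscore, rslt.pvalue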
@cache_readonly
def marginal_probabilities(self):
# docstring for cached attributes in init above
n = self.table.sum()
row = self.table.sum(1) / n
col = self.table.sum(0) / n
if isinstance(self.table_orig, pd.DataFrame):
row = pd.Series(row, self.table_orig.index)
col = pd.Series(col, self.table_orig.columns)
return row, col
@cache_readonly
def independence_probabilities(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
itab = np.outer(row, col)
if isinstance(self.table_orig, pd.DataFrame):
itab = pd.DataFrame(itab, self.table_orig.index,
self.table_orig.columns)
return itab
@cache_readonly
def fittedvalues(self):
# docstring for cached attributes in init above
probs = self.independence_probabilities
fit = self.table.sum() * probs
return fit
@cache_readonly
def resid_pearson(self):
# docstring for cached attributes in init above
fit = self.fittedvalues
resids = (self.table - fit) / np.sqrt(fit)
return resids
@cache_readonly
def standardized_resids(self):
# docstring for cached attributes in init above
row, col = self.marginal_probabilities
sresids = self.resid_pearson / np.sqrt(np.outer(1 - row, 1 - col))
return sresids
@cache_readonly
def chi2_contribs(self):
# docstring for cached attributes in init above
return self.resid_pearson**2
@cache_readonly
def local_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.copy()
a = ta[0:-1, 0:-1]
b = ta[0:-1, 1:]
c = ta[1:, 0:-1]
d = ta[1:, 1:]
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def local_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.local_log_oddsratios)
@cache_readonly
def cumulative_log_oddsratios(self):
# docstring for cached attributes in init above
ta = self.table.cumsum(0).cumsum(1)
a = ta[0:-1, 0:-1]
b = ta[0:-1, -1:] - a
c = ta[-1:, 0:-1] - a
d = ta[-1, -1] - (a + b + c)
tab = np.log(a) + np.log(d) - np.log(b) - np.log(c)
rslt = np.empty(self.table.shape, np.float64)
rslt *= np.nan
rslt[0:-1, 0:-1] = tab
if isinstance(self.table_orig, pd.DataFrame):
rslt = pd.DataFrame(rslt, index=self.table_orig.index,
columns=self.table_orig.columns)
return rslt
@cache_readonly
def cumulative_oddsratios(self):
# docstring for cached attributes in init above
return np.exp(self.cumulative_log_oddsratios)
class SquareTable(Table):
"""
Methods for analyzing a square contingency table.
Parameters
----------
table : array-like
A square contingency table, or DataFrame that is converted
to a square form.
shift_zeros : boolean
If True and any cell count is zero, add 0.5 to all values
in the table.
These methods should only be used when the rows and columns of the
table have the same categories. If `table` is provided as a
Pandas DataFrame, the row and column indices will be extended to
create a square table. Otherwise the table should be provided in
a square form, with the (implicit) row and column categories
appearing in the same order.
"""
def __init__(self, table, shift_zeros=True):
table = _make_df_square(table) # Non-pandas passes through
k1, k2 = table.shape
if k1 != k2:
raise ValueError('table must be square')
super(SquareTable, self).__init__(table, shift_zeros)
def symmetry(self, method="bowker"):
"""
Test for symmetry of a joint distribution.
This procedure tests the null hypothesis that the joint
distribution is symmetric around the main diagonal, that is
.. math::
p_{i, j} = p_{j, i} for all i, j
Returns
-------
A bunch with attributes:
statistic : float
chisquare test statistic
        pvalue : float
p-value of the test statistic based on chisquare distribution
df : int
degrees of freedom of the chisquare distribution
Notes
-----
The implementation is based on the SAS documentation. R includes
it in `mcnemar.test` if the table is not 2 by 2. However a more
direct generalization of the McNemar test to larger tables is
        provided by the homogeneity test (SquareTable.homogeneity).
The p-value is based on the chi-square distribution which requires
that the sample size is not very small to be a good approximation
of the true distribution. For 2x2 contingency tables the exact
distribution can be obtained with `mcnemar`
See Also
--------
mcnemar
homogeneity
"""
if method.lower() != "bowker":
raise ValueError("method for symmetry testing must be 'bowker'")
k = self.table.shape[0]
upp_idx = np.triu_indices(k, 1)
tril = self.table.T[upp_idx] # lower triangle in column order
triu = self.table[upp_idx] # upper triangle in row order
statistic = ((tril - triu)**2 / (tril + triu + 1e-20)).sum()
df = k * (k-1) / 2.
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
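    # A small illustration (invented 3x3 counts): Bowker's test uses the
    # k*(k-1)/2 discordant pairs above/below the diagonal, so df = 3 here.
    #
    #     sq = SquareTable([[15, 5, 2], [4, 20, 6], [1, 7, 25]])
    #     sym = sq.symmetry()
    #     sym.statistic, sym.df, sym.pvalue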
def homogeneity(self, method="stuart_maxwell"):
"""
Compare row and column marginal distributions.
Parameters
----------
method : string
Either 'stuart_maxwell' or 'bhapkar', leading to two different
estimates of the covariance matrix for the estimated
difference between the row margins and the column margins.
        Returns
        -------
        A bunch with attributes:
statistic : float
The chi^2 test statistic
pvalue : float
The p-value of the test statistic
df : integer
The degrees of freedom of the reference distribution
Notes
-----
For a 2x2 table this is equivalent to McNemar's test. More
generally the procedure tests the null hypothesis that the
marginal distribution of the row factor is equal to the
marginal distribution of the column factor. For this to be
meaningful, the two factors must have the same sample space
(i.e. the same categories).
"""
if self.table.shape[0] < 1:
raise ValueError('table is empty')
elif self.table.shape[0] == 1:
b = _Bunch()
b.statistic = 0
b.pvalue = 1
b.df = 0
return b
method = method.lower()
if method not in ["bhapkar", "stuart_maxwell"]:
raise ValueError("method '%s' for homogeneity not known" % method)
n_obs = self.table.sum()
pr = self.table.astype(np.float64) / n_obs
# Compute margins, eliminate last row/column so there is no
# degeneracy
row = pr.sum(1)[0:-1]
col = pr.sum(0)[0:-1]
pr = pr[0:-1, 0:-1]
# The estimated difference between row and column margins.
d = col - row
# The degrees of freedom of the chi^2 reference distribution.
df = pr.shape[0]
if method == "bhapkar":
vmat = -(pr + pr.T) - np.outer(d, d)
dv = col + row - 2*np.diag(pr) - d**2
np.fill_diagonal(vmat, dv)
elif method == "stuart_maxwell":
vmat = -(pr + pr.T)
dv = row + col - 2*np.diag(pr)
np.fill_diagonal(vmat, dv)
try:
statistic = n_obs * np.dot(d, np.linalg.solve(vmat, d))
except np.linalg.LinAlgError:
import warnings
warnings.warn("Unable to invert covariance matrix",
SingularMatrixWarning)
b = _Bunch()
b.statistic = np.nan
b.pvalue = np.nan
b.df = df
return b
pvalue = 1 - stats.chi2.cdf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
b.df = df
return b
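    # Continuing the illustration above, the marginal homogeneity test can be
    # run with either covariance estimate:
    #
    #     sq.homogeneity()                     # Stuart-Maxwell (default)
    #     sq.homogeneity(method="bhapkar")     # Bhapkar variant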
def summary(self, alpha=0.05, float_format="%.3f"):
"""
Produce a summary of the analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the interval.
float_format : string
Used to format numeric values in the table.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
fmt = float_format
headers = ["Statistic", "P-value", "DF"]
stubs = ["Symmetry", "Homogeneity"]
sy = self.symmetry()
hm = self.homogeneity()
data = [[fmt % sy.statistic, fmt % sy.pvalue, '%d' % sy.df],
[fmt % hm.statistic, fmt % hm.pvalue, '%d' % hm.df]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
class Table2x2(SquareTable):
"""
Analyses that can be performed on a 2x2 contingency table.
Parameters
----------
table : array-like
A 2x2 contingency table
shift_zeros : boolean
If true, 0.5 is added to all cells of the table if any cell is
equal to zero.
Attributes
----------
log_oddsratio : float
The log odds ratio of the table.
log_oddsratio_se : float
The asymptotic standard error of the estimated log odds ratio.
oddsratio : float
The odds ratio of the table.
riskratio : float
The ratio between the risk in the first row and the risk in
the second row. Column 0 is interpreted as containing the
        number of occurrences of the event of interest.
log_riskratio : float
The estimated log risk ratio for the table.
log_riskratio_se : float
The standard error of the estimated log risk ratio for the
table.
Notes
-----
The inference procedures used here are all based on a sampling
model in which the units are independent and identically
distributed, with each unit being classified with respect to two
categorical variables.
Note that for the risk ratio, the analysis is not symmetric with
respect to the rows and columns of the contingency table. The two
rows define population subgroups, column 0 is the number of
'events', and column 1 is the number of 'non-events'.
"""
    def __init__(self, table, shift_zeros=True):
        # Coerce to an ndarray first so the shape check below also works for
        # nested lists and similar array-like inputs.
        table = np.asarray(table, dtype=np.float64)
        if (table.ndim != 2) or (table.shape[0] != 2) or (table.shape[1] != 2):
            raise ValueError("Table2x2 takes a 2x2 table as input.")
        super(Table2x2, self).__init__(table, shift_zeros)
@classmethod
def from_data(cls, data, shift_zeros=True):
"""
Construct a Table object from data.
Parameters
----------
data : array-like
The raw data, the first column defines the rows and the
second column defines the columns.
shift_zeros : boolean
If True, and if there are any zeros in the contingency
table, add 0.5 to all four cells of the table.
"""
if isinstance(data, pd.DataFrame):
table = pd.crosstab(data.iloc[:, 0], data.iloc[:, 1])
else:
table = pd.crosstab(data[:, 0], data[:, 1])
return cls(table, shift_zeros)
@cache_readonly
def log_oddsratio(self):
# docstring for cached attributes in init above
f = self.table.flatten()
return np.dot(np.log(f), np.r_[1, -1, -1, 1])
@cache_readonly
def oddsratio(self):
# docstring for cached attributes in init above
return self.table[0, 0] * self.table[1, 1] / (self.table[0, 1] * self.table[1, 0])
@cache_readonly
def log_oddsratio_se(self):
# docstring for cached attributes in init above
return np.sqrt(np.sum(1 / self.table))
def oddsratio_pvalue(self, null=1):
"""
P-value for a hypothesis test about the odds ratio.
Parameters
----------
null : float
The null value of the odds ratio.
"""
return self.log_oddsratio_pvalue(np.log(null))
def log_oddsratio_pvalue(self, null=0):
"""
P-value for a hypothesis test about the log odds ratio.
Parameters
----------
null : float
The null value of the log odds ratio.
"""
zscore = (self.log_oddsratio - null) / self.log_oddsratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_oddsratio_confint(self, alpha=0.05, method="normal"):
"""
        A confidence interval for the log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lor = self.log_oddsratio
se = self.log_oddsratio_se
lcb = lor - f * se
ucb = lor + f * se
return lcb, ucb
def oddsratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_oddsratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
@cache_readonly
def riskratio(self):
# docstring for cached attributes in init above
p = self.table[:, 0] / self.table.sum(1)
return p[0] / p[1]
@cache_readonly
def log_riskratio(self):
# docstring for cached attributes in init above
return np.log(self.riskratio)
@cache_readonly
def log_riskratio_se(self):
# docstring for cached attributes in init above
n = self.table.sum(1)
p = self.table[:, 0] / n
va = np.sum((1 - p) / (n*p))
return np.sqrt(va)
def riskratio_pvalue(self, null=1):
"""
p-value for a hypothesis test about the risk ratio.
Parameters
----------
null : float
The null value of the risk ratio.
"""
return self.log_riskratio_pvalue(np.log(null))
def log_riskratio_pvalue(self, null=0):
"""
p-value for a hypothesis test about the log risk ratio.
Parameters
----------
null : float
The null value of the log risk ratio.
"""
zscore = (self.log_riskratio - null) / self.log_riskratio_se
pvalue = 2 * stats.norm.cdf(-np.abs(zscore))
return pvalue
def log_riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the log risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
f = -stats.norm.ppf(alpha / 2)
lrr = self.log_riskratio
se = self.log_riskratio_se
lcb = lrr - f * se
ucb = lrr + f * se
return lcb, ucb
def riskratio_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the risk ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
lcb, ucb = self.log_riskratio_confint(alpha, method=method)
return np.exp(lcb), np.exp(ucb)
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
Summarizes results for a 2x2 table analysis.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the confidence
intervals.
float_format : string
Used to format the numeric values in the table.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
headers = ["Estimate", "SE", "LCB", "UCB", "p-value"]
stubs = ["Odds ratio", "Log odds ratio", "Risk ratio", "Log risk ratio"]
lcb1, ucb1 = self.oddsratio_confint(alpha, method)
lcb2, ucb2 = self.log_oddsratio_confint(alpha, method)
lcb3, ucb3 = self.riskratio_confint(alpha, method)
lcb4, ucb4 = self.log_riskratio_confint(alpha, method)
data = [[fmt(x) for x in [self.oddsratio, "", lcb1, ucb1, self.oddsratio_pvalue()]],
[fmt(x) for x in [self.log_oddsratio, self.log_oddsratio_se, lcb2, ucb2,
self.oddsratio_pvalue()]],
                [fmt(x) for x in [self.riskratio, "", lcb3, ucb3, self.riskratio_pvalue()]],
[fmt(x) for x in [self.log_riskratio, self.log_riskratio_se, lcb4, ucb4,
self.riskratio_pvalue()]]]
tab = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
return tab
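# A hedged sketch of a 2x2 analysis (counts invented for illustration);
# column 0 plays the role of the 'event' column for the risk ratio:
#
#     t22 = Table2x2(np.asarray([[8, 2], [4, 12]]))
#     t22.oddsratio, t22.riskratio
#     t22.oddsratio_confint(alpha=0.05)
#     print(t22.summary())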
class StratifiedTable(object):
"""
Analyses for a collection of 2x2 contingency tables.
Such a collection may arise by stratifying a single 2x2 table with
respect to another factor. This class implements the
'Cochran-Mantel-Haenszel' and 'Breslow-Day' procedures for
analyzing collections of 2x2 contingency tables.
Parameters
----------
tables : list or ndarray
Either a list containing several 2x2 contingency tables, or
a 2x2xk ndarray in which each slice along the third axis is a
2x2 contingency table.
Attributes
----------
logodds_pooled : float
An estimate of the pooled log odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all the tables.
    logodds_pooled_se : float
The estimated standard error of the pooled log odds ratio,
following Robins, Breslow and Greenland (Biometrics
42:311-323).
oddsratio_pooled : float
An estimate of the pooled odds ratio. This is the
Mantel-Haenszel estimate of an odds ratio that is common to
all tables.
risk_pooled : float
An estimate of the pooled risk ratio. This is an estimate of
a risk ratio that is common to all the tables.
Notes
-----
    These results are based on a sampling model in which the units are
independent both within and between strata.
"""
def __init__(self, tables, shift_zeros=False):
if isinstance(tables, np.ndarray):
sp = tables.shape
if (len(sp) != 3) or (sp[0] != 2) or (sp[1] != 2):
raise ValueError("If an ndarray, argument must be 2x2xn")
table = tables
else:
# Create a data cube
table = np.dstack(tables).astype(np.float64)
if shift_zeros:
zx = (table == 0).sum(0).sum(0)
ix = np.flatnonzero(zx > 0)
if len(ix) > 0:
table = table.copy()
table[:, :, ix] += 0.5
self.table = table
self._cache = resettable_cache()
# Quantities to precompute. Table entries are [[a, b], [c,
# d]], 'ad' is 'a * d', 'apb' is 'a + b', 'dma' is 'd - a',
# etc.
self._apb = table[0, 0, :] + table[0, 1, :]
self._apc = table[0, 0, :] + table[1, 0, :]
self._bpd = table[0, 1, :] + table[1, 1, :]
self._cpd = table[1, 0, :] + table[1, 1, :]
self._ad = table[0, 0, :] * table[1, 1, :]
self._bc = table[0, 1, :] * table[1, 0, :]
self._apd = table[0, 0, :] + table[1, 1, :]
self._dma = table[1, 1, :] - table[0, 0, :]
self._n = table.sum(0).sum(0)
@classmethod
def from_data(cls, var1, var2, strata, data):
"""
Construct a StratifiedTable object from data.
Parameters
----------
var1 : int or string
The column index or name of `data` containing the variable
defining the rows of the contingency table. The variable
must have only two distinct values.
var2 : int or string
The column index or name of `data` containing the variable
defining the columns of the contingency table. The variable
must have only two distinct values.
strata : int or string
            The column index or name of `data` containing the variable
defining the strata.
data : array-like
The raw data. A cross-table for analysis is constructed
from the first two columns.
Returns
-------
A StratifiedTable instance.
"""
if not isinstance(data, pd.DataFrame):
            data1 = pd.DataFrame(index=np.arange(len(data)), columns=[var1, var2, strata])
data1.loc[:, var1] = data[:, var1]
data1.loc[:, var2] = data[:, var2]
data1.loc[:, strata] = data[:, strata]
else:
data1 = data[[var1, var2, strata]]
gb = data1.groupby(strata).groups
tables = []
for g in gb:
ii = gb[g]
tab = pd.crosstab(data1.loc[ii, var1], data1.loc[ii, var2])
tables.append(tab)
return cls(tables)
def test_null_odds(self, correction=False):
"""
Test that all tables have odds ratio equal to 1.
This is the 'Mantel-Haenszel' test.
Parameters
----------
correction : boolean
If True, use the continuity correction when calculating the
test statistic.
Returns
-------
A bunch containing the chi^2 test statistic and p-value.
"""
statistic = np.sum(self.table[0, 0, :] - self._apb * self._apc / self._n)
statistic = np.abs(statistic)
if correction:
statistic -= 0.5
statistic = statistic**2
denom = self._apb * self._apc * self._bpd * self._cpd
denom /= (self._n**2 * (self._n - 1))
denom = np.sum(denom)
statistic /= denom
# df is always 1
pvalue = 1 - stats.chi2.cdf(statistic, 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
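    # Hedged usage sketch (two invented strata): build the collection from a
    # list of 2x2 tables and test the null of a common odds ratio equal to 1.
    #
    #     strat = StratifiedTable([[[10, 5], [3, 12]], [[8, 7], [6, 9]]])
    #     strat.test_null_odds(correction=True).pvalue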
@cache_readonly
def oddsratio_pooled(self):
# doc for cached attributes in init above
odds_ratio = np.sum(self._ad / self._n) / np.sum(self._bc / self._n)
return odds_ratio
@cache_readonly
def logodds_pooled(self):
# doc for cached attributes in init above
return np.log(self.oddsratio_pooled)
@cache_readonly
def risk_pooled(self):
# doc for cached attributes in init above
acd = self.table[0, 0, :] * self._cpd
cab = self.table[1, 0, :] * self._apb
rr = np.sum(acd / self._n) / np.sum(cab / self._n)
return rr
@cache_readonly
def logodds_pooled_se(self):
# doc for cached attributes in init above
adns = np.sum(self._ad / self._n)
bcns = np.sum(self._bc / self._n)
lor_va = np.sum(self._apd * self._ad / self._n**2) / adns**2
mid = self._apd * self._bc / self._n**2
mid += (1 - self._apd / self._n) * self._ad / self._n
mid = np.sum(mid)
mid /= (adns * bcns)
lor_va += mid
lor_va += np.sum((1 - self._apd / self._n) * self._bc / self._n) / bcns**2
lor_va /= 2
lor_se = np.sqrt(lor_va)
return lor_se
def logodds_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled log odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lor = np.log(self.oddsratio_pooled)
lor_se = self.logodds_pooled_se
f = -stats.norm.ppf(alpha / 2)
lcb = lor - f * lor_se
ucb = lor + f * lor_se
return lcb, ucb
def oddsratio_pooled_confint(self, alpha=0.05, method="normal"):
"""
A confidence interval for the pooled odds ratio.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
interval.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
Returns
-------
lcb : float
The lower confidence limit.
ucb : float
The upper confidence limit.
"""
lcb, ucb = self.logodds_pooled_confint(alpha, method=method)
lcb = np.exp(lcb)
ucb = np.exp(ucb)
return lcb, ucb
def test_equal_odds(self, adjust=False):
"""
Test that all odds ratios are identical.
This is the 'Breslow-Day' testing procedure.
Parameters
----------
adjust : boolean
Use the 'Tarone' adjustment to achieve the chi^2
asymptotic distribution.
Returns
-------
A bunch containing the following attributes:
statistic : float
The chi^2 test statistic.
        pvalue : float
The p-value for the test.
"""
table = self.table
r = self.oddsratio_pooled
a = 1 - r
b = r * (self._apb + self._apc) + self._dma
c = -r * self._apb * self._apc
# Expected value of first cell
e11 = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
# Variance of the first cell
v11 = 1 / e11 + 1 / (self._apc - e11) + 1 / (self._apb - e11) + 1 / (self._dma + e11)
v11 = 1 / v11
statistic = np.sum((table[0, 0, :] - e11)**2 / v11)
if adjust:
adj = table[0, 0, :].sum() - e11.sum()
adj = adj**2
adj /= np.sum(v11)
statistic -= adj
pvalue = 1 - stats.chi2.cdf(statistic, table.shape[2] - 1)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
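    # Continuing the sketch above: the Breslow-Day procedure checks whether a
    # single odds ratio is plausible across strata, optionally with Tarone's
    # adjustment.
    #
    #     strat.test_equal_odds(adjust=True).pvalue
    #     strat.oddsratio_pooled    # Mantel-Haenszel pooled estimate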
def summary(self, alpha=0.05, float_format="%.3f", method="normal"):
"""
A summary of all the main results.
Parameters
----------
alpha : float
`1 - alpha` is the nominal coverage probability of the
confidence intervals.
float_format : string
Used for formatting numeric values in the summary.
method : string
The method for producing the confidence interval. Currently
must be 'normal' which uses the normal approximation.
"""
def fmt(x):
if type(x) is str:
return x
return float_format % x
co_lcb, co_ucb = self.oddsratio_pooled_confint(alpha=alpha, method=method)
clo_lcb, clo_ucb = self.logodds_pooled_confint(alpha=alpha, method=method)
headers = ["Estimate", "LCB", "UCB"]
stubs = ["Pooled odds", "Pooled log odds", "Pooled risk ratio", ""]
data = [[fmt(x) for x in [self.oddsratio_pooled, co_lcb, co_ucb]],
[fmt(x) for x in [self.logodds_pooled, clo_lcb, clo_ucb]],
[fmt(x) for x in [self.risk_pooled, "", ""]],
['', '', '']]
tab1 = iolib.SimpleTable(data, headers, stubs, data_aligns="r",
table_dec_above='')
headers = ["Statistic", "P-value", ""]
stubs = ["Test of OR=1", "Test constant OR"]
rslt1 = self.test_null_odds()
rslt2 = self.test_equal_odds()
data = [[fmt(x) for x in [rslt1.statistic, rslt1.pvalue, ""]],
[fmt(x) for x in [rslt2.statistic, rslt2.pvalue, ""]]]
tab2 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab2)
headers = ["", "", ""]
stubs = ["Number of tables", "Min n", "Max n", "Avg n", "Total n"]
ss = self.table.sum(0).sum(0)
data = [["%d" % self.table.shape[2], '', ''],
["%d" % min(ss), '', ''],
["%d" % max(ss), '', ''],
["%.0f" % np.mean(ss), '', ''],
                ["%d" % sum(ss), '', '']]
tab3 = iolib.SimpleTable(data, headers, stubs, data_aligns="r")
tab1.extend(tab3)
return tab1
def mcnemar(table, exact=True, correction=True):
"""
McNemar test of homogeneity.
Parameters
----------
table : array-like
A square contingency table.
exact : bool
If exact is true, then the binomial distribution will be used.
If exact is false, then the chisquare distribution will be
used, which is the approximation to the distribution of the
test statistic for large sample sizes.
correction : bool
If true, then a continuity correction is used for the chisquare
distribution (if exact is false.)
Returns
-------
A bunch with attributes:
statistic : float or int, array
The test statistic is the chisquare statistic if exact is
false. If the exact binomial distribution is used, then this
        contains min(n1, n2), where n1 and n2 are the two off-diagonal
        (discordant) cell counts.
pvalue : float or array
p-value of the null hypothesis of equal marginal distributions.
Notes
-----
This is a special case of Cochran's Q test, and of the homogeneity
test. The results when the chisquare distribution is used are
identical, except for continuity correction.
"""
table = _make_df_square(table)
table = np.asarray(table, dtype=np.float64)
n1, n2 = table[0, 1], table[1, 0]
if exact:
statistic = np.minimum(n1, n2)
# binom is symmetric with p=0.5
pvalue = stats.binom.cdf(statistic, n1 + n2, 0.5) * 2
pvalue = np.minimum(pvalue, 1) # limit to 1 if n1==n2
else:
corr = int(correction) # convert bool to 0 or 1
statistic = (np.abs(n1 - n2) - corr)**2 / (1. * (n1 + n2))
df = 1
pvalue = stats.chi2.sf(statistic, df)
b = _Bunch()
b.statistic = statistic
b.pvalue = pvalue
return b
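# A brief illustration (values invented): with the exact binomial version only
# the two discordant cells (here 8 and 3) drive the result.
#
#     rslt = mcnemar([[20, 8], [3, 15]], exact=True)
#     rslt.statistic, rslt.pvalue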
def cochrans_q(x, return_object=True):
"""
Cochran's Q test for identical binomial proportions.
Parameters
----------
x : array_like, 2d (N, k)
data with N cases and k variables
return_object : boolean
Return values as bunch instead of as individual values.
Returns
-------
Returns a bunch containing the following attributes, or the
individual values according to the value of `return_object`.
statistic : float
test statistic
pvalue : float
pvalue from the chisquare distribution
Notes
-----
Cochran's Q is a k-sample extension of the McNemar test. If there
are only two groups, then Cochran's Q test and the McNemar test
are equivalent.
The procedure tests that the probability of success is the same
for every group. The alternative hypothesis is that at least two
groups have a different probability of success.
In Wikipedia terminology, rows are blocks and columns are
treatments. The number of rows N, should be large for the
chisquare distribution to be a good approximation.
The Null hypothesis of the test is that all treatments have the
same effect.
References
----------
http://en.wikipedia.org/wiki/Cochran_test
SAS Manual for NPAR TESTS
"""
x = np.asarray(x, dtype=np.float64)
gruni = np.unique(x)
N, k = x.shape
count_row_success = (x == gruni[-1]).sum(1, float)
count_col_success = (x == gruni[-1]).sum(0, float)
count_row_ss = count_row_success.sum()
count_col_ss = count_col_success.sum()
assert count_row_ss == count_col_ss #just a calculation check
# From the SAS manual
q_stat = (k-1) * (k * np.sum(count_col_success**2) - count_col_ss**2) \
/ (k * count_row_ss - np.sum(count_row_success**2))
# Note: the denominator looks just like k times the variance of
# the columns
# Wikipedia uses a different, but equivalent expression
#q_stat = (k-1) * (k * np.sum(count_row_success**2) - count_row_ss**2) \
# / (k * count_col_ss - np.sum(count_col_success**2))
df = k - 1
pvalue = stats.chi2.sf(q_stat, df)
if return_object:
b = _Bunch()
b.statistic = q_stat
b.df = df
b.pvalue = pvalue
return b
return q_stat, pvalue, df
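# Illustrative call (invented 0/1 responses): rows are subjects/blocks and the
# k columns are the treatments being compared.
#
#     x = np.array([[1, 1, 0], [1, 0, 0], [1, 1, 1], [0, 1, 0], [1, 0, 1]])
#     cochrans_q(x).pvalue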
| bsd-3-clause |
Gui13/CouchPotatoServer | couchpotato/core/media/_base/media/main.py | 2 | 16591 | import traceback
from string import ascii_lowercase
from CodernityDB.database import RecordNotFound
from couchpotato import tryInt, get_db
from couchpotato.api import addApiView
from couchpotato.core.event import fireEvent, fireEventAsync, addEvent
from couchpotato.core.helpers.encoding import toUnicode
from couchpotato.core.helpers.variable import splitString, getImdb, getTitle
from couchpotato.core.logger import CPLog
from couchpotato.core.media import MediaBase
from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex
log = CPLog(__name__)
class MediaPlugin(MediaBase):
_database = {
'media': MediaIndex,
'media_search_title': TitleSearchIndex,
'media_status': MediaStatusIndex,
'media_by_type': MediaTypeIndex,
'media_title': TitleIndex,
'media_startswith': StartsWithIndex,
'media_children': MediaChildrenIndex,
}
def __init__(self):
addApiView('media.refresh', self.refresh, docs = {
            'desc': 'Refresh any media type by ID',
'params': {
'id': {'desc': 'Movie, Show, Season or Episode ID(s) you want to refresh.', 'type': 'int (comma separated)'},
}
})
addApiView('media.list', self.listView, docs = {
'desc': 'List media',
'params': {
'type': {'type': 'string', 'desc': 'Media type to filter on.'},
'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
'search': {'desc': 'Search movie title'},
},
'return': {'type': 'object', 'example': """{
'success': True,
'empty': bool, any movies returned or not,
'media': array, media found,
}"""}
})
addApiView('media.get', self.getView, docs = {
'desc': 'Get media by id',
'params': {
'id': {'desc': 'The id of the media'},
}
})
addApiView('media.delete', self.deleteView, docs = {
'desc': 'Delete a media from the wanted list',
'params': {
'id': {'desc': 'Media ID(s) you want to delete.', 'type': 'int (comma separated)'},
'delete_from': {'desc': 'Delete media from this page', 'type': 'string: all (default), wanted, manage'},
}
})
addApiView('media.available_chars', self.charView)
addEvent('app.load', self.addSingleRefreshView, priority = 100)
addEvent('app.load', self.addSingleListView, priority = 100)
addEvent('app.load', self.addSingleCharView, priority = 100)
addEvent('app.load', self.addSingleDeleteView, priority = 100)
addEvent('media.get', self.get)
addEvent('media.with_status', self.withStatus)
addEvent('media.with_identifiers', self.withIdentifiers)
addEvent('media.list', self.list)
addEvent('media.delete', self.delete)
addEvent('media.restatus', self.restatus)
def refresh(self, id = '', **kwargs):
handlers = []
ids = splitString(id)
for x in ids:
refresh_handler = self.createRefreshHandler(x)
if refresh_handler:
handlers.append(refresh_handler)
fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids})
fireEventAsync('schedule.queue', handlers = handlers)
return {
'success': True,
}
def createRefreshHandler(self, media_id):
try:
media = get_db().get('id', media_id)
event = '%s.update_info' % media.get('type')
def handler():
fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id))
return handler
except:
log.error('Refresh handler for non existing media: %s', traceback.format_exc())
def addSingleRefreshView(self):
for media_type in fireEvent('media.types', merge = True):
addApiView('%s.refresh' % media_type, self.refresh)
def get(self, media_id):
try:
db = get_db()
imdb_id = getImdb(str(media_id))
if imdb_id:
media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
else:
media = db.get('id', media_id)
if media:
# Attach category
try: media['category'] = db.get('id', media.get('category_id'))
except: pass
media['releases'] = fireEvent('release.for_media', media['_id'], single = True)
return media
except RecordNotFound:
log.error('Media with id "%s" not found', media_id)
except:
raise
def getView(self, id = None, **kwargs):
media = self.get(id) if id else None
return {
'success': media is not None,
'media': media,
}
def withStatus(self, status, with_doc = True):
db = get_db()
status = list(status if isinstance(status, (list, tuple)) else [status])
for s in status:
for ms in db.get_many('media_status', s, with_doc = with_doc):
yield ms['doc'] if with_doc else ms
def withIdentifiers(self, identifiers, with_doc = False):
db = get_db()
for x in identifiers:
try:
media = db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc)
return media
except:
pass
log.debug('No media found with identifiers: %s', identifiers)
def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, starts_with = None, search = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = list(all_media_ids)
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Add search filters
if starts_with:
filter_by['starts_with'] = set()
starts_with = toUnicode(starts_with.lower())[0]
starts_with = starts_with if starts_with in ascii_lowercase else '#'
filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]
# Filter with search query
if search:
filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]
if status_or and 'media_status' in filter_by and 'release_status' in filter_by:
filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status'])
del filter_by['media_status']
del filter_by['release_status']
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
total_count = len(media_ids)
if total_count == 0:
return 0, []
offset = 0
limit = -1
if limit_offset:
splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
limit = tryInt(splt[0])
            offset = tryInt(0 if len(splt) == 1 else splt[1])
# List movies based on title order
medias = []
for m in db.all('media_title'):
media_id = m['_id']
if media_id not in media_ids: continue
if offset > 0:
offset -= 1
continue
media = fireEvent('media.get', media_id, single = True)
# Merge releases with movie dict
medias.append(media)
# remove from media ids
media_ids.remove(media_id)
if len(media_ids) == 0 or len(medias) == limit: break
return total_count, medias
def listView(self, **kwargs):
total_movies, movies = self.list(
types = splitString(kwargs.get('type')),
status = splitString(kwargs.get('status')),
release_status = splitString(kwargs.get('release_status')),
status_or = kwargs.get('status_or') is not None,
limit_offset = kwargs.get('limit_offset'),
starts_with = kwargs.get('starts_with'),
search = kwargs.get('search')
)
return {
'success': True,
'empty': len(movies) == 0,
'total': total_movies,
'movies': movies,
}
def addSingleListView(self):
for media_type in fireEvent('media.types', merge = True):
def tempList(*args, **kwargs):
return self.listView(types = media_type, **kwargs)
addApiView('%s.list' % media_type, tempList)
def availableChars(self, types = None, status = None, release_status = None):
db = get_db()
# Make a list from string
if status and not isinstance(status, (list, tuple)):
status = [status]
if release_status and not isinstance(release_status, (list, tuple)):
release_status = [release_status]
if types and not isinstance(types, (list, tuple)):
types = [types]
# query media ids
if types:
all_media_ids = set()
for media_type in types:
all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
else:
all_media_ids = set([x['_id'] for x in db.all('media')])
media_ids = all_media_ids
filter_by = {}
# Filter on movie status
if status and len(status) > 0:
filter_by['media_status'] = set()
for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
filter_by['media_status'].add(media_status.get('_id'))
# Filter on release status
if release_status and len(release_status) > 0:
filter_by['release_status'] = set()
for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True):
filter_by['release_status'].add(release_status.get('media_id'))
# Filter by combining ids
for x in filter_by:
media_ids = [n for n in media_ids if n in filter_by[x]]
chars = set()
for x in db.all('media_startswith'):
if x['_id'] in media_ids:
chars.add(x['key'])
if len(chars) == 25:
break
return list(chars)
def charView(self, **kwargs):
type = splitString(kwargs.get('type', 'movie'))
status = splitString(kwargs.get('status', None))
release_status = splitString(kwargs.get('release_status', None))
chars = self.availableChars(type, status, release_status)
return {
'success': True,
'empty': len(chars) == 0,
'chars': chars,
}
def addSingleCharView(self):
for media_type in fireEvent('media.types', merge = True):
def tempChar(*args, **kwargs):
return self.charView(types = media_type, **kwargs)
addApiView('%s.available_chars' % media_type, tempChar)
def delete(self, media_id, delete_from = None):
try:
db = get_db()
media = db.get('id', media_id)
if media:
deleted = False
media_releases = fireEvent('release.for_media', media['_id'], single = True)
if delete_from == 'all':
# Delete connected releases
for release in media_releases:
db.delete(release)
db.delete(media)
deleted = True
else:
total_releases = len(media_releases)
total_deleted = 0
new_media_status = None
for release in media_releases:
if delete_from in ['wanted', 'snatched', 'late']:
if release.get('status') != 'done':
db.delete(release)
total_deleted += 1
new_media_status = 'done'
elif delete_from == 'manage':
if release.get('status') == 'done':
db.delete(release)
total_deleted += 1
if (total_releases == total_deleted and media['status'] != 'active') or (delete_from == 'wanted' and media['status'] == 'active') or (not new_media_status and delete_from == 'late'):
db.delete(media)
deleted = True
elif new_media_status:
media['status'] = new_media_status
db.update(media)
else:
fireEvent('media.restatus', media.get('_id'), single = True)
if deleted:
fireEvent('notify.frontend', type = 'media.deleted', data = media)
except:
log.error('Failed deleting media: %s', traceback.format_exc())
return True
def deleteView(self, id = '', **kwargs):
ids = splitString(id)
for media_id in ids:
self.delete(media_id, delete_from = kwargs.get('delete_from', 'all'))
return {
'success': True,
}
def addSingleDeleteView(self):
for media_type in fireEvent('media.types', merge = True):
def tempDelete(*args, **kwargs):
return self.deleteView(types = media_type, *args, **kwargs)
addApiView('%s.delete' % media_type, tempDelete)
def restatus(self, media_id):
try:
db = get_db()
m = db.get('id', media_id)
previous_status = m['status']
log.debug('Changing status for %s', getTitle(m))
if not m['profile_id']:
m['status'] = 'done'
else:
move_to_wanted = True
profile = db.get('id', m['profile_id'])
media_releases = fireEvent('release.for_media', m['_id'], single = True)
for q_identifier in profile['qualities']:
index = profile['qualities'].index(q_identifier)
for release in media_releases:
if q_identifier == release['quality'] and (release.get('status') == 'done' and profile['finish'][index]):
move_to_wanted = False
m['status'] = 'active' if move_to_wanted else 'done'
# Only update when status has changed
if previous_status != m['status']:
db.update(m)
return True
except:
log.error('Failed restatus: %s', traceback.format_exc())
| gpl-3.0 |
gorcz/security_monkey | security_monkey/watchers/iam/iam_group.py | 2 | 6319 | # Copyright 2014 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.iam.iam_group
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Patrick Kelley <[email protected]> @monkeysecurity
"""
from security_monkey.watcher import Watcher
from security_monkey.watcher import ChangeItem
from security_monkey.exceptions import InvalidAWSJSON
from security_monkey.exceptions import BotoConnectionIssue
from security_monkey import app
import json
import urllib
def all_managed_policies(conn):
managed_policies = {}
    for policy in conn.policies.all():
        for attached_group in policy.attached_groups.all():
            # Build a plain dict per attachment without rebinding the loop
            # variable, which would break on the next attached group.
            policy_dict = {
                "name": policy.policy_name,
                "arn": policy.arn,
                "version": policy.default_version_id
            }
            if attached_group.arn not in managed_policies:
                managed_policies[attached_group.arn] = [policy_dict]
            else:
                managed_policies[attached_group.arn].append(policy_dict)
return managed_policies
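# A hedged illustration (not from the original source) of the mapping shape
# this helper returns, keyed by the ARN of each group with attached managed
# policies; the account id and policy below are placeholders:
#
#     {"arn:aws:iam::123456789012:group/devs": [
#         {"name": "ReadOnlyAccess",
#          "arn": "arn:aws:iam::aws:policy/ReadOnlyAccess",
#          "version": "v1"}]}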
class IAMGroup(Watcher):
index = 'iamgroup'
i_am_singular = 'IAM Group'
i_am_plural = 'IAM Groups'
def __init__(self, accounts=None, debug=False):
super(IAMGroup, self).__init__(accounts=accounts, debug=debug)
def get_all_groups(self, conn):
all_groups = []
marker = None
while True:
groups_response = self.wrap_aws_rate_limited_call(
conn.get_all_groups,
marker=marker
)
all_groups.extend(groups_response.groups)
if hasattr(groups_response, 'marker'):
marker = groups_response.marker
else:
break
return all_groups
def get_all_group_policies(self, conn, group_name):
all_group_policies = []
marker = None
while True:
group_policies = self.wrap_aws_rate_limited_call(
conn.get_all_group_policies,
group_name,
marker=marker
)
all_group_policies.extend(group_policies.policy_names)
if hasattr(group_policies, 'marker'):
marker = group_policies.marker
else:
break
return all_group_policies
def get_all_group_users(self, conn, group_name):
all_group_users = []
marker = None
while True:
group_users_response = self.wrap_aws_rate_limited_call(
conn.get_group,
group_name,
marker=marker
)
all_group_users.extend(group_users_response.users)
if hasattr(group_users_response, 'marker'):
marker = group_users_response.marker
else:
break
return all_group_users
def slurp(self):
"""
:returns: item_list - list of IAM Groups.
:returns: exception_map - A dict where the keys are a tuple containing the
location of the exception and the value is the actual exception
"""
self.prep_for_slurp()
item_list = []
exception_map = {}
from security_monkey.common.sts_connect import connect
for account in self.accounts:
try:
iam_b3 = connect(account, 'iam_boto3')
managed_policies = all_managed_policies(iam_b3)
iam = connect(account, 'iam')
groups = self.get_all_groups(iam)
except Exception as e:
exc = BotoConnectionIssue(str(e), 'iamgroup', account, None)
self.slurp_exception((self.index, account, 'universal'), exc, exception_map)
continue
for group in groups:
app.logger.debug("Slurping %s (%s) from %s" % (self.i_am_singular, group.group_name, account))
if self.check_ignore_list(group.group_name):
continue
item_config = {
'group': dict(group),
'grouppolicies': {},
'users': {}
}
                if group.arn in managed_policies:
item_config['managed_policies'] = managed_policies.get(group.arn)
### GROUP POLICIES ###
group_policies = self.get_all_group_policies(iam, group.group_name)
for policy_name in group_policies:
policy = self.wrap_aws_rate_limited_call(iam.get_group_policy, group.group_name, policy_name)
policy = policy.policy_document
policy = urllib.unquote(policy)
                    try:
                        policydict = json.loads(policy)
                    except:
                        exc = InvalidAWSJSON(policy)
                        self.slurp_exception((self.index, account, 'universal', group.group_name), exc, exception_map)
                        # Skip this policy; otherwise `policydict` would be
                        # undefined below when the JSON cannot be parsed.
                        continue
                    item_config['grouppolicies'][policy_name] = dict(policydict)
### GROUP USERS ###
group_users = self.get_all_group_users(iam, group['group_name'])
for user in group_users:
item_config['users'][user.arn] = user.user_name
item = IAMGroupItem(account=account, name=group.group_name, config=item_config)
item_list.append(item)
return item_list, exception_map
class IAMGroupItem(ChangeItem):
def __init__(self, account=None, name=None, config={}):
super(IAMGroupItem, self).__init__(
index=IAMGroup.index,
region='universal',
account=account,
name=name,
new_config=config)
| apache-2.0 |
andmos/ansible | test/units/modules/network/netvisor/test_pn_stp.py | 9 | 2167 | # Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.netvisor import pn_stp
from units.modules.utils import set_module_args
from .nvos_module import TestNvosModule, load_fixture
class TestStpModule(TestNvosModule):
module = pn_stp
def setUp(self):
self.mock_run_nvos_commands = patch('ansible.modules.network.netvisor.pn_stp.run_cli')
self.run_nvos_commands = self.mock_run_nvos_commands.start()
def tearDown(self):
self.mock_run_nvos_commands.stop()
def run_cli_patch(self, module, cli, state_map):
if state_map['update'] == 'stp-modify':
results = dict(
changed=True,
cli_cmd=cli
)
module.exit_json(**results)
def load_fixtures(self, commands=None, state=None, transport='cli'):
self.run_nvos_commands.side_effect = self.run_cli_patch
def test_stp_modify_t1(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_hello_time': '3',
'pn_stp_mode': 'rstp', 'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 stp-modify hello-time 3 root-guard-wait-time 20 mst-max-hops 20 max-age 20 '
expected_cmd += 'stp-mode rstp forwarding-delay 15 bridge-priority 32768'
self.assertEqual(result['cli_cmd'], expected_cmd)
def test_stp_modify_t2(self):
set_module_args({'pn_cliswitch': 'sw01', 'pn_root_guard_wait_time': '50',
'state': 'update'})
result = self.execute_module(changed=True, state='update')
expected_cmd = '/usr/bin/cli --quiet -e --no-login-prompt switch sw01 stp-modify hello-time 2 root-guard-wait-time 50 mst-max-hops 20 '
expected_cmd += 'max-age 20 forwarding-delay 15 bridge-priority 32768'
self.assertEqual(result['cli_cmd'], expected_cmd)
| gpl-3.0 |
timoschwarzer/blendworks | BlendWorks Server/python/Lib/shelve.py | 83 | 8428 | """Manage shelves of pickled objects.
A "shelf" is a persistent, dictionary-like object. The difference
with dbm databases is that the values (not the keys!) in a shelf can
be essentially arbitrary Python objects -- anything that the "pickle"
module can handle. This includes most class instances, recursive data
types, and objects containing lots of shared sub-objects. The keys
are ordinary strings.
To summarize the interface (key is a string, data is an arbitrary
object):
import shelve
d = shelve.open(filename) # open, with (g)dbm filename -- no suffix
d[key] = data # store data at key (overwrites old data if
# using an existing key)
data = d[key] # retrieve a COPY of the data at key (raise
# KeyError if no such key) -- NOTE that this
# access returns a *copy* of the entry!
del d[key] # delete data stored at key (raises KeyError
# if no such key)
flag = key in d # true if the key exists
list = d.keys() # a list of all existing keys (slow!)
d.close() # close it
Dependent on the implementation, closing a persistent dictionary may
or may not be necessary to flush changes to disk.
Normally, d[key] returns a COPY of the entry. This needs care when
mutable entries are mutated: for example, if d[key] is a list,
d[key].append(anitem)
does NOT modify the entry d[key] itself, as stored in the persistent
mapping -- it only modifies the copy, which is then immediately
discarded, so that the append has NO effect whatsoever. To append an
item to d[key] in a way that will affect the persistent mapping, use:
data = d[key]
data.append(anitem)
d[key] = data
To avoid the problem with mutable entries, you may pass the keyword
argument writeback=True in the call to shelve.open. When you use:
d = shelve.open(filename, writeback=True)
then d keeps a cache of all entries you access, and writes them all back
to the persistent mapping when you call d.close(). This ensures that
such usage as d[key].append(anitem) works as intended.
However, using keyword argument writeback=True may consume vast amount
of memory for the cache, and it may make d.close() very slow, if you
access many of d's entries after opening it in this way: d has no way to
check which of the entries you access are mutable and/or which ones you
actually mutate, so it must cache, and write back at close, all of the
entries that you access. You can call d.sync() to write back all the
entries in the cache, and empty the cache (d.sync() also synchronizes
the persistent dictionary on disk, if feasible).
"""
from pickle import Pickler, Unpickler
from io import BytesIO
import collections
__all__ = ["Shelf", "BsdDbShelf", "DbfilenameShelf", "open"]
class _ClosedDict(collections.MutableMapping):
'Marker for a closed dict. Access attempts raise a ValueError.'
def closed(self, *args):
raise ValueError('invalid operation on closed shelf')
__iter__ = __len__ = __getitem__ = __setitem__ = __delitem__ = keys = closed
def __repr__(self):
return '<Closed Dictionary>'
class Shelf(collections.MutableMapping):
"""Base class for shelf implementations.
This is initialized with a dictionary-like object.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
self.dict = dict
if protocol is None:
protocol = 3
self._protocol = protocol
self.writeback = writeback
self.cache = {}
self.keyencoding = keyencoding
def __iter__(self):
for k in self.dict.keys():
yield k.decode(self.keyencoding)
def __len__(self):
return len(self.dict)
def __contains__(self, key):
return key.encode(self.keyencoding) in self.dict
def get(self, key, default=None):
if key.encode(self.keyencoding) in self.dict:
return self[key]
return default
def __getitem__(self, key):
try:
value = self.cache[key]
except KeyError:
f = BytesIO(self.dict[key.encode(self.keyencoding)])
value = Unpickler(f).load()
if self.writeback:
self.cache[key] = value
return value
def __setitem__(self, key, value):
if self.writeback:
self.cache[key] = value
f = BytesIO()
p = Pickler(f, self._protocol)
p.dump(value)
self.dict[key.encode(self.keyencoding)] = f.getvalue()
def __delitem__(self, key):
del self.dict[key.encode(self.keyencoding)]
try:
del self.cache[key]
except KeyError:
pass
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
self.sync()
try:
self.dict.close()
except AttributeError:
pass
# Catch errors that may happen when close is called from __del__
# because CPython is in interpreter shutdown.
try:
self.dict = _ClosedDict()
except (NameError, TypeError):
self.dict = None
def __del__(self):
if not hasattr(self, 'writeback'):
# __init__ didn't succeed, so don't bother closing
# see http://bugs.python.org/issue1339007 for details
return
self.close()
def sync(self):
if self.writeback and self.cache:
self.writeback = False
for key, entry in self.cache.items():
self[key] = entry
self.writeback = True
self.cache = {}
if hasattr(self.dict, 'sync'):
self.dict.sync()
class BsdDbShelf(Shelf):
"""Shelf implementation using the "BSD" db interface.
This adds methods first(), next(), previous(), last() and
set_location() that have no counterpart in [g]dbm databases.
The actual database must be opened using one of the "bsddb"
modules "open" routines (i.e. bsddb.hashopen, bsddb.btopen or
bsddb.rnopen) and passed to the constructor.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, dict, protocol=None, writeback=False,
keyencoding="utf-8"):
Shelf.__init__(self, dict, protocol, writeback, keyencoding)
def set_location(self, key):
(key, value) = self.dict.set_location(key)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def next(self):
(key, value) = next(self.dict)
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def previous(self):
(key, value) = self.dict.previous()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def first(self):
(key, value) = self.dict.first()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
def last(self):
(key, value) = self.dict.last()
f = BytesIO(value)
return (key.decode(self.keyencoding), Unpickler(f).load())
class DbfilenameShelf(Shelf):
"""Shelf implementation using the "dbm" generic dbm interface.
This is initialized with the filename for the dbm database.
See the module's __doc__ string for an overview of the interface.
"""
def __init__(self, filename, flag='c', protocol=None, writeback=False):
import dbm
Shelf.__init__(self, dbm.open(filename, flag), protocol, writeback)
def open(filename, flag='c', protocol=None, writeback=False):
"""Open a persistent dictionary for reading and writing.
The filename parameter is the base filename for the underlying
database. As a side-effect, an extension may be added to the
filename and more than one file may be created. The optional flag
parameter has the same interpretation as the flag parameter of
dbm.open(). The optional protocol parameter specifies the
version of the pickle protocol (0, 1, or 2).
See the module's __doc__ string for an overview of the interface.
"""
return DbfilenameShelf(filename, flag, protocol, writeback)
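# A short hedged example of the writeback behavior described in the module
# docstring (assumes `import shelve`; the filename and key are placeholders):
#
#     with shelve.open('scratch_shelf', writeback=True) as d:
#         d['scores'] = [1, 2]
#         d['scores'].append(3)   # cached; flushed on sync()/close()
#     # Without writeback=True, the append above would modify only a copy
#     # and be lost.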
| gpl-2.0 |
danakj/chromium | third_party/closure_linter/closure_linter/tokenutil_test.py | 109 | 7678 | #!/usr/bin/env python
#
# Copyright 2012 The Closure Linter Authors. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for the scopeutil module."""
# Allow non-Google copyright
# pylint: disable=g-bad-file-header
__author__ = ('[email protected] (Nathan Naze)')
import unittest as googletest
from closure_linter import ecmametadatapass
from closure_linter import javascripttokens
from closure_linter import testutil
from closure_linter import tokenutil
class FakeToken(object):
pass
class TokenUtilTest(googletest.TestCase):
def testGetTokenRange(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.next = b
b.next = c
c.next = d
self.assertEquals([a, b, c, d], tokenutil.GetTokenRange(a, d))
# This is an error as e does not come after a in the token chain.
self.assertRaises(Exception, lambda: tokenutil.GetTokenRange(a, e))
def testTokensToString(self):
a = FakeToken()
b = FakeToken()
c = FakeToken()
d = FakeToken()
e = FakeToken()
a.string = 'aaa'
b.string = 'bbb'
c.string = 'ccc'
d.string = 'ddd'
e.string = 'eee'
a.line_number = 5
b.line_number = 6
c.line_number = 6
d.line_number = 10
e.line_number = 11
self.assertEquals(
'aaa\nbbbccc\n\n\n\nddd\neee',
tokenutil.TokensToString([a, b, c, d, e]))
self.assertEquals(
'ddd\neee\naaa\nbbbccc',
tokenutil.TokensToString([d, e, a, b, c]),
'Neighboring tokens not in line_number order should have a newline '
'between them.')
def testGetPreviousCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
None,
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start1.',
tokenutil.GetPreviousCodeToken(_GetTokenStartingWith('end1')).string)
def testGetNextCodeToken(self):
tokens = testutil.TokenizeSource("""
start1. // comment
/* another comment */
end1
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'end1',
tokenutil.GetNextCodeToken(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetNextCodeToken(_GetTokenStartingWith('end1')))
def testGetIdentifierStart(self):
tokens = testutil.TokenizeSource("""
start1 . // comment
prototype. /* another comment */
end1
['edge'][case].prototype.
end2 = function() {}
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end1')).string)
self.assertEquals(
'start1',
tokenutil.GetIdentifierStart(_GetTokenStartingWith('start1')).string)
self.assertEquals(
None,
tokenutil.GetIdentifierStart(_GetTokenStartingWith('end2')))
def testInsertTokenBefore(self):
self.AssertInsertTokenAfterBefore(False)
def testInsertTokenAfter(self):
self.AssertInsertTokenAfterBefore(True)
def AssertInsertTokenAfterBefore(self, after):
new_token = javascripttokens.JavaScriptToken(
'a', javascripttokens.JavaScriptTokenType.IDENTIFIER, 1, 1)
existing_token1 = javascripttokens.JavaScriptToken(
'var', javascripttokens.JavaScriptTokenType.KEYWORD, 1, 1)
existing_token1.start_index = 0
existing_token1.metadata = ecmametadatapass.EcmaMetaData()
existing_token2 = javascripttokens.JavaScriptToken(
' ', javascripttokens.JavaScriptTokenType.WHITESPACE, 1, 1)
existing_token2.start_index = 3
existing_token2.metadata = ecmametadatapass.EcmaMetaData()
existing_token2.metadata.last_code = existing_token1
existing_token1.next = existing_token2
existing_token2.previous = existing_token1
if after:
tokenutil.InsertTokenAfter(new_token, existing_token1)
else:
tokenutil.InsertTokenBefore(new_token, existing_token2)
self.assertEquals(existing_token1, new_token.previous)
self.assertEquals(existing_token2, new_token.next)
self.assertEquals(new_token, existing_token1.next)
self.assertEquals(new_token, existing_token2.previous)
self.assertEquals(existing_token1, new_token.metadata.last_code)
self.assertEquals(new_token, existing_token2.metadata.last_code)
self.assertEquals(0, existing_token1.start_index)
self.assertEquals(3, new_token.start_index)
self.assertEquals(4, existing_token2.start_index)
def testGetIdentifierForToken(self):
tokens = testutil.TokenizeSource("""
start1.abc.def.prototype.
onContinuedLine
(start2.abc.def
.hij.klm
.nop)
start3.abc.def
.hij = function() {};
// An absurd multi-liner.
start4.abc.def.
hij.
klm = function() {};
start5 . aaa . bbb . ccc
shouldntBePartOfThePreviousSymbol
start6.abc.def ghi.shouldntBePartOfThePreviousSymbol
var start7 = 42;
function start8() {
}
start9.abc. // why is there a comment here?
def /* another comment */
shouldntBePart
start10.abc // why is there a comment here?
.def /* another comment */
shouldntBePart
start11.abc. middle1.shouldNotBeIdentifier
""")
def _GetTokenStartingWith(token_starts_with):
for t in tokens:
if t.string.startswith(token_starts_with):
return t
self.assertEquals(
'start1.abc.def.prototype.onContinuedLine',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start1')))
self.assertEquals(
'start2.abc.def.hij.klm.nop',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start2')))
self.assertEquals(
'start3.abc.def.hij',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start3')))
self.assertEquals(
'start4.abc.def.hij.klm',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start4')))
self.assertEquals(
'start5.aaa.bbb.ccc',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start5')))
self.assertEquals(
'start6.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start6')))
self.assertEquals(
'start7',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start7')))
self.assertEquals(
'start8',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start8')))
self.assertEquals(
'start9.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start9')))
self.assertEquals(
'start10.abc.def',
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('start10')))
self.assertIsNone(
tokenutil.GetIdentifierForToken(_GetTokenStartingWith('middle1')))
if __name__ == '__main__':
googletest.main()
| bsd-3-clause |
Qalthos/ansible | lib/ansible/modules/storage/netapp/na_elementsw_network_interfaces.py | 44 | 10836 | #!/usr/bin/python
# (c) 2018, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or
# https://www.gnu.org/licenses/gpl-3.0.txt)
'''
Element Software Node Network Interfaces - Bond 1G and 10G configuration
'''
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
module: na_elementsw_network_interfaces
short_description: NetApp Element Software Configure Node Network Interfaces
extends_documentation_fragment:
- netapp.solidfire
version_added: '2.7'
author: NetApp Ansible Team (@carchi8py) <[email protected]>
description:
- Configure Element SW Node Network Interfaces for Bond 1G and 10G IP address.
options:
method:
description:
- Type of Method used to configure the interface.
- method depends on other settings such as the use of a static IP address, which will change the method to static.
- loopback - Used to define the IPv4 loopback interface.
- manual - Used to define interfaces for which no configuration is done by default.
- dhcp - May be used to obtain an IP address via DHCP.
- static - Used to define Ethernet interfaces with statically allocated IPv4 addresses.
choices: ['loopback', 'manual', 'dhcp', 'static']
required: true
ip_address_1g:
description:
- IP address for the 1G network.
required: true
ip_address_10g:
description:
- IP address for the 10G network.
required: true
subnet_1g:
description:
- 1GbE Subnet Mask.
required: true
subnet_10g:
description:
- 10GbE Subnet Mask.
required: true
gateway_address_1g:
description:
- Router network address to send packets out of the local network.
required: true
gateway_address_10g:
description:
- Router network address to send packets out of the local network.
required: true
mtu_1g:
description:
    - Maximum Transmission Unit for 1GbE, the largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
mtu_10g:
description:
    - Maximum Transmission Unit for 10GbE, the largest packet size that a network protocol can transmit.
- Must be greater than or equal to 1500 bytes.
default: '1500'
dns_nameservers:
description:
- List of addresses for domain name servers.
dns_search_domains:
description:
- List of DNS search domains.
bond_mode_1g:
description:
- Bond mode for 1GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
bond_mode_10g:
description:
- Bond mode for 10GbE configuration.
choices: ['ActivePassive', 'ALB', 'LACP']
default: 'ActivePassive'
lacp_1g:
description:
    - Link Aggregation Control Protocol, useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
lacp_10g:
description:
    - Link Aggregation Control Protocol, useful only if LACP is selected as the Bond Mode.
- Slow - Packets are transmitted at 30 second intervals.
- Fast - Packets are transmitted in 1 second intervals.
choices: ['Fast', 'Slow']
default: 'Slow'
virtual_network_tag:
description:
- This is the primary network tag. All nodes in a cluster have the same VLAN tag.
'''
EXAMPLES = """
- name: Set Node network interfaces configuration for Bond 1G and 10G properties
tags:
- elementsw_network_interfaces
na_elementsw_network_interfaces:
hostname: "{{ elementsw_hostname }}"
username: "{{ elementsw_username }}"
password: "{{ elementsw_password }}"
method: static
ip_address_1g: 10.226.109.68
ip_address_10g: 10.226.201.72
subnet_1g: 255.255.255.0
subnet_10g: 255.255.255.0
gateway_address_1g: 10.193.139.1
gateway_address_10g: 10.193.140.1
mtu_1g: 1500
mtu_10g: 9000
bond_mode_1g: ActivePassive
bond_mode_10g: LACP
lacp_10g: Fast
"""
RETURN = """
msg:
description: Success message
returned: success
type: str
"""
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
import ansible.module_utils.netapp as netapp_utils
HAS_SF_SDK = netapp_utils.has_sf_sdk()
try:
from solidfire.models import Network, NetworkConfig
HAS_SF_SDK = True
except Exception:
HAS_SF_SDK = False
class ElementSWNetworkInterfaces(object):
"""
Element Software Network Interfaces - Bond 1G and 10G Network configuration
"""
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(
method=dict(type='str', required=True, choices=['loopback', 'manual', 'dhcp', 'static']),
ip_address_1g=dict(type='str', required=True),
ip_address_10g=dict(type='str', required=True),
subnet_1g=dict(type='str', required=True),
subnet_10g=dict(type='str', required=True),
gateway_address_1g=dict(type='str', required=True),
gateway_address_10g=dict(type='str', required=True),
mtu_1g=dict(type='str', default='1500'),
mtu_10g=dict(type='str', default='1500'),
dns_nameservers=dict(type='list'),
dns_search_domains=dict(type='list'),
bond_mode_1g=dict(type='str', default='ActivePassive', choices=['ActivePassive', 'ALB', 'LACP']),
bond_mode_10g=dict(type='str', default='ActivePassive', choices=['ActivePassive', 'ALB', 'LACP']),
lacp_1g=dict(type='str', default='Slow', choices=['Fast', 'Slow']),
lacp_10g=dict(type='str', default='Slow', choices=['Fast', 'Slow']),
virtual_network_tag=dict(type='str'),
)
self.module = AnsibleModule(
argument_spec=self.argument_spec,
supports_check_mode=True,
)
input_params = self.module.params
self.method = input_params['method']
self.ip_address_1g = input_params['ip_address_1g']
self.ip_address_10g = input_params['ip_address_10g']
self.subnet_1g = input_params['subnet_1g']
self.subnet_10g = input_params['subnet_10g']
self.gateway_address_1g = input_params['gateway_address_1g']
self.gateway_address_10g = input_params['gateway_address_10g']
self.mtu_1g = input_params['mtu_1g']
self.mtu_10g = input_params['mtu_10g']
self.dns_nameservers = input_params['dns_nameservers']
self.dns_search_domains = input_params['dns_search_domains']
self.bond_mode_1g = input_params['bond_mode_1g']
self.bond_mode_10g = input_params['bond_mode_10g']
self.lacp_1g = input_params['lacp_1g']
self.lacp_10g = input_params['lacp_10g']
self.virtual_network_tag = input_params['virtual_network_tag']
if HAS_SF_SDK is False:
self.module.fail_json(msg="Unable to import the SolidFire Python SDK")
else:
self.sfe = netapp_utils.create_sf_connection(module=self.module, port=442)
def set_network_config(self):
"""
set network configuration
"""
try:
self.sfe.set_network_config(network=self.network_object)
except Exception as exception_object:
self.module.fail_json(msg='Error network setting for node %s' % (to_native(exception_object)),
exception=traceback.format_exc())
def get_network_params_object(self):
"""
Get Element SW Network object
:description: get Network object
:return: NetworkConfig object
:rtype: object(NetworkConfig object)
"""
try:
bond_1g_network = NetworkConfig(method=self.method,
address=self.ip_address_1g,
netmask=self.subnet_1g,
gateway=self.gateway_address_1g,
mtu=self.mtu_1g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_1g,
bond_lacp_rate=self.lacp_1g,
virtual_network_tag=self.virtual_network_tag)
bond_10g_network = NetworkConfig(method=self.method,
address=self.ip_address_10g,
netmask=self.subnet_10g,
gateway=self.gateway_address_10g,
mtu=self.mtu_10g,
dns_nameservers=self.dns_nameservers,
dns_search=self.dns_search_domains,
bond_mode=self.bond_mode_10g,
bond_lacp_rate=self.lacp_10g,
virtual_network_tag=self.virtual_network_tag)
network_object = Network(bond1_g=bond_1g_network,
bond10_g=bond_10g_network)
return network_object
except Exception as e:
self.module.fail_json(msg='Error with setting up network object for node 1G and 10G configuration : %s' % to_native(e),
exception=to_native(e))
def apply(self):
"""
Check connection and initialize node with cluster ownership
"""
changed = False
result_message = None
self.network_object = self.get_network_params_object()
if self.network_object is not None:
self.set_network_config()
changed = True
else:
result_message = "Skipping changes, No change requested"
self.module.exit_json(changed=changed, msg=result_message)
def main():
"""
Main function
"""
elementsw_network_interfaces = ElementSWNetworkInterfaces()
elementsw_network_interfaces.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
csmart/jockey-yum | setup.py | 1 | 1204 | #!/usr/bin/env python
# (c) 2007 Canonical Ltd.
# Author: Martin Pitt <[email protected]>
# This script needs python-distutils-extra, an extension to the standard
# distutils which provides i18n, icon support, etc.
# https://launchpad.net/python-distutils-extra
from glob import glob
from distutils.version import StrictVersion
try:
import DistUtilsExtra.auto
except ImportError:
import sys
print >> sys.stderr, 'To build Jockey you need https://launchpad.net/python-distutils-extra'
sys.exit(1)
assert StrictVersion(DistUtilsExtra.auto.__version__) >= '2.4', 'needs DistUtilsExtra.auto >= 2.4'
DistUtilsExtra.auto.setup(
name='jockey',
version='0.9.3',
description='UI for managing third-party and non-free drivers',
url='https://launchpad.net/jockey',
license='GPL v2 or later',
author='Martin Pitt',
author_email='[email protected]',
data_files = [
('share/jockey', ['backend/jockey-backend']),
('share/jockey', ['gtk/jockey-gtk.ui']), # bug in DistUtilsExtra.auto 2.2
('share/jockey', glob('kde/*.ui')), # don't use pykdeuic4
],
scripts = ['gtk/jockey-gtk', 'kde/jockey-kde', 'text/jockey-text'],
)
| gpl-2.0 |
MarsSnail/gyp_tools | pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
"""Visual Studio XML tool file writer."""
def __init__(self, tool_file_path, name):
"""Initializes the tool file.
Args:
tool_file_path: Path to the tool file.
name: Name of the tool file.
"""
self.tool_file_path = tool_file_path
self.name = name
self.rules_section = ['Rules']
def AddCustomBuildRule(self, name, cmd, description,
additional_dependencies,
outputs, extensions):
"""Adds a rule to the tool file.
Args:
name: Name of the rule.
description: Description of the rule.
cmd: Command line of the rule.
additional_dependencies: other files which may trigger the rule.
outputs: outputs of the rule.
extensions: extensions handled by the rule.
"""
rule = ['CustomBuildRule',
{'Name': name,
'ExecutionDescription': description,
'CommandLine': cmd,
'Outputs': ';'.join(outputs),
'FileExtensions': ';'.join(extensions),
'AdditionalDependencies':
';'.join(additional_dependencies)
}]
self.rules_section.append(rule)
def WriteIfChanged(self):
"""Writes the tool file."""
content = ['VisualStudioToolFile',
{'Version': '8.00',
'Name': self.name
},
self.rules_section
]
easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
encoding="Windows-1252")
| bsd-3-clause |
gersolar/stations | stations_configuration/settings.py | 1 | 5198 | # Only Celery settings for stations project.
#import djcelery
#djcelery.setup_loader()
#BROKER_TRANSPORT = 'amqplib'
#BROKER_URL = 'django://'
##CELERY_RESULT_BACKEND = 'database'
#CELERY_DEFAULT_QUEUE = "default"
#CELERY_QUEUES = {
# "default": {
# "binding_key": "task.#",
# },
# "mailer": {
# "binding_key": "task.#",
# },
#}
#CELERY_ROUTES = {'downloader.tasks.check_email_schedule': {'queue': 'mailer'}}
#CELERY_TIMEZONE = 'UTC'
#CELERY_CONCURRENCY = 7
# Django settings for stations project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', '[email protected]'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'stations.sqlite3',
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'UTC' # 'America/Buenos_Aires'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'fax%_3d9oshwed$!3s)jdn876jpj#5u&50m$6naau#&=zpyn%0'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'sslify.middleware.SSLifyMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'stations_configuration.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'stations_configuration.wsgi.application'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'polymorphic',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
'django.contrib.admindocs',
'stations',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
TEMPLATE_DIRS = ('templates',)
| mit |
ujenmr/ansible | lib/ansible/modules/remote_management/oneview/oneview_ethernet_network_facts.py | 125 | 4863 | #!/usr/bin/python
# Copyright (c) 2016-2017 Hewlett Packard Enterprise Development LP
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneview_ethernet_network_facts
short_description: Retrieve the facts about one or more of the OneView Ethernet Networks
description:
- Retrieve the facts about one or more of the Ethernet Networks from OneView.
version_added: "2.4"
requirements:
- hpOneView >= 2.0.1
author:
- Felipe Bulsoni (@fgbulsoni)
- Thiago Miotto (@tmiotto)
- Adriane Cardozo (@adriane-cardozo)
options:
name:
description:
- Ethernet Network name.
options:
description:
- "List with options to gather additional facts about an Ethernet Network and related resources.
Options allowed: C(associatedProfiles) and C(associatedUplinkGroups)."
extends_documentation_fragment:
- oneview
- oneview.factsparams
'''
EXAMPLES = '''
- name: Gather facts about all Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather paginated and filtered facts about Ethernet Networks
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
params:
start: 1
count: 3
sort: 'name:descending'
filter: 'purpose=General'
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: Ethernet network name
delegate_to: localhost
- debug: var=ethernet_networks
- name: Gather facts about an Ethernet Network by name with options
oneview_ethernet_network_facts:
config: /etc/oneview/oneview_config.json
name: eth1
options:
- associatedProfiles
- associatedUplinkGroups
delegate_to: localhost
- debug: var=enet_associated_profiles
- debug: var=enet_associated_uplink_groups
'''
RETURN = '''
ethernet_networks:
description: Has all the OneView facts about the Ethernet Networks.
returned: Always, but can be null.
type: dict
enet_associated_profiles:
description: Has all the OneView facts about the profiles which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
enet_associated_uplink_groups:
description: Has all the OneView facts about the uplink sets which are using the Ethernet network.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModuleBase
class EthernetNetworkFactsModule(OneViewModuleBase):
argument_spec = dict(
name=dict(type='str'),
options=dict(type='list'),
params=dict(type='dict')
)
def __init__(self):
super(EthernetNetworkFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
self.resource_client = self.oneview_client.ethernet_networks
def execute_module(self):
ansible_facts = {}
if self.module.params['name']:
ethernet_networks = self.resource_client.get_by('name', self.module.params['name'])
if self.module.params.get('options') and ethernet_networks:
ansible_facts = self.__gather_optional_facts(ethernet_networks[0])
else:
ethernet_networks = self.resource_client.get_all(**self.facts_params)
ansible_facts['ethernet_networks'] = ethernet_networks
return dict(changed=False, ansible_facts=ansible_facts)
def __gather_optional_facts(self, ethernet_network):
ansible_facts = {}
if self.options.get('associatedProfiles'):
ansible_facts['enet_associated_profiles'] = self.__get_associated_profiles(ethernet_network)
if self.options.get('associatedUplinkGroups'):
ansible_facts['enet_associated_uplink_groups'] = self.__get_associated_uplink_groups(ethernet_network)
return ansible_facts
def __get_associated_profiles(self, ethernet_network):
associated_profiles = self.resource_client.get_associated_profiles(ethernet_network['uri'])
return [self.oneview_client.server_profiles.get(x) for x in associated_profiles]
def __get_associated_uplink_groups(self, ethernet_network):
uplink_groups = self.resource_client.get_associated_uplink_groups(ethernet_network['uri'])
return [self.oneview_client.uplink_sets.get(x) for x in uplink_groups]
def main():
EthernetNetworkFactsModule().run()
if __name__ == '__main__':
main()
| gpl-3.0 |
gram526/VTK | Filters/Hybrid/Testing/Python/WarpPolyData.py | 20 | 6369 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# this example tests the warping of PolyData using thin plate splines
# and with grid transforms using different interpolation modes
# create a rendering window
renWin = vtk.vtkRenderWindow()
renWin.SetSize(600,300)
sphere = vtk.vtkSphereSource()
sphere.SetThetaResolution(20)
sphere.SetPhiResolution(20)
ap = vtk.vtkPolyDataNormals()
ap.SetInputConnection(sphere.GetOutputPort())
#---------------------------
# thin plate spline transform
spoints = vtk.vtkPoints()
spoints.SetNumberOfPoints(10)
spoints.SetPoint(0,0.000,0.000,0.500)
spoints.SetPoint(1,0.000,0.000,-0.500)
spoints.SetPoint(2,0.433,0.000,0.250)
spoints.SetPoint(3,0.433,0.000,-0.250)
spoints.SetPoint(4,-0.000,0.433,0.250)
spoints.SetPoint(5,-0.000,0.433,-0.250)
spoints.SetPoint(6,-0.433,-0.000,0.250)
spoints.SetPoint(7,-0.433,-0.000,-0.250)
spoints.SetPoint(8,0.000,-0.433,0.250)
spoints.SetPoint(9,0.000,-0.433,-0.250)
tpoints = vtk.vtkPoints()
tpoints.SetNumberOfPoints(10)
tpoints.SetPoint(0,0.000,0.000,0.800)
tpoints.SetPoint(1,0.000,0.000,-0.200)
tpoints.SetPoint(2,0.433,0.000,0.350)
tpoints.SetPoint(3,0.433,0.000,-0.150)
tpoints.SetPoint(4,-0.000,0.233,0.350)
tpoints.SetPoint(5,-0.000,0.433,-0.150)
tpoints.SetPoint(6,-0.433,-0.000,0.350)
tpoints.SetPoint(7,-0.433,-0.000,-0.150)
tpoints.SetPoint(8,0.000,-0.233,0.350)
tpoints.SetPoint(9,0.000,-0.433,-0.150)
thin = vtk.vtkThinPlateSplineTransform()
thin.SetSourceLandmarks(spoints)
thin.SetTargetLandmarks(tpoints)
thin.SetBasisToR2LogR()
# thin Inverse
t1 = vtk.vtkGeneralTransform()
t1.SetInput(thin)
f11 = vtk.vtkTransformPolyDataFilter()
f11.SetInputConnection(ap.GetOutputPort())
f11.SetTransform(t1)
m11 = vtk.vtkDataSetMapper()
m11.SetInputConnection(f11.GetOutputPort())
a11 = vtk.vtkActor()
a11.SetMapper(m11)
a11.RotateY(90)
a11.GetProperty().SetColor(1,0,0)
#[a11 GetProperty] SetRepresentationToWireframe
ren11 = vtk.vtkRenderer()
ren11.SetViewport(0.0,0.5,0.25,1.0)
ren11.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren11.AddActor(a11)
renWin.AddRenderer(ren11)
# inverse thin plate spline transform
f12 = vtk.vtkTransformPolyDataFilter()
f12.SetInputConnection(ap.GetOutputPort())
f12.SetTransform(t1.GetInverse())
m12 = vtk.vtkDataSetMapper()
m12.SetInputConnection(f12.GetOutputPort())
a12 = vtk.vtkActor()
a12.SetMapper(m12)
a12.RotateY(90)
a12.GetProperty().SetColor(0.9,0.9,0)
#[a12 GetProperty] SetRepresentationToWireframe
ren12 = vtk.vtkRenderer()
ren12.SetViewport(0.0,0.0,0.25,0.5)
ren12.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren12.AddActor(a12)
renWin.AddRenderer(ren12)
#--------------------------
# grid transform, cubic interpolation
gridTrans = vtk.vtkTransformToGrid()
gridTrans.SetInput(t1)
gridTrans.SetGridOrigin(-1.5,-1.5,-1.5)
gridTrans.SetGridExtent(0,60,0,60,0,60)
gridTrans.SetGridSpacing(0.05,0.05,0.05)
t2 = vtk.vtkGridTransform()
t2.SetDisplacementGridConnection(gridTrans.GetOutputPort())
t2.SetInterpolationModeToCubic()
f21 = vtk.vtkTransformPolyDataFilter()
f21.SetInputConnection(ap.GetOutputPort())
f21.SetTransform(t2)
m21 = vtk.vtkDataSetMapper()
m21.SetInputConnection(f21.GetOutputPort())
a21 = vtk.vtkActor()
a21.SetMapper(m21)
a21.RotateY(90)
a21.GetProperty().SetColor(1,0,0)
#[a21 GetProperty] SetRepresentationToWireframe
ren21 = vtk.vtkRenderer()
ren21.SetViewport(0.25,0.5,0.50,1.0)
ren21.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren21.AddActor(a21)
renWin.AddRenderer(ren21)
# inverse
f22 = vtk.vtkTransformPolyDataFilter()
f22.SetInputConnection(ap.GetOutputPort())
f22.SetTransform(t2.GetInverse())
m22 = vtk.vtkDataSetMapper()
m22.SetInputConnection(f22.GetOutputPort())
a22 = vtk.vtkActor()
a22.SetMapper(m22)
a22.RotateY(90)
a22.GetProperty().SetColor(0.9,0.9,0)
#[a22 GetProperty] SetRepresentationToWireframe
ren22 = vtk.vtkRenderer()
ren22.SetViewport(0.25,0.0,0.50,0.5)
ren22.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren22.AddActor(a22)
renWin.AddRenderer(ren22)
#--------------------------
# grid transform, linear
t3 = vtk.vtkGridTransform()
t3.SetDisplacementGridConnection(gridTrans.GetOutputPort())
t3.SetInterpolationModeToLinear()
f31 = vtk.vtkTransformPolyDataFilter()
f31.SetInputConnection(ap.GetOutputPort())
f31.SetTransform(t3)
m31 = vtk.vtkDataSetMapper()
m31.SetInputConnection(f31.GetOutputPort())
a31 = vtk.vtkActor()
a31.SetMapper(m31)
a31.RotateY(90)
a31.GetProperty().SetColor(1,0,0)
#[a31 GetProperty] SetRepresentationToWireframe
ren31 = vtk.vtkRenderer()
ren31.SetViewport(0.50,0.5,0.75,1.0)
ren31.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren31.AddActor(a31)
renWin.AddRenderer(ren31)
# inverse
f32 = vtk.vtkTransformPolyDataFilter()
f32.SetInputConnection(ap.GetOutputPort())
f32.SetTransform(t3.GetInverse())
m32 = vtk.vtkDataSetMapper()
m32.SetInputConnection(f32.GetOutputPort())
a32 = vtk.vtkActor()
a32.SetMapper(m32)
a32.RotateY(90)
a32.GetProperty().SetColor(0.9,0.9,0)
#[a32 GetProperty] SetRepresentationToWireframe
ren32 = vtk.vtkRenderer()
ren32.SetViewport(0.5,0.0,0.75,0.5)
ren32.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren32.AddActor(a32)
renWin.AddRenderer(ren32)
#--------------------------
# grid transform, nearest
t4 = vtk.vtkGridTransform()
t4.SetDisplacementGridConnection(gridTrans.GetOutputPort())
t4.SetInterpolationModeToNearestNeighbor()
t4.SetInverseTolerance(0.05)
f41 = vtk.vtkTransformPolyDataFilter()
f41.SetInputConnection(ap.GetOutputPort())
f41.SetTransform(t4)
m41 = vtk.vtkDataSetMapper()
m41.SetInputConnection(f41.GetOutputPort())
a41 = vtk.vtkActor()
a41.SetMapper(m41)
a41.RotateY(90)
a41.GetProperty().SetColor(1,0,0)
#[a41 GetProperty] SetRepresentationToWireframe
ren41 = vtk.vtkRenderer()
ren41.SetViewport(0.75,0.5,1.0,1.0)
ren41.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren41.AddActor(a41)
renWin.AddRenderer(ren41)
#inverse
f42 = vtk.vtkTransformPolyDataFilter()
f42.SetInputConnection(ap.GetOutputPort())
f42.SetTransform(t4.GetInverse())
m42 = vtk.vtkDataSetMapper()
m42.SetInputConnection(f42.GetOutputPort())
a42 = vtk.vtkActor()
a42.SetMapper(m42)
a42.RotateY(90)
a42.GetProperty().SetColor(0.9,0.9,0)
#[a42 GetProperty] SetRepresentationToWireframe
ren42 = vtk.vtkRenderer()
ren42.SetViewport(0.75,0.0,1.0,0.5)
ren42.ResetCamera(-0.5,0.5,-0.5,0.5,-1,1)
ren42.AddActor(a42)
renWin.AddRenderer(ren42)
t1.RotateX(-100)
t1.PostMultiply()
t1.RotateX(+100)
renWin.Render()
# --- end of script --
| bsd-3-clause |
chrisseto/modular-odm | tests/test_foreign.py | 4 | 1849 | #!/usr/bin/env python
# encoding: utf-8
from nose.tools import *
from tests.base import ModularOdmTestCase, TestObject
from modularodm import fields
class TestForeignList(ModularOdmTestCase):
def define_objects(self):
class Foo(TestObject):
_id = fields.IntegerField()
bars = fields.ForeignField('bar', list=True)
class Bar(TestObject):
_id = fields.IntegerField()
return Foo, Bar
def set_up_objects(self):
self.foo = self.Foo(_id=1)
self.bars = []
for idx in range(5):
self.bars.append(self.Bar(_id=idx))
self.bars[idx].save()
self.foo.bars = self.bars
self.foo.save()
def test_get_item(self):
assert_equal(self.bars[2], self.foo.bars[2])
def test_get_slice(self):
assert_equal(self.bars[:3], list(self.foo.bars[:3]))
def test_get_slice_extended(self):
assert_equal(self.bars[::-1], list(self.foo.bars[::-1]))
class TestAbstractForeignList(ModularOdmTestCase):
def define_objects(self):
class Foo(TestObject):
_id = fields.IntegerField()
bars = fields.AbstractForeignField(list=True)
class Bar(TestObject):
_id = fields.IntegerField()
return Foo, Bar
def set_up_objects(self):
self.foo = self.Foo(_id=1)
self.bars = []
for idx in range(5):
self.bars.append(self.Bar(_id=idx))
self.bars[idx].save()
self.foo.bars = self.bars
self.foo.save()
def test_get_item(self):
assert_equal(self.bars[2], self.foo.bars[2])
def test_get_slice(self):
assert_equal(self.bars[:3], list(self.foo.bars[:3]))
def test_get_slice_extended(self):
assert_equal(self.bars[::-1], list(self.foo.bars[::-1]))
| apache-2.0 |
ycl2045/nova-master | nova/api/openstack/compute/plugins/v3/keypairs.py | 10 | 6309 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Keypair management extension."""
import webob
import webob.exc
from nova.api.openstack.compute.schemas.v3 import keypairs
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api import validation
from nova.compute import api as compute_api
from nova import exception
from nova.openstack.common.gettextutils import _
ALIAS = 'keypairs'
authorize = extensions.extension_authorizer('compute', 'v3:' + ALIAS)
soft_authorize = extensions.soft_extension_authorizer('compute', 'v3:' + ALIAS)
class KeypairController(object):
"""Keypair API controller for the OpenStack API."""
def __init__(self):
self.api = compute_api.KeypairAPI()
def _filter_keypair(self, keypair, **attrs):
clean = {
'name': keypair.name,
'public_key': keypair.public_key,
'fingerprint': keypair.fingerprint,
}
for attr in attrs:
clean[attr] = keypair[attr]
return clean
@extensions.expected_errors((400, 409, 413))
@wsgi.response(201)
@validation.schema(keypairs.create)
def create(self, req, body):
"""Create or import keypair.
Sending name will generate a key and return private_key
and fingerprint.
You can send a public_key to add an existing ssh key
params: keypair object with:
name (required) - string
public_key (optional) - string
"""
context = req.environ['nova.context']
authorize(context, action='create')
params = body['keypair']
name = params['name']
try:
if 'public_key' in params:
keypair = self.api.import_key_pair(context,
context.user_id, name,
params['public_key'])
keypair = self._filter_keypair(keypair, user_id=True)
else:
keypair, private_key = self.api.create_key_pair(
context, context.user_id, name)
keypair = self._filter_keypair(keypair, user_id=True)
keypair['private_key'] = private_key
return {'keypair': keypair}
except exception.KeypairLimitExceeded:
msg = _("Quota exceeded, too many key pairs.")
raise webob.exc.HTTPRequestEntityTooLarge(
explanation=msg,
headers={'Retry-After': 0})
except exception.InvalidKeypair as exc:
raise webob.exc.HTTPBadRequest(explanation=exc.format_message())
except exception.KeyPairExists as exc:
raise webob.exc.HTTPConflict(explanation=exc.format_message())
@wsgi.response(204)
@extensions.expected_errors(404)
def delete(self, req, id):
"""Delete a keypair with a given name."""
context = req.environ['nova.context']
authorize(context, action='delete')
try:
self.api.delete_key_pair(context, context.user_id, id)
except exception.KeypairNotFound:
raise webob.exc.HTTPNotFound()
@extensions.expected_errors(404)
def show(self, req, id):
"""Return data for the given key name."""
context = req.environ['nova.context']
authorize(context, action='show')
try:
keypair = self.api.get_key_pair(context, context.user_id, id)
except exception.KeypairNotFound:
raise webob.exc.HTTPNotFound()
return {'keypair': self._filter_keypair(keypair)}
@extensions.expected_errors(())
def index(self, req):
"""List of keypairs for a user."""
context = req.environ['nova.context']
authorize(context, action='index')
key_pairs = self.api.get_key_pairs(context, context.user_id)
rval = []
for key_pair in key_pairs:
rval.append({'keypair': self._filter_keypair(key_pair)})
return {'keypairs': rval}
class Controller(wsgi.Controller):
def _add_key_name(self, req, servers):
for server in servers:
db_server = req.get_db_instance(server['id'])
# server['id'] is guaranteed to be in the cache due to
# the core API adding it in its 'show'/'detail' methods.
server['key_name'] = db_server['key_name']
def _show(self, req, resp_obj):
if 'server' in resp_obj.obj:
server = resp_obj.obj['server']
self._add_key_name(req, [server])
@wsgi.extends
def show(self, req, resp_obj, id):
context = req.environ['nova.context']
if soft_authorize(context):
self._show(req, resp_obj)
@wsgi.extends
def detail(self, req, resp_obj):
context = req.environ['nova.context']
if 'servers' in resp_obj.obj and soft_authorize(context):
servers = resp_obj.obj['servers']
self._add_key_name(req, servers)
class Keypairs(extensions.V3APIExtensionBase):
"""Keypair Support."""
name = "Keypairs"
alias = ALIAS
version = 1
def get_resources(self):
resources = [
extensions.ResourceExtension('keypairs',
KeypairController())]
return resources
def get_controller_extensions(self):
controller = Controller()
extension = extensions.ControllerExtension(self, 'servers', controller)
return [extension]
# use nova.api.extensions.server.extensions entry point to modify
# server create kwargs
def server_create(self, server_dict, create_kwargs):
create_kwargs['key_name'] = server_dict.get('key_name')
| apache-2.0 |
liamgh/liamgreenhughes-sl4a-tf101 | python/src/Lib/test/test_pwd.py | 58 | 3352 | import unittest
from test import test_support
import pwd
class PwdTest(unittest.TestCase):
def test_values(self):
entries = pwd.getpwall()
entriesbyname = {}
entriesbyuid = {}
for e in entries:
self.assertEqual(len(e), 7)
self.assertEqual(e[0], e.pw_name)
self.assert_(isinstance(e.pw_name, basestring))
self.assertEqual(e[1], e.pw_passwd)
self.assert_(isinstance(e.pw_passwd, basestring))
self.assertEqual(e[2], e.pw_uid)
self.assert_(isinstance(e.pw_uid, int))
self.assertEqual(e[3], e.pw_gid)
self.assert_(isinstance(e.pw_gid, int))
self.assertEqual(e[4], e.pw_gecos)
self.assert_(isinstance(e.pw_gecos, basestring))
self.assertEqual(e[5], e.pw_dir)
self.assert_(isinstance(e.pw_dir, basestring))
self.assertEqual(e[6], e.pw_shell)
self.assert_(isinstance(e.pw_shell, basestring))
# The following won't work, because of duplicate entries
# for one uid
# self.assertEqual(pwd.getpwuid(e.pw_uid), e)
# instead of this collect all entries for one uid
# and check afterwards
entriesbyname.setdefault(e.pw_name, []).append(e)
entriesbyuid.setdefault(e.pw_uid, []).append(e)
if len(entries) > 1000: # Huge passwd file (NIS?) -- skip the rest
return
# check whether the entry returned by getpwuid()
# for each uid is among those from getpwall() for this uid
for e in entries:
if not e[0] or e[0] == '+':
continue # skip NIS entries etc.
self.assert_(pwd.getpwnam(e.pw_name) in entriesbyname[e.pw_name])
self.assert_(pwd.getpwuid(e.pw_uid) in entriesbyuid[e.pw_uid])
def test_errors(self):
self.assertRaises(TypeError, pwd.getpwuid)
self.assertRaises(TypeError, pwd.getpwnam)
self.assertRaises(TypeError, pwd.getpwall, 42)
# try to get some errors
bynames = {}
byuids = {}
for (n, p, u, g, gecos, d, s) in pwd.getpwall():
bynames[n] = u
byuids[u] = n
allnames = bynames.keys()
namei = 0
fakename = allnames[namei]
while fakename in bynames:
chars = list(fakename)
for i in xrange(len(chars)):
if chars[i] == 'z':
chars[i] = 'A'
break
elif chars[i] == 'Z':
continue
else:
chars[i] = chr(ord(chars[i]) + 1)
break
else:
namei = namei + 1
try:
fakename = allnames[namei]
except IndexError:
# should never happen... if so, just forget it
break
fakename = ''.join(chars)
self.assertRaises(KeyError, pwd.getpwnam, fakename)
# Choose a non-existent uid.
fakeuid = 4127
while fakeuid in byuids:
fakeuid = (fakeuid * 3) % 0x10000
self.assertRaises(KeyError, pwd.getpwuid, fakeuid)
def test_main():
test_support.run_unittest(PwdTest)
if __name__ == "__main__":
test_main()
| apache-2.0 |
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8 | lib/python2.7/encodings/mac_iceland.py | 593 | 13754 | """ Python Character Mapping Codec mac_iceland generated from 'MAPPINGS/VENDORS/APPLE/ICELAND.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mac-iceland',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0x81 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc7' # 0x82 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xd1' # 0x84 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe0' # 0x88 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe2' # 0x89 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe3' # 0x8B -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x8C -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe7' # 0x8D -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\xe8' # 0x8F -> LATIN SMALL LETTER E WITH GRAVE
u'\xea' # 0x90 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x91 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\xec' # 0x93 -> LATIN SMALL LETTER I WITH GRAVE
u'\xee' # 0x94 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x95 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xf1' # 0x96 -> LATIN SMALL LETTER N WITH TILDE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf2' # 0x98 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\xf9' # 0x9D -> LATIN SMALL LETTER U WITH GRAVE
u'\xfb' # 0x9E -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xdd' # 0xA0 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\xa2' # 0xA2 -> CENT SIGN
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\xb4' # 0xAB -> ACUTE ACCENT
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\xc6' # 0xAE -> LATIN CAPITAL LETTER AE
u'\xd8' # 0xAF -> LATIN CAPITAL LETTER O WITH STROKE
u'\u221e' # 0xB0 -> INFINITY
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\xa5' # 0xB4 -> YEN SIGN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u220f' # 0xB8 -> N-ARY PRODUCT
u'\u03c0' # 0xB9 -> GREEK SMALL LETTER PI
u'\u222b' # 0xBA -> INTEGRAL
u'\xaa' # 0xBB -> FEMININE ORDINAL INDICATOR
u'\xba' # 0xBC -> MASCULINE ORDINAL INDICATOR
u'\u03a9' # 0xBD -> GREEK CAPITAL LETTER OMEGA
u'\xe6' # 0xBE -> LATIN SMALL LETTER AE
u'\xf8' # 0xBF -> LATIN SMALL LETTER O WITH STROKE
u'\xbf' # 0xC0 -> INVERTED QUESTION MARK
u'\xa1' # 0xC1 -> INVERTED EXCLAMATION MARK
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0192' # 0xC4 -> LATIN SMALL LETTER F WITH HOOK
u'\u2248' # 0xC5 -> ALMOST EQUAL TO
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\xc0' # 0xCB -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc3' # 0xCC -> LATIN CAPITAL LETTER A WITH TILDE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0152' # 0xCE -> LATIN CAPITAL LIGATURE OE
u'\u0153' # 0xCF -> LATIN SMALL LIGATURE OE
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\xff' # 0xD8 -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\u0178' # 0xD9 -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u2044' # 0xDA -> FRACTION SLASH
u'\u20ac' # 0xDB -> EURO SIGN
u'\xd0' # 0xDC -> LATIN CAPITAL LETTER ETH
u'\xf0' # 0xDD -> LATIN SMALL LETTER ETH
u'\xde' # 0xDE -> LATIN CAPITAL LETTER THORN
u'\xfe' # 0xDF -> LATIN SMALL LETTER THORN
u'\xfd' # 0xE0 -> LATIN SMALL LETTER Y WITH ACUTE
u'\xb7' # 0xE1 -> MIDDLE DOT
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2030' # 0xE4 -> PER MILLE SIGN
u'\xc2' # 0xE5 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xca' # 0xE6 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xcb' # 0xE8 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0xE9 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xEB -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xEC -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0xED -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\uf8ff' # 0xF0 -> Apple logo
u'\xd2' # 0xF1 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xF3 -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xd9' # 0xF4 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\u0131' # 0xF5 -> LATIN SMALL LETTER DOTLESS I
u'\u02c6' # 0xF6 -> MODIFIER LETTER CIRCUMFLEX ACCENT
u'\u02dc' # 0xF7 -> SMALL TILDE
u'\xaf' # 0xF8 -> MACRON
u'\u02d8' # 0xF9 -> BREVE
u'\u02d9' # 0xFA -> DOT ABOVE
u'\u02da' # 0xFB -> RING ABOVE
u'\xb8' # 0xFC -> CEDILLA
u'\u02dd' # 0xFD -> DOUBLE ACUTE ACCENT
u'\u02db' # 0xFE -> OGONEK
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
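# A hypothetical round-trip sketch through the table-driven codec defined
# above; the sample text is invented.
if __name__ == '__main__':
    _encoded, _ = Codec().encode(u'\xde\xf6rn')   # THORN, o-diaeresis, r, n
    assert _encoded == '\xde\x9arn'               # 0xDE and 0x9A per the tables
    _decoded, _ = Codec().decode(_encoded)
    assert _decoded == u'\xde\xf6rn'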
| gpl-2.0 |
alertby/mbed | workspace_tools/host_tests/rtc_auto.py | 122 | 2052 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import re
from time import time, strftime, gmtime
class RTCTest():
PATTERN_RTC_VALUE = "\[(\d+)\] \[(\d+-\d+-\d+ \d+:\d+:\d+ [AaPpMm]{2})\]"
re_detect_rtc_value = re.compile(PATTERN_RTC_VALUE)
def test(self, selftest):
test_result = True
start = time()
sec_prev = 0
for i in range(0, 5):
# Timeout changed from default: we need to wait longer for some boards to start-up
c = selftest.mbed.serial_readline(timeout=10)
if c is None:
return selftest.RESULT_IO_SERIAL
selftest.notify(c.strip())
delta = time() - start
m = self.re_detect_rtc_value.search(c)
if m and len(m.groups()):
sec = int(m.groups()[0])
time_str = m.groups()[1]
correct_time_str = strftime("%Y-%m-%d %H:%M:%S %p", gmtime(float(sec)))
single_result = time_str == correct_time_str and sec > 0 and sec > sec_prev
test_result = test_result and single_result
result_msg = "OK" if single_result else "FAIL"
selftest.notify("HOST: [%s] [%s] received time %+d sec after %.2f sec... %s"% (sec, time_str, sec - sec_prev, delta, result_msg))
sec_prev = sec
else:
test_result = False
break
start = time()
return selftest.RESULT_SUCCESS if test_result else selftest.RESULT_FAILURE
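# A hypothetical sketch of the serial line PATTERN_RTC_VALUE is meant to
# match; the sample line is invented.
if __name__ == '__main__':
    _sample = "[1256729737] [2009-10-28 11:35:37 AM]"
    _match = RTCTest.re_detect_rtc_value.search(_sample)
    assert _match.groups() == ('1256729737', '2009-10-28 11:35:37 AM')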
| apache-2.0 |
m8ttyB/socorro | webapp-django/crashstats/supersearch/tests/test_utils.py | 3 | 1142 | import datetime
from nose.tools import eq_
from django.utils.timezone import utc
from crashstats.crashstats.tests.test_views import BaseTestViews
from crashstats.topcrashers.views import get_date_boundaries
class TestDateBoundaries(BaseTestViews):
def test_get_date_boundaries(self):
# Simple test.
start, end = get_date_boundaries({
'date': [
'>2010-03-01T12:12:12',
'<=2010-03-10T00:00:00',
]
})
eq_(
start,
datetime.datetime(2010, 3, 1, 12, 12, 12).replace(tzinfo=utc)
)
eq_(end, datetime.datetime(2010, 3, 10).replace(tzinfo=utc))
# Test with messy dates.
start, end = get_date_boundaries({
'date': [
'>2010-03-01T12:12:12',
'>2009-01-01T12:12:12',
'<2010-03-11T00:00:00',
'<=2010-03-10T00:00:00',
]
})
eq_(
start,
datetime.datetime(2009, 1, 1, 12, 12, 12).replace(tzinfo=utc)
)
eq_(end, datetime.datetime(2010, 3, 11).replace(tzinfo=utc))
| mpl-2.0 |
ojengwa/grr | client/client_actions/standard.py | 2 | 20847 | #!/usr/bin/env python
"""Standard actions that happen on the client."""
import cStringIO as StringIO
import ctypes
import gzip
import hashlib
import os
import platform
import socket
import sys
import time
import zlib
import psutil
import logging
from grr.client import actions
from grr.client import client_utils_common
from grr.client import vfs
from grr.client.client_actions import tempfiles
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import rdfvalue
from grr.lib import utils
from grr.lib.rdfvalues import crypto
# We do not send larger buffers than this:
MAX_BUFFER_SIZE = 640 * 1024
class ReadBuffer(actions.ActionPlugin):
"""Reads a buffer from a file and returns it to a server callback."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
fd.Seek(args.offset)
offset = fd.Tell()
data = fd.Read(args.length)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
# Now return the data to the server
self.SendReply(offset=offset, data=data,
length=len(data), pathspec=fd.pathspec)
HASH_CACHE = utils.FastStore(100)
class TransferBuffer(actions.ActionPlugin):
"""Reads a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
data = vfs.ReadVFS(args.pathspec, args.offset, args.length,
progress_callback=self.Progress)
result = rdfvalue.DataBlob(
data=zlib.compress(data),
compression=rdfvalue.DataBlob.CompressionType.ZCOMPRESSION)
digest = hashlib.sha256(data).digest()
# Ensure that the buffer is counted against this response. Check network
# send limit.
self.ChargeBytesToSession(len(data))
# Now return the data to the server into the special TransferStore well
# known flow.
self.grr_worker.SendReply(
result, session_id=rdfvalue.SessionID(flow_name="TransferStore"))
# Now report the hash of this blob to our flow as well as the offset and
# length.
self.SendReply(offset=args.offset, length=len(data),
data=digest)
class HashBuffer(actions.ActionPlugin):
"""Hash a buffer from a file and returns it to the server efficiently."""
in_rdfvalue = rdfvalue.BufferReference
out_rdfvalue = rdfvalue.BufferReference
def Run(self, args):
"""Reads a buffer on the client and sends it to the server."""
# Make sure we limit the size of our output
if args.length > MAX_BUFFER_SIZE:
raise RuntimeError("Can not read buffers this large.")
data = vfs.ReadVFS(args.pathspec, args.offset, args.length)
digest = hashlib.sha256(data).digest()
# Now report the hash of this blob to our flow as well as the offset and
# length.
self.SendReply(offset=args.offset, length=len(data),
data=digest)
class CopyPathToFile(actions.ActionPlugin):
"""Copy contents of a pathspec to a file on disk."""
in_rdfvalue = rdfvalue.CopyPathToFileRequest
out_rdfvalue = rdfvalue.CopyPathToFileRequest
BLOCK_SIZE = 10 * 1024 * 1024
def _Copy(self, dest_fd):
"""Copy from VFS to file until no more data or self.length is reached.
Args:
dest_fd: file object to write to
Returns:
self.written: bytes written
"""
while self.written < self.length:
to_read = min(self.length - self.written, self.BLOCK_SIZE)
data = self.src_fd.read(to_read)
if not data:
break
dest_fd.write(data)
self.written += len(data)
# Send heartbeats for long files.
self.Progress()
return self.written
def Run(self, args):
"""Read from a VFS file and write to a GRRTempFile on disk.
    If file writing doesn't complete, files won't be cleaned up.
Args:
args: see CopyPathToFile in jobs.proto
"""
self.src_fd = vfs.VFSOpen(args.src_path, progress_callback=self.Progress)
self.src_fd.Seek(args.offset)
offset = self.src_fd.Tell()
self.length = args.length or (1024 ** 4) # 1 TB
self.written = 0
suffix = ".gz" if args.gzip_output else ""
self.dest_fd = tempfiles.CreateGRRTempFile(directory=args.dest_dir,
lifetime=args.lifetime,
suffix=suffix)
self.dest_file = self.dest_fd.name
with self.dest_fd:
if args.gzip_output:
gzip_fd = gzip.GzipFile(self.dest_file, "wb", 9, self.dest_fd)
# Gzip filehandle needs its own close method called
with gzip_fd:
self._Copy(gzip_fd)
else:
self._Copy(self.dest_fd)
pathspec_out = rdfvalue.PathSpec(
path=self.dest_file, pathtype=rdfvalue.PathSpec.PathType.OS)
self.SendReply(offset=offset, length=self.written, src_path=args.src_path,
dest_dir=args.dest_dir, dest_path=pathspec_out,
gzip_output=args.gzip_output)
class ListDirectory(ReadBuffer):
"""Lists all the files in a directory."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Run(self, args):
"""Lists a directory."""
try:
directory = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
files = list(directory.ListFiles())
files.sort(key=lambda x: x.pathspec.path)
for response in files:
self.SendReply(response)
class IteratedListDirectory(actions.IteratedAction):
"""Lists a directory as an iterator."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Iterate(self, request, client_state):
"""Restores its way through the directory using an Iterator."""
try:
fd = vfs.VFSOpen(request.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
files = list(fd.ListFiles())
files.sort(key=lambda x: x.pathspec.path)
index = client_state.get("index", 0)
length = request.iterator.number
for response in files[index:index + length]:
self.SendReply(response)
# Update the state
client_state["index"] = index + length
class SuspendableListDirectory(actions.SuspendableAction):
"""Lists a directory as a suspendable client action."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Iterate(self):
try:
fd = vfs.VFSOpen(self.request.pathspec, progress_callback=self.Progress)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
length = self.request.iterator.number
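    # Results are streamed in batches of `length` entries; Suspend() hands
    # control back to the framework after each batch so the action can be
    # resumed later without re-listing the directory.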
for group in utils.Grouper(fd.ListFiles(), length):
for response in group:
self.SendReply(response)
self.Suspend()
class StatFile(ListDirectory):
"""Sends a StatResponse for a single file."""
in_rdfvalue = rdfvalue.ListDirRequest
out_rdfvalue = rdfvalue.StatEntry
def Run(self, args):
"""Sends a StatResponse for a single file."""
try:
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
res = fd.Stat()
self.SendReply(res)
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
return
class ExecuteCommand(actions.ActionPlugin):
"""Executes one of the predefined commands."""
in_rdfvalue = rdfvalue.ExecuteRequest
out_rdfvalue = rdfvalue.ExecuteResponse
def Run(self, command):
"""Run."""
cmd = command.cmd
args = command.args
time_limit = command.time_limit
res = client_utils_common.Execute(cmd, args, time_limit)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
result = rdfvalue.ExecuteResponse(
request=command,
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used))
self.SendReply(result)
class ExecuteBinaryCommand(actions.ActionPlugin):
"""Executes a command from a passed in binary.
  Obviously this is a dangerous function; it provides for arbitrary code
  execution by the server running as root/SYSTEM.
This is protected by the CONFIG[PrivateKeys.executable_signing_private_key],
which should be stored offline and well protected.
This method can be utilized as part of an autoupdate mechanism if necessary.
NOTE: If the binary is too large to fit inside a single request, the request
will have the more_data flag enabled, indicating more data is coming.
"""
in_rdfvalue = rdfvalue.ExecuteBinaryRequest
out_rdfvalue = rdfvalue.ExecuteBinaryResponse
suffix = ""
def WriteBlobToFile(self, request, suffix=""):
"""Writes the blob to a file and returns its path."""
lifetime = 0
# Only set the lifetime thread on the last chunk written.
if not request.more_data:
lifetime = request.time_limit
# Keep the file for at least 5 seconds after execution.
if lifetime > 0:
lifetime += 5
# First chunk truncates the file, later chunks append.
if request.offset == 0:
mode = "w+b"
else:
mode = "r+b"
temp_file = tempfiles.CreateGRRTempFile(filename=request.write_path,
suffix=suffix, mode=mode)
with temp_file:
path = temp_file.name
temp_file.seek(0, 2)
if temp_file.tell() != request.offset:
raise IOError("Chunks out of order Error.")
# Write the new chunk.
temp_file.write(request.executable.data)
return path
def CleanUp(self, path):
"""Removes the temp file."""
try:
if os.path.exists(path):
os.remove(path)
except (OSError, IOError), e:
logging.info("Failed to remove temporary file %s. Err: %s", path, e)
def Run(self, args):
"""Run."""
# Verify the executable blob.
args.executable.Verify(config_lib.CONFIG[
"Client.executable_signing_public_key"])
path = self.WriteBlobToFile(args, self.suffix)
# Only actually run the file on the last chunk.
if not args.more_data:
self.ProcessFile(path, args)
self.CleanUp(path)
def ProcessFile(self, path, args):
res = client_utils_common.Execute(path, args.args, args.time_limit,
bypass_whitelist=True)
(stdout, stderr, status, time_used) = res
# Limit output to 10MB so our response doesn't get too big.
stdout = stdout[:10 * 1024 * 1024]
stderr = stderr[:10 * 1024 * 1024]
result = rdfvalue.ExecuteBinaryResponse(
stdout=stdout,
stderr=stderr,
exit_status=status,
# We have to return microseconds.
time_used=int(1e6 * time_used))
self.SendReply(result)
class ExecutePython(actions.ActionPlugin):
"""Executes python code with exec.
  Obviously this is a dangerous function; it provides for arbitrary code
  execution by the server running as root/SYSTEM.
This is protected by CONFIG[PrivateKeys.executable_signing_private_key], which
should be stored offline and well protected.
"""
in_rdfvalue = rdfvalue.ExecutePythonRequest
out_rdfvalue = rdfvalue.ExecutePythonResponse
def Run(self, args):
"""Run."""
time_start = time.time()
class StdOutHook(object):
def __init__(self, buf):
self.buf = buf
def write(self, text):
self.buf.write(text)
args.python_code.Verify(config_lib.CONFIG[
"Client.executable_signing_public_key"])
# The execed code can assign to this variable if it wants to return data.
logging.debug("exec for python code %s", args.python_code.data[0:100])
context = globals().copy()
context["py_args"] = args.py_args.ToDict()
context["magic_return_str"] = ""
# Export the Progress function to allow python hacks to call it.
context["Progress"] = self.Progress
stdout = StringIO.StringIO()
with utils.Stubber(sys, "stdout", StdOutHook(stdout)):
exec(args.python_code.data, context) # pylint: disable=exec-used
stdout_output = stdout.getvalue()
magic_str_output = context.get("magic_return_str")
if stdout_output and magic_str_output:
output = "Stdout: %s\nMagic Str:%s\n" % (stdout_output, magic_str_output)
else:
output = stdout_output or magic_str_output
time_used = time.time() - time_start
# We have to return microseconds.
result = rdfvalue.ExecutePythonResponse(
time_used=int(1e6 * time_used),
return_val=utils.SmartStr(output))
self.SendReply(result)
class Segfault(actions.ActionPlugin):
"""This action is just for debugging. It induces a segfault."""
in_rdfvalue = None
out_rdfvalue = None
def Run(self, unused_args):
"""Does the segfaulting."""
if flags.FLAGS.debug:
logging.warning("Segfault action requested :(")
print ctypes.cast(1, ctypes.POINTER(ctypes.c_void_p)).contents
else:
logging.warning("Segfault requested but not running in debug mode.")
class ListProcesses(actions.ActionPlugin):
"""This action lists all the processes running on a machine."""
in_rdfvalue = None
out_rdfvalue = rdfvalue.Process
def Run(self, unused_arg):
# psutil will cause an active loop on Windows 2000
if platform.system() == "Windows" and platform.version().startswith("5.0"):
raise RuntimeError("ListProcesses not supported on Windows 2000")
for proc in psutil.process_iter():
response = rdfvalue.Process()
process_fields = ["pid", "ppid", "name", "exe", "username", "terminal"]
for field in process_fields:
try:
value = getattr(proc, field)
if value is None:
continue
if callable(value):
value = value()
if not isinstance(value, (int, long)):
value = utils.SmartUnicode(value)
setattr(response, field, value)
except (psutil.NoSuchProcess, psutil.AccessDenied, AttributeError):
pass
try:
for arg in proc.cmdline():
response.cmdline.append(utils.SmartUnicode(arg))
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.nice = proc.nice()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
# Not available on Windows.
if hasattr(proc, "uids"):
(response.real_uid, response.effective_uid,
response.saved_uid) = proc.uids()
(response.real_gid, response.effective_gid,
response.saved_gid) = proc.gids()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.ctime = long(proc.create_time() * 1e6)
response.status = str(proc.status())
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
# Not available on OSX.
if hasattr(proc, "cwd"):
response.cwd = utils.SmartUnicode(proc.cwd())
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.num_threads = proc.num_threads()
except (psutil.NoSuchProcess, psutil.AccessDenied, RuntimeError):
pass
try:
(response.user_cpu_time,
response.system_cpu_time) = proc.cpu_times()
# This is very time consuming so we do not collect cpu_percent here.
# response.cpu_percent = proc.get_cpu_percent()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
try:
response.RSS_size, response.VMS_size = proc.memory_info()
response.memory_percent = proc.memory_percent()
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
# Due to a bug in psutil, this function is disabled for now
# (https://github.com/giampaolo/psutil/issues/340)
# try:
# for f in proc.open_files():
# response.open_files.append(utils.SmartUnicode(f.path))
# except (psutil.NoSuchProcess, psutil.AccessDenied):
# pass
try:
for c in proc.connections():
conn = response.connections.Append(family=c.family,
type=c.type,
pid=proc.pid)
try:
conn.state = c.status
except ValueError:
logging.info("Encountered unknown connection status (%s).",
c.status)
try:
conn.local_address.ip, conn.local_address.port = c.laddr
# Could be in state LISTEN.
if c.raddr:
conn.remote_address.ip, conn.remote_address.port = c.raddr
except AttributeError:
conn.local_address.ip, conn.local_address.port = c.local_address
# Could be in state LISTEN.
if c.remote_address:
(conn.remote_address.ip,
conn.remote_address.port) = c.remote_address
except (psutil.NoSuchProcess, psutil.AccessDenied):
pass
self.SendReply(response)
# Reading information here is slow so we heartbeat between processes.
self.Progress()
class SendFile(actions.ActionPlugin):
"""This action encrypts and sends a file to a remote listener."""
in_rdfvalue = rdfvalue.SendFileRequest
out_rdfvalue = rdfvalue.StatEntry
BLOCK_SIZE = 1024 * 1024 * 10 # 10 MB
def Send(self, sock, msg):
totalsent = 0
n = len(msg)
while totalsent < n:
sent = sock.send(msg[totalsent:])
if sent == 0:
raise RuntimeError("socket connection broken")
totalsent += sent
def Run(self, args):
"""Run."""
# Open the file.
fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
if args.address_family == rdfvalue.NetworkAddress.Family.INET:
family = socket.AF_INET
elif args.address_family == rdfvalue.NetworkAddress.Family.INET6:
family = socket.AF_INET6
else:
raise RuntimeError("Socket address family not supported.")
s = socket.socket(family, socket.SOCK_STREAM)
try:
s.connect((args.host, args.port))
except socket.error as e:
raise RuntimeError(str(e))
cipher = crypto.AES128CBCCipher(args.key, args.iv,
crypto.Cipher.OP_ENCRYPT)
while True:
data = fd.read(self.BLOCK_SIZE)
if not data:
break
self.Send(s, cipher.Update(data))
# Send heartbeats for long files.
self.Progress()
self.Send(s, cipher.Final())
s.close()
self.SendReply(fd.Stat())
class StatFS(actions.ActionPlugin):
"""Call os.statvfs for a given list of paths. OS X and Linux only.
Note that a statvfs call for a network filesystem (e.g. NFS) that is
unavailable, e.g. due to no network, will result in the call blocking.
"""
in_rdfvalue = rdfvalue.StatFSRequest
out_rdfvalue = rdfvalue.Volume
def Run(self, args):
if platform.system() == "Windows":
raise RuntimeError("os.statvfs not available on Windows")
for path in args.path_list:
try:
fd = vfs.VFSOpen(rdfvalue.PathSpec(path=path, pathtype=args.pathtype),
progress_callback=self.Progress)
st = fd.StatFS()
mount_point = fd.GetMountPoint()
except (IOError, OSError), e:
self.SetStatus(rdfvalue.GrrStatus.ReturnedStatus.IOERROR, e)
continue
unix = rdfvalue.UnixVolume(mount_point=mount_point)
      # On Linux, kernels before 2.6 don't have frsize, so we fall back to bsize.
# The actual_available_allocation_units attribute is set to blocks
# available to the unprivileged user, root may have some additional
# reserved space.
result = rdfvalue.Volume(bytes_per_sector=(st.f_frsize or st.f_bsize),
sectors_per_allocation_unit=1,
total_allocation_units=st.f_blocks,
actual_available_allocation_units=st.f_bavail,
unix=unix)
self.SendReply(result)
| apache-2.0 |
taiyuanfang/gyp | test/win/gyptest-cl-buffer-security-check.py | 344 | 1612 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Make sure buffer security check setting is extracted properly.
"""
import TestGyp
import sys
if sys.platform == 'win32':
test = TestGyp.TestGyp(formats=['msvs', 'ninja'])
CHDIR = 'compiler-flags'
test.run_gyp('buffer-security-check.gyp', chdir=CHDIR)
test.build('buffer-security-check.gyp', chdir=CHDIR)
def GetDisassemblyOfMain(exe):
# The standard library uses buffer security checks independent of our
# buffer security settings, so we extract just our code (i.e. main()) to
# check against.
full_path = test.built_file_path(exe, chdir=CHDIR)
output = test.run_dumpbin('/disasm', full_path)
result = []
in_main = False
for line in output.splitlines():
if line == '_main:':
in_main = True
elif in_main:
# Disassembly of next function starts.
if line.startswith('_'):
break
result.append(line)
return '\n'.join(result)
# Buffer security checks are on by default, make sure security_cookie
# appears in the disassembly of our code.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_unset.exe'):
test.fail_test()
# Explicitly on.
if 'security_cookie' not in GetDisassemblyOfMain('test_bsc_on.exe'):
test.fail_test()
# Explicitly off, shouldn't be a reference to the security cookie.
if 'security_cookie' in GetDisassemblyOfMain('test_bsc_off.exe'):
test.fail_test()
test.pass_test()
| bsd-3-clause |
40223135/40223135- | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/__init__.py | 693 | 6866 | #
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__version__ = '0.70a1'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Barrier', 'Queue', 'SimpleQueue', 'JoinableQueue', 'Pool',
'Value', 'Array', 'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]
__author__ = 'R. Oudkerk ([email protected])'
#
# Imports
#
import os
import sys
from multiprocessing.process import Process, current_process, active_children
from multiprocessing.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
pass
class BufferTooShort(ProcessError):
pass
class TimeoutError(ProcessError):
pass
class AuthenticationError(ProcessError):
pass
import _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
'''
Returns a manager associated with a running server process
    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
can be used to create shared objects.
'''
from multiprocessing.managers import SyncManager
m = SyncManager()
m.start()
return m
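# Illustrative usage sketch (not part of the original module); the manager
# process owns the shared objects and hands out proxies that can be passed
# to child processes:
#
#   manager = Manager()
#   shared_dict = manager.dict()
#   shared_list = manager.list()
#   lock = manager.Lock()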
#brython fix me
#def Pipe(duplex=True):
# '''
# Returns two connection object connected by a pipe
# '''
# from multiprocessing.connection import Pipe
# return Pipe(duplex)
def cpu_count():
'''
Returns the number of CPUs in the system
'''
if sys.platform == 'win32':
try:
num = int(os.environ['NUMBER_OF_PROCESSORS'])
except (ValueError, KeyError):
num = 0
elif 'bsd' in sys.platform or sys.platform == 'darwin':
comm = '/sbin/sysctl -n hw.ncpu'
if sys.platform == 'darwin':
comm = '/usr' + comm
try:
with os.popen(comm) as p:
num = int(p.read())
except ValueError:
num = 0
else:
try:
num = os.sysconf('SC_NPROCESSORS_ONLN')
except (ValueError, OSError, AttributeError):
num = 0
if num >= 1:
return num
else:
raise NotImplementedError('cannot determine number of cpus')
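# Illustrative usage sketch (not part of the original module); cpu_count()
# raises NotImplementedError when the CPU count cannot be determined, so a
# fallback is often useful:
#
#   try:
#       workers = cpu_count()
#   except NotImplementedError:
#       workers = 1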
def freeze_support():
'''
Check whether this is a fake forked process in a frozen executable.
If so then run code specified by commandline and exit.
'''
if sys.platform == 'win32' and getattr(sys, 'frozen', False):
from multiprocessing.forking import freeze_support
freeze_support()
def get_logger():
'''
Return package logger -- if it does not already exist then it is created
'''
from multiprocessing.util import get_logger
return get_logger()
def log_to_stderr(level=None):
'''
Turn on logging and add a handler which prints to stderr
'''
from multiprocessing.util import log_to_stderr
return log_to_stderr(level)
#brython fix me
#def allow_connection_pickling():
# '''
# Install support for sending connections and sockets between processes
# '''
# # This is undocumented. In previous versions of multiprocessing
# # its only effect was to make socket objects inheritable on Windows.
# import multiprocessing.connection
#
# Definitions depending on native semaphores
#
def Lock():
'''
Returns a non-recursive lock object
'''
from multiprocessing.synchronize import Lock
return Lock()
def RLock():
'''
Returns a recursive lock object
'''
from multiprocessing.synchronize import RLock
return RLock()
def Condition(lock=None):
'''
Returns a condition object
'''
from multiprocessing.synchronize import Condition
return Condition(lock)
def Semaphore(value=1):
'''
Returns a semaphore object
'''
from multiprocessing.synchronize import Semaphore
return Semaphore(value)
def BoundedSemaphore(value=1):
'''
Returns a bounded semaphore object
'''
from multiprocessing.synchronize import BoundedSemaphore
return BoundedSemaphore(value)
def Event():
'''
Returns an event object
'''
from multiprocessing.synchronize import Event
return Event()
def Barrier(parties, action=None, timeout=None):
'''
Returns a barrier object
'''
from multiprocessing.synchronize import Barrier
return Barrier(parties, action, timeout)
def Queue(maxsize=0):
'''
Returns a queue object
'''
from multiprocessing.queues import Queue
return Queue(maxsize)
def JoinableQueue(maxsize=0):
'''
    Returns a joinable queue object (adds task_done() and join())
'''
from multiprocessing.queues import JoinableQueue
return JoinableQueue(maxsize)
def SimpleQueue():
'''
    Returns a simplified queue object (a locked pipe)
'''
from multiprocessing.queues import SimpleQueue
return SimpleQueue()
def Pool(processes=None, initializer=None, initargs=(), maxtasksperchild=None):
'''
Returns a process pool object
'''
from multiprocessing.pool import Pool
return Pool(processes, initializer, initargs, maxtasksperchild)
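# Illustrative usage sketch (not part of the original module); note that
# functions passed to Pool.map must be picklable (e.g. module-level
# functions or builtins), so lambdas will not work here:
#
#   pool = Pool(processes=4)
#   results = pool.map(abs, range(-5, 5))
#   pool.close()
#   pool.join()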
def RawValue(typecode_or_type, *args):
'''
Returns a shared object
'''
from multiprocessing.sharedctypes import RawValue
return RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
'''
Returns a shared array
'''
from multiprocessing.sharedctypes import RawArray
return RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, lock=True):
'''
Returns a synchronized shared object
'''
from multiprocessing.sharedctypes import Value
return Value(typecode_or_type, *args, lock=lock)
def Array(typecode_or_type, size_or_initializer, *, lock=True):
'''
Returns a synchronized shared array
'''
from multiprocessing.sharedctypes import Array
return Array(typecode_or_type, size_or_initializer, lock=lock)
#
#
#
if sys.platform == 'win32':
def set_executable(executable):
'''
Sets the path to a python.exe or pythonw.exe binary used to run
child processes on Windows instead of sys.executable.
Useful for people embedding Python.
'''
from multiprocessing.forking import set_executable
set_executable(executable)
__all__ += ['set_executable']
| gpl-3.0 |
valentin-krasontovitsch/ansible | lib/ansible/modules/network/enos/enos_config.py | 42 | 11179 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (C) 2017 Red Hat Inc.
# Copyright (C) 2017 Lenovo.
#
# GNU General Public License v3.0+
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# Module to configure Lenovo Switches.
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: enos_config
version_added: "2.5"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage Lenovo ENOS configuration sections
description:
- Lenovo ENOS configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ENOS configuration sections in
a deterministic way.
extends_documentation_fragment: enos
notes:
- Tested against ENOS 8.4.1
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
aliases: ['commands']
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
src:
description:
- Specifies the source path to the file that contains the configuration
or configuration template to load. The path to the source file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory. This argument is
mutually exclusive with I(lines), I(parents).
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
        line is not correct. If the replace argument is set to I(config), the
        entire candidate configuration from I(src) is pushed to the device
        without first diffing it against the running configuration.
default: line
choices: ['line', 'block', 'config']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
backup:
description:
- This argument will cause the module to create a full backup of
the current C(running-config) from the remote device before any
changes are made. If the C(backup_options) value is not given,
the backup file is written to the C(backup) folder in the playbook
root directory. If the directory does not exist, it is created.
type: bool
default: 'no'
comment:
description:
- Allows a commit description to be specified to be included
when the configuration is committed. If the configuration is
not changed or committed, this argument is ignored.
default: 'configured by enos_config'
admin:
description:
- Enters into administration configuration mode for making config
changes to the device.
type: bool
default: 'no'
backup_options:
description:
- This is a dict object containing configurable options related to backup file path.
The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
to I(no) this option will be silently ignored.
suboptions:
filename:
description:
          - The filename to be used to store the backup configuration. If the filename
is not given it will be generated based on the hostname, current time and date
in format defined by <hostname>_config.<current-date>@<current-time>
dir_path:
description:
          - This option provides the path, ending with the directory name, in which the backup
            configuration file will be stored. If the directory does not exist it will be created
            first, and the filename is either the value of C(filename) or the default filename
            as described in the C(filename) option. If the path value is not given, a I(backup)
            directory will be created in the current working directory and the backup
            configuration will be copied to C(filename) within the I(backup) directory.
type: path
type: dict
version_added: "2.8"
"""
EXAMPLES = """
- name: configure top level configuration
enos_config:
"lines: hostname {{ inventory_hostname }}"
- name: configure interface settings
enos_config:
lines:
- enable
- ip ospf enable
parents: interface ip 13
- name: load a config from disk and replace the current config
enos_config:
src: config.cfg
backup: yes
- name: configurable backup path
enos_config:
src: config.cfg
backup: yes
backup_options:
filename: backup.cfg
dir_path: /home/user
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: Only when lines is specified.
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: str
sample: /playbooks/ansible/backup/enos01.2016-07-16@22:28:34
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.enos.enos import load_config, get_config
from ansible.module_utils.network.enos.enos import enos_argument_spec
from ansible.module_utils.network.enos.enos import check_args
from ansible.module_utils.network.common.config import NetworkConfig, dumps
DEFAULT_COMMIT_COMMENT = 'configured by enos_config'
def get_running_config(module):
contents = module.params['config']
if not contents:
contents = get_config(module)
return NetworkConfig(indent=1, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=1)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def run(module, result):
match = module.params['match']
replace = module.params['replace']
replace_config = replace == 'config'
path = module.params['parents']
comment = module.params['comment']
admin = module.params['admin']
check_mode = module.check_mode
candidate = get_candidate(module)
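    # Unless a blind full replace was requested (replace=config), diff the
    # candidate configuration against the running configuration and only
    # push the lines that differ.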
if match != 'none' and replace != 'config':
contents = get_running_config(module)
configobj = NetworkConfig(contents=contents, indent=1)
commands = candidate.difference(configobj, path=path, match=match,
replace=replace)
else:
commands = candidate.items
if commands:
commands = dumps(commands, 'commands').split('\n')
if any((module.params['lines'], module.params['src'])):
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['commands'] = commands
diff = load_config(module, commands)
if diff:
result['diff'] = dict(prepared=diff)
result['changed'] = True
def main():
"""main entry point for module execution
"""
backup_spec = dict(
filename=dict(),
dir_path=dict(type='path')
)
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block', 'config']),
config=dict(),
backup=dict(type='bool', default=False),
backup_options=dict(type='dict', options=backup_spec),
comment=dict(default=DEFAULT_COMMIT_COMMENT),
admin=dict(type='bool', default=False)
)
argument_spec.update(enos_argument_spec)
mutually_exclusive = [('lines', 'src'),
('parents', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines']),
('replace', 'config', ['src'])]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = get_config(module)
run(module, result)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
lizardsystem/lizard-measure | lizard_measure/migrations/0010_auto__del_score__del_measuringrod__del_field_measurestatusmoment_is_pl.py | 1 | 23606 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'Score'
db.delete_table('lizard_measure_score')
# Deleting model 'MeasuringRod'
db.delete_table('lizard_measure_measuringrod')
# Deleting field 'MeasureStatusMoment.is_planning'
db.delete_column('lizard_measure_measurestatusmoment', 'is_planning')
# Deleting field 'MeasureStatusMoment.date'
db.delete_column('lizard_measure_measurestatusmoment', 'date')
# Adding field 'MeasureStatusMoment.planning_date'
db.add_column('lizard_measure_measurestatusmoment', 'planning_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Adding field 'MeasureStatusMoment.realisation_date'
db.add_column('lizard_measure_measurestatusmoment', 'realisation_date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.total_costs'
db.delete_column('lizard_measure_measure', 'total_costs')
# Adding field 'Measure.valid'
db.add_column('lizard_measure_measure', 'valid', self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True), keep_default=False)
# Adding field 'Measure.geom'
db.add_column('lizard_measure_measure', 'geom', self.gf('django.contrib.gis.db.models.fields.GeometryField')(null=True, blank=True), keep_default=False)
def backwards(self, orm):
# Adding model 'Score'
db.create_table('lizard_measure_score', (
('gep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('area_ident', self.gf('django.db.models.fields.CharField')(max_length=32, null=True, blank=True)),
('ascending', self.gf('django.db.models.fields.NullBooleanField')(null=True, blank=True)),
('mep', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_measure.MeasuringRod'])),
('limit_bad_insufficient', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('area', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['lizard_area.Area'], null=True, blank=True)),
('target_2027', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('limit_insufficient_moderate', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
('target_2015', self.gf('django.db.models.fields.FloatField')(null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['Score'])
# Adding model 'MeasuringRod'
db.create_table('lizard_measure_measuringrod', (
('group', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sign', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('sub_measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('measuring_rod', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
('id', self.gf('django.db.models.fields.IntegerField')(primary_key=True)),
('unit', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
))
db.send_create_signal('lizard_measure', ['MeasuringRod'])
# Adding field 'MeasureStatusMoment.is_planning'
db.add_column('lizard_measure_measurestatusmoment', 'is_planning', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
# Adding field 'MeasureStatusMoment.date'
db.add_column('lizard_measure_measurestatusmoment', 'date', self.gf('django.db.models.fields.DateField')(null=True, blank=True), keep_default=False)
# Deleting field 'MeasureStatusMoment.planning_date'
db.delete_column('lizard_measure_measurestatusmoment', 'planning_date')
# Deleting field 'MeasureStatusMoment.realisation_date'
db.delete_column('lizard_measure_measurestatusmoment', 'realisation_date')
# Adding field 'Measure.total_costs'
db.add_column('lizard_measure_measure', 'total_costs', self.gf('django.db.models.fields.IntegerField')(null=True, blank=True), keep_default=False)
# Deleting field 'Measure.valid'
db.delete_column('lizard_measure_measure', 'valid')
# Deleting field 'Measure.geom'
db.delete_column('lizard_measure_measure', 'geom')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'lizard_area.area': {
'Meta': {'ordering': "('name',)", 'object_name': 'Area', '_ormbases': ['lizard_area.Communique']},
'area_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'communique_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_area.Communique']", 'unique': 'True', 'primary_key': 'True'}),
'data_administrator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.DataAdministrator']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'})
},
'lizard_area.communique': {
'Meta': {'object_name': 'Communique', '_ormbases': ['lizard_geo.GeoObject']},
'code': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'geoobject_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['lizard_geo.GeoObject']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
'lizard_area.dataadministrator': {
'Meta': {'object_name': 'DataAdministrator'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'lizard_geo.geoobject': {
'Meta': {'object_name': 'GeoObject'},
'geo_object_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObjectGroup']"}),
'geometry': ('django.contrib.gis.db.models.fields.GeometryField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'lizard_geo.geoobjectgroup': {
'Meta': {'object_name': 'GeoObjectGroup'},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'db_index': 'True'}),
'source_log': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.fundingorganization': {
'Meta': {'object_name': 'FundingOrganization'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Organization']"}),
'percentage': ('django.db.models.fields.FloatField', [], {})
},
'lizard_measure.krwstatus': {
'Meta': {'object_name': 'KRWStatus'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.krwwatertype': {
'Meta': {'object_name': 'KRWWatertype'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measure': {
'Meta': {'ordering': "('id',)", 'object_name': 'Measure'},
'aggregation_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'areas': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'area_measure_set'", 'blank': 'True', 'to': "orm['lizard_area.Area']"}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureCategory']", 'symmetrical': 'False', 'blank': 'True'}),
'datetime_in_source': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'executive': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'executive_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'exploitation_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'funding_organizations': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Organization']", 'through': "orm['lizard_measure.FundingOrganization']", 'symmetrical': 'False'}),
'geom': ('django.contrib.gis.db.models.fields.GeometryField', [], {'null': 'True', 'blank': 'True'}),
'geometry': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_geo.GeoObject']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'import_raw': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'import_source': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'initiator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'initiator_measure_set'", 'null': 'True', 'to': "orm['lizard_measure.Organization']"}),
'investment_costs': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'is_KRW_measure': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'is_indicator': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'measure_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureType']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']", 'null': 'True', 'blank': 'True'}),
'period': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasurePeriod']", 'null': 'True', 'blank': 'True'}),
'read_only': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'responsible_department': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'status_moments': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.MeasureStatus']", 'through': "orm['lizard_measure.MeasureStatusMoment']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'unit': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Unit']"}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {}),
'waterbodies': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.WaterBody']", 'symmetrical': 'False', 'blank': 'True'})
},
'lizard_measure.measurecategory': {
'Meta': {'object_name': 'MeasureCategory'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measureperiod': {
'Meta': {'ordering': "('start_date', 'end_date')", 'object_name': 'MeasurePeriod'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.measurestatus': {
'Meta': {'ordering': "('-value',)", 'object_name': 'MeasureStatus'},
'color': ('lizard_map.models.ColorField', [], {'max_length': '8'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'value': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
},
'lizard_measure.measurestatusmoment': {
'Meta': {'ordering': "('measure__id', 'status__value')", 'object_name': 'MeasureStatusMoment'},
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'exploitation_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'investment_expenditure': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'measure': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.Measure']"}),
'planning_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'realisation_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.MeasureStatus']"})
},
'lizard_measure.measuretype': {
'Meta': {'ordering': "('code',)", 'object_name': 'MeasureType'},
'code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'combined_name': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'harmonisation': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'klass': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'subcategory': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'units': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['lizard_measure.Unit']", 'symmetrical': 'False', 'blank': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.organization': {
'Meta': {'ordering': "('description',)", 'unique_together': "(('source', 'code'),)", 'object_name': 'Organization'},
'code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'source': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.steeringparameter': {
'Meta': {'object_name': 'SteeringParameter'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']"}),
'fews_parameter': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'target_maximum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'target_minimum': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'})
},
'lizard_measure.unit': {
'Meta': {'object_name': 'Unit'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'conversion_factor': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dimension': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'valid': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'})
},
'lizard_measure.waterbody': {
'Meta': {'object_name': 'WaterBody'},
'area': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_area.Area']", 'null': 'True', 'blank': 'True'}),
'area_ident': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'krw_status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWStatus']", 'null': 'True', 'blank': 'True'}),
'krw_watertype': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lizard_measure.KRWWatertype']", 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['lizard_measure']
| gpl-3.0 |
konrad/kufpybio | kufpybiotools/generate_igr_gff.py | 1 | 1881 | #!/usr/bin/env python
__description__ = ""
__author__ = "Konrad Foerstner <[email protected]>"
__copyright__ = "2013 by Konrad Foerstner <[email protected]>"
__license__ = "ISC license"
__email__ = "[email protected]"
__version__ = ""
import argparse
import csv
import sys
sys.path.append(".")
from kufpybio.gff3 import Gff3Parser, Gff3Entry
from kufpybio.gene import Gene
from kufpybio.igrfinder import IGRFinder
parser = argparse.ArgumentParser(description=__description__)
parser.add_argument("gff_file", type=argparse.FileType("r"))
parser.add_argument("output_file", type=argparse.FileType("w"))
parser.add_argument("--margin", type=int, default=0)
parser.add_argument("--plus_only", default=False, action="store_true")
args = parser.parse_args()
# Build gene list
gene_list = []
gff_parser = Gff3Parser()
region_entry = None
for entry in gff_parser.entries(args.gff_file):
if entry.feature == "region":
region_entry = entry
continue
gene_list.append(Gene(
entry.seq_id, "", "", entry.start, entry.end,
entry.strand))
# Find IGRs and generate GFF file
igr_finder = IGRFinder()
args.output_file.write("##gff-version 3\n")
strands = ["+", "-"]
if args.plus_only is True:
strands = ["+"]
for start, end in igr_finder.find_igrs(gene_list, region_entry.end):
start = start + args.margin
end = end - args.margin
if end <= start:
continue
for strand in strands:
gff3_entry = Gff3Entry({
"seq_id" : region_entry.seq_id,
"source" : "IGR",
"feature" : "IGR",
"start" : start,
"end" : end,
"score" : ".",
"strand" : strand,
"phase" : ".",
"attributes" : "ID=IGR_%s_%s_to_%s" % (
region_entry.seq_id, start, end)})
args.output_file.write(str(gff3_entry) + "\n")
| isc |
Panaetius/woipv | src/models/train_model.py | 1 | 20953 | import tensorflow as tf
from tensorflow.python.client import timeline
from tensorflow.python import debug as tf_debug
import os
import time
from datetime import datetime
import numpy as np
from model import WoipvModel, NetworkType
from mscoco_input import MSCOCOInputProducer
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/training/woipv_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
class Config(object):
path = "%s/../../data/processed/MSCOCO/" % os.path.dirname(
os.path.realpath(__file__))
chkpt_path = "%s/../../models/transfer_chkpt/" % os.path.dirname(
os.path.realpath(__file__))
num_examples_per_epoch = 72000
num_epochs_per_decay = 3
is_training = True
num_classes = 90
initial_learning_rate = 1e-4
learning_rate_decay_factor = 0.25
width = 600
height = 600
min_box_size = 10
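    # Relative weights balancing the RPN and R-CNN classification and
    # regression terms of the joint Faster R-CNN style loss used below.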
rcnn_cls_loss_weight = 95.0 / (256)
rcnn_reg_loss_weight = 0.005
rpn_cls_loss_weight = 2.0
rpn_reg_loss_weight = 25.0
dropout_prob = 0.5
weight_decay = 0.0001
net = NetworkType.PRETRAINED
pretrained_checkpoint_path = "%s/../../models/pretrained/"% os.path.dirname(
os.path.realpath(__file__))
pretrained_checkpoint_meta = "ResNet-L50.meta"
restore_from_chkpt = False
resnet_34_variables_to_restore = ['first_layer/weights:0', 'first_layer/Variable:0', 'first_layer/Variable_1:0', 'first_layer/Variable_2:0', 'first_layer/Variable_3:0', 'reslayer_64_0/sub1/weights:0', 'reslayer_64_0/sub1/Variable:0', 'reslayer_64_0/sub1/Variable_1:0', 'reslayer_64_0/sub1/Variable_2:0', 'reslayer_64_0/sub1/Variable_3:0', 'reslayer_64_0/sub2/weights:0', 'reslayer_64_0/sub2/Variable:0', 'reslayer_64_0/sub2/Variable_1:0', 'reslayer_64_0/sub2/Variable_2:0', 'reslayer_64_0/sub2/Variable_3:0', 'reslayer_64_1/sub1/weights:0', 'reslayer_64_1/sub1/Variable:0', 'reslayer_64_1/sub1/Variable_1:0', 'reslayer_64_1/sub1/Variable_2:0', 'reslayer_64_1/sub1/Variable_3:0', 'reslayer_64_1/sub2/weights:0', 'reslayer_64_1/sub2/Variable:0', 'reslayer_64_1/sub2/Variable_1:0', 'reslayer_64_1/sub2/Variable_2:0', 'reslayer_64_1/sub2/Variable_3:0', 'reslayer_64_2/sub1/weights:0', 'reslayer_64_2/sub1/Variable:0', 'reslayer_64_2/sub1/Variable_1:0', 'reslayer_64_2/sub1/Variable_2:0', 'reslayer_64_2/sub1/Variable_3:0', 'reslayer_64_2/sub2/weights:0', 'reslayer_64_2/sub2/Variable:0', 'reslayer_64_2/sub2/Variable_1:0', 'reslayer_64_2/sub2/Variable_2:0', 'reslayer_64_2/sub2/Variable_3:0', 'reslayer_downsample_128/sub1/weights:0', 'reslayer_downsample_128/sub1/Variable:0', 'reslayer_downsample_128/sub1/Variable_1:0', 'reslayer_downsample_128/sub1/Variable_2:0', 'reslayer_downsample_128/sub1/Variable_3:0', 'reslayer_downsample_128/sub2/weights:0', 'reslayer_downsample_128/sub2/Variable:0', 'reslayer_downsample_128/sub2/Variable_1:0', 'reslayer_downsample_128/sub2/Variable_2:0', 'reslayer_downsample_128/sub2/Variable_3:0', 'reslayer_128_0/sub1/weights:0', 'reslayer_128_0/sub1/Variable:0', 'reslayer_128_0/sub1/Variable_1:0', 'reslayer_128_0/sub1/Variable_2:0', 'reslayer_128_0/sub1/Variable_3:0', 'reslayer_128_0/sub2/weights:0', 'reslayer_128_0/sub2/Variable:0', 'reslayer_128_0/sub2/Variable_1:0', 'reslayer_128_0/sub2/Variable_2:0', 'reslayer_128_0/sub2/Variable_3:0', 'reslayer_128_1/sub1/weights:0', 'reslayer_128_1/sub1/Variable:0', 'reslayer_128_1/sub1/Variable_1:0', 'reslayer_128_1/sub1/Variable_2:0', 'reslayer_128_1/sub1/Variable_3:0', 'reslayer_128_1/sub2/weights:0', 'reslayer_128_1/sub2/Variable:0', 'reslayer_128_1/sub2/Variable_1:0', 'reslayer_128_1/sub2/Variable_2:0', 'reslayer_128_1/sub2/Variable_3:0', 'reslayer_128_2/sub1/weights:0', 'reslayer_128_2/sub1/Variable:0', 'reslayer_128_2/sub1/Variable_1:0', 'reslayer_128_2/sub1/Variable_2:0', 'reslayer_128_2/sub1/Variable_3:0', 'reslayer_128_2/sub2/weights:0', 'reslayer_128_2/sub2/Variable:0', 'reslayer_128_2/sub2/Variable_1:0', 'reslayer_128_2/sub2/Variable_2:0', 'reslayer_128_2/sub2/Variable_3:0', 'reslayer_downsample_256/sub1/weights:0', 'reslayer_downsample_256/sub1/Variable:0', 'reslayer_downsample_256/sub1/Variable_1:0', 'reslayer_downsample_256/sub1/Variable_2:0', 'reslayer_downsample_256/sub1/Variable_3:0', 'reslayer_downsample_256/sub2/weights:0', 'reslayer_downsample_256/sub2/Variable:0', 'reslayer_downsample_256/sub2/Variable_1:0', 'reslayer_downsample_256/sub2/Variable_2:0', 'reslayer_downsample_256/sub2/Variable_3:0', 'reslayer_256_0/sub1/weights:0', 'reslayer_256_0/sub1/Variable:0', 'reslayer_256_0/sub1/Variable_1:0', 'reslayer_256_0/sub1/Variable_2:0', 'reslayer_256_0/sub1/Variable_3:0', 'reslayer_256_0/sub2/weights:0', 'reslayer_256_0/sub2/Variable:0', 'reslayer_256_0/sub2/Variable_1:0', 'reslayer_256_0/sub2/Variable_2:0', 'reslayer_256_0/sub2/Variable_3:0', 'reslayer_256_1/sub1/weights:0', 'reslayer_256_1/sub1/Variable:0', 
'reslayer_256_1/sub1/Variable_1:0', 'reslayer_256_1/sub1/Variable_2:0', 'reslayer_256_1/sub1/Variable_3:0', 'reslayer_256_1/sub2/weights:0', 'reslayer_256_1/sub2/Variable:0', 'reslayer_256_1/sub2/Variable_1:0', 'reslayer_256_1/sub2/Variable_2:0', 'reslayer_256_1/sub2/Variable_3:0', 'reslayer_256_2/sub1/weights:0', 'reslayer_256_2/sub1/Variable:0', 'reslayer_256_2/sub1/Variable_1:0', 'reslayer_256_2/sub1/Variable_2:0', 'reslayer_256_2/sub1/Variable_3:0', 'reslayer_256_2/sub2/weights:0', 'reslayer_256_2/sub2/Variable:0', 'reslayer_256_2/sub2/Variable_1:0', 'reslayer_256_2/sub2/Variable_2:0', 'reslayer_256_2/sub2/Variable_3:0', 'reslayer_256_3/sub1/weights:0', 'reslayer_256_3/sub1/Variable:0', 'reslayer_256_3/sub1/Variable_1:0', 'reslayer_256_3/sub1/Variable_2:0', 'reslayer_256_3/sub1/Variable_3:0', 'reslayer_256_3/sub2/weights:0', 'reslayer_256_3/sub2/Variable:0', 'reslayer_256_3/sub2/Variable_1:0', 'reslayer_256_3/sub2/Variable_2:0', 'reslayer_256_3/sub2/Variable_3:0', 'reslayer_256_4/sub1/weights:0', 'reslayer_256_4/sub1/Variable:0', 'reslayer_256_4/sub1/Variable_1:0', 'reslayer_256_4/sub1/Variable_2:0', 'reslayer_256_4/sub1/Variable_3:0', 'reslayer_256_4/sub2/weights:0', 'reslayer_256_4/sub2/Variable:0', 'reslayer_256_4/sub2/Variable_1:0', 'reslayer_256_4/sub2/Variable_2:0', 'reslayer_256_4/sub2/Variable_3:0', 'reslayer_downsample_512/sub1/weights:0', 'reslayer_downsample_512/sub1/Variable:0', 'reslayer_downsample_512/sub1/Variable_1:0', 'reslayer_downsample_512/sub1/Variable_2:0', 'reslayer_downsample_512/sub1/Variable_3:0', 'reslayer_downsample_512/sub2/weights:0', 'reslayer_downsample_512/sub2/Variable:0', 'reslayer_downsample_512/sub2/Variable_1:0', 'reslayer_downsample_512/sub2/Variable_2:0', 'reslayer_downsample_512/sub2/Variable_3:0', 'reslayer_512_0/sub1/weights:0', 'reslayer_512_0/sub1/Variable:0', 'reslayer_512_0/sub1/Variable_1:0', 'reslayer_512_0/sub1/Variable_2:0', 'reslayer_512_0/sub1/Variable_3:0', 'reslayer_512_0/sub2/weights:0', 'reslayer_512_0/sub2/Variable:0', 'reslayer_512_0/sub2/Variable_1:0', 'reslayer_512_0/sub2/Variable_2:0', 'reslayer_512_0/sub2/Variable_3:0', 'reslayer_512_1/sub1/weights:0', 'reslayer_512_1/sub1/Variable:0', 'reslayer_512_1/sub1/Variable_1:0', 'reslayer_512_1/sub1/Variable_2:0', 'reslayer_512_1/sub1/Variable_3:0', 'reslayer_512_1/sub2/weights:0', 'reslayer_512_1/sub2/Variable:0', 'reslayer_512_1/sub2/Variable_1:0', 'reslayer_512_1/sub2/Variable_2:0', 'reslayer_512_1/sub2/Variable_3:0']
resnet_50_variables_to_restore = ['first_layer/weights:0', 'first_layer/Variable:0', 'first_layer/Variable_1:0', 'first_layer/Variable_2:0', 'first_layer/Variable_3:0', 'reslayer_64_0/sub1/weights:0', 'reslayer_64_0/sub1/Variable:0', 'reslayer_64_0/sub1/Variable_1:0', 'reslayer_64_0/sub1/Variable_2:0', 'reslayer_64_0/sub1/Variable_3:0', 'reslayer_64_0/sub2/weights:0', 'reslayer_64_0/sub2/Variable:0', 'reslayer_64_0/sub2/Variable_1:0', 'reslayer_64_0/sub2/Variable_2:0', 'reslayer_64_0/sub2/Variable_3:0', 'reslayer_64_1/sub1/weights:0', 'reslayer_64_1/sub1/Variable:0', 'reslayer_64_1/sub1/Variable_1:0', 'reslayer_64_1/sub1/Variable_2:0', 'reslayer_64_1/sub1/Variable_3:0', 'reslayer_64_1/sub2/weights:0', 'reslayer_64_1/sub2/Variable:0', 'reslayer_64_1/sub2/Variable_1:0', 'reslayer_64_1/sub2/Variable_2:0', 'reslayer_64_1/sub2/Variable_3:0', 'reslayer_64_2/sub1/weights:0', 'reslayer_64_2/sub1/Variable:0', 'reslayer_64_2/sub1/Variable_1:0', 'reslayer_64_2/sub1/Variable_2:0', 'reslayer_64_2/sub1/Variable_3:0', 'reslayer_64_2/sub2/weights:0', 'reslayer_64_2/sub2/Variable:0', 'reslayer_64_2/sub2/Variable_1:0', 'reslayer_64_2/sub2/Variable_2:0', 'reslayer_64_2/sub2/Variable_3:0', 'reslayer_downsample_128/sub1/weights:0', 'reslayer_downsample_128/sub1/Variable:0', 'reslayer_downsample_128/sub1/Variable_1:0', 'reslayer_downsample_128/sub1/Variable_2:0', 'reslayer_downsample_128/sub1/Variable_3:0', 'reslayer_downsample_128/sub2/weights:0', 'reslayer_downsample_128/sub2/Variable:0', 'reslayer_downsample_128/sub2/Variable_1:0', 'reslayer_downsample_128/sub2/Variable_2:0', 'reslayer_downsample_128/sub2/Variable_3:0', 'reslayer_128_0/sub1/weights:0', 'reslayer_128_0/sub1/Variable:0', 'reslayer_128_0/sub1/Variable_1:0', 'reslayer_128_0/sub1/Variable_2:0', 'reslayer_128_0/sub1/Variable_3:0', 'reslayer_128_0/sub2/weights:0', 'reslayer_128_0/sub2/Variable:0', 'reslayer_128_0/sub2/Variable_1:0', 'reslayer_128_0/sub2/Variable_2:0', 'reslayer_128_0/sub2/Variable_3:0', 'reslayer_128_1/sub1/weights:0', 'reslayer_128_1/sub1/Variable:0', 'reslayer_128_1/sub1/Variable_1:0', 'reslayer_128_1/sub1/Variable_2:0', 'reslayer_128_1/sub1/Variable_3:0', 'reslayer_128_1/sub2/weights:0', 'reslayer_128_1/sub2/Variable:0', 'reslayer_128_1/sub2/Variable_1:0', 'reslayer_128_1/sub2/Variable_2:0', 'reslayer_128_1/sub2/Variable_3:0', 'reslayer_128_2/sub1/weights:0', 'reslayer_128_2/sub1/Variable:0', 'reslayer_128_2/sub1/Variable_1:0', 'reslayer_128_2/sub1/Variable_2:0', 'reslayer_128_2/sub1/Variable_3:0', 'reslayer_128_2/sub2/weights:0', 'reslayer_128_2/sub2/Variable:0', 'reslayer_128_2/sub2/Variable_1:0', 'reslayer_128_2/sub2/Variable_2:0', 'reslayer_128_2/sub2/Variable_3:0', 'reslayer_downsample_256/sub1/weights:0', 'reslayer_downsample_256/sub1/Variable:0', 'reslayer_downsample_256/sub1/Variable_1:0', 'reslayer_downsample_256/sub1/Variable_2:0', 'reslayer_downsample_256/sub1/Variable_3:0', 'reslayer_downsample_256/sub2/weights:0', 'reslayer_downsample_256/sub2/Variable:0', 'reslayer_downsample_256/sub2/Variable_1:0', 'reslayer_downsample_256/sub2/Variable_2:0', 'reslayer_downsample_256/sub2/Variable_3:0', 'reslayer_256_0/sub1/weights:0', 'reslayer_256_0/sub1/Variable:0', 'reslayer_256_0/sub1/Variable_1:0', 'reslayer_256_0/sub1/Variable_2:0', 'reslayer_256_0/sub1/Variable_3:0', 'reslayer_256_0/sub2/weights:0', 'reslayer_256_0/sub2/Variable:0', 'reslayer_256_0/sub2/Variable_1:0', 'reslayer_256_0/sub2/Variable_2:0', 'reslayer_256_0/sub2/Variable_3:0', 'reslayer_256_1/sub1/weights:0', 'reslayer_256_1/sub1/Variable:0', 
'reslayer_256_1/sub1/Variable_1:0', 'reslayer_256_1/sub1/Variable_2:0', 'reslayer_256_1/sub1/Variable_3:0', 'reslayer_256_1/sub2/weights:0', 'reslayer_256_1/sub2/Variable:0', 'reslayer_256_1/sub2/Variable_1:0', 'reslayer_256_1/sub2/Variable_2:0', 'reslayer_256_1/sub2/Variable_3:0', 'reslayer_256_2/sub1/weights:0', 'reslayer_256_2/sub1/Variable:0', 'reslayer_256_2/sub1/Variable_1:0', 'reslayer_256_2/sub1/Variable_2:0', 'reslayer_256_2/sub1/Variable_3:0', 'reslayer_256_2/sub2/weights:0', 'reslayer_256_2/sub2/Variable:0', 'reslayer_256_2/sub2/Variable_1:0', 'reslayer_256_2/sub2/Variable_2:0', 'reslayer_256_2/sub2/Variable_3:0', 'reslayer_256_3/sub1/weights:0', 'reslayer_256_3/sub1/Variable:0', 'reslayer_256_3/sub1/Variable_1:0', 'reslayer_256_3/sub1/Variable_2:0', 'reslayer_256_3/sub1/Variable_3:0', 'reslayer_256_3/sub2/weights:0', 'reslayer_256_3/sub2/Variable:0', 'reslayer_256_3/sub2/Variable_1:0', 'reslayer_256_3/sub2/Variable_2:0', 'reslayer_256_3/sub2/Variable_3:0', 'reslayer_256_4/sub1/weights:0', 'reslayer_256_4/sub1/Variable:0', 'reslayer_256_4/sub1/Variable_1:0', 'reslayer_256_4/sub1/Variable_2:0', 'reslayer_256_4/sub1/Variable_3:0', 'reslayer_256_4/sub2/weights:0', 'reslayer_256_4/sub2/Variable:0', 'reslayer_256_4/sub2/Variable_1:0', 'reslayer_256_4/sub2/Variable_2:0', 'reslayer_256_4/sub2/Variable_3:0', 'reslayer_downsample_512/sub1/weights:0', 'reslayer_downsample_512/sub1/Variable:0', 'reslayer_downsample_512/sub1/Variable_1:0', 'reslayer_downsample_512/sub1/Variable_2:0', 'reslayer_downsample_512/sub1/Variable_3:0', 'reslayer_downsample_512/sub2/weights:0', 'reslayer_downsample_512/sub2/Variable:0', 'reslayer_downsample_512/sub2/Variable_1:0', 'reslayer_downsample_512/sub2/Variable_2:0', 'reslayer_downsample_512/sub2/Variable_3:0', 'reslayer_512_0/sub1/weights:0', 'reslayer_512_0/sub1/Variable:0', 'reslayer_512_0/sub1/Variable_1:0', 'reslayer_512_0/sub1/Variable_2:0', 'reslayer_512_0/sub1/Variable_3:0', 'reslayer_512_0/sub2/weights:0', 'reslayer_512_0/sub2/Variable:0', 'reslayer_512_0/sub2/Variable_1:0', 'reslayer_512_0/sub2/Variable_2:0', 'reslayer_512_0/sub2/Variable_3:0', 'reslayer_512_1/sub1/weights:0', 'reslayer_512_1/sub1/Variable:0', 'reslayer_512_1/sub1/Variable_1:0', 'reslayer_512_1/sub1/Variable_2:0', 'reslayer_512_1/sub1/Variable_3:0', 'reslayer_512_1/sub2/weights:0', 'reslayer_512_1/sub2/Variable:0', 'reslayer_512_1/sub2/Variable_1:0', 'reslayer_512_1/sub2/Variable_2:0', 'reslayer_512_1/sub2/Variable_3:0']
graph = tf.Graph()
def train():
"""Train ip5wke for a number of steps."""
print("Building graph %.3f" % time.time())
cfg = Config()
with cfg.graph.as_default():
        # Get images and labels from the MS COCO input producer.
input_producer = MSCOCOInputProducer(cfg)
images, categories, bboxes = input_producer.inputs()
model = WoipvModel(cfg)
config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
conv_output = None
if cfg.net == NetworkType.PRETRAINED:
print("restoring pretrained model")
new_saver = tf.train.import_meta_graph(cfg.pretrained_checkpoint_path + cfg.pretrained_checkpoint_meta, input_map={'images': images})
new_saver.restore(sess, tf.train.latest_checkpoint(cfg.pretrained_checkpoint_path))
conv_output = cfg.graph.get_tensor_by_name('scale4/block6/Relu:0')
print(conv_output)
global_step = tf.Variable(0, trainable=False, name="global_step")
# Build a Graph that computes the logits predictions from the
# inference model.
class_scores, region_scores, rpn_class_scores, rpn_region_scores, \
proposed_boxes = \
model.inference(images, conv_output)
# Calculate loss.
loss, rcn_accuracy, rpn_accuracy = model.loss(class_scores,
region_scores,
rpn_class_scores,
rpn_region_scores, categories, bboxes, proposed_boxes, images)
# Build a Graph that trains the model with one batch of examples and
# updates the model parameters.
train_op = model.train(loss, global_step)
# Create a saver.
saver = tf.train.Saver(tf.global_variables(),
write_version=tf.train.SaverDef.V2)
# Build the summary operation based on the TF collection of Summaries.
summary_op = tf.summary.merge_all()
# Build an initialization operation to run below.
init = tf.global_variables_initializer()
# Start running operations on the Graph.
print("Running init %.3f" % time.time())
# sess = tf_debug.LocalCLIDebugWrapperSession(sess)
sess.run(init)
if cfg.restore_from_chkpt:
# restore variables (for transfer learning)
print("Restoring checkpoint for transfer learning %.3f" %
time.time())
ckpt = tf.train.get_checkpoint_state(cfg.chkpt_path)
ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            if cfg.net == NetworkType.RESNET34:
                restore_names = cfg.resnet_34_variables_to_restore
            elif cfg.net == NetworkType.RESNET50:
                restore_names = cfg.resnet_50_variables_to_restore
            variables_to_restore = [v for v in tf.global_variables()
                                    if v.name in restore_names]
chkpt_saver = tf.train.Saver(variables_to_restore,
write_version=tf.train.SaverDef.V2)
chkpt_saver.restore(sess, ckpt.model_checkpoint_path)
print("checkpoint restored %.3f" % time.time())
# Start the queue runners.
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
# run_metadata = tf.RunMetadata()
print("Started training %.3f" % time.time())
for step in range(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
# options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE),
# run_metadata=run_metadata)
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
#tf.contrib.tfprof.model_analyzer.print_model_analysis(
# tf.get_default_graph(),
# run_meta=run_metadata,
# tfprof_options={
# 'max_depth': 10000,
# 'min_bytes': 1, # Only >=1
# 'min_micros': 1, # Only >=1
# 'min_params': 0,
# 'min_float_ops': 0,
# 'device_regexes': ['.*'],
# 'order_by': 'name',
# 'account_type_regexes': ['.*'],
# 'start_name_regexes': ['.*'],
# 'trim_name_regexes': [],
# 'show_name_regexes': ['.*'],
# 'hide_name_regexes': [],
# 'account_displayed_op_only': True,
# 'select': ['micros'],
# 'viz': False,
# 'dump_to_file': ''
# })
#return
if step % 50 == 0:
examples_per_sec = 1.0 / duration
sec_per_batch = float(duration)
# correct_prediction = tf.equal(tf.argmax(logits, 1),
# tf.cast(labels, tf.int64))
# accuracy = tf.reduce_mean(tf.cast(correct_prediction,
# tf.float32))
# train_acc = sess.run(accuracy)
# tf.summary.scalar('accuracy', accuracy)
# trace = timeline.Timeline(step_stats=run_metadata.step_stats)
# trace_file = open('timeline.ctf.json', 'w')
# trace_file.write(trace.generate_chrome_trace_format(show_memory=True))
# trace_file.close()
rcn_acc, rpn_acc = sess.run([rcn_accuracy, rpn_accuracy])
format_str = ('%s: step %d, loss = %.2f, rcn_accuracy = %.3f '
' rpn_acc = %.3f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
rcn_acc, rpn_acc,
examples_per_sec, sec_per_batch))
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
# When done, ask the threads to stop.
coord.request_stop()
# Wait for threads to finish.
coord.join(threads)
# noinspection PyUnusedLocal
def main(argv=None):
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
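# Sketch of how this trainer is typically launched; the paths and step count
# below are illustrative assumptions, not values taken from the repository:
#
#     python train_model.py --train_dir /tmp/woipv_train --max_steps 100000
#
# tf.app.run() parses the FLAGS defined at the top of the file and then calls
# main(), which recreates train_dir and starts train().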
| mit |
ininex/geofire-python | resource/lib/python2.7/site-packages/gcloud/bigquery/client.py | 3 | 10779 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client for interacting with the Google BigQuery API."""
from gcloud.client import JSONClient
from gcloud.bigquery.connection import Connection
from gcloud.bigquery.dataset import Dataset
from gcloud.bigquery.job import CopyJob
from gcloud.bigquery.job import ExtractTableToStorageJob
from gcloud.bigquery.job import LoadTableFromStorageJob
from gcloud.bigquery.job import QueryJob
from gcloud.bigquery.query import QueryResults
class Client(JSONClient):
"""Client to bundle configuration needed for API requests.
:type project: str
:param project: the project which the client acts on behalf of. Will be
passed when creating a dataset / job. If not passed,
falls back to the default inferred from the environment.
:type credentials: :class:`oauth2client.client.OAuth2Credentials` or
:class:`NoneType`
:param credentials: The OAuth2 Credentials to use for the connection
owned by this client. If not passed (and if no ``http``
object is passed), falls back to the default inferred
from the environment.
:type http: :class:`httplib2.Http` or class that defines ``request()``.
:param http: An optional HTTP object to make requests. If not passed, an
``http`` object is created that is bound to the
``credentials`` for the current object.
"""
_connection_class = Connection
def list_datasets(self, include_all=False, max_results=None,
page_token=None):
"""List datasets for the project associated with this client.
See:
https://cloud.google.com/bigquery/docs/reference/v2/datasets/list
:type include_all: boolean
:param include_all: True if results include hidden datasets.
:type max_results: int
:param max_results: maximum number of datasets to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of datasets. If
not passed, the API will return the first page of
datasets.
:rtype: tuple, (list, str)
:returns: list of :class:`gcloud.bigquery.dataset.Dataset`, plus a
"next page token" string: if the token is not None,
indicates that more datasets can be retrieved with another
call (pass that value as ``page_token``).
"""
params = {}
if include_all:
params['all'] = True
if max_results is not None:
params['maxResults'] = max_results
if page_token is not None:
params['pageToken'] = page_token
path = '/projects/%s/datasets' % (self.project,)
resp = self.connection.api_request(method='GET', path=path,
query_params=params)
datasets = [Dataset.from_api_repr(resource, self)
for resource in resp.get('datasets', ())]
return datasets, resp.get('nextPageToken')
def dataset(self, dataset_name):
"""Construct a dataset bound to this client.
:type dataset_name: str
:param dataset_name: Name of the dataset.
:rtype: :class:`gcloud.bigquery.dataset.Dataset`
:returns: a new ``Dataset`` instance
"""
return Dataset(dataset_name, client=self)
def job_from_resource(self, resource):
"""Detect correct job type from resource and instantiate.
:type resource: dict
:param resource: one job resource from API response
:rtype: One of:
:class:`gcloud.bigquery.job.LoadTableFromStorageJob`,
:class:`gcloud.bigquery.job.CopyJob`,
:class:`gcloud.bigquery.job.ExtractTableToStorageJob`,
:class:`gcloud.bigquery.job.QueryJob`,
:class:`gcloud.bigquery.job.RunSyncQueryJob`
:returns: the job instance, constructed via the resource
"""
config = resource['configuration']
if 'load' in config:
return LoadTableFromStorageJob.from_api_repr(resource, self)
elif 'copy' in config:
return CopyJob.from_api_repr(resource, self)
elif 'extract' in config:
return ExtractTableToStorageJob.from_api_repr(resource, self)
elif 'query' in config:
return QueryJob.from_api_repr(resource, self)
raise ValueError('Cannot parse job resource')
def list_jobs(self, max_results=None, page_token=None, all_users=None,
state_filter=None):
"""List jobs for the project associated with this client.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs/list
:type max_results: int
:param max_results: maximum number of jobs to return, If not
passed, defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of jobs. If
not passed, the API will return the first page of
jobs.
:type all_users: boolean
:param all_users: if true, include jobs owned by all users in the
project.
:type state_filter: str
:param state_filter: if passed, include only jobs matching the given
state. One of
* ``"done"``
* ``"pending"``
* ``"running"``
:rtype: tuple, (list, str)
:returns: list of job instances, plus a "next page token" string:
if the token is not ``None``, indicates that more jobs can be
retrieved with another call, passing that value as
``page_token``).
"""
params = {'projection': 'full'}
if max_results is not None:
params['maxResults'] = max_results
if page_token is not None:
params['pageToken'] = page_token
if all_users is not None:
params['allUsers'] = all_users
if state_filter is not None:
params['stateFilter'] = state_filter
path = '/projects/%s/jobs' % (self.project,)
resp = self.connection.api_request(method='GET', path=path,
query_params=params)
jobs = [self.job_from_resource(resource) for resource in resp['jobs']]
return jobs, resp.get('nextPageToken')
def load_table_from_storage(self, job_name, destination, *source_uris):
"""Construct a job for loading data into a table from CloudStorage.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type job_name: str
:param job_name: Name of the job.
:type destination: :class:`gcloud.bigquery.table.Table`
:param destination: Table into which data is to be loaded.
:type source_uris: sequence of string
:param source_uris: URIs of data files to be loaded; in format
``gs://<bucket_name>/<object_name_or_glob>``.
:rtype: :class:`gcloud.bigquery.job.LoadTableFromStorageJob`
:returns: a new ``LoadTableFromStorageJob`` instance
"""
return LoadTableFromStorageJob(job_name, destination, source_uris,
client=self)
def copy_table(self, job_name, destination, *sources):
"""Construct a job for copying one or more tables into another table.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
:type job_name: str
:param job_name: Name of the job.
:type destination: :class:`gcloud.bigquery.table.Table`
:param destination: Table into which data is to be copied.
:type sources: sequence of :class:`gcloud.bigquery.table.Table`
:param sources: tables to be copied.
:rtype: :class:`gcloud.bigquery.job.CopyJob`
:returns: a new ``CopyJob`` instance
"""
return CopyJob(job_name, destination, sources, client=self)
def extract_table_to_storage(self, job_name, source, *destination_uris):
"""Construct a job for extracting a table into Cloud Storage files.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.extract
:type job_name: str
:param job_name: Name of the job.
:type source: :class:`gcloud.bigquery.table.Table`
:param source: table to be extracted.
:type destination_uris: sequence of string
:param destination_uris: URIs of CloudStorage file(s) into which
table data is to be extracted; in format
``gs://<bucket_name>/<object_name_or_glob>``.
:rtype: :class:`gcloud.bigquery.job.ExtractTableToStorageJob`
:returns: a new ``ExtractTableToStorageJob`` instance
"""
return ExtractTableToStorageJob(job_name, source, destination_uris,
client=self)
def run_async_query(self, job_name, query):
"""Construct a job for running a SQL query asynchronously.
See:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.query
:type job_name: str
:param job_name: Name of the job.
:type query: str
:param query: SQL query to be executed
:rtype: :class:`gcloud.bigquery.job.QueryJob`
:returns: a new ``QueryJob`` instance
"""
return QueryJob(job_name, query, client=self)
def run_sync_query(self, query):
"""Run a SQL query synchronously.
:type query: str
:param query: SQL query to be executed
:rtype: :class:`gcloud.bigquery.query.QueryResults`
:returns: a new ``QueryResults`` instance
"""
return QueryResults(query, client=self)
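# Illustrative usage sketch, limited to the methods defined above (the
# project name, dataset name and query string are placeholders):
#
#     client = Client(project='my-project')
#     datasets, next_page = client.list_datasets(max_results=10)
#     dataset = client.dataset('my_dataset')
#     query = client.run_sync_query('SELECT 1')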
| mit |
jakobmoss/tsa | utils/makeweights.py | 1 | 2350 | # -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time Series Analysis -- Generate statistical weights from scatter
#
# Author: Jakob Rørsted Mosumgaard
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###############################################################################
# Modules
###############################################################################
from __future__ import print_function, with_statement, division
import numpy as np
import bottleneck as bn
###############################################################################
# Functions
###############################################################################
def genweight(datname, dpath, wpath):
"""
Combine time series with statistical weights calculated from scatter
Arguments:
- `datname`: Identifier of data file
- `dpath` : Path to data file (time series).
- `wpath` : Path to scatter file (with same time points!)
"""
# Pretty print
print('Generating weights for {0} !'.format(dpath))
# Load data and weights
t, d = np.loadtxt(dpath, unpack=True)
tt, sig = np.loadtxt(wpath, unpack=True)
# Check that times are indeed the same
tdif = t - tt
if tdif.any() != 0:
print('Error! Not the same time points! Quitting!')
exit()
# Moving variance (Hans: M = 50 - 100)
M = 70
movstd = bn.move_std(sig, M, min_count=1)
movvar = np.square(movstd)
# Remove first point
x = 1
t = t[x:]
d = d[x:]
movvar = movvar[x:]
# Calculate weights from scatter (1 / variance)
w = np.divide(1.0, movvar)
# Save
    outfile = datname + '_with-weights.txt'
np.savetxt(outfile, np.transpose([t, d, w]), fmt='%.15e', delimiter='\t')
# Done!
print('Done!\n')
###############################################################################
# Script
###############################################################################
if __name__ == "__main__":
# Definitions
datdir = '../../data/'
ext = '.txt'
append = '-high'
# Run for star 1
star = 'star01'
genweight(star, datdir + star + ext, star + append + ext)
# Run for star 2
star = 'star02'
genweight(star, datdir + star + ext, star + append + ext)
| mit |
erstis-go-botting/sexy-bot | misc.py | 1 | 1888 | import os
# Checks whether settings.ini should be generated. If universe, username and
# password are not all given, it generates a settings.ini with the default account.
# The generator only accepts universe 82 if the flag argument is given as True
# (to make sure that universe 82 is really intended).
def settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if (os.path.isfile('settings/settings.ini')):
print("settings file found, stopping now.")
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
    if not os.path.isdir('settings'):
        os.makedirs('settings')
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
def force_settings_generator(universe = 82, username = 'defaultName', password = 'defaultPassword', flag=False):
path = os.path.normcase('settings/settings.ini')
if not (os.path.isfile('settings/settings.ini')):
settings_generator(universe, username, password, flag)
return
if (universe == 82 and not(flag)) or (username == 'defaultName') or (password == 'defaultPassword'):
print("Not all fields specified, fallback on default configuration")
universe = 82
username = 'defaultName'
password = 'defaultPassword'
with open(path,'w') as foo:
foo.write('[credentials]\nuniverse = '+ str(universe) +'\npassword = '+password+'\nusername = '+username)
print("Settings.ini generated")
#settings_generator()
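# Hedged usage sketch (the credentials below are placeholders):
#
#     settings_generator(universe=82, username='myName',
#                        password='myPassword', flag=True)
#
# flag=True is required whenever universe 82 really is intended; otherwise the
# call falls back to the default account. force_settings_generator() takes the
# same arguments but overwrites an existing settings/settings.ini.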
| mit |
cainmatt/django | tests/template_tests/syntax_tests/test_autoescape.py | 337 | 5575 | from django.template import TemplateSyntaxError
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import SafeClass, UnsafeClass, setup
class AutoescapeTagTests(SimpleTestCase):
@setup({'autoescape-tag01': '{% autoescape off %}hello{% endautoescape %}'})
def test_autoescape_tag01(self):
output = self.engine.render_to_string('autoescape-tag01')
self.assertEqual(output, 'hello')
@setup({'autoescape-tag02': '{% autoescape off %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag02(self):
output = self.engine.render_to_string('autoescape-tag02', {'first': '<b>hello</b>'})
self.assertEqual(output, '<b>hello</b>')
@setup({'autoescape-tag03': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag03(self):
output = self.engine.render_to_string('autoescape-tag03', {'first': '<b>hello</b>'})
self.assertEqual(output, '<b>hello</b>')
# Autoescape disabling and enabling nest in a predictable way.
@setup({'autoescape-tag04': '{% autoescape off %}'
'{{ first }} {% autoescape on %}{{ first }}{% endautoescape %}{% endautoescape %}'})
def test_autoescape_tag04(self):
output = self.engine.render_to_string('autoescape-tag04', {'first': '<a>'})
self.assertEqual(output, '<a> <a>')
@setup({'autoescape-tag05': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag05(self):
output = self.engine.render_to_string('autoescape-tag05', {'first': '<b>first</b>'})
self.assertEqual(output, '<b>first</b>')
# Strings (ASCII or unicode) already marked as "safe" are not
# auto-escaped
@setup({'autoescape-tag06': '{{ first }}'})
def test_autoescape_tag06(self):
output = self.engine.render_to_string('autoescape-tag06', {'first': mark_safe('<b>first</b>')})
self.assertEqual(output, '<b>first</b>')
@setup({'autoescape-tag07': '{% autoescape on %}{{ first }}{% endautoescape %}'})
def test_autoescape_tag07(self):
output = self.engine.render_to_string('autoescape-tag07', {'first': mark_safe('<b>Apple</b>')})
self.assertEqual(output, '<b>Apple</b>')
@setup({'autoescape-tag08': r'{% autoescape on %}'
r'{{ var|default_if_none:" endquote\" hah" }}{% endautoescape %}'})
def test_autoescape_tag08(self):
"""
Literal string arguments to filters, if used in the result, are safe.
"""
output = self.engine.render_to_string('autoescape-tag08', {"var": None})
self.assertEqual(output, ' endquote" hah')
# Objects which return safe strings as their __str__ method
# won't get double-escaped.
@setup({'autoescape-tag09': r'{{ unsafe }}'})
def test_autoescape_tag09(self):
output = self.engine.render_to_string('autoescape-tag09', {'unsafe': UnsafeClass()})
self.assertEqual(output, 'you & me')
@setup({'autoescape-tag10': r'{{ safe }}'})
def test_autoescape_tag10(self):
output = self.engine.render_to_string('autoescape-tag10', {'safe': SafeClass()})
self.assertEqual(output, 'you > me')
@setup({'autoescape-filtertag01': '{{ first }}{% filter safe %}{{ first }} x<y{% endfilter %}'})
def test_autoescape_filtertag01(self):
"""
The "safe" and "escape" filters cannot work due to internal
implementation details (fortunately, the (no)autoescape block
tags can be used in those cases)
"""
with self.assertRaises(TemplateSyntaxError):
self.engine.render_to_string('autoescape-filtertag01', {'first': '<a>'})
@setup({'autoescape-ifequal01': '{% ifequal var "this & that" %}yes{% endifequal %}'})
def test_autoescape_ifequal01(self):
"""
ifequal compares unescaped vales.
"""
output = self.engine.render_to_string('autoescape-ifequal01', {'var': 'this & that'})
self.assertEqual(output, 'yes')
# Arguments to filters are 'safe' and manipulate their input unescaped.
@setup({'autoescape-filters01': '{{ var|cut:"&" }}'})
def test_autoescape_filters01(self):
output = self.engine.render_to_string('autoescape-filters01', {'var': 'this & that'})
self.assertEqual(output, 'this that')
@setup({'autoescape-filters02': '{{ var|join:" & " }}'})
def test_autoescape_filters02(self):
output = self.engine.render_to_string('autoescape-filters02', {'var': ('Tom', 'Dick', 'Harry')})
self.assertEqual(output, 'Tom & Dick & Harry')
@setup({'autoescape-literals01': '{{ "this & that" }}'})
def test_autoescape_literals01(self):
"""
Literal strings are safe.
"""
output = self.engine.render_to_string('autoescape-literals01')
self.assertEqual(output, 'this & that')
@setup({'autoescape-stringiterations01': '{% for l in var %}{{ l }},{% endfor %}'})
def test_autoescape_stringiterations01(self):
"""
Iterating over strings outputs safe characters.
"""
output = self.engine.render_to_string('autoescape-stringiterations01', {'var': 'K&R'})
self.assertEqual(output, 'K,&,R,')
@setup({'autoescape-lookup01': '{{ var.key }}'})
def test_autoescape_lookup01(self):
"""
Escape requirement survives lookup.
"""
output = self.engine.render_to_string('autoescape-lookup01', {'var': {'key': 'this & that'}})
self.assertEqual(output, 'this & that')
| bsd-3-clause |
ActiveState/code | recipes/Python/275366_Email_address_leech/recipe-275366.py | 1 | 1624 | import re
def test():
text = \
''' You can contact us at [email protected]
or at yourname AT server DOT site DOT com.
Also at o u r n a m e @ s e r v e r dot s i t e dot c o m
and t.h.e.i.r.n.a.m.e at server dot s/i/t/e DOT COM.
'''
for email in emailLeech(text): print email
DOMAINS = ["com","edu","net","org","gov","us"] #.. and so on
FLAGS = re.IGNORECASE | re.VERBOSE
AT = r'(?: @ | \b A \s* T \b)'
ADDRESSPART = r'\b (?: \w+ | \w (?:(?:\s+|\W) \w)*) \b'
DOMAIN = r'(?:%s)' % '|'.join(["(?:\s*|\W)".join(domain) for domain in DOMAINS])
NONWORD = re.compile(r'\W+')
DOT_REGEX = re.compile(r'(?: \. | \b D \s* O \s* T \b)', FLAGS)
EMAIL_REGEX = re.compile(
(r'(?P<name>%s) \W* %s \W*' % (ADDRESSPART,AT)) +
r'(?P<site>(?: %s \W* %s \W*)+)' % (ADDRESSPART, DOT_REGEX.pattern) +
r'(?P<domain>%s)' % DOMAIN, FLAGS)
def emailLeech(text):
''' An iterator over recognized email addresses within text'''
while (True):
match = EMAIL_REGEX.search(text)
if not match: break
parts = [match.group("name")] + \
DOT_REGEX.split(match.group("site")) + \
[match.group("domain")]
# discard non word chars
parts = [NONWORD.sub('',part) for part in parts]
# discard all empty parts and make lowercase
parts = [part.lower() for part in parts if len(part)>0]
# join the parts
yield "%s@%s.%s" % (parts[0], '.'.join(parts[1:-1]), parts[-1])
text = text[match.end():]
if __name__ == '__main__': test()
| mit |
mosdef-hub/foyer | foyer/tests/test_forcefield_parameters.py | 1 | 10029 | import numpy as np
import pytest
from foyer import Forcefield, forcefields
from foyer.exceptions import MissingForceError, MissingParametersError
from foyer.forcefield import get_available_forcefield_loaders
from foyer.tests.base_test import BaseTest
from foyer.tests.utils import get_fn
@pytest.mark.skipif(
condition="load_GAFF"
not in map(lambda func: func.__name__, get_available_forcefield_loaders()),
reason="GAFF Plugin is not installed",
)
class TestForcefieldParameters(BaseTest):
@pytest.fixture(scope="session")
def gaff(self):
return forcefields.load_GAFF()
def test_gaff_missing_group(self, gaff):
with pytest.raises(ValueError):
gaff.get_parameters("missing", key=[])
def test_gaff_non_string_keys(self, gaff):
with pytest.raises(TypeError):
gaff.get_parameters("atoms", key=1)
def test_gaff_bond_parameters_gaff(self, gaff):
bond_params = gaff.get_parameters("harmonic_bonds", ["br", "ca"])
assert np.isclose(bond_params["length"], 0.19079)
assert np.isclose(bond_params["k"], 219827.36)
def test_gaff_bond_params_reversed(self, gaff):
assert gaff.get_parameters(
"harmonic_bonds", ["ca", "br"]
) == gaff.get_parameters("harmonic_bonds", ["ca", "br"])
def test_gaff_missing_bond_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_bonds", ["str1", "str2"])
def test_gaff_angle_parameters(self, gaff):
angle_params = gaff.get_parameters("harmonic_angles", ["f", "c1", "f"])
assert np.allclose(
[angle_params["theta"], angle_params["k"]],
[3.141592653589793, 487.0176],
)
def test_gaff_angle_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"harmonic_angles", ["f", "c2", "ha"]
).values()
),
list(
gaff.get_parameters(
"harmonic_angles", ["ha", "c2", "f"]
).values()
),
)
def test_gaff_missing_angle_parameters(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("harmonic_angles", ["1", "2", "3"])
def test_gaff_periodic_proper_parameters(self, gaff):
periodic_proper_params = gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
)
assert np.allclose(periodic_proper_params["periodicity"], [2.0, 1.0])
assert np.allclose(
periodic_proper_params["k"], [9.414, 5.4392000000000005]
)
assert np.allclose(
periodic_proper_params["phase"],
[3.141592653589793, 3.141592653589793],
)
def test_gaff_periodic_proper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_propers", ["c3", "c", "sh", "hs"]
).values()
),
list(
gaff.get_parameters(
"periodic_propers", ["hs", "sh", "c", "c3"]
).values()
),
)
def test_gaff_periodic_improper_parameters(self, gaff):
periodic_improper_params = gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
)
assert np.allclose(periodic_improper_params["periodicity"], [2.0])
assert np.allclose(periodic_improper_params["k"], [4.6024])
assert np.allclose(
periodic_improper_params["phase"], [3.141592653589793]
)
def test_gaff_periodic_improper_parameters_reversed(self, gaff):
assert np.allclose(
list(
gaff.get_parameters(
"periodic_impropers", ["c", "", "o", "o"]
).values()
),
list(
gaff.get_parameters(
"periodic_impropers", ["c", "o", "", "o"]
).values()
),
)
def test_gaff_proper_params_missing(self, gaff):
with pytest.raises(MissingParametersError):
gaff.get_parameters("periodic_impropers", ["a", "b", "c", "d"])
def test_gaff_scaling_factors(self, gaff):
assert gaff.lj14scale == 0.5
assert np.isclose(gaff.coulomb14scale, 0.833333333)
def test_opls_get_parameters_atoms(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", "opls_145")
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atoms_list(self, oplsaa):
atom_params = oplsaa.get_parameters("atoms", ["opls_145"])
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_atom_class(self, oplsaa):
atom_params = oplsaa.get_parameters(
"atoms", "CA", keys_are_atom_classes=True
)
assert atom_params["sigma"] == 0.355
assert atom_params["epsilon"] == 0.29288
def test_opls_get_parameters_bonds(self, oplsaa):
bond_params = oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
)
assert bond_params["length"] == 0.146
assert bond_params["k"] == 334720.0
def test_opls_get_parameters_bonds_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_760", "opls_145"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["opls_145", "opls_760"]
).values()
),
)
def test_opls_get_parameters_bonds_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_bonds", ["C_2", "O_2"], True
).values()
),
list(
oplsaa.get_parameters(
"harmonic_bonds", ["O_2", "C_2"], True
).values()
),
)
def test_opls_get_parameters_angle(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.0943950239, 585.76]
)
def test_opls_get_parameters_angle_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_166", "opls_772", "opls_167"]
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles", ["opls_167", "opls_772", "opls_166"]
).values()
),
)
def test_opls_get_parameters_angle_atom_classes(self, oplsaa):
angle_params = oplsaa.get_parameters(
"harmonic_angles", ["CA", "C_2", "CA"], keys_are_atom_classes=True
)
assert np.allclose(
[angle_params["theta"], angle_params["k"]], [2.09439510239, 711.28]
)
def test_opls_get_parameters_angle_atom_classes_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"harmonic_angles",
["CA", "C", "O"],
keys_are_atom_classes=True,
).values()
),
list(
oplsaa.get_parameters(
"harmonic_angles",
["O", "C", "CA"],
keys_are_atom_classes=True,
).values()
),
)
def test_opls_get_parameters_rb_proper(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["opls_215", "opls_215", "opls_235", "opls_269"]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[2.28446, 0.0, -2.28446, 0.0, 0.0, 0.0],
)
def test_get_parameters_rb_proper_reversed(self, oplsaa):
assert np.allclose(
list(
oplsaa.get_parameters(
"rb_propers",
["opls_215", "opls_215", "opls_235", "opls_269"],
).values()
),
list(
oplsaa.get_parameters(
"rb_propers",
["opls_269", "opls_235", "opls_215", "opls_215"],
).values()
),
)
def test_opls_get_parameters_wildcard(self, oplsaa):
proper_params = oplsaa.get_parameters(
"rb_propers", ["", "opls_235", "opls_544", ""]
)
assert np.allclose(
[
proper_params["c0"],
proper_params["c1"],
proper_params["c2"],
proper_params["c3"],
proper_params["c4"],
proper_params["c5"],
],
[30.334, 0.0, -30.334, 0.0, 0.0, 0.0],
)
def test_opls_missing_force(self, oplsaa):
with pytest.raises(MissingForceError):
oplsaa.get_parameters("periodic_propers", key=["a", "b", "c", "d"])
def test_opls_scaling_factors(self, oplsaa):
assert oplsaa.lj14scale == 0.5
assert oplsaa.coulomb14scale == 0.5
def test_missing_scaling_factors(self):
ff = Forcefield(forcefield_files=(get_fn("validate_customtypes.xml")))
with pytest.raises(AttributeError):
assert ff.lj14scale
with pytest.raises(AttributeError):
assert ff.coulomb14scale
| mit |
kamilmowinski/nao_gesture | scripts/nao.py | 2 | 1999 | #!/usr/bin/env python
import rospy
import math
from naoqi import ALProxy
from my_kinnect.msg import NaoCoords
class NaoMonkey:
PART = {
'LShoulder': ['LShoulderPitch', 'LShoulderRoll'],
'RShoulder': ['RShoulderPitch', 'RShoulderRoll'],
'LElbow': ['LElbowYaw', 'LElbowRoll'],
'RElbow': ['RElbowYaw', 'RElbowRoll'],
'Head': ['HeadYaw', 'HeadPitch'],
}
LIMITS = {
'Head': [[-2.0, 2.0], [-0.67, 0.51]],
'LShoulder': [[-2.0, 2.0], [-0.31, 1.32]],
'RShoulder': [[-2.0, 2.0], [-1.32, 0.31]],
'LElbow': [[-2.0, 2.0], [-1.54, -0.03]],
'RElbow': [[-2.0, 2.0], [0.03, 1.54]],
}
def __init__(self):
rospy.init_node('nao_mykinect', anonymous=True)
self.listener = rospy.Subscriber('nao', NaoCoords, self.move)
ip = rospy.get_param('~ip', '10.104.16.141')
port = int(rospy.get_param('~port', '9559'))
self.al = ALProxy("ALAutonomousLife", ip, port)
self.postureProxy = ALProxy("ALRobotPosture", ip, port)
self.motionProxy = ALProxy("ALMotion", ip, port)
self.al.setState("disabled")
for part in ["Head", "LArm", "RArm"]:
self.motionProxy.setStiffnesses(part, 1.0)
rospy.loginfo(self.motionProxy.getSummary())
def move(self, coords):
part = coords.Part.data
angles1 = coords.Angles1
angles2 = coords.Angles2
angles = [float(angles1.data), float(angles2.data)]
speed = 1.0
if part not in NaoMonkey.PART:
error_msg = 'Wat? I Do not have ' + str(part)
rospy.loginfo(error_msg)
return
if len(NaoMonkey.PART[part]) != len(angles):
error_msg = 'Wat? What shall i do with rest joint?'
rospy.loginfo(error_msg)
return
angles = map(lambda x: float(x)*math.pi/180.0, angles)
for limit, angle in zip(NaoMonkey.LIMITS[part], angles):
if angle < limit[0] or angle > limit[1]:
error_msg = 'Wat? Limits man!'
rospy.loginfo(error_msg)
self.motionProxy.setAngles(NaoMonkey.PART[part], angles, speed);
if __name__ == '__main__':
try:
NaoMonkey()
rospy.spin()
except rospy.ROSInterruptException:
pass
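# Hedged example of driving this node from another script. The publisher call
# is standard rospy; the exact field types of NaoCoords are an assumption
# inferred from the .data accesses in move() above:
#
#     pub = rospy.Publisher('nao', NaoCoords, queue_size=10)
#     msg = NaoCoords()
#     msg.Part.data = 'Head'
#     msg.Angles1.data = '20'   # degrees; move() converts them to radians
#     msg.Angles2.data = '10'
#     pub.publish(msg)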
| gpl-2.0 |
piosz/test-infra | gubernator/pb_glance_test.py | 36 | 1815 | # Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pb_glance
def tostr(data):
if isinstance(data, list):
return ''.join(c if isinstance(c, str) else chr(c) for c in data)
return data
class PBGlanceTest(unittest.TestCase):
def expect(self, data, expected, types=None):
result = pb_glance.parse_protobuf(tostr(data), types)
self.assertEqual(result, expected)
def test_basic(self):
self.expect(
[0, 1, # varint
0, 0x96, 1, # multi-byte varint
(1<<3)|1, 'abcdefgh', # 64-bit
(2<<3)|2, 5, 'value', # length-delimited (string)
(3<<3)|5, 'abcd', # 32-bit
],
{
0: [1, 150],
1: ['abcdefgh'],
2: ['value'],
3: ['abcd'],
})
def test_embedded(self):
self.expect([2, 2, 3<<3, 1], {0: [{3: [1]}]}, {0: {}})
def test_field_names(self):
self.expect([2, 2, 'hi'], {'greeting': ['hi']}, {0: 'greeting'})
def test_embedded_names(self):
self.expect(
[2, 4, (3<<3)|2, 2, 'hi'],
{'msg': [{'greeting': ['hi']}]},
{0: {'name': 'msg', 3: 'greeting'}})
| apache-2.0 |
vismartltd/edx-platform | common/test/acceptance/pages/lms/open_response.py | 165 | 4579 | """
Open-ended response in the courseware.
"""
from bok_choy.page_object import PageObject
from bok_choy.promise import EmptyPromise
from .rubric import RubricPage
class OpenResponsePage(PageObject):
"""
Open-ended response in the courseware.
"""
url = None
def is_browser_on_page(self):
return self.q(css='div.xmodule_CombinedOpenEndedModule').present
@property
def assessment_type(self):
"""
Return the type of assessment currently active.
Options are "self", "ai", or "peer"
"""
labels = self.q(css='section#combined-open-ended-status>div.statusitem-current').text
if len(labels) < 1:
self.warning("Could not find assessment type label")
# Provide some tolerance to UI changes
label_compare = labels[0].lower().strip()
if 'self' in label_compare:
return 'self'
elif 'ai' in label_compare:
return 'ai'
elif 'peer' in label_compare:
return 'peer'
else:
raise ValueError("Unexpected assessment type: '{0}'".format(label_compare))
@property
def prompt(self):
"""
Return an HTML string representing the essay prompt.
"""
prompt_css = "section.open-ended-child>div.prompt"
prompts = self.q(css=prompt_css).map(lambda el: el.get_attribute('innerHTML').strip()).results
if len(prompts) == 0:
self.warning("Could not find essay prompt on page.")
return ""
elif len(prompts) > 1:
self.warning("Multiple essay prompts found on page; using the first one.")
return prompts[0]
@property
def rubric(self):
"""
Return a `RubricPage` for a self-assessment problem.
If no rubric is available, raises a `BrokenPromise` exception.
"""
rubric = RubricPage(self.browser)
rubric.wait_for_page()
return rubric
@property
def written_feedback(self):
"""
Return the written feedback from the grader (if any).
If no feedback available, returns None.
"""
feedback = self.q(css='div.written-feedback').text
if len(feedback) > 0:
return feedback[0]
else:
return None
@property
def alert_message(self):
"""
Alert message displayed to the user.
"""
alerts = self.q(css="div.open-ended-alert").text
if len(alerts) < 1:
return ""
else:
return alerts[0]
@property
def grader_status(self):
"""
Status message from the grader.
If not present, return an empty string.
"""
status_list = self.q(css='div.grader-status').text
if len(status_list) < 1:
self.warning("No grader status found")
return ""
elif len(status_list) > 1:
self.warning("Multiple grader statuses found; returning the first one")
return status_list[0]
def set_response(self, response_str):
"""
Input a response to the prompt.
"""
input_css = "textarea.short-form-response"
self.q(css=input_css).fill(response_str)
def save_response(self):
"""
Save the response for later submission.
"""
self.q(css='input.save-button').first.click()
EmptyPromise(
lambda: 'save' in self.alert_message.lower(),
"Status message saved"
).fulfill()
def submit_response(self):
"""
Submit a response for grading.
"""
self.q(css='input.submit-button').first.click()
# modal dialog confirmation
self.q(css='button.ok-button').first.click()
# Ensure that the submission completes
self._wait_for_submitted(self.assessment_type)
def _wait_for_submitted(self, assessment_type):
"""
Wait for the submission to complete.
`assessment_type` is either 'self', 'ai', or 'peer'
"""
if assessment_type == 'self':
RubricPage(self.browser).wait_for_page()
elif assessment_type == 'ai' or assessment_type == "peer":
EmptyPromise(
lambda: self.grader_status != 'Unanswered',
"Problem status is no longer 'unanswered'"
).fulfill()
else:
self.warning("Unrecognized assessment type '{0}'".format(assessment_type))
EmptyPromise(lambda: True, "Unrecognized assessment type").fulfill()
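# Hedged usage sketch inside a bok-choy test; the browser object comes from
# the surrounding test harness and is an assumption here:
#
#     page = OpenResponsePage(browser)
#     page.wait_for_page()
#     page.set_response('My essay text')
#     page.save_response()
#     page.submit_response()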
| agpl-3.0 |
WeichenXu123/spark | examples/src/main/python/ml/min_hash_lsh_example.py | 52 | 3222 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating MinHashLSH.
Run with:
bin/spark-submit examples/src/main/python/ml/min_hash_lsh_example.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.feature import MinHashLSH
from pyspark.ml.linalg import Vectors
from pyspark.sql.functions import col
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
spark = SparkSession \
.builder \
.appName("MinHashLSHExample") \
.getOrCreate()
# $example on$
dataA = [(0, Vectors.sparse(6, [0, 1, 2], [1.0, 1.0, 1.0]),),
(1, Vectors.sparse(6, [2, 3, 4], [1.0, 1.0, 1.0]),),
(2, Vectors.sparse(6, [0, 2, 4], [1.0, 1.0, 1.0]),)]
dfA = spark.createDataFrame(dataA, ["id", "features"])
dataB = [(3, Vectors.sparse(6, [1, 3, 5], [1.0, 1.0, 1.0]),),
(4, Vectors.sparse(6, [2, 3, 5], [1.0, 1.0, 1.0]),),
(5, Vectors.sparse(6, [1, 2, 4], [1.0, 1.0, 1.0]),)]
dfB = spark.createDataFrame(dataB, ["id", "features"])
key = Vectors.sparse(6, [1, 3], [1.0, 1.0])
mh = MinHashLSH(inputCol="features", outputCol="hashes", numHashTables=5)
model = mh.fit(dfA)
# Feature Transformation
print("The hashed dataset where hashed values are stored in the column 'hashes':")
model.transform(dfA).show()
# Compute the locality sensitive hashes for the input rows, then perform approximate
# similarity join.
# We could avoid computing hashes by passing in the already-transformed dataset, e.g.
# `model.approxSimilarityJoin(transformedA, transformedB, 0.6)`
print("Approximately joining dfA and dfB on distance smaller than 0.6:")
model.approxSimilarityJoin(dfA, dfB, 0.6, distCol="JaccardDistance")\
.select(col("datasetA.id").alias("idA"),
col("datasetB.id").alias("idB"),
col("JaccardDistance")).show()
# Compute the locality sensitive hashes for the input rows, then perform approximate nearest
# neighbor search.
# We could avoid computing hashes by passing in the already-transformed dataset, e.g.
# `model.approxNearestNeighbors(transformedA, key, 2)`
# It may return less than 2 rows when not enough approximate near-neighbor candidates are
# found.
print("Approximately searching dfA for 2 nearest neighbors of the key:")
model.approxNearestNeighbors(dfA, key, 2).show()
# $example off$
spark.stop()
| apache-2.0 |
a2ultimate/ultimate-league-app | src/ultimate/utils/google_api.py | 2 | 7829 | from datetime import datetime
import dateutil.parser
import httplib2
import logging
from django.conf import settings
from django.utils.timezone import make_aware
from googleapiclient.discovery import build
from oauth2client.service_account import ServiceAccountCredentials
logger = logging.getLogger('a2u.email_groups')
class GoogleAppsApi:
http = None
service = None
def __init__(self):
credentials_file = getattr(settings, 'GOOGLE_APPS_API_CREDENTIALS_FILE', False)
scopes = getattr(settings, 'GOOGLE_APPS_API_SCOPES', False)
account = getattr(settings, 'GOOGLE_APPS_API_ACCOUNT', False)
if credentials_file and scopes and account:
credentials = ServiceAccountCredentials.from_json_keyfile_name(
credentials_file, scopes=scopes)
credentials._kwargs['sub'] = account
self.http = httplib2.Http()
self.http = credentials.authorize(self.http)
def prepare_group_for_sync(self, group_name, group_id=None, group_email_address=None, force=False):
logger.debug('Preparing group "{}" for sync...'.format(group_name))
if force:
self.delete_group(group_id=group_id, group_email_address=group_email_address)
else:
self.remove_all_group_members(
group_id=group_id,
group_email_address=group_email_address,
group_name=group_name)
return self.get_or_create_group(
group_email_address=group_email_address, group_name=group_name)
# TODO need paging for when you have over 200 groups
def get_or_create_group(self, group_email_address, group_name=''):
logger.debug(' Getting or creating group {}...'.format(group_email_address))
service = build('admin', 'directory_v1', http=self.http, cache_discovery=False)
groups_response = None
target_group = None
try:
logger.debug(' Looking for existing group...')
groups_response = service.groups().list(customer='my_customer', domain='lists.annarborultimate.org', query='email={}'.format(group_email_address)).execute(http=self.http)
except Exception as e:
return None
if groups_response and groups_response.get('groups'):
for group in groups_response.get('groups'):
if group.get('email') == group_email_address:
logger.debug(' Group found!')
target_group = group
# couldn't find group, create it
if not target_group:
logger.debug(' Group not found...creating {}...'.format(group_email_address))
body = { 'email': group_email_address, }
if group_name:
body.update({ 'name': group_name, })
try:
target_group = service.groups().insert(body=body).execute(http=self.http)
logger.debug(' Success!')
except Exception as e:
logger.debug(' Failure!')
return None
group_id = target_group.get('id', None)
return group_id
def delete_group(self, group_id=None, group_email_address=None):
logger.debug(' Deleting existing group...')
service = build('admin', 'directory_v1', http=self.http, cache_discovery=False)
if group_email_address and not group_id:
try:
groups_response = service.groups().list(customer='my_customer', domain='lists.annarborultimate.org', query='email={}'.format(group_email_address)).execute(http=self.http)
if groups_response and groups_response.get('groups'):
for group in groups_response.get('groups'):
if group.get('email') == group_email_address:
group_id = group.get('id', None)
except Exception as e:
return False
if group_id:
try:
service.groups().delete(groupKey=group_id).execute(http=self.http)
logger.debug(' Success!')
except Exception as e:
logger.debug(' Failure!')
return False
return True
def remove_all_group_members(self, group_id=None, group_email_address=None, group_name=None):
logger.debug(' Removing all members from {}...'.format(group_email_address))
service = build('admin', 'directory_v1', http=self.http, cache_discovery=False)
if group_email_address and not group_id:
try:
groups_response = service.groups().list(customer='my_customer', domain='lists.annarborultimate.org', query='email={}'.format(group_email_address)).execute(http=self.http)
if groups_response and groups_response.get('groups'):
for group in groups_response.get('groups'):
if group.get('email') == group_email_address:
group_id = group.get('id', None)
except Exception as e:
logger.debug(' Group could not be found')
return False
if group_id:
try:
members_response = service.members().list(groupKey=group_id).execute(http=self.http)
if members_response and members_response.get('members'):
for member in members_response.get('members'):
member_id = member.get('id', None)
service.members().delete(groupKey=group_id, memberKey=member_id).execute(http=self.http)
except Exception as e:
logger.debug(' Group could not be found')
return False
logger.debug(' Done')
def add_group_member(self, email_address, group_id=None, group_email_address=None, group_name=None):
logger.debug('Adding {} to {}...'.format(email_address, group_email_address or 'group'))
service = build('admin', 'directory_v1', http=self.http, cache_discovery=False)
body = {
'email': email_address,
'role': 'MEMBER'
}
response = False
# look for group
if not group_id and group_email_address:
group_id = self.get_or_create_group(
group_email_address=group_email_address, group_name=group_name)
if group_id:
try:
response = service.members().insert(groupKey=group_id, body=body).execute(http=self.http)
logger.debug(' Success!')
except:
logger.debug(' Failure!')
return False
return response
def get_calendar_events(self, calendar_id, since, until):
service = build(serviceName='calendar', version='v3', http=self.http, cache_discovery=False)
since = (datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) - since).isoformat('T') + 'Z'
until = (datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0) + until).isoformat('T') + 'Z'
try:
events_response = service.events().list(
calendarId=calendar_id,
orderBy='startTime',
singleEvents=True,
timeMin=since,
timeMax=until,
).execute(http=self.http)
except Exception as e:
return None
events = []
for event in events_response['items']:
events.append({
'summary': event.get('summary'),
'start': dateutil.parser.parse(event['start']['dateTime']),
'end': event['end']['dateTime'],
'location': event.get('location'),
'description': event.get('description'),
})
return events
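# Illustrative usage sketch (added for documentation; not part of the original
# module). It assumes the Django settings read in __init__ above
# (GOOGLE_APPS_API_CREDENTIALS_FILE, GOOGLE_APPS_API_SCOPES,
# GOOGLE_APPS_API_ACCOUNT) are configured, and the addresses below are
# hypothetical.
if __name__ == '__main__':
    api = GoogleAppsApi()
    # Rebuild a team mailing list from scratch, then subscribe one player.
    group_id = api.prepare_group_for_sync(
        group_name='Example Team',
        group_email_address='[email protected]')
    if group_id:
        api.add_group_member('[email protected]', group_id=group_id)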
| bsd-3-clause |
Rapportus/ansible-modules-extras | cloud/vmware/vmware_dns_config.py | 75 | 3970 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_dns_config
short_description: Manage VMware ESXi DNS Configuration
description:
- Manage VMware ESXi DNS Configuration
version_added: 2.0
author: "Joseph Callen (@jcpowermac)"
notes:
- Tested on vSphere 5.5
requirements:
- "python >= 2.6"
- PyVmomi
options:
change_hostname_to:
description:
- The hostname that an ESXi host should be changed to.
required: True
domainname:
description:
            - The domain the ESXi host should be a part of.
required: True
dns_servers:
description:
- The DNS servers that the host should be configured to use.
required: True
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_dns_config command from Ansible Playbooks
- name: Configure ESXi hostname and DNS servers
local_action:
module: vmware_dns_config
hostname: esxi_hostname
username: root
password: your_password
change_hostname_to: esx01
domainname: foo.org
dns_servers:
- 8.8.8.8
- 8.8.4.4
'''
try:
from pyVmomi import vim, vmodl
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def configure_dns(host_system, hostname, domainname, dns_servers):
changed = False
host_config_manager = host_system.configManager
host_network_system = host_config_manager.networkSystem
config = host_network_system.dnsConfig
config.dhcp = False
if config.address != dns_servers:
config.address = dns_servers
changed = True
if config.domainName != domainname:
config.domainName = domainname
changed = True
if config.hostName != hostname:
config.hostName = hostname
changed = True
if changed:
host_network_system.UpdateDnsConfig(config)
return changed
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(change_hostname_to=dict(required=True, type='str'),
domainname=dict(required=True, type='str'),
dns_servers=dict(required=True, type='list')))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
if not HAS_PYVMOMI:
module.fail_json(msg='pyvmomi is required for this module')
change_hostname_to = module.params['change_hostname_to']
domainname = module.params['domainname']
dns_servers = module.params['dns_servers']
try:
content = connect_to_api(module)
host = get_all_objs(content, [vim.HostSystem])
if not host:
module.fail_json(msg="Unable to locate Physical Host.")
host_system = host.keys()[0]
changed = configure_dns(host_system, change_hostname_to, domainname, dns_servers)
module.exit_json(changed=changed)
except vmodl.RuntimeFault as runtime_fault:
module.fail_json(msg=runtime_fault.msg)
except vmodl.MethodFault as method_fault:
module.fail_json(msg=method_fault.msg)
except Exception as e:
module.fail_json(msg=str(e))
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
ecell/ecell3 | ecell/pyecell/ecell/analysis/PathwayProxy.py | 1 | 13263 | #!/usr/bin/env python
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
# This file is part of the E-Cell System
#
# Copyright (C) 1996-2016 Keio University
# Copyright (C) 2008-2016 RIKEN
# Copyright (C) 2005-2009 The Molecular Sciences Institute
#
#::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
#
#
# E-Cell System is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# E-Cell System is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with E-Cell System -- see the file COPYING.
# If not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#END_HEADER
"""
A program for handling and defining a pathway.
This program is the extension package for E-Cell System Version 3.
"""
__program__ = 'PathwayProxy'
__version__ = '1.0'
__author__ = 'Kazunari Kaizu <[email protected]>'
__copyright__ = ''
__license__ = ''
import ecell.eml
from ecell.ecssupport import *
from ecell.analysis.util import createVariableReferenceFullID
import copy
import numpy
class PathwayProxy:
def __init__( self, anEmlSupport, processList=None ):
'''
anEmlSupport: Eml support object
processList: (list) a list of process full path
'''
self.theEmlSupport = anEmlSupport
if processList:
self.setProcessList( processList )
else:
self.setProcessList( [] )
# end of __init__
def setProcessList( self, processList ):
'''
set and detect a pathway
processList: (list) a list of process full ID
'''
# check the existence of processes,
# and create relatedVariableList
self.__processList = []
self.__variableList = []
for processFullID in processList:
# if not self.theEmlSupport.isEntityExist( processFullID ):
# continue
self.__processList.append( processFullID )
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if self.__variableList.count( fullIDString ) == 0:
self.__variableList.append( fullIDString )
self.__processList.sort()
self.__variableList.sort()
# end of setProcessList
def getProcessList( self ):
'''
return processList
'''
return copy.copy( self.__processList )
# end of getProcessList
def addProcess( self, processFullID ):
'''
add a process to the pathway
processFullID: (str) a process full ID
'''
if not self.__processList.count( processFullID ) == 0:
return
# elif not ecell.eml.Eml.isEntityExist( processFullID ):
# return
# add process
self.__processList.append( processFullID )
self.__processList.sort()
# update the related variable list
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
return
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if self.__variableList.count( fullIDString ) == 0:
self.__variableList.append( fullIDString )
self.__variableList.sort()
# end of addProcess
def removeProcess( self, processIndexList ):
'''
remove processes from the pathway
processIndexList: (list) a list of indices of processes
'''
indexList = copy.copy( processIndexList )
indexList.sort()
indexList.reverse()
removedProcessList = []
for i in indexList:
if len( self.__processList ) > i:
removedProcessList.append( self.__processList.pop( i ) )
removedVariableList = []
for processFullID in removedProcessList:
# if not ecell.eml.Eml.isEntityExist( self.theEmlSupport, processFullID ):
# continue
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if removedVariableList.count( fullIDString ) == 0:
removedVariableList.append( fullIDString )
for processFullID in self.__processList:
# if not self.theEmlSupport.isEntityExist( processFullID ):
# continue
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
if not removedVariableList.count( fullIDString ) == 0:
removedVariableList.remove( fullIDString )
for variableFullID in removedVariableList:
self.__variableList.remove( variableFullID )
# end of removeProcess
def take( self, processIndexList ):
'''
create and return a sub-pathway
processIndexList: (list) a list of indices of processes
return PathwayProxy
'''
processList = []
for i in processIndexList:
if len( self.__processList ) > i:
processList.append( self.__processList[ i ] )
subPathway = PathwayProxy( self.theEmlSupport, processList )
return subPathway
# end of removeProcess
def getVariableList( self ):
'''
return relatedVariableList
'''
return copy.copy( self.__variableList )
# end of getVariableList
def removeVariable( self, variableIndexList ):
'''
remove variables from the pathway
variableIndexList: (list) a list of indices of variables
'''
indexList = copy.copy( variableIndexList )
indexList.sort()
indexList.reverse()
for i in indexList:
if len( self.__variableList ) > i:
self.__variableList.pop( i )
# end of removeVariable
def addVariable( self, variableFullID ):
'''
recover a removed variable to the pathway
variableFullID: (str) a variable full ID
'''
if not self.__variableList.count( variableFullID ) == 0:
return 1
# elif not ecell.eml.Eml.isEntityExist( variableFullID ):
# return 0
for processFullID in self.__processList:
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = fullID[ 1 ] + ':' + fullID[ 2 ]
if fullIDString == variableFullID:
self.__variableList.append( variableFullID )
self.__variableList.sort()
return 1
return 0
# end of addProcess
def getIncidentMatrix( self, mode=0 ):
'''
create the incident matrix (array)
        mode: (0 or 1) 1 marks only the variables referenced with a non-zero
              coefficient (the 'write' variables); 0, the default, marks every referenced variable.
return incidentMatrix
'''
incidentMatrix = numpy.zeros( ( len( self.__variableList ), len( self.__processList ) ) )
for j in range( len( self.__processList ) ):
processFullID = self.__processList[ j ]
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
try:
i = self.__variableList.index( fullIDString )
except ValueError:
                    # should some warning message be shown?
continue
if mode:
if len( aVariableReference ) > 2:
coeff = int( aVariableReference[ 2 ] )
if coeff != 0:
incidentMatrix[ i ][ j ] = 1
else:
incidentMatrix[ i ][ j ] = 1
return incidentMatrix
# end of getIncidentMatrix
def getStoichiometryMatrix( self ):
'''
create the stoichiometry matrix (array)
return stoichiometryMatrix
'''
stoichiometryMatrix = numpy.zeros( ( len( self.__variableList ), len( self.__processList ) ), float )
for j in range( len( self.__processList ) ):
processFullID = self.__processList[ j ]
try:
aVariableReferenceList = self.theEmlSupport.getEntityProperty( processFullID + ':VariableReferenceList' )
except AttributeError, e:
continue
for aVariableReference in aVariableReferenceList:
fullID = createVariableReferenceFullID( aVariableReference[ 1 ], processFullID )
fullIDString = ecell.ecssupport.createFullIDString( fullID )
try:
i = self.__variableList.index( fullIDString )
except ValueError:
                    # should some warning message be shown?
continue
if len( aVariableReference ) > 2:
coeff = int( aVariableReference[ 2 ] )
if coeff != 0:
stoichiometryMatrix[ i ][ j ] += coeff
return stoichiometryMatrix
# end of getStoichiometryMatrix
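    # Illustrative note (added): for a pathway whose variable list is [S, P]
    # and whose single process converts S into P (S referenced with
    # coefficient -1, P with coefficient +1), getStoichiometryMatrix() returns
    #     [[-1.],
    #      [ 1.]]
    # with rows ordered as getVariableList() and columns as getProcessList().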
def getReversibilityList( self ):
'''
check and return the reversibilities (isReversible) for processes
default value is 0, irreversible
return reversibilityList
'''
reversibilityList = []
for processFullID in self.__processList:
propertyList = self.theEmlSupport.getEntityPropertyList( processFullID )
if propertyList.count( 'isReversible' ) != 0:
# isReversible is handled as float
isReversible = float( self.theEmlSupport.getEntityProperty( processFullID + ':isReversible' )[ 0 ] )
reversibilityList.append( int( isReversible ) )
else:
# default value, irreversible
reversibilityList.append( 0 )
return reversibilityList
# end of getReversibilityList
# end of PathwayProxy
if __name__ == '__main__':
from emlsupport import EmlSupport
import sys
import os
def main( filename ):
anEmlSupport = EmlSupport( filename )
pathwayProxy = anEmlSupport.createPathwayProxy()
print 'process list ='
print pathwayProxy.getProcessList()
print 'related variable list ='
print pathwayProxy.getVariableList()
print 'incident matrix ='
print pathwayProxy.getIncidentMatrix()
print 'stoichiometry matrix ='
print pathwayProxy.getStoichiometryMatrix()
print 'reversibility list ='
print pathwayProxy.getReversibilityList()
# end of main
if len( sys.argv ) > 1:
main( sys.argv[ 1 ] )
else:
filename = '../../../../doc/samples/Heinrich/Heinrich.eml'
main( os.path.abspath( filename ) )
| lgpl-3.0 |
pymedusa/Medusa | ext/boto/pyami/scriptbase.py | 153 | 1427 | import os
import sys
from boto.utils import ShellCommand, get_ts
import boto
import boto.utils
class ScriptBase(object):
def __init__(self, config_file=None):
self.instance_id = boto.config.get('Instance', 'instance-id', 'default')
self.name = self.__class__.__name__
self.ts = get_ts()
if config_file:
boto.config.read(config_file)
def notify(self, subject, body=''):
boto.utils.notify(subject, body)
def mkdir(self, path):
if not os.path.isdir(path):
try:
os.mkdir(path)
except:
boto.log.error('Error creating directory: %s' % path)
def umount(self, path):
if os.path.ismount(path):
self.run('umount %s' % path)
def run(self, command, notify=True, exit_on_error=False, cwd=None):
self.last_command = ShellCommand(command, cwd=cwd)
if self.last_command.status != 0:
boto.log.error('Error running command: "%s". Output: "%s"' % (command, self.last_command.output))
if notify:
self.notify('Error encountered',
'Error running the following command:\n\t%s\n\nCommand output:\n\t%s' % \
(command, self.last_command.output))
if exit_on_error:
sys.exit(-1)
return self.last_command.status
def main(self):
pass
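# Illustrative subclass sketch (added; not part of boto). A concrete pyami
# script overrides main() and reuses the helpers above, e.g.:
#
#   class BackupScript(ScriptBase):
#       def main(self):
#           self.mkdir('/mnt/backup')
#           self.run('rsync -a /data /mnt/backup', exit_on_error=True)
#
# notify() then reports failures through the boto notification settings.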
| gpl-3.0 |
Ichag/odoo | addons/hr_expense/report/__init__.py | 380 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_expense_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
AydinSakar/node-gyp | gyp/pylib/gyp/xcode_emulation.py | 65 | 42931 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
import subprocess
import sys
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
def __init__(self, spec):
self.spec = spec
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.app'
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
    if self.spec['type'] in ('shared_library',):
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def _GetSdkVersionInfoItem(self, sdk, infoitem):
job = subprocess.Popen(['xcodebuild', '-version', '-sdk', sdk, infoitem],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running xcodebuild' % job.returncode)
return out.rstrip('\n')
def _SdkPath(self):
sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx')
if sdk_root not in XcodeSettings._sdk_path_cache:
XcodeSettings._sdk_path_cache[sdk_root] = self._GetSdkVersionInfoItem(
sdk_root, 'Path')
return XcodeSettings._sdk_path_cache[sdk_root]
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# This functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings():
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
cflags.append('-mdynamic-no-pic')
else:
pass
# TODO: In this case, it depends on the target. xcode passes
# mdynamic-no-pic by default for executable and possibly static lib
# according to mento
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't map c++0x to c++11 so that c++0x can be used with older
# clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
return path
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
LINKER_FILE = '(\S+)'
WORD = '\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
def GetLdflags(self, configname, product_dir, gyp_to_build_path):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
          current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name:
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = self.xcode_settings[configname].get(setting, None)
first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
    necessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _AdjustLibrary(self, library):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
return l.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [ self._AdjustLibrary(library) for library in libraries]
return libraries
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def GetInclude(self, lang):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self.compiled_headers[lang]
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self.compiled_headers[lang] + '.gch'
def GetObjDependencies(self, sources, objs):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |compilable[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang)))
return result
def GetPchBuildCommands(self):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c'), '-x c-header', 'c', self.header),
(self._Gch('cc'), '-x c++-header', 'cc', self.header),
(self._Gch('m'), '-x objective-c-header', 'm', self.header),
(self._Gch('mm'), '-x objective-c++-header', 'mm', self.header),
]
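# Illustrative flow (added comment; the names below are hypothetical): a
# generator typically creates one MacPrefixHeader per target and wires its
# outputs into the build, roughly:
#
#   pch = MacPrefixHeader(xcode_settings, map_to_build_path, map_to_pch_output)
#   cflags_c.append(pch.GetInclude('c'))
#   for gch, lang_flag, lang, header in pch.GetPchBuildCommands():
#     pass  # emit a rule compiling |header| with |lang_flag| into |gch|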
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
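# Illustrative example (added): with global xcode_settings
# {'SDKROOT': 'macosx'} and a configuration carrying {'ARCHS': ['x86_64']},
# the configuration ends up with both keys; a local 'SDKROOT' would win over
# the global one.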
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = output[0:-3] + 'nib'
yield output, res
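# Illustrative example (added): for product_dir 'out/Release' and an app
# bundle Foo.app, a resource 'en.lproj/Localizable.strings' yields
# ('out/Release/Foo.app/Contents/Resources/en.lproj/Localizable.strings',
#  'en.lproj/Localizable.strings'), and a 'MainMenu.xib' resource maps to a
# 'MainMenu.nib' output in the Resources folder.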
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
  * |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
    shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
gyp_to_build_path: A function that converts paths relative to the
current gyp file to paths relative to the build direcotry.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on a as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerTargetSetting('SDKROOT'):
env['SDKROOT'] = xcode_settings._SdkPath()
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
    assert '$(' not in variable, (
        '$($(FOO)) variables not supported: ' + to_replace)
str = str.replace(to_replace, '${' + variable + '}')
return str
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
for example env['foo'] = '$(bar) and $(baz)'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
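# Illustrative sketch, not part of the original module: with a small env dict,
# the sort above puts referenced variables before the variables that use them.
def _ExampleTopologicalEnvOrder():
  env = {'BUILT_PRODUCTS_DIR': 'out/Release',
         'TARGET_BUILD_DIR': '${BUILT_PRODUCTS_DIR}',
         'CODESIGNING_FOLDER_PATH': '${TARGET_BUILD_DIR}/MyApp.app'}
  # Returns ['BUILT_PRODUCTS_DIR', 'TARGET_BUILD_DIR',
  #          'CODESIGNING_FOLDER_PATH'].
  return _TopologicallySortedEnvVarKeys(env)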
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
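# Illustrative sketch, not part of the original module: a spec such as
#
#   {'target_name': 'my_target',
#    'postbuilds': [{'postbuild_name': 'Strip symbols',
#                    'action': ['strip', '-x', 'MyApp']}]}
#
# yields an "echo POSTBUILD\(my_target\) Strip symbols" line followed by the
# shell-encoded action, e.g. "strip -x MyApp".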
| mit |
BayanGroup/ansible | lib/ansible/utils/module_docs_fragments/mysql.py | 18 | 2735 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Jonathan Mainguy <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# Standard mysql documentation fragment
DOCUMENTATION = '''
options:
login_user:
description:
- The username used to authenticate with
required: false
default: null
login_password:
description:
- The password used to authenticate with
required: false
default: null
login_host:
description:
- Host running the database
required: false
default: localhost
login_port:
description:
      - Port of the MySQL server. Requires login_host be defined as something other than localhost if login_port is used
required: false
default: 3306
login_unix_socket:
description:
- The path to a Unix domain socket for local connections
required: false
default: null
config_file:
description:
- Specify a config file from which user and password are to be read
required: false
default: '~/.my.cnf'
version_added: "2.0"
ssl_ca:
required: false
default: null
version_added: "2.0"
description:
- The path to a Certificate Authority (CA) certificate. This option, if used, must specify the same certificate as used by the server.
ssl_cert:
required: false
default: null
version_added: "2.0"
description:
- The path to a client public key certificate.
ssl_key:
required: false
default: null
version_added: "2.0"
description:
- The path to the client private key.
requirements:
- MySQLdb
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this
is as easy as yum install MySQL-python. (See M(yum).)
- Both C(login_password) and C(login_user) are required when you are
passing credentials. If none are present, the module will attempt to read
the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
default login of 'root' with no password.
'''
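    # Illustrative sketch, not part of the original fragment: a task using the
    # shared connection options documented above; values are assumptions.
    #
    # - name: Ensure the application database exists
    #   mysql_db:
    #     name: appdb
    #     state: present
    #     login_user: root
    #     login_password: "{{ mysql_root_password }}"
    #     login_host: db.example.com
    #     login_port: 3306
    #     ssl_ca: /etc/mysql/ca.pem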
| gpl-3.0 |
mmilaprat/policycompass-services | apps/metricsmanager/api.py | 2 | 5677 | import json
from django.core.exceptions import ValidationError
from django import shortcuts
from rest_framework.views import APIView
from rest_framework.reverse import reverse
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework import generics, status
from policycompass_services import permissions
from .serializers import *
from .normalization import get_normalizers
from . import formula, services
class MetricsBase(APIView):
def get(self, request, format=None):
"""
:type request: Request
:param request:
:return:
"""
result = {
"Metrics": reverse('metrics-create-list', request=request),
"Normalizer": reverse('normalizers-list', request=request),
"Calculator": reverse('calculate-dataset', request=request)
}
return Response(result)
class FormulasValidate(APIView):
def get(self, request):
if "formula" not in request.QUERY_PARAMS:
return Response({"formula": "Can not be empty"},
status=status.HTTP_400_BAD_REQUEST)
if "variables" not in request.QUERY_PARAMS:
return Response({"variables": "Can not be empty"},
status=status.HTTP_400_BAD_REQUEST)
formula_str = request.QUERY_PARAMS["formula"]
try:
variables = json.loads(request.QUERY_PARAMS["variables"])
except ValueError as e:
return Response(
{"variables": "Unable to parse json: {}".format(e)},
status=status.HTTP_400_BAD_REQUEST)
try:
variables = formula.validate_variables(variables)
formula.validate_formula(formula_str, variables)
except ValidationError as e:
return Response(e.error_dict, status=status.HTTP_400_BAD_REQUEST)
return Response(status=status.HTTP_204_NO_CONTENT)
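# Illustrative sketch, not part of the original module: a request against the
# validation view above. The URL prefix and the exact shape of "variables" are
# assumptions based on the payloads shown further down in this file.
#
#   GET /formulas/validate?formula=0.5*norm(__1__,0,100)
#       &variables=[{"variable": "__1__", "dataset": 1}]
#
# returns 204 No Content when the formula validates, or 400 with field errors.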
class NormalizersList(APIView):
def get(self, request):
normalizers = get_normalizers().values()
serializer = NormalizerSerializer(normalizers, many=True)
return Response(serializer.data)
class MetricsCreate(generics.ListCreateAPIView):
model = Metric
serializer_class = MetricSerializer
paginate_by = 10
paginate_by_param = 'page_size'
permission_classes = IsAuthenticatedOrReadOnly,
def pre_save(self, obj):
obj.creator_path = self.request.user.resource_path
class MetricsDetail(generics.RetrieveUpdateDestroyAPIView):
model = Metric
serializer_class = MetricSerializer
permission_classes = permissions.IsCreatorOrReadOnly,
class DatasetCalculateView(APIView):
permission_classes = IsAuthenticatedOrReadOnly,
def post(self, request):
"""
Compute a new dataset from a given formula and mappings.
Example data:
{
"title": "Some test",
"formula": "0.5 * norm(__1__, 0, 100) + 0.5 * norm(__2__, 0, 200)",
"datasets": [
{
"variable": "__1__",
"dataset": 1,
},
{
"variable": "__1__",
"dataset": 1,
}
],
"indicator_id": 0,
"unit_id": 0,
}
"""
        # check request data
serializer = CalculateSerializer(data=request.DATA,
files=request.FILES)
if not serializer.is_valid():
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
data = serializer.object
try:
formula.validate_formula(data["formula"], data["datasets"])
data = services.validate_operationalize(data)
except ValidationError as e:
return Response(e.error_dict, status=status.HTTP_400_BAD_REQUEST)
creator_path = self.request.user.resource_path
dataset_id = services.compute_dataset(
creator_path=creator_path,
**data)
return Response({
"dataset": {
"id": dataset_id
}
})
class MetriscOperationalize(APIView):
permission_classes = IsAuthenticatedOrReadOnly,
def post(self, request, metrics_id: int):
"""
Compute a new dataset from a given metric and mappings for variables.
Example data:
{
"title" : "Some test",
"datasets": [
{
"variable": "__1__",
"dataset": 1,
}
],
"unit_id": 0,
}
"""
# check if metric exists
metric = shortcuts.get_object_or_404(Metric, pk=metrics_id)
        # check request data
serializer = OperationalizeSerializer(data=request.DATA,
files=request.FILES)
if not serializer.is_valid():
return Response(serializer.errors,
status=status.HTTP_400_BAD_REQUEST)
data = serializer.object
try:
data = services.validate_operationalize(data)
except ValidationError as e:
return Response(e.error_dict, status=status.HTTP_400_BAD_REQUEST)
creator_path = self.request.user.resource_path
dataset_id = services.compute_dataset(
creator_path=creator_path,
formula=metric.formula,
indicator_id=metric.indicator_id,
metric_id=metric.pk,
**data)
return Response({
"dataset": {
"id": dataset_id
}
})
| agpl-3.0 |
yosukesuzuki/let-me-notify | project/kay/management/gae_bulkloader.py | 10 | 125396 | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Imports data over HTTP.
Usage:
%(arg0)s [flags]
--debug Show debugging information. (Optional)
--app_id=<string> Application ID of endpoint (Optional for
*.appspot.com)
--auth_domain=<domain> The auth domain to use for logging in and for
UserProperties. (Default: gmail.com)
--bandwidth_limit=<int> The maximum number of bytes per second for the
aggregate transfer of data to the server. Bursts
may exceed this, but overall transfer rate is
restricted to this rate. (Default 250000)
--batch_size=<int> Number of Entity objects to include in each post to
the URL endpoint. The more data per row/Entity, the
smaller the batch size should be. (Default 10)
--config_file=<path> File containing Model and Loader definitions.
(Required unless --dump or --restore are used)
--db_filename=<path> Specific progress database to write to, or to
resume from. If not supplied, then a new database
will be started, named:
bulkloader-progress-TIMESTAMP.
The special filename "skip" may be used to simply
skip reading/writing any progress information.
--download Export entities to a file.
--dry_run Do not execute any remote_api calls.
--dump Use zero-configuration dump format.
--email=<string> The username to use. Will prompt if omitted.
--exporter_opts=<string>
A string to pass to the Exporter.initialize method.
--filename=<path> Path to the file to import. (Required)
--has_header Skip the first row of the input.
  --http_limit=<int>      The maximum number of HTTP requests per second to
send to the server. (Default: 8)
--kind=<string> Name of the Entity object kind to put in the
datastore. (Required)
--loader_opts=<string> A string to pass to the Loader.initialize method.
--log_file=<path> File to write bulkloader logs. If not supplied
then a new log file will be created, named:
bulkloader-log-TIMESTAMP.
--map Map an action across datastore entities.
--mapper_opts=<string> A string to pass to the Mapper.Initialize method.
--num_threads=<int> Number of threads to use for uploading entities
(Default 10)
--passin Read the login password from stdin.
--restore Restore from zero-configuration dump format.
--result_db_filename=<path>
Result database to write to for downloads.
--rps_limit=<int> The maximum number of records per second to
transfer to the server. (Default: 20)
--url=<string> URL endpoint to post to for importing data.
(Required)
The exit status will be 0 on success, non-zero on import failure.
Works with the remote_api mix-in library for google.appengine.ext.remote_api.
Please look there for documentation about how to set up the server side.
Example:
%(arg0)s --url=http://app.appspot.com/remote_api --kind=Model \
--filename=data.csv --config_file=loader_config.py
"""
import csv
import errno
import getopt
import getpass
import imp
import logging
import os
import Queue
import re
import shutil
import signal
import StringIO
import sys
import threading
import time
import traceback
import urllib2
import urlparse
from google.appengine.datastore import entity_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import datastore
from google.appengine.api import datastore_errors
from google.appengine.datastore import datastore_pb
from google.appengine.ext import db
from google.appengine.ext import key_range as key_range_module
from google.appengine.ext.db import polymodel
from google.appengine.ext.remote_api import remote_api_stub
from google.appengine.ext.remote_api import throttle as remote_api_throttle
from google.appengine.runtime import apiproxy_errors
from google.appengine.tools import adaptive_thread_pool
from google.appengine.tools import appengine_rpc
from google.appengine.tools.requeue import ReQueue
try:
import sqlite3
except ImportError:
pass
logger = logging.getLogger('google.appengine.tools.bulkloader')
KeyRange = key_range_module.KeyRange
DEFAULT_THREAD_COUNT = 10
DEFAULT_BATCH_SIZE = 10
DEFAULT_DOWNLOAD_BATCH_SIZE = 100
DEFAULT_QUEUE_SIZE = DEFAULT_THREAD_COUNT * 10
_THREAD_SHOULD_EXIT = '_THREAD_SHOULD_EXIT'
STATE_READ = 0
STATE_SENDING = 1
STATE_SENT = 2
STATE_NOT_SENT = 3
STATE_GETTING = 1
STATE_GOT = 2
STATE_ERROR = 3
DATA_CONSUMED_TO_HERE = 'DATA_CONSUMED_TO_HERE'
INITIAL_BACKOFF = 1.0
BACKOFF_FACTOR = 2.0
DEFAULT_BANDWIDTH_LIMIT = 250000
DEFAULT_RPS_LIMIT = 20
DEFAULT_REQUEST_LIMIT = 8
MAXIMUM_INCREASE_DURATION = 5.0
MAXIMUM_HOLD_DURATION = 12.0
def ImportStateMessage(state):
"""Converts a numeric state identifier to a status message."""
return ({
STATE_READ: 'Batch read from file.',
STATE_SENDING: 'Sending batch to server.',
STATE_SENT: 'Batch successfully sent.',
STATE_NOT_SENT: 'Error while sending batch.'
}[state])
def ExportStateMessage(state):
"""Converts a numeric state identifier to a status message."""
return ({
STATE_READ: 'Batch read from file.',
STATE_GETTING: 'Fetching batch from server',
STATE_GOT: 'Batch successfully fetched.',
STATE_ERROR: 'Error while fetching batch'
}[state])
def MapStateMessage(state):
"""Converts a numeric state identifier to a status message."""
return ({
STATE_READ: 'Batch read from file.',
STATE_GETTING: 'Querying for batch from server',
STATE_GOT: 'Batch successfully fetched.',
STATE_ERROR: 'Error while fetching or mapping.'
}[state])
def ExportStateName(state):
"""Converts a numeric state identifier to a string."""
return ({
STATE_READ: 'READ',
STATE_GETTING: 'GETTING',
STATE_GOT: 'GOT',
STATE_ERROR: 'NOT_GOT'
}[state])
def ImportStateName(state):
"""Converts a numeric state identifier to a string."""
return ({
STATE_READ: 'READ',
STATE_GETTING: 'SENDING',
STATE_GOT: 'SENT',
STATE_NOT_SENT: 'NOT_SENT'
}[state])
class Error(Exception):
"""Base-class for exceptions in this module."""
class MissingPropertyError(Error):
"""An expected field is missing from an entity, and no default was given."""
class FatalServerError(Error):
"""An unrecoverable error occurred while posting data to the server."""
class ResumeError(Error):
"""Error while trying to resume a partial upload."""
class ConfigurationError(Error):
"""Error in configuration options."""
class AuthenticationError(Error):
"""Error while trying to authenticate with the server."""
class FileNotFoundError(Error):
"""A filename passed in by the user refers to a non-existent input file."""
class FileNotReadableError(Error):
"""A filename passed in by the user refers to a non-readable input file."""
class FileExistsError(Error):
"""A filename passed in by the user refers to an existing output file."""
class FileNotWritableError(Error):
"""A filename passed in by the user refers to a non-writable output file."""
class BadStateError(Error):
"""A work item in an unexpected state was encountered."""
class KeyRangeError(Error):
"""An error during construction of a KeyRangeItem."""
class FieldSizeLimitError(Error):
"""The csv module tried to read a field larger than the size limit."""
def __init__(self, limit):
self.message = """
A field in your CSV input file has exceeded the current limit of %d.
You can raise this limit by adding the following lines to your config file:
import csv
csv.field_size_limit(new_limit)
where new_limit is a number larger than the size in bytes of the largest
field in your CSV.
""" % limit
Error.__init__(self, self.message)
class NameClashError(Error):
"""A name clash occurred while trying to alias old method names."""
def __init__(self, old_name, new_name, klass):
Error.__init__(self, old_name, new_name, klass)
self.old_name = old_name
self.new_name = new_name
self.klass = klass
def GetCSVGeneratorFactory(kind, csv_filename, batch_size, csv_has_header,
openfile=open, create_csv_reader=csv.reader):
"""Return a factory that creates a CSV-based UploadWorkItem generator.
Args:
kind: The kind of the entities being uploaded.
csv_filename: File on disk containing CSV data.
batch_size: Maximum number of CSV rows to stash into an UploadWorkItem.
csv_has_header: Whether to skip the first row of the CSV.
openfile: Used for dependency injection.
create_csv_reader: Used for dependency injection.
Returns:
A callable (accepting the Progress Queue and Progress Generators
as input) which creates the UploadWorkItem generator.
"""
loader = Loader.RegisteredLoader(kind)
loader._Loader__openfile = openfile
loader._Loader__create_csv_reader = create_csv_reader
record_generator = loader.generate_records(csv_filename)
def CreateGenerator(request_manager, progress_queue, progress_generator):
"""Initialize a UploadWorkItem generator.
Args:
request_manager: A RequestManager instance.
progress_queue: A ProgressQueue instance to send progress information.
progress_generator: A generator of progress information or None.
Returns:
An UploadWorkItemGenerator instance.
"""
return UploadWorkItemGenerator(request_manager,
progress_queue,
progress_generator,
record_generator,
csv_has_header,
batch_size)
return CreateGenerator
class UploadWorkItemGenerator(object):
"""Reads rows from a row generator and generates UploadWorkItems."""
def __init__(self,
request_manager,
progress_queue,
progress_generator,
record_generator,
skip_first,
batch_size):
"""Initialize a WorkItemGenerator.
Args:
request_manager: A RequestManager instance with which to associate
WorkItems.
progress_queue: A progress queue with which to associate WorkItems.
progress_generator: A generator of progress information.
record_generator: A generator of data records.
skip_first: Whether to skip the first data record.
batch_size: The number of data records per WorkItem.
"""
self.request_manager = request_manager
self.progress_queue = progress_queue
self.progress_generator = progress_generator
self.reader = record_generator
self.skip_first = skip_first
self.batch_size = batch_size
self.line_number = 1
self.column_count = None
self.read_rows = []
self.row_count = 0
self.xfer_count = 0
def _AdvanceTo(self, line):
"""Advance the reader to the given line.
Args:
line: A line number to advance to.
"""
while self.line_number < line:
self.reader.next()
self.line_number += 1
self.row_count += 1
self.xfer_count += 1
def _ReadRows(self, key_start, key_end):
"""Attempts to read and encode rows [key_start, key_end].
The encoded rows are stored in self.read_rows.
Args:
key_start: The starting line number.
key_end: The ending line number.
Raises:
StopIteration: if the reader runs out of rows
ResumeError: if there are an inconsistent number of columns.
"""
assert self.line_number == key_start
self.read_rows = []
while self.line_number <= key_end:
row = self.reader.next()
self.row_count += 1
if self.column_count is None:
self.column_count = len(row)
else:
if self.column_count != len(row):
raise ResumeError('Column count mismatch, %d: %s' %
(self.column_count, str(row)))
self.read_rows.append((self.line_number, row))
self.line_number += 1
def _MakeItem(self, key_start, key_end, rows, progress_key=None):
"""Makes a UploadWorkItem containing the given rows, with the given keys.
Args:
key_start: The start key for the UploadWorkItem.
key_end: The end key for the UploadWorkItem.
rows: A list of the rows for the UploadWorkItem.
progress_key: The progress key for the UploadWorkItem
Returns:
An UploadWorkItem instance for the given batch.
"""
assert rows
item = UploadWorkItem(self.request_manager, self.progress_queue, rows,
key_start, key_end, progress_key=progress_key)
return item
def Batches(self):
"""Reads from the record_generator and generates UploadWorkItems.
Yields:
Instances of class UploadWorkItem
Raises:
ResumeError: If the progress database and data file indicate a different
number of rows.
"""
if self.skip_first:
logger.info('Skipping header line.')
try:
self.reader.next()
except StopIteration:
return
exhausted = False
self.line_number = 1
self.column_count = None
logger.info('Starting import; maximum %d entities per post',
self.batch_size)
state = None
if self.progress_generator:
for progress_key, state, key_start, key_end in self.progress_generator:
if key_start:
try:
self._AdvanceTo(key_start)
self._ReadRows(key_start, key_end)
yield self._MakeItem(key_start,
key_end,
self.read_rows,
progress_key=progress_key)
except StopIteration:
logger.error('Mismatch between data file and progress database')
raise ResumeError(
'Mismatch between data file and progress database')
elif state == DATA_CONSUMED_TO_HERE:
try:
self._AdvanceTo(key_end + 1)
except StopIteration:
state = None
if self.progress_generator is None or state == DATA_CONSUMED_TO_HERE:
while not exhausted:
key_start = self.line_number
key_end = self.line_number + self.batch_size - 1
try:
self._ReadRows(key_start, key_end)
except StopIteration:
exhausted = True
key_end = self.line_number - 1
if key_start <= key_end:
yield self._MakeItem(key_start, key_end, self.read_rows)
class CSVGenerator(object):
"""Reads a CSV file and generates data records."""
def __init__(self,
csv_filename,
openfile=open,
create_csv_reader=csv.reader):
"""Initializes a CSV generator.
Args:
csv_filename: File on disk containing CSV data.
openfile: Used for dependency injection of 'open'.
create_csv_reader: Used for dependency injection of 'csv.reader'.
"""
self.csv_filename = csv_filename
self.openfile = openfile
self.create_csv_reader = create_csv_reader
def Records(self):
"""Reads the CSV data file and generates row records.
Yields:
Lists of strings
Raises:
ResumeError: If the progress database and data file indicate a different
number of rows.
"""
csv_file = self.openfile(self.csv_filename, 'rb')
reader = self.create_csv_reader(csv_file, skipinitialspace=True)
try:
for record in reader:
yield record
except csv.Error, e:
if e.args and e.args[0].startswith('field larger than field limit'):
limit = e.args[1]
raise FieldSizeLimitError(limit)
else:
raise
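# Illustrative sketch, not part of the original module: reading rows through
# the generator above. The filename is an assumption.
def _ExampleReadCsvRows(csv_filename='data.csv'):
  generator = CSVGenerator(csv_filename)
  rows = []
  for record in generator.Records():
    rows.append(record)
  return rows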
class KeyRangeItemGenerator(object):
"""Generates ranges of keys to download.
Reads progress information from the progress database and creates
KeyRangeItem objects corresponding to incompletely downloaded parts of an
export.
"""
def __init__(self, request_manager, kind, progress_queue, progress_generator,
key_range_item_factory):
"""Initialize the KeyRangeItemGenerator.
Args:
request_manager: A RequestManager instance.
kind: The kind of entities being transferred.
progress_queue: A queue used for tracking progress information.
progress_generator: A generator of prior progress information, or None
if there is no prior status.
key_range_item_factory: A factory to produce KeyRangeItems.
"""
self.request_manager = request_manager
self.kind = kind
self.row_count = 0
self.xfer_count = 0
self.progress_queue = progress_queue
self.progress_generator = progress_generator
self.key_range_item_factory = key_range_item_factory
def Batches(self):
"""Iterate through saved progress information.
Yields:
KeyRangeItem instances corresponding to undownloaded key ranges.
"""
if self.progress_generator is not None:
for progress_key, state, key_start, key_end in self.progress_generator:
if state is not None and state != STATE_GOT and key_start is not None:
key_start = ParseKey(key_start)
key_end = ParseKey(key_end)
key_range = KeyRange(key_start=key_start,
key_end=key_end)
result = self.key_range_item_factory(self.request_manager,
self.progress_queue,
self.kind,
key_range,
progress_key=progress_key,
state=STATE_READ)
yield result
else:
key_range = KeyRange()
yield self.key_range_item_factory(self.request_manager,
self.progress_queue,
self.kind,
key_range)
class DownloadResult(object):
"""Holds the result of an entity download."""
def __init__(self, continued, direction, keys, entities):
self.continued = continued
self.direction = direction
self.keys = keys
self.entities = entities
self.count = len(keys)
assert self.count == len(entities)
assert direction in (key_range_module.KeyRange.ASC,
key_range_module.KeyRange.DESC)
if self.count > 0:
if direction == key_range_module.KeyRange.ASC:
self.key_start = keys[0]
self.key_end = keys[-1]
else:
self.key_start = keys[-1]
self.key_end = keys[0]
def Entities(self):
"""Returns the list of entities for this result in key order."""
if self.direction == key_range_module.KeyRange.ASC:
return list(self.entities)
else:
result = list(self.entities)
result.reverse()
return result
  def __str__(self):
    return 'continued = %s\n%s' % (
        str(self.continued),
        '\n'.join(str(entity) for entity in self.entities))
class _WorkItem(adaptive_thread_pool.WorkItem):
"""Holds a description of a unit of upload or download work."""
def __init__(self, progress_queue, key_start, key_end, state_namer,
state=STATE_READ, progress_key=None):
"""Initialize the _WorkItem instance.
Args:
progress_queue: A queue used for tracking progress information.
key_start: The start key of the work item.
key_end: The end key of the work item.
state_namer: Function to describe work item states.
state: The initial state of the work item.
progress_key: If this WorkItem represents state from a prior run,
then this will be the key within the progress database.
"""
adaptive_thread_pool.WorkItem.__init__(self,
'[%s-%s]' % (key_start, key_end))
self.progress_queue = progress_queue
self.state_namer = state_namer
self.state = state
self.progress_key = progress_key
self.progress_event = threading.Event()
self.key_start = key_start
self.key_end = key_end
self.error = None
self.traceback = None
def _TransferItem(self, thread_pool):
raise NotImplementedError()
def SetError(self):
"""Sets the error and traceback information for this thread.
This must be called from an exception handler.
"""
if not self.error:
exc_info = sys.exc_info()
self.error = exc_info[1]
self.traceback = exc_info[2]
def PerformWork(self, thread_pool):
"""Perform the work of this work item and report the results.
Args:
thread_pool: An AdaptiveThreadPool instance.
Returns:
A tuple (status, instruction) of the work status and an instruction
for the ThreadGate.
"""
status = adaptive_thread_pool.WorkItem.FAILURE
instruction = adaptive_thread_pool.ThreadGate.DECREASE
try:
self.MarkAsTransferring()
try:
transfer_time = self._TransferItem(thread_pool)
if transfer_time is None:
status = adaptive_thread_pool.WorkItem.RETRY
instruction = adaptive_thread_pool.ThreadGate.HOLD
else:
logger.debug('[%s] %s Transferred %d entities in %0.1f seconds',
threading.currentThread().getName(), self, self.count,
transfer_time)
sys.stdout.write('.')
sys.stdout.flush()
status = adaptive_thread_pool.WorkItem.SUCCESS
if transfer_time <= MAXIMUM_INCREASE_DURATION:
instruction = adaptive_thread_pool.ThreadGate.INCREASE
elif transfer_time <= MAXIMUM_HOLD_DURATION:
instruction = adaptive_thread_pool.ThreadGate.HOLD
except (db.InternalError, db.NotSavedError, db.Timeout,
db.TransactionFailedError,
apiproxy_errors.OverQuotaError,
apiproxy_errors.DeadlineExceededError,
apiproxy_errors.ApplicationError), e:
status = adaptive_thread_pool.WorkItem.RETRY
logger.exception('Retrying on non-fatal datastore error: %s', e)
except urllib2.HTTPError, e:
http_status = e.code
if http_status == 403 or (http_status >= 500 and http_status < 600):
status = adaptive_thread_pool.WorkItem.RETRY
logger.exception('Retrying on non-fatal HTTP error: %d %s',
http_status, e.msg)
else:
self.SetError()
status = adaptive_thread_pool.WorkItem.FAILURE
except urllib2.URLError, e:
if IsURLErrorFatal(e):
self.SetError()
status = adaptive_thread_pool.WorkItem.FAILURE
else:
status = adaptive_thread_pool.WorkItem.RETRY
logger.exception('Retrying on non-fatal URL error: %s', e.reason)
finally:
if status == adaptive_thread_pool.WorkItem.SUCCESS:
self.MarkAsTransferred()
else:
self.MarkAsError()
return (status, instruction)
def _AssertInState(self, *states):
"""Raises an Error if the state of this range is not in states."""
if not self.state in states:
raise BadStateError('%s:%s not in %s' %
(str(self),
self.state_namer(self.state),
map(self.state_namer, states)))
def _AssertProgressKey(self):
"""Raises an Error if the progress key is None."""
if self.progress_key is None:
raise BadStateError('%s: Progress key is missing' % str(self))
def MarkAsRead(self):
"""Mark this _WorkItem as read, updating the progress database."""
self._AssertInState(STATE_READ)
self._StateTransition(STATE_READ, blocking=True)
def MarkAsTransferring(self):
"""Mark this _WorkItem as transferring, updating the progress database."""
self._AssertInState(STATE_READ, STATE_ERROR)
self._AssertProgressKey()
self._StateTransition(STATE_GETTING, blocking=True)
def MarkAsTransferred(self):
"""Mark this _WorkItem as transferred, updating the progress database."""
raise NotImplementedError()
def MarkAsError(self):
"""Mark this _WorkItem as failed, updating the progress database."""
self._AssertInState(STATE_GETTING)
self._AssertProgressKey()
self._StateTransition(STATE_ERROR, blocking=True)
def _StateTransition(self, new_state, blocking=False):
"""Transition the work item to a new state, storing progress information.
Args:
new_state: The state to transition to.
blocking: Whether to block for the progress thread to acknowledge the
transition.
"""
assert not self.progress_event.isSet()
self.state = new_state
self.progress_queue.put(self)
if blocking:
self.progress_event.wait()
self.progress_event.clear()
class UploadWorkItem(_WorkItem):
"""Holds a unit of uploading work.
A UploadWorkItem represents a number of entities that need to be uploaded to
Google App Engine. These entities are encoded in the "content" field of
the UploadWorkItem, and will be POST'd as-is to the server.
The entities are identified by a range of numeric keys, inclusively. In
the case of a resumption of an upload, or a replay to correct errors,
these keys must be able to identify the same set of entities.
  Note that the keys specify a range: the entities do not have to fill the
  entire range sequentially; the start and end keys simply bound the set of
  entities in this item.
"""
def __init__(self, request_manager, progress_queue, rows, key_start, key_end,
progress_key=None):
"""Initialize the UploadWorkItem instance.
Args:
request_manager: A RequestManager instance.
progress_queue: A queue used for tracking progress information.
rows: A list of pairs of a line number and a list of column values
key_start: The (numeric) starting key, inclusive.
key_end: The (numeric) ending key, inclusive.
progress_key: If this UploadWorkItem represents state from a prior run,
then this will be the key within the progress database.
"""
_WorkItem.__init__(self, progress_queue, key_start, key_end,
ImportStateName, state=STATE_READ,
progress_key=progress_key)
assert isinstance(key_start, (int, long))
assert isinstance(key_end, (int, long))
assert key_start <= key_end
self.request_manager = request_manager
self.rows = rows
self.content = None
self.count = len(rows)
def __str__(self):
return '[%s-%s]' % (self.key_start, self.key_end)
def _TransferItem(self, thread_pool, get_time=time.time):
"""Transfers the entities associated with an item.
Args:
thread_pool: An AdaptiveThreadPool instance.
get_time: Used for dependency injection.
"""
t = get_time()
if not self.content:
self.content = self.request_manager.EncodeContent(self.rows)
try:
self.request_manager.PostEntities(self.content)
except:
raise
return get_time() - t
def MarkAsTransferred(self):
"""Mark this UploadWorkItem as sucessfully-sent to the server."""
self._AssertInState(STATE_SENDING)
self._AssertProgressKey()
self._StateTransition(STATE_SENT, blocking=False)
def GetImplementationClass(kind_or_class_key):
"""Returns the implementation class for a given kind or class key.
Args:
kind_or_class_key: A kind string or a tuple of kind strings.
  Returns:
A db.Model subclass for the given kind or class key.
"""
if isinstance(kind_or_class_key, tuple):
try:
implementation_class = polymodel._class_map[kind_or_class_key]
except KeyError:
raise db.KindError('No implementation for class \'%s\'' %
kind_or_class_key)
else:
implementation_class = db.class_for_kind(kind_or_class_key)
return implementation_class
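# Illustrative sketch, not part of the original module: a plain kind string is
# resolved through db.class_for_kind, while a tuple of class names selects a
# PolyModel leaf class, e.g. GetImplementationClass(('Animal', 'Dog')).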
def KeyLEQ(key1, key2):
"""Compare two keys for less-than-or-equal-to.
  All keys with numeric ids come before all keys with names. None represents
  an unbounded end-point, so a None key compares as less-than-or-equal in
  either direction.
Args:
key1: An int or datastore.Key instance.
key2: An int or datastore.Key instance.
Returns:
True if key1 <= key2
"""
if key1 is None or key2 is None:
return True
return key1 <= key2
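# Illustrative sketch, not part of the original module: None acts as an
# unbounded endpoint, so both of these comparisons hold:
#
#   KeyLEQ(None, datastore.Key.from_path('Kind', 1)) == True
#   KeyLEQ(datastore.Key.from_path('Kind', 1), None) == True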
class KeyRangeItem(_WorkItem):
"""Represents an item of work that scans over a key range.
  A KeyRangeItem object holds a KeyRange and has an associated state:
  STATE_READ, STATE_GETTING, STATE_GOT, and STATE_ERROR.
  - STATE_READ indicates the range is ready to be downloaded by a worker thread.
- STATE_GETTING indicates the range is currently being downloaded.
- STATE_GOT indicates that the range was successfully downloaded
- STATE_ERROR indicates that an error occurred during the last download
attempt
KeyRangeItems not in the STATE_GOT state are stored in the progress database.
When a piece of KeyRangeItem work is downloaded, the download may cover only
a portion of the range. In this case, the old KeyRangeItem is removed from
the progress database and ranges covering the undownloaded range are
generated and stored as STATE_READ in the export progress database.
"""
def __init__(self,
request_manager,
progress_queue,
kind,
key_range,
progress_key=None,
state=STATE_READ):
"""Initialize a KeyRangeItem object.
Args:
request_manager: A RequestManager instance.
progress_queue: A queue used for tracking progress information.
kind: The kind of entities for this range.
key_range: A KeyRange instance for this work item.
progress_key: The key for this range within the progress database.
state: The initial state of this range.
"""
_WorkItem.__init__(self, progress_queue, key_range.key_start,
key_range.key_end, ExportStateName, state=state,
progress_key=progress_key)
self.request_manager = request_manager
self.kind = kind
self.key_range = key_range
self.download_result = None
self.count = 0
self.key_start = key_range.key_start
self.key_end = key_range.key_end
def __str__(self):
return str(self.key_range)
def __repr__(self):
return self.__str__()
def MarkAsTransferred(self):
"""Mark this KeyRangeItem as transferred, updating the progress database."""
pass
def Process(self, download_result, thread_pool, batch_size,
new_state=STATE_GOT):
"""Mark this KeyRangeItem as success, updating the progress database.
Process will split this KeyRangeItem based on the content of
download_result and adds the unfinished ranges to the work queue.
Args:
download_result: A DownloadResult instance.
thread_pool: An AdaptiveThreadPool instance.
batch_size: The number of entities to transfer per request.
new_state: The state to transition the completed range to.
"""
self._AssertInState(STATE_GETTING)
self._AssertProgressKey()
self.download_result = download_result
self.count = len(download_result.keys)
if download_result.continued:
self._FinishedRange()._StateTransition(new_state, blocking=True)
self._AddUnfinishedRanges(thread_pool, batch_size)
else:
self._StateTransition(new_state, blocking=True)
def _FinishedRange(self):
"""Returns the range completed by the download_result.
Returns:
A KeyRangeItem representing a completed range.
"""
assert self.download_result is not None
if self.key_range.direction == key_range_module.KeyRange.ASC:
key_start = self.key_range.key_start
if self.download_result.continued:
key_end = self.download_result.key_end
else:
key_end = self.key_range.key_end
else:
key_end = self.key_range.key_end
if self.download_result.continued:
key_start = self.download_result.key_start
else:
key_start = self.key_range.key_start
key_range = KeyRange(key_start=key_start,
key_end=key_end,
direction=self.key_range.direction)
result = self.__class__(self.request_manager,
self.progress_queue,
self.kind,
key_range,
progress_key=self.progress_key,
state=self.state)
result.download_result = self.download_result
result.count = self.count
return result
def _SplitAndAddRanges(self, thread_pool, batch_size):
"""Split the key range [key_start, key_end] into a list of ranges."""
if self.download_result.direction == key_range_module.KeyRange.ASC:
key_range = KeyRange(
key_start=self.download_result.key_end,
key_end=self.key_range.key_end,
include_start=False)
else:
key_range = KeyRange(
key_start=self.key_range.key_start,
key_end=self.download_result.key_start,
include_end=False)
if thread_pool.QueuedItemCount() > 2 * thread_pool.num_threads():
ranges = [key_range]
else:
ranges = key_range.split_range(batch_size=batch_size)
for key_range in ranges:
key_range_item = self.__class__(self.request_manager,
self.progress_queue,
self.kind,
key_range)
key_range_item.MarkAsRead()
thread_pool.SubmitItem(key_range_item, block=True)
def _AddUnfinishedRanges(self, thread_pool, batch_size):
"""Adds incomplete KeyRanges to the thread_pool.
Args:
thread_pool: An AdaptiveThreadPool instance.
batch_size: The number of entities to transfer per request.
Returns:
A list of KeyRanges representing incomplete datastore key ranges.
Raises:
KeyRangeError: if this key range has already been completely transferred.
"""
assert self.download_result is not None
if self.download_result.continued:
self._SplitAndAddRanges(thread_pool, batch_size)
else:
raise KeyRangeError('No unfinished part of key range.')
class DownloadItem(KeyRangeItem):
"""A KeyRangeItem for downloading key ranges."""
def _TransferItem(self, thread_pool, get_time=time.time):
"""Transfers the entities associated with an item."""
t = get_time()
download_result = self.request_manager.GetEntities(self)
transfer_time = get_time() - t
self.Process(download_result, thread_pool,
self.request_manager.batch_size)
return transfer_time
class MapperItem(KeyRangeItem):
"""A KeyRangeItem for mapping over key ranges."""
def _TransferItem(self, thread_pool, get_time=time.time):
t = get_time()
download_result = self.request_manager.GetEntities(self)
transfer_time = get_time() - t
mapper = self.request_manager.GetMapper()
try:
mapper.batch_apply(download_result.Entities())
except MapperRetry:
return None
self.Process(download_result, thread_pool,
self.request_manager.batch_size)
return transfer_time
class RequestManager(object):
"""A class which wraps a connection to the server."""
def __init__(self,
app_id,
host_port,
url_path,
kind,
throttle,
batch_size,
secure,
email,
passin,
dry_run=False):
"""Initialize a RequestManager object.
Args:
app_id: String containing the application id for requests.
host_port: String containing the "host:port" pair; the port is optional.
url_path: partial URL (path) to post entity data to.
kind: Kind of the Entity records being posted.
throttle: A Throttle instance.
batch_size: The number of entities to transfer per request.
secure: Use SSL when communicating with server.
email: If not none, the username to log in with.
      passin: If True, the password will be read from standard in.
      dry_run: If True, do not execute any remote_api calls.
    """
self.app_id = app_id
self.host_port = host_port
self.host = host_port.split(':')[0]
if url_path and url_path[0] != '/':
url_path = '/' + url_path
self.url_path = url_path
self.kind = kind
self.throttle = throttle
self.batch_size = batch_size
self.secure = secure
self.authenticated = False
self.auth_called = False
self.parallel_download = True
self.email = email
self.passin = passin
self.mapper = None
self.dry_run = dry_run
if self.dry_run:
logger.info('Running in dry run mode, skipping remote_api setup')
return
logger.debug('Configuring remote_api. url_path = %s, '
'servername = %s' % (url_path, host_port))
def CookieHttpRpcServer(*args, **kwargs):
kwargs['save_cookies'] = True
kwargs['account_type'] = 'HOSTED_OR_GOOGLE'
return appengine_rpc.HttpRpcServer(*args, **kwargs)
remote_api_stub.ConfigureRemoteDatastore(
app_id,
url_path,
self.AuthFunction,
servername=host_port,
rpc_server_factory=CookieHttpRpcServer,
secure=self.secure)
remote_api_throttle.ThrottleRemoteDatastore(self.throttle)
logger.debug('Bulkloader using app_id: %s', os.environ['APPLICATION_ID'])
def Authenticate(self):
"""Invoke authentication if necessary."""
logger.info('Connecting to %s%s', self.host_port, self.url_path)
if self.dry_run:
self.authenticated = True
return
remote_api_stub.MaybeInvokeAuthentication()
self.authenticated = True
def AuthFunction(self,
raw_input_fn=raw_input,
password_input_fn=getpass.getpass):
"""Prompts the user for a username and password.
Caches the results the first time it is called and returns the
same result every subsequent time.
Args:
raw_input_fn: Used for dependency injection.
password_input_fn: Used for dependency injection.
Returns:
A pair of the username and password.
"""
if self.email:
email = self.email
else:
print 'Please enter login credentials for %s' % (
self.host)
email = raw_input_fn('Email: ')
if email:
password_prompt = 'Password for %s: ' % email
if self.passin:
password = raw_input_fn(password_prompt)
else:
password = password_input_fn(password_prompt)
else:
password = None
self.auth_called = True
return (email, password)
def EncodeContent(self, rows, loader=None):
"""Encodes row data to the wire format.
Args:
rows: A list of pairs of a line number and a list of column values.
loader: Used for dependency injection.
Returns:
A list of datastore.Entity instances.
Raises:
ConfigurationError: if no loader is defined for self.kind
"""
if not loader:
try:
loader = Loader.RegisteredLoader(self.kind)
except KeyError:
logger.error('No Loader defined for kind %s.' % self.kind)
raise ConfigurationError('No Loader defined for kind %s.' % self.kind)
entities = []
for line_number, values in rows:
key = loader.generate_key(line_number, values)
if isinstance(key, datastore.Key):
parent = key.parent()
key = key.name()
else:
parent = None
entity = loader.create_entity(values, key_name=key, parent=parent)
def ToEntity(entity):
if isinstance(entity, db.Model):
return entity._populate_entity()
else:
return entity
if isinstance(entity, list):
entities.extend(map(ToEntity, entity))
elif entity:
entities.append(ToEntity(entity))
return entities
def PostEntities(self, entities):
"""Posts Entity records to a remote endpoint over HTTP.
Args:
entities: A list of datastore entities.
"""
if self.dry_run:
return
datastore.Put(entities)
def _QueryForPbs(self, query):
"""Perform the given query and return a list of entity_pb's."""
try:
query_pb = query._ToPb(limit=self.batch_size)
result_pb = datastore_pb.QueryResult()
apiproxy_stub_map.MakeSyncCall('datastore_v3', 'RunQuery', query_pb,
result_pb)
next_pb = datastore_pb.NextRequest()
next_pb.set_count(self.batch_size)
next_pb.mutable_cursor().CopyFrom(result_pb.cursor())
result_pb = datastore_pb.QueryResult()
apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Next', next_pb, result_pb)
return result_pb.result_list()
except apiproxy_errors.ApplicationError, e:
raise datastore._ToDatastoreError(e)
def GetEntities(self, key_range_item, key_factory=datastore.Key):
"""Gets Entity records from a remote endpoint over HTTP.
Args:
key_range_item: Range of keys to get.
key_factory: Used for dependency injection.
Returns:
A DownloadResult instance.
Raises:
ConfigurationError: if no Exporter is defined for self.kind
"""
keys = []
entities = []
if self.parallel_download:
query = key_range_item.key_range.make_directed_datastore_query(self.kind)
try:
results = self._QueryForPbs(query)
except datastore_errors.NeedIndexError:
logger.info('%s: No descending index on __key__, '
'performing serial download', self.kind)
self.parallel_download = False
if not self.parallel_download:
key_range_item.key_range.direction = key_range_module.KeyRange.ASC
query = key_range_item.key_range.make_ascending_datastore_query(self.kind)
results = self._QueryForPbs(query)
size = len(results)
for entity in results:
key = key_factory()
key._Key__reference = entity.key()
entities.append(entity)
keys.append(key)
continued = (size == self.batch_size)
key_range_item.count = size
return DownloadResult(continued, key_range_item.key_range.direction,
keys, entities)
def GetMapper(self):
"""Returns a mapper for the registered kind.
Returns:
A Mapper instance.
Raises:
ConfigurationError: if no Mapper is defined for self.kind
"""
if not self.mapper:
try:
self.mapper = Mapper.RegisteredMapper(self.kind)
except KeyError:
logger.error('No Mapper defined for kind %s.' % self.kind)
raise ConfigurationError('No Mapper defined for kind %s.' % self.kind)
return self.mapper
def InterruptibleSleep(sleep_time):
"""Puts thread to sleep, checking this threads exit_flag twice a second.
Args:
sleep_time: Time to sleep.
"""
slept = 0.0
epsilon = .0001
thread = threading.currentThread()
while slept < sleep_time - epsilon:
remaining = sleep_time - slept
this_sleep_time = min(remaining, 0.5)
time.sleep(this_sleep_time)
slept += this_sleep_time
if thread.exit_flag:
return
class _ThreadBase(threading.Thread):
"""Provide some basic features for the threads used in the uploader.
This abstract base class is used to provide some common features:
* Flag to ask thread to exit as soon as possible.
* Record exit/error status for the primary thread to pick up.
* Capture exceptions and record them for pickup.
* Some basic logging of thread start/stop.
* All threads are "daemon" threads.
* Friendly names for presenting to users.
Concrete sub-classes must implement PerformWork().
Either self.NAME should be set or GetFriendlyName() be overridden to
return a human-friendly name for this thread.
The run() method starts the thread and prints start/exit messages.
self.exit_flag is intended to signal that this thread should exit
when it gets the chance. PerformWork() should check self.exit_flag
whenever it has the opportunity to exit gracefully.
"""
def __init__(self):
threading.Thread.__init__(self)
self.setDaemon(True)
self.exit_flag = False
self.error = None
self.traceback = None
def run(self):
"""Perform the work of the thread."""
logger.debug('[%s] %s: started', self.getName(), self.__class__.__name__)
try:
self.PerformWork()
except:
self.SetError()
logger.exception('[%s] %s:', self.getName(), self.__class__.__name__)
logger.debug('[%s] %s: exiting', self.getName(), self.__class__.__name__)
def SetError(self):
"""Sets the error and traceback information for this thread.
This must be called from an exception handler.
"""
if not self.error:
exc_info = sys.exc_info()
self.error = exc_info[1]
self.traceback = exc_info[2]
def PerformWork(self):
"""Perform the thread-specific work."""
raise NotImplementedError()
def CheckError(self):
"""If an error is present, then log it."""
if self.error:
logger.error('Error in %s: %s', self.GetFriendlyName(), self.error)
if self.traceback:
logger.debug(''.join(traceback.format_exception(self.error.__class__,
self.error,
self.traceback)))
def GetFriendlyName(self):
"""Returns a human-friendly description of the thread."""
if hasattr(self, 'NAME'):
return self.NAME
return 'unknown thread'
non_fatal_error_codes = set([errno.EAGAIN,
errno.ENETUNREACH,
errno.ENETRESET,
errno.ECONNRESET,
errno.ETIMEDOUT,
errno.EHOSTUNREACH])
def IsURLErrorFatal(error):
"""Returns False if the given URLError may be from a transient failure.
Args:
error: A urllib2.URLError instance.
"""
assert isinstance(error, urllib2.URLError)
if not hasattr(error, 'reason'):
return True
if not isinstance(error.reason[0], int):
return True
return error.reason[0] not in non_fatal_error_codes
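# Illustrative sketch, not part of the original module: a reset connection is
# retried, while an error without a numeric errno is treated as fatal.
#
#   IsURLErrorFatal(urllib2.URLError((errno.ECONNRESET, 'reset')))  -> False
#   IsURLErrorFatal(urllib2.URLError('hostname lookup failed'))     -> True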
class DataSourceThread(_ThreadBase):
"""A thread which reads WorkItems and pushes them into queue.
This thread will read/consume WorkItems from a generator (produced by
the generator factory). These WorkItems will then be pushed into the
thread_pool. Note that reading will block if/when the thread_pool becomes
full. Information on content consumed from the generator will be pushed
into the progress_queue.
"""
NAME = 'data source thread'
def __init__(self,
request_manager,
thread_pool,
progress_queue,
workitem_generator_factory,
progress_generator_factory):
"""Initialize the DataSourceThread instance.
Args:
request_manager: A RequestManager instance.
thread_pool: An AdaptiveThreadPool instance.
progress_queue: A queue used for tracking progress information.
workitem_generator_factory: A factory that creates a WorkItem generator
progress_generator_factory: A factory that creates a generator which
produces prior progress status, or None if there is no prior status
to use.
"""
_ThreadBase.__init__(self)
self.request_manager = request_manager
self.thread_pool = thread_pool
self.progress_queue = progress_queue
self.workitem_generator_factory = workitem_generator_factory
self.progress_generator_factory = progress_generator_factory
self.entity_count = 0
def PerformWork(self):
"""Performs the work of a DataSourceThread."""
if self.progress_generator_factory:
progress_gen = self.progress_generator_factory()
else:
progress_gen = None
content_gen = self.workitem_generator_factory(self.request_manager,
self.progress_queue,
progress_gen)
self.xfer_count = 0
self.read_count = 0
self.read_all = False
for item in content_gen.Batches():
item.MarkAsRead()
while not self.exit_flag:
try:
self.thread_pool.SubmitItem(item, block=True, timeout=1.0)
self.entity_count += item.count
break
except Queue.Full:
pass
if self.exit_flag:
break
if not self.exit_flag:
self.read_all = True
self.read_count = content_gen.row_count
self.xfer_count = content_gen.xfer_count
def _RunningInThread(thread):
"""Return True if we are running within the specified thread."""
return threading.currentThread().getName() == thread.getName()
class _Database(object):
"""Base class for database connections in this module.
The table is created by a primary thread (the python main thread)
but all future lookups and updates are performed by a secondary
thread.
"""
SIGNATURE_TABLE_NAME = 'bulkloader_database_signature'
def __init__(self,
db_filename,
create_table,
signature,
index=None,
commit_periodicity=100):
"""Initialize the _Database instance.
Args:
db_filename: The sqlite3 file to use for the database.
create_table: A string containing the SQL table creation command.
signature: A string identifying the important invocation options,
used to make sure we are not using an old database.
index: An optional string to create an index for the database.
commit_periodicity: Number of operations between database commits.
"""
self.db_filename = db_filename
logger.info('Opening database: %s', db_filename)
self.primary_conn = sqlite3.connect(db_filename, isolation_level=None)
self.primary_thread = threading.currentThread()
self.secondary_conn = None
self.secondary_thread = None
self.operation_count = 0
self.commit_periodicity = commit_periodicity
try:
self.primary_conn.execute(create_table)
except sqlite3.OperationalError, e:
if 'already exists' not in e.message:
raise
if index:
try:
self.primary_conn.execute(index)
except sqlite3.OperationalError, e:
if 'already exists' not in e.message:
raise
self.existing_table = False
signature_cursor = self.primary_conn.cursor()
create_signature = """
create table %s (
value TEXT not null)
""" % _Database.SIGNATURE_TABLE_NAME
try:
self.primary_conn.execute(create_signature)
self.primary_conn.cursor().execute(
'insert into %s (value) values (?)' % _Database.SIGNATURE_TABLE_NAME,
(signature,))
except sqlite3.OperationalError, e:
if 'already exists' not in e.message:
logger.exception('Exception creating table:')
raise
else:
self.existing_table = True
signature_cursor.execute(
'select * from %s' % _Database.SIGNATURE_TABLE_NAME)
(result,) = signature_cursor.fetchone()
if result and result != signature:
logger.error('Database signature mismatch:\n\n'
'Found:\n'
'%s\n\n'
'Expecting:\n'
'%s\n',
result, signature)
raise ResumeError('Database signature mismatch: %s != %s' % (
signature, result))
def ThreadComplete(self):
"""Finalize any operations the secondary thread has performed.
The database aggregates lots of operations into a single commit, and
this method is used to commit any pending operations as the thread
is about to shut down.
"""
if self.secondary_conn:
self._MaybeCommit(force_commit=True)
def _MaybeCommit(self, force_commit=False):
"""Periodically commit changes into the SQLite database.
Committing every operation is quite expensive, and slows down the
operation of the script. Thus, we only commit after every N operations,
as determined by the self.commit_periodicity value. Optionally, the
caller can force a commit.
Args:
force_commit: Pass True in order for a commit to occur regardless
of the current operation count.
"""
self.operation_count += 1
if force_commit or (self.operation_count % self.commit_periodicity) == 0:
self.secondary_conn.commit()
def _OpenSecondaryConnection(self):
"""Possibly open a database connection for the secondary thread.
If the connection is not open (for the calling thread, which is assumed
to be the unique secondary thread), then open it. We also open a couple
cursors for later use (and reuse).
"""
if self.secondary_conn:
return
assert not _RunningInThread(self.primary_thread)
self.secondary_thread = threading.currentThread()
self.secondary_conn = sqlite3.connect(self.db_filename)
self.insert_cursor = self.secondary_conn.cursor()
self.update_cursor = self.secondary_conn.cursor()
zero_matcher = re.compile(r'\x00')
zero_one_matcher = re.compile(r'\x00\x01')
def KeyStr(key):
"""Returns a string to represent a key, preserving ordering.
Unlike datastore.Key.__str__(), we have the property:
key1 < key2 ==> KeyStr(key1) < KeyStr(key2)
The key string is constructed from the key path as follows:
(1) Strings are prepended with ':' and numeric id's are padded to
20 digits.
(2) Any null characters (u'\0') present are replaced with u'\0\1'
(3) The sequence u'\0\0' is used to separate each component of the path.
(1) assures that names and ids compare properly, while (2) and (3) enforce
the part-by-part comparison of pieces of the path.
Args:
key: A datastore.Key instance.
Returns:
A string representation of the key, which preserves ordering.
"""
assert isinstance(key, datastore.Key)
path = key.to_path()
out_path = []
for part in path:
if isinstance(part, (int, long)):
part = '%020d' % part
else:
part = ':%s' % part
out_path.append(zero_matcher.sub(u'\0\1', part))
out_str = u'\0\0'.join(out_path)
return out_str
def StrKey(key_str):
"""The inverse of the KeyStr function.
Args:
key_str: A string in the range of KeyStr.
Returns:
A datastore.Key instance k, such that KeyStr(k) == key_str.
"""
parts = key_str.split(u'\0\0')
for i in xrange(len(parts)):
if parts[i][0] == ':':
part = parts[i][1:]
part = zero_one_matcher.sub(u'\0', part)
parts[i] = part
else:
parts[i] = int(parts[i])
return datastore.Key.from_path(*parts)
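# Illustrative sketch, not part of the original module: KeyStr pads numeric ids
# to 20 digits, prefixes names with ':', and joins path parts with u'\0\0', so
# string comparison matches key ordering and StrKey inverts the encoding:
#
#   KeyStr(datastore.Key.from_path('Kind', 7))
#       == u':Kind' + u'\0\0' + u'00000000000000000007'
#   StrKey(KeyStr(k)) == k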
class ResultDatabase(_Database):
"""Persistently record all the entities downloaded during an export.
The entities are held in the database by their unique datastore key
in order to avoid duplication if an export is restarted.
"""
def __init__(self, db_filename, signature, commit_periodicity=1):
"""Initialize a ResultDatabase object.
Args:
db_filename: The name of the SQLite database to use.
signature: A string identifying the important invocation options,
used to make sure we are not using an old database.
commit_periodicity: How many operations to perform between commits.
"""
self.complete = False
create_table = ('create table result (\n'
'id BLOB primary key,\n'
'value BLOB not null)')
_Database.__init__(self,
db_filename,
create_table,
signature,
commit_periodicity=commit_periodicity)
if self.existing_table:
cursor = self.primary_conn.cursor()
cursor.execute('select count(*) from result')
self.existing_count = int(cursor.fetchone()[0])
else:
self.existing_count = 0
self.count = self.existing_count
def _StoreEntity(self, entity_id, entity):
"""Store an entity in the result database.
Args:
entity_id: A datastore.Key for the entity.
entity: The entity to store.
Returns:
      True if this entity was not already present in the result database.
"""
assert _RunningInThread(self.secondary_thread)
assert isinstance(entity_id, datastore.Key), (
'expected a datastore.Key, got a %s' % entity_id.__class__.__name__)
key_str = buffer(KeyStr(entity_id).encode('utf-8'))
self.insert_cursor.execute(
'select count(*) from result where id = ?', (key_str,))
already_present = self.insert_cursor.fetchone()[0]
result = True
if already_present:
result = False
self.insert_cursor.execute('delete from result where id = ?',
(key_str,))
else:
self.count += 1
value = entity.Encode()
self.insert_cursor.execute(
'insert into result (id, value) values (?, ?)',
(key_str, buffer(value)))
return result
def StoreEntities(self, keys, entities):
"""Store a group of entities in the result database.
Args:
keys: A list of entity keys.
entities: A list of entities.
Returns:
The number of new entities stored in the result database.
"""
self._OpenSecondaryConnection()
t = time.time()
count = 0
for entity_id, entity in zip(keys, entities):
if self._StoreEntity(entity_id, entity):
count += 1
logger.debug('%s insert: delta=%.3f',
self.db_filename,
time.time() - t)
logger.debug('Entities transferred total: %s', self.count)
self._MaybeCommit()
return count
def ResultsComplete(self):
"""Marks the result database as containing complete results."""
self.complete = True
def AllEntities(self):
"""Yields all pairs of (id, value) from the result table."""
conn = sqlite3.connect(self.db_filename, isolation_level=None)
cursor = conn.cursor()
cursor.execute(
'select id, value from result order by id')
for unused_entity_id, entity in cursor:
entity_proto = entity_pb.EntityProto(contents=entity)
yield datastore.Entity._FromPb(entity_proto)
class _ProgressDatabase(_Database):
"""Persistently record all progress information during an upload.
This class wraps a very simple SQLite database which records each of
the relevant details from a chunk of work. If the loader is
resumed, then data is replayed out of the database.
"""
def __init__(self,
db_filename,
sql_type,
py_type,
signature,
commit_periodicity=100):
"""Initialize the ProgressDatabase instance.
Args:
db_filename: The name of the SQLite database to use.
sql_type: A string of the SQL type to use for entity keys.
py_type: The python type of entity keys.
signature: A string identifying the important invocation options,
used to make sure we are not using an old database.
commit_periodicity: How many operations to perform between commits.
"""
self.prior_key_end = None
create_table = ('create table progress (\n'
'id integer primary key autoincrement,\n'
'state integer not null,\n'
'key_start %s,\n'
'key_end %s)'
% (sql_type, sql_type))
self.py_type = py_type
index = 'create index i_state on progress (state)'
_Database.__init__(self,
db_filename,
create_table,
signature,
index=index,
commit_periodicity=commit_periodicity)
def UseProgressData(self):
"""Returns True if the database has progress information.
Note there are two basic cases for progress information:
1) All saved records indicate a successful upload. In this case, we
need to skip everything transmitted so far and then send the rest.
2) Some records for incomplete transfer are present. These need to be
sent again, and then we resume sending after all the successful
data.
Returns:
True: if the database has progress information.
Raises:
ResumeError: if there is an error retrieving rows from the database.
"""
assert _RunningInThread(self.primary_thread)
cursor = self.primary_conn.cursor()
cursor.execute('select count(*) from progress')
row = cursor.fetchone()
if row is None:
raise ResumeError('Cannot retrieve progress information from database.')
return row[0] != 0
def StoreKeys(self, key_start, key_end):
"""Record a new progress record, returning a key for later updates.
The specified progress information will be persisted into the database.
A unique key will be returned that identifies this progress state. The
key is later used to (quickly) update this record.
For the progress resumption to proceed properly, calls to StoreKeys
MUST specify monotonically increasing key ranges. This will result in
a database whereby the ID, KEY_START, and KEY_END rows are all
increasing (rather than having ranges out of order).
NOTE: the above precondition is NOT tested by this method (since it
would imply an additional table read or two on each invocation).
Args:
key_start: The starting key of the WorkItem (inclusive)
key_end: The end key of the WorkItem (inclusive)
Returns:
A string to later be used as a unique key to update this state.
"""
self._OpenSecondaryConnection()
assert _RunningInThread(self.secondary_thread)
assert (not key_start) or isinstance(key_start, self.py_type), (
'%s is a %s, %s expected %s' % (key_start,
key_start.__class__,
self.__class__.__name__,
self.py_type))
assert (not key_end) or isinstance(key_end, self.py_type), (
'%s is a %s, %s expected %s' % (key_end,
key_end.__class__,
self.__class__.__name__,
self.py_type))
assert KeyLEQ(key_start, key_end), '%s not less than %s' % (
repr(key_start), repr(key_end))
self.insert_cursor.execute(
'insert into progress (state, key_start, key_end) values (?, ?, ?)',
(STATE_READ, unicode(key_start), unicode(key_end)))
progress_key = self.insert_cursor.lastrowid
self._MaybeCommit()
return progress_key
def UpdateState(self, key, new_state):
"""Update a specified progress record with new information.
Args:
key: The key for this progress record, returned from StoreKeys
new_state: The new state to associate with this progress record.
"""
self._OpenSecondaryConnection()
assert _RunningInThread(self.secondary_thread)
assert isinstance(new_state, int)
self.update_cursor.execute('update progress set state=? where id=?',
(new_state, key))
self._MaybeCommit()
def DeleteKey(self, progress_key):
"""Delete the entities with the given key from the result database."""
self._OpenSecondaryConnection()
assert _RunningInThread(self.secondary_thread)
t = time.time()
self.insert_cursor.execute(
'delete from progress where rowid = ?', (progress_key,))
logger.debug('delete: delta=%.3f', time.time() - t)
self._MaybeCommit()
def GetProgressStatusGenerator(self):
"""Get a generator which yields progress information.
The returned generator will yield a series of 4-tuples that specify
progress information about a prior run of the uploader. The 4-tuples
have the following values:
progress_key: The unique key to later update this record with new
progress information.
state: The last state saved for this progress record.
key_start: The starting key of the items for uploading (inclusive).
key_end: The ending key of the items for uploading (inclusive).
After all incompletely-transferred records are provided, then one
more 4-tuple will be generated:
None
DATA_CONSUMED_TO_HERE: A unique string value indicating this record
is being provided.
None
key_end: An integer value specifying the last data source key that
was handled by the previous run of the uploader.
The caller should begin uploading records which occur after key_end.
Yields:
Four-tuples of (progress_key, state, key_start, key_end)
"""
conn = sqlite3.connect(self.db_filename, isolation_level=None)
cursor = conn.cursor()
cursor.execute('select max(key_end) from progress')
result = cursor.fetchone()
if result is not None:
key_end = result[0]
else:
logger.debug('No rows in progress database.')
return
self.prior_key_end = key_end
cursor.execute(
'select id, state, key_start, key_end from progress'
' where state != ?'
' order by id',
(STATE_SENT,))
rows = cursor.fetchall()
for row in rows:
if row is None:
break
progress_key, state, key_start, key_end = row
yield progress_key, state, key_start, key_end
yield None, DATA_CONSUMED_TO_HERE, None, key_end
def ProgressDatabase(db_filename, signature):
"""Returns a database to store upload progress information."""
return _ProgressDatabase(db_filename, 'INTEGER', int, signature)
class ExportProgressDatabase(_ProgressDatabase):
"""A database to store download progress information."""
def __init__(self, db_filename, signature):
"""Initialize an ExportProgressDatabase."""
_ProgressDatabase.__init__(self,
db_filename,
'TEXT',
datastore.Key,
signature,
commit_periodicity=1)
def UseProgressData(self):
"""Check if the progress database contains progress data.
Returns:
True: if the database contains progress data.
"""
return self.existing_table
class StubProgressDatabase(object):
"""A stub implementation of ProgressDatabase which does nothing."""
def UseProgressData(self):
"""Whether the stub database has progress information (it doesn't)."""
return False
def StoreKeys(self, unused_key_start, unused_key_end):
"""Pretend to store a key in the stub database."""
return 'fake-key'
def UpdateState(self, unused_key, unused_new_state):
"""Pretend to update the state of a progress item."""
pass
def ThreadComplete(self):
"""Finalize operations on the stub database (i.e. do nothing)."""
pass
class _ProgressThreadBase(_ThreadBase):
"""A thread which records progress information for the upload process.
The progress information is stored into the provided progress database.
This class is not responsible for replaying a prior run's progress
information out of the database. Separate mechanisms must be used to
resume a prior upload attempt.
"""
NAME = 'progress tracking thread'
def __init__(self, progress_queue, progress_db):
"""Initialize the ProgressTrackerThread instance.
Args:
progress_queue: A Queue used for tracking progress information.
progress_db: The database for tracking progress information; should
be an instance of ProgressDatabase.
"""
_ThreadBase.__init__(self)
self.progress_queue = progress_queue
self.db = progress_db
self.entities_transferred = 0
def EntitiesTransferred(self):
"""Return the total number of unique entities transferred."""
return self.entities_transferred
def UpdateProgress(self, item):
"""Updates the progress information for the given item.
Args:
item: A work item whose new state will be recorded
"""
raise NotImplementedError()
def WorkFinished(self):
"""Performs final actions after the entity transfer is complete."""
raise NotImplementedError()
def PerformWork(self):
"""Performs the work of a ProgressTrackerThread."""
while not self.exit_flag:
try:
item = self.progress_queue.get(block=True, timeout=1.0)
except Queue.Empty:
continue
if item == _THREAD_SHOULD_EXIT:
break
if item.state == STATE_READ and item.progress_key is None:
item.progress_key = self.db.StoreKeys(item.key_start, item.key_end)
else:
assert item.progress_key is not None
self.UpdateProgress(item)
item.progress_event.set()
self.progress_queue.task_done()
self.db.ThreadComplete()
class ProgressTrackerThread(_ProgressThreadBase):
"""A thread which records progress information for the upload process.
The progress information is stored into the provided progress database.
This class is not responsible for replaying a prior run's progress
information out of the database. Separate mechanisms must be used to
resume a prior upload attempt.
"""
NAME = 'progress tracking thread'
def __init__(self, progress_queue, progress_db):
"""Initialize the ProgressTrackerThread instance.
Args:
progress_queue: A Queue used for tracking progress information.
progress_db: The database for tracking progress information; should
be an instance of ProgressDatabase.
"""
_ProgressThreadBase.__init__(self, progress_queue, progress_db)
def UpdateProgress(self, item):
"""Update the state of the given WorkItem.
Args:
item: A WorkItem instance.
"""
self.db.UpdateState(item.progress_key, item.state)
if item.state == STATE_SENT:
self.entities_transferred += item.count
def WorkFinished(self):
"""Performs final actions after the entity transfer is complete."""
pass
class ExportProgressThread(_ProgressThreadBase):
"""A thread to record progress information and write record data for exports.
The progress information is stored into a provided progress database.
Exported results are stored in the result database and dumped to an output
file at the end of the download.
"""
def __init__(self, kind, progress_queue, progress_db, result_db):
"""Initialize the ExportProgressThread instance.
Args:
kind: The kind of entities being stored in the database.
progress_queue: A Queue used for tracking progress information.
progress_db: The database for tracking progress information; should
be an instance of ProgressDatabase.
result_db: The database for holding exported entities; should be an
instance of ResultDatabase.
"""
_ProgressThreadBase.__init__(self, progress_queue, progress_db)
self.kind = kind
self.existing_count = result_db.existing_count
self.result_db = result_db
def EntitiesTransferred(self):
"""Return the total number of unique entities transferred."""
return self.result_db.count
def WorkFinished(self):
"""Write the contents of the result database."""
exporter = Exporter.RegisteredExporter(self.kind)
exporter.output_entities(self.result_db.AllEntities())
def UpdateProgress(self, item):
"""Update the state of the given KeyRangeItem.
Args:
item: A KeyRange instance.
"""
if item.state == STATE_GOT:
count = self.result_db.StoreEntities(item.download_result.keys,
item.download_result.entities)
self.db.DeleteKey(item.progress_key)
self.entities_transferred += count
else:
self.db.UpdateState(item.progress_key, item.state)
class MapperProgressThread(_ProgressThreadBase):
"""A thread to record progress information for maps over the datastore."""
def __init__(self, kind, progress_queue, progress_db):
"""Initialize the MapperProgressThread instance.
Args:
kind: The kind of entities being stored in the database.
progress_queue: A Queue used for tracking progress information.
progress_db: The database for tracking progress information; should
be an instance of ProgressDatabase.
"""
_ProgressThreadBase.__init__(self, progress_queue, progress_db)
self.kind = kind
self.mapper = Mapper.RegisteredMapper(self.kind)
def EntitiesTransferred(self):
"""Return the total number of unique entities transferred."""
return self.entities_transferred
def WorkFinished(self):
"""Perform actions after map is complete."""
pass
def UpdateProgress(self, item):
"""Update the state of the given KeyRangeItem.
Args:
item: A KeyRange instance.
"""
if item.state == STATE_GOT:
self.entities_transferred += item.count
self.db.DeleteKey(item.progress_key)
else:
self.db.UpdateState(item.progress_key, item.state)
def ParseKey(key_string):
"""Turn a key stored in the database into a Key or None.
Args:
key_string: The string representation of a Key.
Returns:
A datastore.Key instance or None
"""
if not key_string:
return None
if key_string == 'None':
return None
return datastore.Key(encoded=key_string)
def Validate(value, typ):
"""Checks that value is non-empty and of the right type.
Args:
value: any value
typ: a type or tuple of types
Raises:
ValueError: if value is None or empty.
TypeError: if it's not the given type.
"""
if not value:
raise ValueError('Value should not be empty; received %s.' % value)
elif not isinstance(value, typ):
raise TypeError('Expected a %s, but received %s (a %s).' %
(typ, value, value.__class__))
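# Illustrative sketch: how Validate behaves for good and bad input. This is
# plain Python with no datastore access; the literal values are arbitrary
# examples.
def _ExampleValidateUsage():
  """Demonstrates Validate accepting and rejecting values."""
  Validate('Person', basestring)  # non-empty value of the right type passes
  try:
    Validate('', basestring)      # empty values raise ValueError
  except ValueError:
    pass
  try:
    Validate(123, basestring)     # values of the wrong type raise TypeError
  except TypeError:
    pass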
def CheckFile(filename):
"""Check that the given file exists and can be opened for reading.
Args:
filename: The name of the file.
Raises:
FileNotFoundError: if the given filename is not found
FileNotReadableError: if the given filename is not readable.
"""
if not os.path.exists(filename):
raise FileNotFoundError('%s: file not found' % filename)
elif not os.access(filename, os.R_OK):
raise FileNotReadableError('%s: file not readable' % filename)
class Loader(object):
"""A base class for creating datastore entities from input data.
To add a handler for bulk loading a new entity kind into your datastore,
write a subclass of this class that calls Loader.__init__ from your
class's __init__.
If you need to run extra code to convert entities from the input
data, create new properties, or otherwise modify the entities before
they're inserted, override handle_entity.
See the create_entity method for the creation of entities from the
(parsed) input data.
"""
__loaders = {}
kind = None
__properties = None
def __init__(self, kind, properties):
"""Constructor.
Populates this Loader's kind and properties map.
Args:
kind: a string containing the entity kind that this loader handles
properties: list of (name, converter) tuples.
This is used to automatically convert the input columns into
properties. The converter should be a function that takes one
argument, a string value from the input file, and returns a
correctly typed property value that should be inserted. The
tuples in this list should match the columns in your input file,
in order.
For example:
[('name', str),
('id_number', int),
('email', datastore_types.Email),
('user', users.User),
('birthdate', lambda x: datetime.datetime.fromtimestamp(float(x))),
('description', datastore_types.Text),
]
"""
Validate(kind, (basestring, tuple))
self.kind = kind
self.__openfile = open
self.__create_csv_reader = csv.reader
GetImplementationClass(kind)
Validate(properties, list)
for name, fn in properties:
Validate(name, basestring)
assert callable(fn), (
'Conversion function %s for property %s is not callable.' % (fn, name))
self.__properties = properties
@staticmethod
def RegisterLoader(loader):
"""Register loader and the Loader instance for its kind.
Args:
loader: A Loader instance.
"""
Loader.__loaders[loader.kind] = loader
def alias_old_names(self):
"""Aliases method names so that Loaders defined with old names work."""
aliases = (
('CreateEntity', 'create_entity'),
('HandleEntity', 'handle_entity'),
('GenerateKey', 'generate_key'),
)
for old_name, new_name in aliases:
setattr(Loader, old_name, getattr(Loader, new_name))
if hasattr(self.__class__, old_name) and not (
getattr(self.__class__, old_name).im_func ==
getattr(Loader, new_name).im_func):
if hasattr(self.__class__, new_name) and not (
getattr(self.__class__, new_name).im_func ==
getattr(Loader, new_name).im_func):
raise NameClashError(old_name, new_name, self.__class__)
setattr(self, new_name, getattr(self, old_name))
def create_entity(self, values, key_name=None, parent=None):
"""Creates a entity from a list of property values.
Args:
values: list/tuple of str
key_name: if provided, the name for the (single) resulting entity
parent: A datastore.Key instance for the parent, or None
Returns:
list of db.Model
The returned entities are populated with the property values from the
argument, converted to native types using the properties map given in
the constructor, and passed through handle_entity. They're ready to be
inserted.
Raises:
AssertionError: if the number of values doesn't match the number
of properties in the properties map.
ValueError: if any element of values is None or empty.
TypeError: if values is not a list or tuple.
"""
Validate(values, (list, tuple))
assert len(values) == len(self.__properties), (
'Expected %d columns, found %d.' %
(len(self.__properties), len(values)))
model_class = GetImplementationClass(self.kind)
properties = {
'key_name': key_name,
'parent': parent,
}
for (name, converter), val in zip(self.__properties, values):
if converter is bool and val.lower() in ('0', 'false', 'no'):
val = False
properties[name] = converter(val)
entity = model_class(**properties)
entities = self.handle_entity(entity)
if entities:
if not isinstance(entities, (list, tuple)):
entities = [entities]
for entity in entities:
if not isinstance(entity, db.Model):
raise TypeError('Expected a db.Model, received %s (a %s).' %
(entity, entity.__class__))
return entities
def generate_key(self, i, values):
"""Generates a key_name to be used in creating the underlying object.
The default implementation returns None.
This method can be overridden to control the key generation for
uploaded entities. The value returned should be None (to use a
server generated numeric key), or a string which neither starts
with a digit nor has the form __*__ (see
http://code.google.com/appengine/docs/python/datastore/keysandentitygroups.html),
or a datastore.Key instance.
If you generate your own string keys, keep in mind:
1. The key name for each entity must be unique.
2. If an entity of the same kind and key already exists in the
datastore, it will be overwritten.
Args:
i: Number corresponding to this object (assuming this is run in a loop,
this is your current count).
values: list/tuple of str.
Returns:
A string to be used as the key_name for an entity.
"""
return None
def handle_entity(self, entity):
"""Subclasses can override this to add custom entity conversion code.
This is called for each entity, after its properties are populated
from the input but before it is stored. Subclasses can override
this to add custom entity handling code.
The entity to be inserted should be returned. If multiple entities
should be inserted, return a list of entities. If no entities
should be inserted, return None or [].
Args:
entity: db.Model
Returns:
db.Model or list of db.Model
"""
return entity
def initialize(self, filename, loader_opts):
"""Performs initialization and validation of the input file.
This implementation checks that the input file exists and can be
opened for reading.
Args:
filename: The string given as the --filename flag argument.
loader_opts: The string given as the --loader_opts flag argument.
"""
CheckFile(filename)
def finalize(self):
"""Performs finalization actions after the upload completes."""
pass
def generate_records(self, filename):
"""Subclasses can override this to add custom data input code.
This method must yield fixed-length lists of strings.
The default implementation uses csv.reader to read CSV rows
from filename.
Args:
filename: The string input for the --filename option.
Yields:
Lists of strings.
"""
csv_generator = CSVGenerator(filename, openfile=self.__openfile,
create_csv_reader=self.__create_csv_reader
).Records()
return csv_generator
@staticmethod
def RegisteredLoaders():
"""Returns a dict of the Loader instances that have been created."""
return dict(Loader.__loaders)
@staticmethod
def RegisteredLoader(kind):
"""Returns the loader instance for the given kind if it exists."""
return Loader.__loaders[kind]
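# Illustrative sketch: a hypothetical Loader subclass for a 'Person' kind.
# The kind name, property names, and converters are assumptions; creating an
# instance additionally requires a matching db.Model (or registered kind)
# named 'Person' in the application code. A bulkloader config file would
# expose it via a module-level list such as: loaders = [_ExamplePersonLoader]
class _ExamplePersonLoader(Loader):
  """Loads hypothetical 'Person' rows of the form: name,age."""
  def __init__(self):
    Loader.__init__(self, 'Person',
                    [('name', str),
                     ('age', int),
                    ])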
class RestoreThread(_ThreadBase):
"""A thread to read saved entity_pbs from sqlite3."""
NAME = 'RestoreThread'
_ENTITIES_DONE = 'Entities Done'
def __init__(self, queue, filename):
_ThreadBase.__init__(self)
self.queue = queue
self.filename = filename
def PerformWork(self):
db_conn = sqlite3.connect(self.filename)
cursor = db_conn.cursor()
cursor.execute('select id, value from result')
for entity_id, value in cursor:
self.queue.put([entity_id, value], block=True)
self.queue.put(RestoreThread._ENTITIES_DONE, block=True)
class RestoreLoader(Loader):
"""A Loader which imports protobuffers from a file."""
def __init__(self, kind, app_id):
self.kind = kind
self.app_id = app_id
def initialize(self, filename, loader_opts):
CheckFile(filename)
self.queue = Queue.Queue(1000)
restore_thread = RestoreThread(self.queue, filename)
restore_thread.start()
def generate_records(self, filename):
while True:
record = self.queue.get(block=True)
if id(record) == id(RestoreThread._ENTITIES_DONE):
break
yield record
def create_entity(self, values, key_name=None, parent=None):
def convert_key(key, app_id):
path = key.to_path()
kwargs = {'_app_id_namespace': app_id}
return db.Key.from_path(*path,**kwargs)
import copy
key = StrKey(unicode(values[0], 'utf-8'))
entity_proto = entity_pb.EntityProto(contents=str(values[1]))
entity_proto.mutable_key().CopyFrom(key._Key__reference)
entity = datastore.Entity._FromPb(entity_proto)
new_entity = copy.copy(entity)
for k,v in entity.iteritems():
if isinstance(v, db.Key):
new_entity[k] = convert_key(v, self.app_id)
if isinstance(v, list):
new_list = []
for item in v:
if isinstance(item, db.Key):
new_list.append(convert_key(item, self.app_id))
else:
new_list.append(item)
new_entity[k] = new_list
return new_entity
class Exporter(object):
"""A base class for serializing datastore entities.
To add a handler for exporting an entity kind from your datastore,
write a subclass of this class that calls Exporter.__init__ from your
class's __init__.
If you need to run extra code to convert or filter property values
before they are written, supply appropriate conversion functions in the
properties list passed to Exporter.__init__.
See the output_entities method for the writing of data from entities.
"""
__exporters = {}
kind = None
__properties = None
def __init__(self, kind, properties):
"""Constructor.
Populates this Exporter's kind and properties map.
Args:
kind: a string containing the entity kind that this exporter handles
properties: list of (name, converter, default) tuples.
This is used to automatically convert the entities to strings.
The converter should be a function that takes one argument, a property
value of the appropriate type, and returns a str or unicode. The default
is a string to be used if the property is not present, or None to fail
with an error if the property is missing.
For example:
[('name', str, None),
('id_number', str, None),
('email', str, ''),
('user', str, None),
('birthdate',
lambda x: str(datetime.datetime.fromtimestamp(float(x))),
None),
('description', str, ''),
]
"""
Validate(kind, basestring)
self.kind = kind
GetImplementationClass(kind)
Validate(properties, list)
for name, fn, default in properties:
Validate(name, basestring)
assert callable(fn), (
'Conversion function %s for property %s is not callable.' % (
fn, name))
if default:
Validate(default, basestring)
self.__properties = properties
@staticmethod
def RegisterExporter(exporter):
"""Register exporter and the Exporter instance for its kind.
Args:
exporter: A Exporter instance.
"""
Exporter.__exporters[exporter.kind] = exporter
def __ExtractProperties(self, entity):
"""Converts an entity into a list of string values.
Args:
entity: An entity to extract the properties from.
Returns:
A list of the properties of the entity.
Raises:
MissingPropertyError: if an expected field on the entity is missing.
"""
encoding = []
for name, fn, default in self.__properties:
try:
encoding.append(fn(entity[name]))
except AttributeError:
if default is None:
raise MissingPropertyError(name)
else:
encoding.append(default)
return encoding
def __EncodeEntity(self, entity):
"""Convert the given entity into CSV string.
Args:
entity: The entity to encode.
Returns:
A CSV string.
"""
output = StringIO.StringIO()
writer = csv.writer(output, lineterminator='')
writer.writerow(self.__ExtractProperties(entity))
return output.getvalue()
def __SerializeEntity(self, entity):
"""Creates a string representation of an entity.
Args:
entity: The entity to serialize.
Returns:
A serialized representation of an entity.
"""
encoding = self.__EncodeEntity(entity)
if not isinstance(encoding, unicode):
encoding = unicode(encoding, 'utf-8')
encoding = encoding.encode('utf-8')
return encoding
def output_entities(self, entity_generator):
"""Outputs the downloaded entities.
This implementation writes CSV.
Args:
entity_generator: A generator that yields the downloaded entities
in key order.
"""
CheckOutputFile(self.output_filename)
output_file = open(self.output_filename, 'w')
logger.debug('Export complete, writing to file')
output_file.writelines(self.__SerializeEntity(entity) + '\n'
for entity in entity_generator)
def initialize(self, filename, exporter_opts):
"""Performs initialization and validation of the output file.
This implementation checks that the input file exists and can be
opened for writing.
Args:
filename: The string given as the --filename flag argument.
exporter_opts: The string given as the --exporter_opts flag argument.
"""
CheckOutputFile(filename)
self.output_filename = filename
def finalize(self):
"""Performs finalization actions after the download completes."""
pass
@staticmethod
def RegisteredExporters():
"""Returns a dictionary of the exporter instances that have been created."""
return dict(Exporter.__exporters)
@staticmethod
def RegisteredExporter(kind):
"""Returns an exporter instance for the given kind if it exists."""
return Exporter.__exporters[kind]
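# Illustrative sketch: a hypothetical Exporter subclass mirroring the loader
# example above. The kind name and property tuples are assumptions; with
# these defaults a missing 'name' aborts the export while a missing 'age'
# falls back to ''. A config file would expose it via:
# exporters = [_ExamplePersonExporter]
class _ExamplePersonExporter(Exporter):
  """Writes hypothetical 'Person' entities as CSV rows of the form: name,age."""
  def __init__(self):
    Exporter.__init__(self, 'Person',
                      [('name', str, None),
                       ('age', str, ''),
                      ])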
class DumpExporter(Exporter):
"""An exporter which dumps protobuffers to a file."""
def __init__(self, kind, result_db_filename):
self.kind = kind
self.result_db_filename = result_db_filename
def output_entities(self, entity_generator):
shutil.copyfile(self.result_db_filename, self.output_filename)
class MapperRetry(Error):
"""An exception that indicates a non-fatal error during mapping."""
class Mapper(object):
"""A base class for serializing datastore entities.
To add a handler for mapping over an entity kind in your datastore,
write a subclass of this class that calls Mapper.__init__ from your
class's __init__.
You need to implement the batch_apply or apply method on your subclass
for the map to do anything.
"""
__mappers = {}
kind = None
def __init__(self, kind):
"""Constructor.
Populates this Mapper's kind.
Args:
kind: a string containing the entity kind that this mapper handles
"""
Validate(kind, basestring)
self.kind = kind
GetImplementationClass(kind)
@staticmethod
def RegisterMapper(mapper):
"""Register mapper and the Mapper instance for its kind.
Args:
mapper: A Mapper instance.
"""
Mapper.__mappers[mapper.kind] = mapper
def initialize(self, mapper_opts):
"""Performs initialization.
Args:
mapper_opts: The string given as the --mapper_opts flag argument.
"""
pass
def finalize(self):
"""Performs finalization actions after the download completes."""
pass
def apply(self, entity):
print 'Default map function doing nothing to %s' % entity
def batch_apply(self, entities):
for entity in entities:
self.apply(entity)
@staticmethod
def RegisteredMappers():
"""Returns a dictionary of the mapper instances that have been created."""
return dict(Mapper.__mappers)
@staticmethod
def RegisteredMapper(kind):
"""Returns an mapper instance for the given kind if it exists."""
return Mapper.__mappers[kind]
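# Illustrative sketch: a hypothetical Mapper subclass for the same 'Person'
# kind used in the examples above. The per-entity work here only logs; a
# real mapper would inspect or modify the entity. A config file would expose
# it via: mappers = [_ExamplePersonMapper]
class _ExamplePersonMapper(Mapper):
  """Applies a trivial function to each hypothetical 'Person' entity."""
  def __init__(self):
    Mapper.__init__(self, 'Person')
  def apply(self, entity):
    logger.info('Visited entity: %s', entity)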
class QueueJoinThread(threading.Thread):
"""A thread that joins a queue and exits.
Queue joins do not have a timeout. To simulate a queue join with
timeout, run this thread and join it with a timeout.
"""
def __init__(self, queue):
"""Initialize a QueueJoinThread.
Args:
queue: The queue for this thread to join.
"""
threading.Thread.__init__(self)
assert isinstance(queue, (Queue.Queue, ReQueue))
self.queue = queue
def run(self):
"""Perform the queue join in this thread."""
self.queue.join()
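# Illustrative sketch: the timed-join pattern QueueJoinThread enables.
# Queue.Queue.join() itself has no timeout, but joining this thread does;
# the queue contents here are arbitrary.
def _ExampleTimedQueueJoin():
  """Returns True once a fully processed queue's join completes."""
  queue = Queue.Queue()
  queue.put('work item')
  joiner = QueueJoinThread(queue)
  joiner.start()
  queue.get()
  queue.task_done()            # the queue is now fully processed
  joiner.join(timeout=1.0)     # bounded wait for the underlying queue join
  return not joiner.isAlive()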
def InterruptibleQueueJoin(queue,
thread_local,
thread_pool,
queue_join_thread_factory=QueueJoinThread,
check_workers=True):
"""Repeatedly joins the given ReQueue or Queue.Queue with short timeout.
Between each timeout on the join, worker threads are checked.
Args:
queue: A Queue.Queue or ReQueue instance.
thread_local: A threading.local instance which indicates interrupts.
thread_pool: An AdaptiveThreadPool instance.
queue_join_thread_factory: Used for dependency injection.
check_workers: Whether to interrupt the join on worker death.
Returns:
True unless the queue join is interrupted by SIGINT or worker death.
"""
thread = queue_join_thread_factory(queue)
thread.start()
while True:
thread.join(timeout=.5)
if not thread.isAlive():
return True
if thread_local.shut_down:
logger.debug('Queue join interrupted')
return False
if check_workers:
for worker_thread in thread_pool.Threads():
if not worker_thread.isAlive():
return False
def ShutdownThreads(data_source_thread, thread_pool):
"""Shuts down the worker and data source threads.
Args:
data_source_thread: A running DataSourceThread instance.
thread_pool: An AdaptiveThreadPool instance with workers registered.
"""
logger.info('An error occurred. Shutting down...')
data_source_thread.exit_flag = True
thread_pool.Shutdown()
data_source_thread.join(timeout=3.0)
if data_source_thread.isAlive():
logger.warn('%s hung while trying to exit',
data_source_thread.GetFriendlyName())
class BulkTransporterApp(object):
"""Class to wrap bulk transport application functionality."""
def __init__(self,
arg_dict,
input_generator_factory,
throttle,
progress_db,
progresstrackerthread_factory,
max_queue_size=DEFAULT_QUEUE_SIZE,
request_manager_factory=RequestManager,
datasourcethread_factory=DataSourceThread,
progress_queue_factory=Queue.Queue,
thread_pool_factory=adaptive_thread_pool.AdaptiveThreadPool):
"""Instantiate a BulkTransporterApp.
Uploads or downloads data to or from application using HTTP requests.
When run, the class will spin up a number of threads to read entities
from the data source, pass those to a number of worker threads
for sending to the application, and track all of the progress in a
small database in case an error or pause/termination requires a
restart/resumption of the upload process.
Args:
arg_dict: Dictionary of command line options.
input_generator_factory: A factory that creates a WorkItem generator.
throttle: A Throttle instance.
progress_db: The database to use for replaying/recording progress.
progresstrackerthread_factory: Used for dependency injection.
max_queue_size: Maximum size of the queues before they should block.
request_manager_factory: Used for dependency injection.
datasourcethread_factory: Used for dependency injection.
progress_queue_factory: Used for dependency injection.
thread_pool_factory: Used for dependency injection.
"""
self.app_id = arg_dict['app_id']
self.post_url = arg_dict['url']
self.kind = arg_dict['kind']
self.batch_size = arg_dict['batch_size']
self.input_generator_factory = input_generator_factory
self.num_threads = arg_dict['num_threads']
self.email = arg_dict['email']
self.passin = arg_dict['passin']
self.dry_run = arg_dict['dry_run']
self.throttle = throttle
self.progress_db = progress_db
self.progresstrackerthread_factory = progresstrackerthread_factory
self.max_queue_size = max_queue_size
self.request_manager_factory = request_manager_factory
self.datasourcethread_factory = datasourcethread_factory
self.progress_queue_factory = progress_queue_factory
self.thread_pool_factory = thread_pool_factory
(scheme,
self.host_port, self.url_path,
unused_query, unused_fragment) = urlparse.urlsplit(self.post_url)
self.secure = (scheme == 'https')
def Run(self):
"""Perform the work of the BulkTransporterApp.
Raises:
AuthenticationError: If authentication is required and fails.
Returns:
Error code suitable for sys.exit, e.g. 0 on success, 1 on failure.
"""
self.error = False
thread_pool = self.thread_pool_factory(
self.num_threads, queue_size=self.max_queue_size)
self.throttle.Register(threading.currentThread())
threading.currentThread().exit_flag = False
progress_queue = self.progress_queue_factory(self.max_queue_size)
request_manager = self.request_manager_factory(self.app_id,
self.host_port,
self.url_path,
self.kind,
self.throttle,
self.batch_size,
self.secure,
self.email,
self.passin,
self.dry_run)
try:
request_manager.Authenticate()
except Exception, e:
self.error = True
if not isinstance(e, urllib2.HTTPError) or (
e.code != 302 and e.code != 401):
logger.exception('Exception during authentication')
raise AuthenticationError()
if (request_manager.auth_called and
not request_manager.authenticated):
self.error = True
raise AuthenticationError('Authentication failed')
for thread in thread_pool.Threads():
self.throttle.Register(thread)
self.progress_thread = self.progresstrackerthread_factory(
progress_queue, self.progress_db)
if self.progress_db.UseProgressData():
logger.debug('Restarting upload using progress database')
progress_generator_factory = self.progress_db.GetProgressStatusGenerator
else:
progress_generator_factory = None
self.data_source_thread = (
self.datasourcethread_factory(request_manager,
thread_pool,
progress_queue,
self.input_generator_factory,
progress_generator_factory))
thread_local = threading.local()
thread_local.shut_down = False
def Interrupt(unused_signum, unused_frame):
"""Shutdown gracefully in response to a signal."""
thread_local.shut_down = True
self.error = True
signal.signal(signal.SIGINT, Interrupt)
self.progress_thread.start()
self.data_source_thread.start()
while not thread_local.shut_down:
self.data_source_thread.join(timeout=0.25)
if self.data_source_thread.isAlive():
for thread in list(thread_pool.Threads()) + [self.progress_thread]:
if not thread.isAlive():
logger.info('Unexpected thread death: %s', thread.getName())
thread_local.shut_down = True
self.error = True
break
else:
break
def _Join(ob, msg):
logger.debug('Waiting for %s...', msg)
if isinstance(ob, threading.Thread):
ob.join(timeout=3.0)
if ob.isAlive():
logger.debug('Joining %s failed', ob)
else:
logger.debug('... done.')
elif isinstance(ob, (Queue.Queue, ReQueue)):
if not InterruptibleQueueJoin(ob, thread_local, thread_pool):
ShutdownThreads(self.data_source_thread, thread_pool)
else:
ob.join()
logger.debug('... done.')
if self.data_source_thread.error or thread_local.shut_down:
ShutdownThreads(self.data_source_thread, thread_pool)
else:
_Join(thread_pool.requeue, 'worker threads to finish')
thread_pool.Shutdown()
thread_pool.JoinThreads()
thread_pool.CheckErrors()
print ''
if self.progress_thread.isAlive():
InterruptibleQueueJoin(progress_queue, thread_local, thread_pool,
check_workers=False)
else:
logger.warn('Progress thread exited prematurely')
progress_queue.put(_THREAD_SHOULD_EXIT)
_Join(self.progress_thread, 'progress_thread to terminate')
self.progress_thread.CheckError()
if not thread_local.shut_down:
self.progress_thread.WorkFinished()
self.data_source_thread.CheckError()
return self.ReportStatus()
def ReportStatus(self):
"""Display a message reporting the final status of the transfer."""
raise NotImplementedError()
class BulkUploaderApp(BulkTransporterApp):
"""Class to encapsulate bulk uploader functionality."""
def __init__(self, *args, **kwargs):
BulkTransporterApp.__init__(self, *args, **kwargs)
def ReportStatus(self):
"""Display a message reporting the final status of the transfer."""
total_up, duration = self.throttle.TotalTransferred(
remote_api_throttle.BANDWIDTH_UP)
s_total_up, unused_duration = self.throttle.TotalTransferred(
remote_api_throttle.HTTPS_BANDWIDTH_UP)
total_up += s_total_up
total = total_up
logger.info('%d entities total, %d previously transferred',
self.data_source_thread.read_count,
self.data_source_thread.xfer_count)
transfer_count = self.progress_thread.EntitiesTransferred()
logger.info('%d entities (%d bytes) transferred in %.1f seconds',
transfer_count, total, duration)
if (self.data_source_thread.read_all and
transfer_count +
self.data_source_thread.xfer_count >=
self.data_source_thread.read_count):
logger.info('All entities successfully transferred')
return 0
else:
logger.info('Some entities not successfully transferred')
return 1
class BulkDownloaderApp(BulkTransporterApp):
"""Class to encapsulate bulk downloader functionality."""
def __init__(self, *args, **kwargs):
BulkTransporterApp.__init__(self, *args, **kwargs)
def ReportStatus(self):
"""Display a message reporting the final status of the transfer."""
total_down, duration = self.throttle.TotalTransferred(
remote_api_throttle.BANDWIDTH_DOWN)
s_total_down, unused_duration = self.throttle.TotalTransferred(
remote_api_throttle.HTTPS_BANDWIDTH_DOWN)
total_down += s_total_down
total = total_down
existing_count = self.progress_thread.existing_count
xfer_count = self.progress_thread.EntitiesTransferred()
logger.info('Have %d entities, %d previously transferred',
xfer_count, existing_count)
logger.info('%d entities (%d bytes) transferred in %.1f seconds',
xfer_count, total, duration)
if self.error:
return 1
else:
return 0
class BulkMapperApp(BulkTransporterApp):
"""Class to encapsulate bulk map functionality."""
def __init__(self, *args, **kwargs):
BulkTransporterApp.__init__(self, *args, **kwargs)
def ReportStatus(self):
"""Display a message reporting the final status of the transfer."""
total_down, duration = self.throttle.TotalTransferred(
remote_api_throttle.BANDWIDTH_DOWN)
s_total_down, unused_duration = self.throttle.TotalTransferred(
remote_api_throttle.HTTPS_BANDWIDTH_DOWN)
total_down += s_total_down
total = total_down
xfer_count = self.progress_thread.EntitiesTransferred()
logger.info('The following may be inaccurate if any mapper tasks '
'encountered errors and had to be retried.')
logger.info('Applied mapper to %s entities.',
xfer_count)
logger.info('%s entities (%s bytes) transferred in %.1f seconds',
xfer_count, total, duration)
if self.error:
return 1
else:
return 0
def PrintUsageExit(code):
"""Prints usage information and exits with a status code.
Args:
code: Status code to pass to sys.exit() after displaying usage information.
"""
print __doc__ % {'arg0': sys.argv[0]}
sys.stdout.flush()
sys.stderr.flush()
sys.exit(code)
REQUIRED_OPTION = object()
FLAG_SPEC = ['debug',
'help',
'url=',
'filename=',
'batch_size=',
'kind=',
'num_threads=',
'bandwidth_limit=',
'rps_limit=',
'http_limit=',
'db_filename=',
'app_id=',
'config_file=',
'has_header',
'csv_has_header',
'auth_domain=',
'result_db_filename=',
'download',
'loader_opts=',
'exporter_opts=',
'log_file=',
'mapper_opts=',
'email=',
'passin',
'map',
'dry_run',
'dump',
'restore',
]
def ParseArguments(argv, die_fn=lambda: PrintUsageExit(1)):
"""Parses command-line arguments.
Prints out a help message if -h or --help is supplied.
Args:
argv: List of command-line arguments.
die_fn: Function to invoke to end the program.
Returns:
A dictionary containing the value of command-line options.
"""
opts, unused_args = getopt.getopt(
argv[1:],
'h',
FLAG_SPEC)
arg_dict = {}
arg_dict['url'] = REQUIRED_OPTION
arg_dict['filename'] = None
arg_dict['config_file'] = None
arg_dict['kind'] = None
arg_dict['batch_size'] = None
arg_dict['num_threads'] = DEFAULT_THREAD_COUNT
arg_dict['bandwidth_limit'] = DEFAULT_BANDWIDTH_LIMIT
arg_dict['rps_limit'] = DEFAULT_RPS_LIMIT
arg_dict['http_limit'] = DEFAULT_REQUEST_LIMIT
arg_dict['db_filename'] = None
arg_dict['app_id'] = ''
arg_dict['auth_domain'] = 'gmail.com'
arg_dict['has_header'] = False
arg_dict['result_db_filename'] = None
arg_dict['download'] = False
arg_dict['loader_opts'] = None
arg_dict['exporter_opts'] = None
arg_dict['debug'] = False
arg_dict['log_file'] = None
arg_dict['email'] = None
arg_dict['passin'] = False
arg_dict['mapper_opts'] = None
arg_dict['map'] = False
arg_dict['dry_run'] = False
arg_dict['dump'] = False
arg_dict['restore'] = False
def ExpandFilename(filename):
"""Expand shell variables and ~usernames in filename."""
return os.path.expandvars(os.path.expanduser(filename))
for option, value in opts:
if option == '--debug':
arg_dict['debug'] = True
elif option in ('-h', '--help'):
PrintUsageExit(0)
elif option == '--url':
arg_dict['url'] = value
elif option == '--filename':
arg_dict['filename'] = ExpandFilename(value)
elif option == '--batch_size':
arg_dict['batch_size'] = int(value)
elif option == '--kind':
arg_dict['kind'] = value
elif option == '--num_threads':
arg_dict['num_threads'] = int(value)
elif option == '--bandwidth_limit':
arg_dict['bandwidth_limit'] = int(value)
elif option == '--rps_limit':
arg_dict['rps_limit'] = int(value)
elif option == '--http_limit':
arg_dict['http_limit'] = int(value)
elif option == '--db_filename':
arg_dict['db_filename'] = ExpandFilename(value)
elif option == '--app_id':
arg_dict['app_id'] = value
elif option == '--config_file':
arg_dict['config_file'] = ExpandFilename(value)
elif option == '--auth_domain':
arg_dict['auth_domain'] = value
elif option == '--has_header':
arg_dict['has_header'] = True
elif option == '--csv_has_header':
print >>sys.stderr, ('--csv_has_header is deprecated, please use '
'--has_header.')
arg_dict['has_header'] = True
elif option == '--result_db_filename':
arg_dict['result_db_filename'] = ExpandFilename(value)
elif option == '--download':
arg_dict['download'] = True
elif option == '--loader_opts':
arg_dict['loader_opts'] = value
elif option == '--exporter_opts':
arg_dict['exporter_opts'] = value
elif option == '--log_file':
arg_dict['log_file'] = ExpandFilename(value)
elif option == '--email':
arg_dict['email'] = value
elif option == '--passin':
arg_dict['passin'] = True
elif option == '--map':
arg_dict['map'] = True
elif option == '--mapper_opts':
arg_dict['mapper_opts'] = value
elif option == '--dry_run':
arg_dict['dry_run'] = True
elif option == '--dump':
arg_dict['dump'] = True
elif option == '--restore':
arg_dict['restore'] = True
return ProcessArguments(arg_dict, die_fn=die_fn)
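# Illustrative sketch: a hypothetical command line and how it would be
# parsed. The URL, kind, and file names are placeholders, and the helper is
# not invoked at import time.
_EXAMPLE_ARGV = [
    'bulkloader.py',
    '--url=http://example-app.appspot.com/remote_api',
    '--kind=Person',
    '--filename=people.csv',
    '--config_file=person_loader.py',
]
def _ExampleParseArguments():
  """Returns the option dictionary for the example command line above."""
  return ParseArguments(_EXAMPLE_ARGV)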
def ThrottleLayout(bandwidth_limit, http_limit, rps_limit):
"""Return a dictionary indicating the throttle options."""
bulkloader_limits = dict(remote_api_throttle.NO_LIMITS)
bulkloader_limits.update({
remote_api_throttle.BANDWIDTH_UP: bandwidth_limit,
remote_api_throttle.BANDWIDTH_DOWN: bandwidth_limit,
remote_api_throttle.REQUESTS: http_limit,
remote_api_throttle.HTTPS_BANDWIDTH_UP: bandwidth_limit,
remote_api_throttle.HTTPS_BANDWIDTH_DOWN: bandwidth_limit,
remote_api_throttle.HTTPS_REQUESTS: http_limit,
remote_api_throttle.ENTITIES_FETCHED: rps_limit,
remote_api_throttle.ENTITIES_MODIFIED: rps_limit,
})
return bulkloader_limits
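# Illustrative sketch: building a Throttle from a layout, mirroring what
# _PerformBulkload does below. The numeric limits are arbitrary examples,
# not recommended values.
def _ExampleBuildThrottle():
  """Returns a Throttle configured from an example layout."""
  layout = ThrottleLayout(bandwidth_limit=250000, http_limit=8, rps_limit=20)
  return remote_api_throttle.Throttle(layout=layout)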
def CheckOutputFile(filename):
"""Check that the given file does not exist and can be opened for writing.
Args:
filename: The name of the file.
Raises:
FileExistsError: if the given filename already exists.
FileNotWritableError: if the directory of the given filename is not writable.
"""
full_path = os.path.abspath(filename)
if os.path.exists(full_path):
raise FileExistsError('%s: output file exists' % filename)
elif not os.access(os.path.dirname(full_path), os.W_OK):
raise FileNotWritableError(
'%s: not writable' % os.path.dirname(full_path))
def LoadConfig(config_file_name, exit_fn=sys.exit):
"""Loads a config file and registers any Loader classes present.
Args:
config_file_name: The name of the configuration file.
exit_fn: Used for dependency injection.
"""
if config_file_name:
config_file = open(config_file_name, 'r')
try:
bulkloader_config = imp.load_module(
'bulkloader_config', config_file, config_file_name,
('', 'r', imp.PY_SOURCE))
sys.modules['bulkloader_config'] = bulkloader_config
if hasattr(bulkloader_config, 'loaders'):
for cls in bulkloader_config.loaders:
Loader.RegisterLoader(cls())
if hasattr(bulkloader_config, 'exporters'):
for cls in bulkloader_config.exporters:
Exporter.RegisterExporter(cls())
if hasattr(bulkloader_config, 'mappers'):
for cls in bulkloader_config.mappers:
Mapper.RegisterMapper(cls())
except NameError, e:
m = re.search(r"[^']*'([^']*)'.*", str(e))
if m.groups() and m.group(1) == 'Loader':
print >>sys.stderr, """
The config file format has changed and you appear to be using an old-style
config file. Please make the following changes:
1. At the top of the file, add this:
from google.appengine.tools.bulkloader import Loader
2. For each of your Loader subclasses add the following at the end of the
__init__ definition:
self.alias_old_names()
3. At the bottom of the file, add this:
loaders = [MyLoader1,...,MyLoaderN]
Where MyLoader1,...,MyLoaderN are the Loader subclasses you want the bulkloader
to have access to.
"""
exit_fn(1)
else:
raise
except Exception, e:
if isinstance(e, NameClashError) or 'bulkloader_config' in vars() and (
hasattr(bulkloader_config, 'bulkloader') and
isinstance(e, bulkloader_config.bulkloader.NameClashError)):
print >> sys.stderr, (
'Found both %s and %s while aliasing old names on %s.'%
(e.old_name, e.new_name, e.klass))
exit_fn(1)
else:
raise
def GetArgument(kwargs, name, die_fn):
"""Get the value of the key name in kwargs, or die with die_fn.
Args:
kwargs: A dictionary containing the options for the bulkloader.
name: The name of a bulkloader option.
die_fn: The function to call to exit the program.
Returns:
The value of kwargs[name] if name is in kwargs.
"""
if name in kwargs:
return kwargs[name]
else:
print >>sys.stderr, '%s argument required' % name
die_fn()
def _MakeSignature(app_id=None,
url=None,
kind=None,
db_filename=None,
perform_map=None,
download=None,
has_header=None,
result_db_filename=None,
dump=None,
restore=None):
"""Returns a string that identifies the important options for the database."""
if download:
result_db_line = 'result_db: %s' % result_db_filename
else:
result_db_line = ''
return u"""
app_id: %s
url: %s
kind: %s
download: %s
map: %s
dump: %s
restore: %s
progress_db: %s
has_header: %s
%s
""" % (app_id, url, kind, download, perform_map, dump, restore, db_filename,
has_header, result_db_line)
def ProcessArguments(arg_dict,
die_fn=lambda: sys.exit(1)):
"""Processes non command-line input arguments.
Args:
arg_dict: Dictionary containing the values of bulkloader options.
die_fn: Function to call in case of an error during argument processing.
Returns:
A dictionary of bulkloader options.
"""
app_id = GetArgument(arg_dict, 'app_id', die_fn)
url = GetArgument(arg_dict, 'url', die_fn)
dump = GetArgument(arg_dict, 'dump', die_fn)
restore = GetArgument(arg_dict, 'restore', die_fn)
filename = GetArgument(arg_dict, 'filename', die_fn)
batch_size = GetArgument(arg_dict, 'batch_size', die_fn)
kind = GetArgument(arg_dict, 'kind', die_fn)
db_filename = GetArgument(arg_dict, 'db_filename', die_fn)
config_file = GetArgument(arg_dict, 'config_file', die_fn)
result_db_filename = GetArgument(arg_dict, 'result_db_filename', die_fn)
download = GetArgument(arg_dict, 'download', die_fn)
log_file = GetArgument(arg_dict, 'log_file', die_fn)
perform_map = GetArgument(arg_dict, 'map', die_fn)
errors = []
if batch_size is None:
if download or perform_map:
arg_dict['batch_size'] = DEFAULT_DOWNLOAD_BATCH_SIZE
else:
arg_dict['batch_size'] = DEFAULT_BATCH_SIZE
elif batch_size <= 0:
errors.append('batch_size must be at least 1')
if db_filename is None:
arg_dict['db_filename'] = time.strftime(
'bulkloader-progress-%Y%m%d.%H%M%S.sql3')
if result_db_filename is None:
arg_dict['result_db_filename'] = time.strftime(
'bulkloader-results-%Y%m%d.%H%M%S.sql3')
if log_file is None:
arg_dict['log_file'] = time.strftime('bulkloader-log-%Y%m%d.%H%M%S')
required = '%s argument required'
if config_file is None and not dump and not restore:
errors.append('One of --config_file, --dump, or --restore is required')
if url is REQUIRED_OPTION:
errors.append(required % 'url')
if not filename and not perform_map:
errors.append(required % 'filename')
if kind is None:
if download or perform_map:
errors.append('kind argument required for this operation')
elif not dump and not restore:
errors.append(
'kind argument required unless --dump or --restore is specified')
if not app_id:
if url and url is not REQUIRED_OPTION:
(unused_scheme, host_port, unused_url_path,
unused_query, unused_fragment) = urlparse.urlsplit(url)
suffix_idx = host_port.find('.appspot.com')
if suffix_idx > -1:
arg_dict['app_id'] = host_port[:suffix_idx]
elif host_port.split(':')[0].endswith('google.com'):
arg_dict['app_id'] = host_port.split('.')[0]
else:
errors.append('app_id argument required for non appspot.com domains')
if errors:
print >>sys.stderr, '\n'.join(errors)
die_fn()
return arg_dict
def ParseKind(kind):
if kind and kind[0] == '(' and kind[-1] == ')':
return tuple(kind[1:-1].split(','))
else:
return kind
def _PerformBulkload(arg_dict,
check_file=CheckFile,
check_output_file=CheckOutputFile):
"""Runs the bulkloader, given the command line options.
Args:
arg_dict: Dictionary of bulkloader options.
check_file: Used for dependency injection.
check_output_file: Used for dependency injection.
Returns:
An exit code.
Raises:
ConfigurationError: if inconsistent options are passed.
"""
app_id = arg_dict['app_id']
url = arg_dict['url']
filename = arg_dict['filename']
batch_size = arg_dict['batch_size']
kind = arg_dict['kind']
num_threads = arg_dict['num_threads']
bandwidth_limit = arg_dict['bandwidth_limit']
rps_limit = arg_dict['rps_limit']
http_limit = arg_dict['http_limit']
db_filename = arg_dict['db_filename']
config_file = arg_dict['config_file']
auth_domain = arg_dict['auth_domain']
has_header = arg_dict['has_header']
download = arg_dict['download']
result_db_filename = arg_dict['result_db_filename']
loader_opts = arg_dict['loader_opts']
exporter_opts = arg_dict['exporter_opts']
mapper_opts = arg_dict['mapper_opts']
email = arg_dict['email']
passin = arg_dict['passin']
perform_map = arg_dict['map']
dump = arg_dict['dump']
restore = arg_dict['restore']
os.environ['AUTH_DOMAIN'] = auth_domain
kind = ParseKind(kind)
if not dump and not restore:
check_file(config_file)
if download and perform_map:
logger.error('--download and --map are mutually exclusive.')
if download or dump:
check_output_file(filename)
elif not perform_map:
check_file(filename)
if dump:
Exporter.RegisterExporter(DumpExporter(kind, result_db_filename))
elif restore:
Loader.RegisterLoader(RestoreLoader(kind, app_id))
else:
LoadConfig(config_file)
os.environ['APPLICATION_ID'] = app_id
throttle_layout = ThrottleLayout(bandwidth_limit, http_limit, rps_limit)
logger.info('Throttling transfers:')
logger.info('Bandwidth: %s bytes/second', bandwidth_limit)
logger.info('HTTP connections: %s/second', http_limit)
logger.info('Entities inserted/fetched/modified: %s/second', rps_limit)
throttle = remote_api_throttle.Throttle(layout=throttle_layout)
signature = _MakeSignature(app_id=app_id,
url=url,
kind=kind,
db_filename=db_filename,
download=download,
perform_map=perform_map,
has_header=has_header,
result_db_filename=result_db_filename,
dump=dump,
restore=restore)
max_queue_size = max(DEFAULT_QUEUE_SIZE, 3 * num_threads + 5)
if db_filename == 'skip':
progress_db = StubProgressDatabase()
elif not download and not perform_map and not dump:
progress_db = ProgressDatabase(db_filename, signature)
else:
progress_db = ExportProgressDatabase(db_filename, signature)
return_code = 1
if not download and not perform_map and not dump:
loader = Loader.RegisteredLoader(kind)
try:
loader.initialize(filename, loader_opts)
workitem_generator_factory = GetCSVGeneratorFactory(
kind, filename, batch_size, has_header)
app = BulkUploaderApp(arg_dict,
workitem_generator_factory,
throttle,
progress_db,
ProgressTrackerThread,
max_queue_size,
RequestManager,
DataSourceThread,
Queue.Queue)
try:
return_code = app.Run()
except AuthenticationError:
logger.info('Authentication Failed')
finally:
loader.finalize()
elif not perform_map:
result_db = ResultDatabase(result_db_filename, signature)
exporter = Exporter.RegisteredExporter(kind)
try:
exporter.initialize(filename, exporter_opts)
def KeyRangeGeneratorFactory(request_manager, progress_queue,
progress_gen):
return KeyRangeItemGenerator(request_manager, kind, progress_queue,
progress_gen, DownloadItem)
def ExportProgressThreadFactory(progress_queue, progress_db):
return ExportProgressThread(kind,
progress_queue,
progress_db,
result_db)
app = BulkDownloaderApp(arg_dict,
KeyRangeGeneratorFactory,
throttle,
progress_db,
ExportProgressThreadFactory,
0,
RequestManager,
DataSourceThread,
Queue.Queue)
try:
return_code = app.Run()
except AuthenticationError:
logger.info('Authentication Failed')
finally:
exporter.finalize()
elif not download:
mapper = Mapper.RegisteredMapper(kind)
try:
mapper.initialize(mapper_opts)
def KeyRangeGeneratorFactory(request_manager, progress_queue,
progress_gen):
return KeyRangeItemGenerator(request_manager, kind, progress_queue,
progress_gen, MapperItem)
def MapperProgressThreadFactory(progress_queue, progress_db):
return MapperProgressThread(kind,
progress_queue,
progress_db)
app = BulkMapperApp(arg_dict,
KeyRangeGeneratorFactory,
throttle,
progress_db,
MapperProgressThreadFactory,
0,
RequestManager,
DataSourceThread,
Queue.Queue)
try:
return_code = app.Run()
except AuthenticationError:
logger.info('Authentication Failed')
finally:
mapper.finalize()
return return_code
def SetupLogging(arg_dict):
"""Sets up logging for the bulkloader.
Args:
arg_dict: Dictionary mapping flag names to their arguments.
"""
format = '[%(levelname)-8s %(asctime)s %(filename)s] %(message)s'
debug = arg_dict['debug']
log_file = arg_dict['log_file']
logger.setLevel(logging.DEBUG)
logger.propagate = False
file_handler = logging.FileHandler(log_file, 'w')
file_handler.setLevel(logging.DEBUG)
file_formatter = logging.Formatter(format)
file_handler.setFormatter(file_formatter)
logger.addHandler(file_handler)
console = logging.StreamHandler()
level = logging.INFO
if debug:
level = logging.DEBUG
console.setLevel(level)
console_format = '[%(levelname)-8s] %(message)s'
formatter = logging.Formatter(console_format)
console.setFormatter(formatter)
logger.addHandler(console)
logger.info('Logging to %s', log_file)
remote_api_throttle.logger.setLevel(level)
remote_api_throttle.logger.addHandler(file_handler)
remote_api_throttle.logger.addHandler(console)
appengine_rpc.logger.setLevel(logging.WARN)
adaptive_thread_pool.logger.setLevel(logging.DEBUG)
adaptive_thread_pool.logger.addHandler(console)
adaptive_thread_pool.logger.addHandler(file_handler)
adaptive_thread_pool.logger.propagate = False
def Run(arg_dict):
"""Sets up and runs the bulkloader, given the options as keyword arguments.
Args:
arg_dict: Dictionary of bulkloader options
Returns:
An exit code.
"""
arg_dict = ProcessArguments(arg_dict)
SetupLogging(arg_dict)
return _PerformBulkload(arg_dict)
def main(argv):
"""Runs the importer from the command line."""
arg_dict = ParseArguments(argv)
errors = ['%s argument required' % key
for (key, value) in arg_dict.iteritems()
if value is REQUIRED_OPTION]
if errors:
print >>sys.stderr, '\n'.join(errors)
PrintUsageExit(1)
SetupLogging(arg_dict)
return _PerformBulkload(arg_dict)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| mit |
adieu/allbuttonspressed | docutils/parsers/rst/languages/eo.py | 6 | 3808 | # $Id: eo.py 6460 2010-10-29 22:18:44Z milde $
# Author: Marcelo Huerta San Martin <[email protected]>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Esperanto-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'
directives = {
# language-dependent: fixed
u'atentu': 'attention',
u'zorgu': 'caution',
u'dangxero': 'danger',
u'dan\u011dero': 'danger',
u'eraro': 'error',
u'spuro': 'hint',
u'grava': 'important',
u'noto': 'note',
u'helpeto': 'tip',
u'averto': 'warning',
u'admono': 'admonition',
u'flankteksto': 'sidebar',
u'temo': 'topic',
u'linea-bloko': 'line-block',
u'analizota-literalo': 'parsed-literal',
u'rubriko': 'rubric',
u'epigrafo': 'epigraph',
u'elstarajxoj': 'highlights',
u'elstara\u0135oj': 'highlights',
u'ekstera-citajxo': 'pull-quote',
u'ekstera-cita\u0135o': 'pull-quote',
u'kombinajxo': 'compound',
u'kombina\u0135o': 'compound',
u'tekstingo': 'container',
u'enhavilo': 'container',
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
u'tabelo': 'table',
u'tabelo-vdk': 'csv-table', # "valoroj disigitaj per komoj"
u'tabelo-csv': 'csv-table',
u'tabelo-lista': 'list-table',
u'meta': 'meta',
'math (translation required)': 'math',
#'imagemap': 'imagemap',
u'bildo': 'image',
u'figuro': 'figure',
u'inkludi': 'include',
u'senanaliza': 'raw',
u'anstatauxi': 'replace',
u'anstata\u016di': 'replace',
u'unicode': 'unicode',
u'dato': 'date',
u'klaso': 'class',
u'rolo': 'role',
u'preterlasita-rolo': 'default-role',
u'titolo': 'title',
u'enhavo': 'contents',
u'seknum': 'sectnum',
u'sekcia-numerado': 'sectnum',
u'kapsekcio': 'header',
u'piedsekcio': 'footer',
#'footnotes': 'footnotes',
#'citations': 'citations',
u'celaj-notoj': 'target-notes',
u'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""Esperanto name to registered (in directives/__init__.py) directive name
mapping."""
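# For example, a reStructuredText document written in Esperanto can use the
# localized directive names above; looking up directives[u'bildo'] yields the
# canonical directive name 'image'.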
roles = {
# language-dependent: fixed
u'mallongigo': 'abbreviation',
u'mall': 'abbreviation',
u'komenclitero': 'acronym',
u'kl': 'acronym',
u'indekso': 'index',
u'i': 'index',
u'subskribo': 'subscript',
u'sub': 'subscript',
u'supraskribo': 'superscript',
u'sup': 'superscript',
u'titola-referenco': 'title-reference',
u'titolo': 'title-reference',
u't': 'title-reference',
u'pep-referenco': 'pep-reference',
u'pep': 'pep-reference',
u'rfc-referenco': 'rfc-reference',
u'rfc': 'rfc-reference',
u'emfazo': 'emphasis',
u'forta': 'strong',
u'litera': 'literal',
'math (translation required)': 'math',
u'nomita-referenco': 'named-reference',
u'nenomita-referenco': 'anonymous-reference',
u'piednota-referenco': 'footnote-reference',
u'citajxo-referenco': 'citation-reference',
u'cita\u0135o-referenco': 'citation-reference',
u'anstatauxa-referenco': 'substitution-reference',
u'anstata\u016da-referenco': 'substitution-reference',
u'celo': 'target',
u'uri-referenco': 'uri-reference',
u'uri': 'uri-reference',
u'url': 'uri-reference',
u'senanaliza': 'raw',
}
"""Mapping of Esperanto role names to canonical role names for interpreted text.
"""
| bsd-3-clause |
Syralist/pixels_clock | clock.py | 1 | 3227 | # -*- coding: utf-8 -*-
import pygame, led, sys, os, random, csv
import smbus
from pygame.locals import *
from led.PixelEventHandler import *
from time import gmtime, strftime
""" A very simple arcade shooter demo :)
"""
random.seed()
BLACK = pygame.Color(0,0,0)
WHITE = pygame.Color(255, 255, 255)
RED = pygame.Color(255, 0, 0)
GREEN = pygame.Color(0, 255, 0)
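# LM75 temperature sensor on I2C bus 1; 0x48 is the sensor's default address.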
adress = 0x48
LM75 = smbus.SMBus(1)
# detect if a serial/USB port is given as argument
hasSerialPortParameter = len(sys.argv) > 1
# use a 90 x 20 matrix when no USB port for a real display is provided
fallbackSize = ( 90, 20 )
if hasSerialPortParameter:
serialPort = sys.argv[1]
print "INITIALIZING WITH USB-PORT: " + serialPort
ledDisplay = led.teensy.TeensyDisplay(serialPort, fallbackSize)
else:
print "INITIALIZING WITH SERVER DISPLAY AND SIMULATOR."
ledDisplay = led.dsclient.DisplayServerClientDisplay('localhost', 8123, fallbackSize)
# use same size for sim and real LED panel
size = ledDisplay.size()
simDisplay = led.sim.SimDisplay(size)
screen = pygame.Surface(size)
gamestate = 0 #1=alive; 0=dead
def main():
pygame.init()
pygame.font.init()
clock = pygame.time.Clock()
pygame.joystick.init()
gameover = False
# Initialize first joystick
if pygame.joystick.get_count() > 0:
stick = pygame.joystick.Joystick(0)
stick.init()
global gamestate
scored = False
# Clear event list before starting the game
pygame.event.clear()
while not gameover:
# Process event queue
for pgevent in pygame.event.get():
if pgevent.type == QUIT:
pygame.quit()
sys.exit()
event = process_event(pgevent)
# End the game
if event.button == EXIT:
gameover = True
# Keypresses on keyboard and joystick axis motions / button presses
elif event.type == PUSH:
# Movements
if event.button == UP:
pass
elif event.button == DOWN:
pass
elif event.button == RIGHT:
pass
elif event.button == LEFT:
pass
# B2 button: no action assigned
elif event.button == B2:
pass
# P1 button: exit the clock
elif event.button == P1:
gameover = True
# Only on Keyboard
elif pgevent.type == KEYDOWN and pgevent.key == K_ESCAPE:
gameover = True
screen.fill(BLACK)
font = pygame.font.SysFont("Arial", 12)
text1 = font.render(strftime("%H:%M:%S"), 0, RED)
text1pos = text1.get_rect()
text1pos.midtop = (screen.get_rect().centerx, -1)
screen.blit(text1,text1pos)
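# Read the current temperature from the LM75; fall back to -1 if the read fails.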
try:
temp = LM75.read_byte(adress)
except:
temp = -1
text2 = font.render("T: "+str(temp)+"'C", 0, GREEN)
text2pos = text2.get_rect()
text2pos.midbottom = (screen.get_rect().centerx, 23)
screen.blit(text2,text2pos)
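# Push the finished frame to both the simulator window and the real LED panel.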
simDisplay.update(screen)
ledDisplay.update(screen)
clock.tick(10)
main()
| gpl-3.0 |