from collections import Counter
def answer(q,inf):
s = Counter(q.split(' ')); r = [-1,-1]
for i,j in enumerate(inf):
check = sum(s.get(w,0) for w in j.split(' '))
if check != 0 and check > r[1]: r = [i,check]
return None if r == [-1,-1] else inf[r[0]]
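# For example (hypothetical inputs): answer('how to bake bread',
# ['bread is baked in an oven', 'cats sleep a lot']) returns
# 'bread is baked in an oven', the entry sharing the most words with the query,
# and returns None when no entry shares any word with it.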
| python |
from PySide6.QtCore import *
from PySide6.QtGui import *
from PySide6.QtWidgets import *
from ..core.widget import set_contents_margins
class QXVBoxLayout(QVBoxLayout):
def __init__(self, widgets=None, contents_margins=0, spacing=0):
super().__init__()
set_contents_margins(self, contents_margins)
if widgets is not None:
for widget in widgets:
alignment = None
if isinstance(widget, int):
thickness=widget
widget = QWidget()
widget.setFixedHeight(thickness)
widget.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Fixed)
if isinstance(widget, (tuple,list)):
widget, alignment = widget
if isinstance(widget, QLayout):
self.addLayout(widget)
else:
self.addWidget(widget)
if alignment is not None:
self.setAlignment(widget, alignment)
if spacing is not None:
self.setSpacing(spacing)
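# A minimal usage sketch (hypothetical widgets; assumes a QApplication exists and that
# this module is importable despite its relative import): integers become fixed-height
# spacers and (widget, alignment) pairs set a per-widget alignment.
def _example_layout():
    layout = QXVBoxLayout(
        widgets=[
            QLabel('Title'),
            8,                                             # 8 px spacer
            (QPushButton('OK'), Qt.AlignmentFlag.AlignRight),
        ],
        contents_margins=4,
        spacing=2,
    )
    container = QWidget()
    container.setLayout(layout)
    return container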
| python |
# input_text = 'R5, L5, R5, R3'
input_text = open('input1.txt').read()
dir_x, dir_y = (0, 1) # North
dest_x, dest_y = (0, 0)
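# A right turn rotates the heading clockwise, (dir_x, dir_y) -> (dir_y, -dir_x);
# a left turn is the inverse rotation, (dir_x, dir_y) -> (-dir_y, dir_x).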
for step in input_text.split(', '):
side = step[0]
distance = int(step[1:])
if side == 'R':
dir_x, dir_y = dir_y, -dir_x
else:
dir_x, dir_y = -dir_y, dir_x
dest_x += dir_x * distance
dest_y += dir_y * distance
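# Walk from the origin towards the destination one axis-aligned step at a time;
# the number of steps equals the Manhattan distance |dest_x| + |dest_y|.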
x, y = (0, 0)
distance = 0
while True:
if (dest_x - x) > 0:
x += 1
elif (dest_x - x) < 0:
x -= 1
elif (dest_y - y) > 0:
y += 1
elif (dest_y - y) < 0:
y -= 1
else:
break
distance += 1
print(distance)
| python |
"""
Import from other sources to database.
"""
| python |
import itertools
from datetime import datetime
import requests
import rdflib
import os
import pandas as pd
from SPARQLWrapper import SPARQLWrapper, TURTLE, JSON, POST
# -----------------------------------------------------------------------------
def addTestData(target, loadConfig):
"""This function reads the test data and stores it into several named graphs (one file for one named graph).
The config looks like the following: {'http://first-named-graph': 'filename1', 'http://second-named-graph': 'filename2'}
The data could already be in quad format, but we are more flexible if we can configure which data is stored in which named graph.
"""
for ng in loadConfig:
filename = loadConfig[ng]
if os.path.isfile(filename):
with open(filename, 'r') as dataIn:
if isinstance(target, rdflib.ConjunctiveGraph):
namedGraphURI = rdflib.URIRef(ng)
target.get_context(namedGraphURI).parse(filename, format='turtle')
else:
addDataToBlazegraph(url=target, namedGraph=ng, filename=filename, fileFormat='text/turtle')
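# A minimal usage sketch of addTestData() with hypothetical graph URIs and filenames:
# with an rdflib.ConjunctiveGraph target the Turtle files are parsed locally,
# with a URL target they are uploaded to Blazegraph instead.
def _exampleAddTestData():
  exampleConfig = {
    'http://example.org/graph/contributors': 'test/contributors.ttl',
    'http://example.org/graph/manifestations': 'test/manifestations.ttl',
  }
  g = rdflib.ConjunctiveGraph()
  addTestData(g, exampleConfig)
  return g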
# -----------------------------------------------------------------------------
def loadData(url, loadConfig):
"""This function reads the given config containing the source of RDF data and its type to store it in a SPARQL endpoint at 'url'."""
for graph in loadConfig:
filename = loadConfig['graph']
if os.path.isfile(filename):
if filename.endswith('.ttl'):
addDataToBlazegraph(url=url, namedGraph=graph, filename=filename, fileFormat='text/turtle')
elif filename.endswith('.sparql'):
addDataToBlazegraph(url=url, namedGraph=graph, filename=filename, fileFormat='application/sparql-update')
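# A short usage sketch for loadData() with a hypothetical endpoint, graph names and files:
# '.ttl' files are uploaded as Turtle and '.sparql' files are sent as SPARQL UPDATE requests.
def _exampleLoadData():
  exampleConfig = {
    'http://example.org/graph/kbr': 'kbr-translations.ttl',
    'http://example.org/graph/cleanup': 'delete-duplicates.sparql',
  }
  loadData('http://localhost:9999/blazegraph/namespace/kb/sparql', exampleConfig)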
# -----------------------------------------------------------------------------
def addDataToBlazegraph(url, filename, fileFormat, namedGraph=None, auth=None):
print(f'## Add data from {filename} to {namedGraph} of {url}\n')
with open(filename, 'rb') as fileIn:
#r = requests.post(url, files={'file': (filename, fileIn, fileFormat)}, headers={'Content-Type': fileFormat}, params={'context-uri': namedGraph})
if namedGraph:
r = requests.post(url, data=fileIn.read(), headers={'Content-Type': fileFormat}, params={'context-uri': namedGraph}, auth=auth)
else:
r = requests.post(url, data=fileIn.read(), headers={'Content-Type': fileFormat}, auth=auth)
print(r.headers)
print(r.content)
# -----------------------------------------------------------------------------
def query(target, queryString, outputWriter):
"""This function executes the given SPARQL query against the target and writes the output to outputWriter."""
res = None
if isinstance(target, rdflib.ConjunctiveGraph):
# target is a local rdflib graph
print(target)
res = target.query(queryString)
for row in res:
print(row)
else:
# SPARQLWrapper has issues retrieving CSV from Blazegraph, so we send the query directly via an HTTP request
res = requests.post(target, data=queryString, headers={'Accept': 'text/csv', 'Content-Type': 'application/sparql-query'})
outputWriter.write(res.content)
# ------------------------------------------------------------
def readSPARQLQuery(filename):
"""Read a SPARQL query from file and return the content as a string."""
content = ""
with open(filename, 'r') as reader:
content = reader.read()
return content
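# A short usage sketch (hypothetical endpoint and filenames) combining readSPARQLQuery()
# and query() to export a CSV file from a SPARQL endpoint; the response body is written
# as bytes, hence the 'wb' mode.
def _exampleCSVExport(endpointURL='http://localhost:9999/blazegraph/sparql'):
  queryString = readSPARQLQuery('queries/contributors.sparql')
  with open('contributors.csv', 'wb') as outFile:
    query(endpointURL, queryString, outFile)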
# -----------------------------------------------------------------------------
def addToMismatchLog(mismatchLog, dateType, roleType, contributorURI, s, value):
"""This function logs mismatching dates in the given data structure.
>>> log = {}
>>> addToMismatchLog(log, 'Birth', 'author', '123', 'KBR', '1988')
>>> log['Birth']['author']['123']['KBR'] == {'1988'}
True
A log is added also if there is already a log entry for another source of that contributor
>>> log = { 'Birth': {'author': {'123': {'ISNI': {'1989'}}}}}
>>> addToMismatchLog(log, 'Birth', 'author', '123', 'KBR', '1988')
>>> log['Birth']['author']['123']['KBR'] == {'1988'} and log['Birth']['author']['123']['ISNI'] == {'1989'}
True
"""
if dateType in mismatchLog:
if roleType in mismatchLog[dateType]:
if contributorURI in mismatchLog[dateType][roleType]:
if s in mismatchLog[dateType][roleType][contributorURI]:
mismatchLog[dateType][roleType][contributorURI][s].add(value)
else:
mismatchLog[dateType][roleType][contributorURI][s] = set([value])
else:
mismatchLog[dateType][roleType][contributorURI] = { s: set([value]) }
else:
mismatchLog[dateType][roleType] = {contributorURI: { s: set([value]) } }
else:
mismatchLog[dateType] = {roleType: {contributorURI: { s: set([value]) } }}
# -----------------------------------------------------------------------------
def datesMatch(fullDates, yearMonthDates, years):
"""This function checks if the different provided dates describe the same date,
e.g. 1988-04-25, 1988 and 1988-04 would match resulting in True, otherwise False.
>>> datesMatch(set(['1988-04-25']), set(['1988-04']), set(['1988']))
True
>>> datesMatch(set(['1988-04-25']), [], set(['1988']))
True
>>> datesMatch(set(['1988-04-25']), set([]), set([]))
True
>>> datesMatch(set([]), set(['1988-04']), set([]))
True
>>> datesMatch(set([]), set([]), set(['1988']))
True
>>> datesMatch(set(['1988-04-25']), set(['1988-04']), set(['1988', '1988', '1989']))
False
>>> datesMatch(set(['1988-04-25']), set(['1988-04', '1988-06']), set(['1988', '1988']))
False
>>> datesMatch(set(['1988-04-25', '1988-05-25']), set(['1988-04']), set(['1988', '1988', '1989']))
False
>>> datesMatch([], [], [])
False
"""
# The given dates are stored in sets, if one set has more than 1 element
# there are at least 2 different values
if len(fullDates) > 1: return False
if len(yearMonthDates) > 1: return False
if len(years) > 1: return False
# compare the differently detailed dates
# full date with year month
if len(fullDates) > 0 and len(yearMonthDates) > 0:
fullDate = datetime.strptime(next(iter(fullDates)), '%Y-%m-%d').date()
yearMonth = datetime.strptime(next(iter(yearMonthDates)), '%Y-%m').date()
if fullDate.year != yearMonth.year or fullDate.month != yearMonth.month:
return False
# full date with year
if len(fullDates) > 0 and len(years) > 0:
fullDate = datetime.strptime(next(iter(fullDates)), '%Y-%m-%d').date()
year = datetime.strptime(next(iter(years)), '%Y').date().year
if fullDate.year != year:
return False
# year month with year
if len(yearMonthDates) > 0 and len(years) > 0:
yearMonth = datetime.strptime(next(iter(yearMonthDates)), '%Y-%m').date()
year = datetime.strptime(next(iter(years)), '%Y').date().year
if yearMonth.year != year:
return False
if len(fullDates) == 0 and len(yearMonthDates) == 0 and len(years) == 0:
return False
else:
return True
# -----------------------------------------------------------------------------
def concatenateDates(fullDates, yearMonthDates, years):
"""This function combines several dates in a human readable fashion.
>>> concatenateDates(set(['1988-04-25']), set(['1988-05']), set())
'1988-04-25 or 1988-05'
>>> concatenateDates(set(['1988-04-25', '1988-04-24']), set(['1988-05']), set())
'1988-04-24 or 1988-04-25 or 1988-05'
>>> concatenateDates(set(['1988-04-25', '1988-04-24']), set(['1988-05']), set(['1989']))
'1988-04-24 or 1988-04-25 or 1988-05 or 1989'
"""
elements = [fullDates, yearMonthDates, years]
singleList = set().union(*elements)
return ' or '.join(sorted(singleList))
# -----------------------------------------------------------------------------
def mostCompleteDate(dates):
"""This function returns the most complete date from the given array, if there is a mismatch both are returned.
>>> mostCompleteDate(['1988-04-25', '1988'])
'1988-04-25'
>>> mostCompleteDate(['1988-04-25'])
'1988-04-25'
>>> mostCompleteDate(['1988', '1988-04'])
'1988-04'
>>> mostCompleteDate(['1988'])
'1988'
"""
fullDates = set()
yearMonthDates = set()
years = set()
if len(dates) > 0:
for d in dates:
try:
fullDate = datetime.strptime(d, '%Y-%m-%d').date()
fullDates.add(d)
except:
try:
yearMonth = datetime.strptime(d, '%Y-%m').date()
yearMonthDates.add(d)
except:
try:
year = datetime.strptime(d, '%Y').date().year
years.add(d)
except:
pass
if datesMatch(fullDates, yearMonthDates, years):
# preferably return a full date, thus start with that
if len(fullDates) > 0:
return fullDates.pop()
elif len(yearMonthDates) > 0:
return yearMonthDates.pop()
elif len(years) > 0:
return years.pop()
else:
# the values match, but technically they are all empty
return ''
else:
return concatenateDates(fullDates, yearMonthDates, years)
else:
return ''
# -----------------------------------------------------------------------------
def selectDate(row, role, dateType, sources, rowIDCol, mismatchLog):
"""This function chooses the most complete date for the given role and row, possible dateTypes are 'Birth' and 'Death'.
Select the most complete date between the sources
>>> row = {'authorBirthDateKBR': '1988-04-25', 'authorBirthDateISNI': '1988'}
>>> selectDate(row, 'author', 'Birth', ['KBR', 'ISNI'], 'authorKBRIdentifier', {})
>>> row['authorBirthDate'] == '1988-04-25'
True
>>> row = {'authorBirthDateKBR': '', 'authorBirthDateISNI': '1988'}
>>> selectDate(row, 'author', 'Birth', ['KBR', 'ISNI'], 'authorKBRIdentifier', {})
>>> row['authorBirthDate'] == '1988'
True
Keep it empty if none of the sources provide a date
>>> row = {'authorBirthDateKBR': '', 'authorBirthDateISNI': ''}
>>> selectDate(row, 'author', 'Birth', ['KBR', 'ISNI'], 'authorKBRIdentifier', {})
>>> row['authorBirthDate'] == ''
True
It also works for other roles than author
>>> row = {'translatorBirthDateKBR': '1988-04-25', 'translatorBirthDateISNI': '1988'}
>>> selectDate(row, 'translator', 'Birth', ['KBR', 'ISNI'], 'translatorKBRIdentifier', {})
>>> row['translatorBirthDate'] == '1988-04-25'
True
>>> row = {'illustratorBirthDateKBR': '1988-04-25', 'illustratorBirthDateISNI': '1988'}
>>> selectDate(row, 'illustrator', 'Birth', ['KBR', 'ISNI'], 'illustratorKBRIdentifier', {})
>>> row['illustratorBirthDate'] == '1988-04-25'
True
>>> row = {'scenaristBirthDateKBR': '1988-04-25', 'scenaristBirthDateISNI': '1988'}
>>> selectDate(row, 'scenarist', 'Birth', ['KBR', 'ISNI'], 'scenaristKBRIdentifier', {})
>>> row['scenaristBirthDate'] == '1988-04-25'
True
Log an error if a mismatch was found and keep both in the output
>>> row = {'authorKBRIdentifier': '1234', 'authorBirthDateKBR': '1988-04-25', 'authorBirthDateISNI': '1989'}
>>> selectDate(row, 'author', 'Birth', ['KBR', 'ISNI'], 'authorKBRIdentifier', {})
>>> row['authorBirthDate'] == '1988-04-25 or 1989'
True
The same also works for death dates
>>> row = {'authorDeathDateKBR': '1988-04-25', 'authorDeathDateISNI': '1988'}
>>> selectDate(row, 'author', 'Death', ['KBR', 'ISNI'], 'authorKBRIdentifier', {})
>>> row['authorDeathDate'] == '1988-04-25'
True
It also works if a date without a source suffix is already present in the row
>>> row = {'authorDeathDate': '1988-04-25', 'authorDeathDateISNI': '1988'}
>>> selectDate(row, 'author', 'Death', ['KBR', 'ISNI'], 'authorKBRIdentifier', {})
>>> row['authorDeathDate'] == '1988-04-25'
True
"""
# extract all possible dates based on different sources
dates = []
for s in sources:
colName = f'{role}{dateType}Date{s}'
if colName in row:
dates.append(row[colName])
# extract all possible dates without a source identifier, e.g. authorDeathDate
noSourceColName = f'{role}{dateType}Date'
if noSourceColName in row:
dates.append(row[noSourceColName])
outputColName = f'{role}{dateType}Date'
# set the selected value
row[outputColName] = mostCompleteDate(dates)
# In case the different dates do not match log it
# the date should then be e.g. "1972-04 or 1970"
if 'or' in row[outputColName]:
contributorURI = row[rowIDCol]
# log the mismatching data and then remove the initial sources
for s in sources:
colName = f'{role}{dateType}Date{s}'
value = row[colName]
addToMismatchLog(mismatchLog, dateType, role, contributorURI, s, value)
row.pop(colName)
else:
# only remove the initial sources
for s in sources:
colName = f'{role}{dateType}Date{s}'
if colName in row:
row.pop(colName)
# -----------------------------------------------------------------------------
def addKeysWithoutValueToDict(valDict, keyArray):
"""This function adds keys from keyArray to valDict in case it does not exist yet, the default value is an empty string
>>> addKeysWithoutValueToDict({'a': 'valA', 'b': 'valB'}, ['a', 'b', 'c'])
{'a': 'valA', 'b': 'valB', 'c': ''}
"""
for key in keyArray:
if key not in valDict:
valDict[key] = ''
return valDict
# -----------------------------------------------------------------------------
def mergeDictionaries(inputDict, separator=';'):
"""This function merges two or more dictionaries whereas values from different sources for the same key are combined by indicating the provenance.
For example sourceA = {'a': 'val1'} and sourceB = {'a': 'val2'} will be merged into {'a': 'val1 (sourceA)\nval2 (sourceB)}.
The given dictionary contains the two dictionaries with their respective names as keys (which will be used to indicate provenance)
>>> mergeDictionaries({'sourceA': {'a': 'val1'}, 'sourceB': {'a': 'val2'} })
{'a': 'val1 (sourceA);val2 (sourceB)'}
"""
keyValues = {}
for sourceName in inputDict:
for key in inputDict[sourceName]:
value = inputDict[sourceName][key]
valueString = f'{value} ({sourceName})'
if key in keyValues:
keyValues[key].append(valueString)
else:
keyValues[key] = [valueString]
outputDict = {}
for k in keyValues:
outputDict[k] = separator.join(keyValues[k])
return outputDict
# -----------------------------------------------------------------------------
def getContributorData(df, role, colNamesRaw):
"""
>>> df = pd.DataFrame({'authorColA': [1,2,3], 'authorColB': [1,2,3], 'authorColC': [4,5,6]})
>>> getContributorData(df, 'author', ['ColA', 'ColB'])
ColA ColB
0 1 1
1 2 2
2 3 3
"""
#colNamesRaw = ['Identifier', 'ISNI', 'Nationality', 'Gender', 'FamilyName', 'GivenName', 'BirthDate', 'DeathDate']
colNames = []
renameDict = {}
for c in colNamesRaw:
currentName = f'{role}{c}'
colNames.append(currentName)
renameDict[currentName] = c
df = df.rename(columns=renameDict)
return df[colNamesRaw]
# ---------------------------------------------------------------------------
def getDfCellValue(df, idColName, idColValue, colName):
"""Returns the value of a specific cell or raises errors in case the row isn't found or more than one value is found.
>>> data = pd.DataFrame([{"myID": 1, "name": "john", "myCol": "sven (12, 34)"},{"myID": 2, "name": "jane"}])
>>> getDfCellValue(data, "myID", 1, "myCol")
'sven (12, 34)'
>>> getDfCellValue(data, "myID", 11, "myCol")
Traceback (most recent call last):
...
ValueError: No row with ID "11" in column "myID" found!
>>> getDfCellValue(data, "myIDColumnWhichDoesNotExist", 11, "myCol")
Traceback (most recent call last):
...
KeyError: 'ID column "myIDColumnWhichDoesNotExist" does not exist!'
>>> getDfCellValue(data, "myID", 1, "myColWhichDoesNotExist")
Traceback (most recent call last):
...
KeyError: 'Value column "myColWhichDoesNotExist" does not exist!'
>>> data2 = pd.DataFrame([{"myID": 1, "name": "john", "myCol": "sven (12, 34)"},{"myID": 1, "name": "jane"}])
>>> getDfCellValue(data2, "myID", 1, "myCol")
Traceback (most recent call last):
...
ValueError: More than one row with ID "1" in column "myID" found!
>>> data3 = pd.DataFrame([{"targetTextKBRIdentifier": 1, "name": "john", "targetTextBnFIdentifier": "", "name": ""},{"targetTextKBRIdentifier": 2, "name": "jane"}, {"targetTextBnFIdentifier": "2", "name": "jane"}])
>>> getDfCellValue(data3, "targetTextKBRIdentifier", 2, "targetTextBnFIdentifier")
Traceback (most recent call last):
...
KeyError: 'No value found in column "targetTextKBRIdentifier"'
"""
if idColName not in df:
raise KeyError(f'ID column "{idColName}" does not exist!')
if colName not in df:
raise KeyError(f'Value column "{colName}" does not exist!')
selection = (df.loc[df[idColName] == idColValue, colName])
if selection.size > 1:
raise ValueError(f'More than one row with ID "{idColValue}" in column "{idColName}" found!')
elif selection.size == 1:
if selection.isna().all():
raise KeyError(f'No value found in column "{idColName}"')
else:
return selection.item()
else:
raise ValueError(f'No row with ID "{idColValue}" in column "{idColName}" found!')
# -----------------------------------------------------------------------------
if __name__ == "__main__":
import doctest
doctest.testmod()
| python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Plivo Team. See LICENSE for details
import base64
import re
import uuid
import os
import os.path
from datetime import datetime
import urllib
import urllib2
import urlparse
import traceback
import redis
import redis.exceptions
import flask
from flask import request
from werkzeug.datastructures import MultiDict
from werkzeug.exceptions import Unauthorized
# remove deprecated warning in python 2.6
try:
from hashlib import md5 as _md5
except ImportError:
import md5
_md5 = md5.new
from plivo.rest.freeswitch.helpers import is_valid_url, get_conf_value, \
get_post_param, get_http_param
MIME_TYPES = {'audio/mpeg': 'mp3',
'audio/x-wav': 'wav',
'application/srgs+xml': 'grxml',
'application/x-jsgf': 'jsgf',
}
def ip_protect(decorated_func):
def wrapper(obj):
if obj._validate_ip_auth():
return decorated_func(obj)
wrapper.__name__ = decorated_func.__name__
wrapper.__doc__ = decorated_func.__doc__
return wrapper
class UnsupportedResourceFormat(Exception):
pass
class ResourceCache(object):
"""Uses redis cache as a backend for storing cached files infos and datas.
"""
def __init__(self, redis_host='localhost', redis_port=6379, redis_db=0, redis_pw=None,
proxy_url=None, http_timeout=60):
self.host = redis_host
self.port = redis_port
self.db = redis_db
self.pw = redis_pw
self.proxy_url = proxy_url
self.http_timeout = http_timeout
def get_cx(self):
return redis.Redis(host=self.host, port=self.port, db=self.db,
socket_timeout=5.0, password=self.pw)
def get_resource_params(self, url):
resource_key = self.get_resource_key(url)
cx = self.get_cx()
if cx.sismember("resource_key", resource_key):
resource_type = cx.hget("resource_key:%s" % resource_key, "resource_type")
etag = cx.hget("resource_key:%s" % resource_key, "etag")
last_modified = cx.hget("resource_key:%s" % resource_key, "last_modified")
return resource_key, resource_type, etag, last_modified
else:
return None, None, None, None
def update_resource_params(self, resource_key, resource_type, etag, last_modified, buffer):
if etag is None:
etag = ""
if last_modified is None:
last_modified = ""
cx = self.get_cx()
if not cx.sismember("resource_key", resource_key):
cx.sadd("resource_key", resource_key)
cx.hset("resource_key:%s" % resource_key, "resource_type", resource_type)
cx.hset("resource_key:%s" % resource_key, "etag", etag)
cx.hset("resource_key:%s" % resource_key, "last_modified", last_modified)
cx.hset("resource_key:%s" % resource_key, "file", buffer)
cx.hset("resource_key:%s" % resource_key, "last_update_time", str(datetime.now().strftime('%s')))
def delete_resource(self, resource_key):
cx = self.get_cx()
if cx.sismember("resource_key", resource_key):
cx.srem("resource_key", resource_key)
cx.delete("resource_key:%s" % resource_key)
def cache_resource(self, url):
if self.proxy_url is not None:
proxy = urllib2.ProxyHandler({'http': self.proxy_url})
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
request = urllib2.Request(url)
user_agent = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.35 Safari/535.1'
request.add_header('User-Agent', user_agent)
handler = urllib2.urlopen(request, timeout=self.http_timeout)
try:
resource_type = MIME_TYPES[handler.headers.get('Content-Type')]
if not resource_type:
raise UnsupportedResourceFormat("Resource format not found")
except KeyError:
raise UnsupportedResourceFormat("Resource format not supported")
etag = handler.headers.get('ETag')
last_modified = handler.headers.get('Last-Modified')
resource_key = self.get_resource_key(url)
stream = handler.read()
self.update_resource_params(resource_key, resource_type, etag, last_modified, stream)
return stream, resource_type
def get_stream(self, resource_key):
stream = self.get_cx().hget("resource_key:%s" % resource_key, "file")
resource_type = self.get_cx().hget("resource_key:%s" % resource_key, "resource_type")
return stream, resource_type
def get_resource_key(self, url):
return base64.urlsafe_b64encode(_md5(url).digest())
def is_resource_updated(self, url, etag, last_modified):
no_change = (False, None, None)
# if no ETag, then check for 'Last-Modified' header
if etag is not None and etag != "":
request = urllib2.Request(url)
request.add_header('If-None-Match', etag)
elif last_modified is not None and last_modified != "":
request = urllib2.Request(url)
request.add_header('If-Modified-Since', last_modified)
else:
return no_change
try:
second_try = urllib2.urlopen(request)
except urllib2.HTTPError, e:
# if http code is 304, no change
if e.code == 304:
return no_change
return True, etag, last_modified
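# A minimal usage sketch (hypothetical Redis settings and URL, not part of the original
# module): cache_resource() downloads and stores the file, get_stream() reads it back.
def _example_cache_usage():
    cache = ResourceCache(redis_host='localhost', redis_port=6379, redis_db=0)
    stream, resource_type = cache.cache_resource('http://example.com/prompt.mp3')
    key = cache.get_resource_key('http://example.com/prompt.mp3')
    return cache.get_stream(key)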
def get_resource_type(server, url):
resource_type = None
resource_key, resource_type, etag, last_modified = server.cache.get_resource_params(url)
if resource_type:
return resource_type
full_file_name, stream, resource_type = get_resource(server, url)
return resource_type
def get_resource(server, url):
if not url:
return url
full_file_name = url
stream = ''
resource_type = None
if server.cache is not None:
# don't do cache if not a remote file
if not full_file_name[:7].lower() == "http://" \
and not full_file_name[:8].lower() == "https://":
return (full_file_name, stream, resource_type)
rk = server.cache.get_resource_key(url)
server.log.debug("Cache -- Resource key %s for %s" % (rk, url))
try:
resource_key, resource_type, etag, last_modified = server.cache.get_resource_params(url)
if resource_key is None:
server.log.info("Cache -- %s not found. Downloading" % url)
try:
stream, resource_type = server.cache.cache_resource(url)
except UnsupportedResourceFormat:
server.log.error("Cache -- Ignoring Unsupported File at - %s" % url)
else:
server.log.debug("Cache -- Checking if %s source is newer" % url)
updated, new_etag, new_last_modified = server.cache.is_resource_updated(url, etag, last_modified)
if not updated:
server.log.debug("Cache -- Using Cached %s" % url)
stream, resource_type = server.cache.get_stream(resource_key)
else:
server.log.debug("Cache -- Updating Cached %s" % url)
try:
stream, resource_type = server.cache.cache_resource(url)
except UnsupportedResourceFormat:
server.log.error("Cache -- Ignoring Unsupported File at - %s" % url)
except Exception, e:
server.log.error("Cache -- Failure !")
[ server.log.debug('Cache -- Error: %s' % line) for line in \
traceback.format_exc().splitlines() ]
if stream:
return (full_file_name, stream, resource_type)
if full_file_name[:7].lower() == "http://":
audio_path = full_file_name[7:]
full_file_name = "shout://%s" % audio_path
elif full_file_name[:8].lower() == "https://":
audio_path = full_file_name[8:]
full_file_name = "shout://%s" % audio_path
return (full_file_name, stream, resource_type)
class PlivoCacheApi(object):
_config = None
log = None
allowed_ips = []
def _validate_ip_auth(self):
"""Verify request is from allowed ips
"""
if not self.allowed_ips:
return True
remote_ip = request.remote_addr.strip()
if remote_ip in self.allowed_ips:
return True
self.log.debug("IP Auth Failed: remote ip %s not in %s" % (remote_ip, str(self.allowed_ips)))
raise Unauthorized("IP Auth Failed")
@ip_protect
def index(self):
return "OK"
@ip_protect
def do_cache(self):
url = get_http_param(request, "url")
if not url:
self.log.debug("No Url")
return "NO URL", 404
self.log.debug("Url is %s" % str(url))
try:
file_path, stream, resource_type = get_resource(self, url)
if not stream:
self.log.debug("Url %s: no stream" % str(url))
return "NO STREAM", 404
if resource_type == 'mp3':
_type = 'audio/mp3'
elif resource_type == 'wav':
_type = 'audio/wav'
elif resource_type == 'grxml':
_type = 'application/srgs+xml'
elif resource_type == 'jsgf':
_type = 'application/x-jsgf'
else:
self.log.debug("Url %s: not supported format" % str(url))
return "NOT SUPPORTED FORMAT", 404
self.log.debug("Url %s: stream found" % str(url))
return flask.Response(response=stream, status=200,
headers=None, mimetype=_type,
content_type=_type,
direct_passthrough=False)
except Exception, e:
self.log.error("/Cache/ Error: %s" % str(e))
[ self.log.error('/Cache/ Error: %s' % line) for line in \
traceback.format_exc().splitlines() ]
raise e
@ip_protect
def do_cache_type(self):
url = get_http_param(request, "url")
if not url:
self.log.debug("No Url")
return "NO URL", 404
self.log.debug("Url is %s" % str(url))
try:
resource_type = get_resource_type(self, url)
if not resource_type:
self.log.debug("Url %s: no type" % str(url))
return "NO TYPE", 404
self.log.debug("Url %s: type is %s" % (str(url), str(resource_type)))
return flask.jsonify(CacheType=resource_type)
except Exception, e:
self.log.error("/CacheType/ Error: %s" % str(e))
[ self.log.error('/CacheType/ Error: %s' % line) for line in \
traceback.format_exc().splitlines() ]
raise e
@ip_protect
def do_reload_config(self):
try:
self.reload()
return flask.jsonify(Success=True, Message="ReloadConfig done")
except Exception, e:
self.log.error("/ReloadConfig/ Error: %s" % str(e))
[ self.log.error('/ReloadConfig/ Error: %s' % line) for line in \
traceback.format_exc().splitlines() ]
raise e
| python |
from rest_framework.response import Response
from resumes.serializers import BasicSerializer, ProfileSerializer, ResumeSerializer, VolunteerSerializer, WorkSerializer
from resumes.models import Basic, Profile, Resume, Volunteer, Work
from django.shortcuts import render
from rest_framework.decorators import action
from rest_framework import viewsets, mixins, status
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from rest_framework_condition import etag
from resumes.utils import check_etag, custom_etag, custom_update
class ResumeAPIView(mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin,
mixins.CreateModelMixin, mixins.DestroyModelMixin, viewsets.GenericViewSet):
queryset = Resume.objects.all()
serializer_class = ResumeSerializer
lookup_field = 'basics__name'
permission_classes = (IsAuthenticatedOrReadOnly,)
@etag(custom_etag)
def list(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
if page is not None:
serializer = self.get_serializer(page, many=True)
return self.get_paginated_response(serializer.data)
serializer = self.get_serializer(queryset, many=True)
return Response(serializer.data)
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@etag(custom_etag)
def retrieve(self, request, *args, **kwargs):
instance = self.get_object()
serializer = self.get_serializer(instance)
return Response(serializer.data)
@etag(custom_etag)
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer
return custom_update(request, instance, serializer, partial)
def partial_update(self, request, *args, **kwargs):
kwargs['partial'] = True
return self.update(request, *args, **kwargs)
@action(detail=True, methods=['GET', 'PUT'])
def basics(self, request, basics__name):
basic = Basic.objects.get(name=basics__name)
if request.method == 'GET':
check_etag(
basic.resume,
[basic],
('name', 'label', 'picture', 'email', 'phone', 'website', 'summary', 'location_id')
)
serializer = BasicSerializer(instance=basic)
return Response(serializer.data)
elif request.method == 'PUT':
return custom_update(request, basic, BasicSerializer)
@action(detail=True, methods=['GET', 'POST'], url_path='basics/profiles')
def profiles(self, request, basics__name):
basic = Basic.objects.filter(name=basics__name).last()
profiles = basic.profiles.all()
check_etag(basic.resume, profiles, ('network', 'username', 'url'))
if request.method == 'GET':
serializer = ProfileSerializer(profiles, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = ProfileSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(basic_id=basic.id)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@action(detail=True, methods=['PUT', 'DELETE'], url_path='basics/profiles/(?P<network>[\w.@+-]+)')
def edit_profiles(self, request, basics__name, network):
basic = Basic.objects.filter(name=basics__name).last()
profiles = basic.profiles.all()
check_etag(basic.resume, profiles, ('network', 'username', 'url'))
instance = Profile.objects.get(network=network)
if request.method == 'PUT':
return custom_update(request, instance, ProfileSerializer)
elif request.method == 'DELETE':
instance.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['GET', 'POST'])
def work(self, request, basics__name):
resume = Basic.objects.filter(name=basics__name).last().resume
work = resume.work.all()
check_etag(
resume,
work,
('company', 'position', 'website', 'start_date', 'end_date', 'summary')
)
if request.method == 'GET':
serializer = WorkSerializer(work, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = WorkSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(resume_id=resume.id)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@action(detail=True, methods=['PUT', 'DELETE'], url_path='work/(?P<company>[\w.@+-]+)')
def edit_work(self, request, basics__name, company):
resume = Basic.objects.filter(name=basics__name).last().resume
work = resume.work.all()
check_etag(resume, work, ('company', 'position', 'website', 'start_date', 'end_date', 'summary'))
instance = Work.objects.get(company=company)
if request.method == 'PUT':
return custom_update(request, instance, WorkSerializer)
elif request.method == 'DELETE':
instance.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
@action(detail=True, methods=['GET', 'POST'])
def volunteer(self, request, basics__name):
resume = Basic.objects.filter(name=basics__name).last().resume
volunteer = resume.volunteer.all()
check_etag(
resume,
volunteer,
('organization', 'position', 'website', 'start_date', 'end_date', 'summary')
)
if request.method == 'GET':
serializer = VolunteerSerializer(volunteer, many=True)
return Response(serializer.data)
elif request.method == 'POST':
serializer = VolunteerSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save(resume_id=resume.id)
headers = self.get_success_headers(serializer.data)
return Response(serializer.data, status=status.HTTP_201_CREATED, headers=headers)
@action(detail=True, methods=['PUT', 'DELETE'], url_path='volunteer/(?P<organization>[\w\ .@+-]+)')
def edit_volunteer(self, request, basics__name, organization):
resume = Basic.objects.filter(name=basics__name).last().resume
volunteer = resume.volunteer.all()
check_etag(resume, volunteer, ('organization', 'position', 'website', 'start_date', 'end_date', 'summary'))
instance = Volunteer.objects.get(organization=organization)
if request.method == 'PUT':
return custom_update(request, instance, VolunteerSerializer)
elif request.method == 'DELETE':
instance.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| python |
# "x" - Create. Creates the specified file, returns an error if the file exists
f = open("text1.txt", "x")
f.write("\nThis is new file")
f.close()
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
The iterative comb sort algorithm.
The comb sort algorithm was:
* designed by Włodzimierz Dobosiewicz and Artur Borowy in 1980;
* rediscovered and named by Stephen Lacey and Richard Box in 1991.
Notes
-----
The comb sort is a generalisation of the bubble sort (1-gap) algorithm.
References
----------
* https://en.wikipedia.org/wiki/Comb_sort
* https://www.geeksforgeeks.org/comb-sort/
* https://www.tutorialspoint.com/Comb-Sort
"""
__author__ = "Stanislav D. Kudriavtsev"
from typing import List, Sequence
# Complexity: worst case
# Time : O(n**2) (simple case of gap choice)
# Space: O(1) -> this implementation requires O(n) because it copies the input
def comb_sort_iter(seq: Sequence) -> List:
"""
Sort a sequence with the iterative comb sort algorithm.
Parameters
----------
seq : Sequence
Returns
-------
List
"""
lst = list(seq) # copy -> purity sake
size = len(lst)
gap = size
shrink = 1.3
tosort = True
while (gap != 1) or tosort:
gap = max(int(gap / shrink), 1)
tosort = False
for i in range(size - gap):
j = i + gap
if lst[i] > lst[j]:
lst[i], lst[j] = lst[j], lst[i]
tosort = True
return lst
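# A quick usage sketch with a hypothetical input list:
if __name__ == "__main__":
    sample = [8, 4, 1, 56, 3, -44, 23, -6, 28, 0]
    print(comb_sort_iter(sample))  # [-44, -6, 0, 1, 3, 4, 8, 23, 28, 56]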
| python |
from tkinter import *
from tkinter.ttk import *
from time import strftime
root=Tk()
root.title('clock')
def time():
string=strftime('%H:%M:%S')
label.config(text=string)
label.after(1000,time)
label = Label(root, font=('ds-digital', 80), background="black", foreground='yellow')  # install the ds-digital font (it can be found via a web search)
label.pack(anchor='center')
time()
mainloop()
| python |
import re
from typing import Optional, Pattern
ESCAPE_STRING_RE = re.compile(r"(['\\])")
ESCAPE_COL_RE = re.compile(r"([`\\])")
NEGATE_RE = re.compile(r"^(-?)(.*)$")
SAFE_COL_RE = re.compile(r"^-?([a-zA-Z_][a-zA-Z0-9_\.]*)$")
# Alias escaping is different from column-name escaping once we introduce table
# aliases. Using the column escaping function would consider "." safe, which it
# is not for an alias.
SAFE_ALIAS_RE = re.compile(r"^-?[a-zA-Z_][a-zA-Z0-9_]*$")
def escape_string(str: str) -> str:
str = ESCAPE_STRING_RE.sub(r"\\\1", str)
return "'{}'".format(str)
def escape_expression(expr: Optional[str], regex: Pattern[str]) -> Optional[str]:
if not expr:
return expr
elif regex.match(expr):
# Column/Alias is safe to use without wrapping.
return expr
else:
# Column/Alias needs special characters escaped, and to be wrapped with
# backticks. If the column starts with a '-', keep that outside the
# backticks as it is not part of the column name, but used by the query
# generator to signify the sort order if we are sorting by this column.
col = ESCAPE_COL_RE.sub(r"\\\1", expr)
return "{}`{}`".format(*NEGATE_RE.match(col).groups())
def escape_alias(alias: Optional[str]) -> Optional[str]:
return escape_expression(alias, SAFE_ALIAS_RE)
def escape_identifier(col: Optional[str]) -> Optional[str]:
return escape_expression(col, SAFE_COL_RE)
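# A short usage sketch (hypothetical identifiers) illustrating the difference between
# column and alias escaping: a dot is safe inside a column name but not inside an alias.
if __name__ == "__main__":
    print(escape_identifier("tags.key"))    # tags.key
    print(escape_alias("tags.key"))         # `tags.key`
    print(escape_identifier("-timestamp"))  # -timestamp ('-' kept for sort order)
    print(escape_string("it's"))            # 'it\'s'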
| python |
from sys import argv
script, first, second, third = argv
print("The script is called:", script)
print("your first variable is:", first)
print("Your second variable is:", second)
print("Your third variable is:", third) | python |
# Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models representing host profiles and constituent parts."""
from copy import deepcopy
import oslo_versionedobjects.fields as obj_fields
import drydock_provisioner.objects as objects
import drydock_provisioner.objects.base as base
import drydock_provisioner.objects.fields as hd_fields
@base.DrydockObjectRegistry.register
class HostProfile(base.DrydockPersistentObject, base.DrydockObject):
VERSION = '1.0'
fields = {
'name':
obj_fields.StringField(nullable=False),
'site':
obj_fields.StringField(nullable=False),
'source':
hd_fields.ModelSourceField(nullable=False),
'parent_profile':
obj_fields.StringField(nullable=True),
'hardware_profile':
obj_fields.StringField(nullable=True),
'oob_type':
obj_fields.StringField(nullable=True),
'oob_parameters':
obj_fields.DictOfStringsField(nullable=True),
'storage_devices':
obj_fields.ObjectField('HostStorageDeviceList', nullable=True),
'volume_groups':
obj_fields.ObjectField('HostVolumeGroupList', nullable=True),
'interfaces':
obj_fields.ObjectField('HostInterfaceList', nullable=True),
'tags':
obj_fields.ListOfStringsField(nullable=True),
'owner_data':
obj_fields.DictOfStringsField(nullable=True),
'rack':
obj_fields.StringField(nullable=True),
'base_os':
obj_fields.StringField(nullable=True),
'image':
obj_fields.StringField(nullable=True),
'kernel':
obj_fields.StringField(nullable=True),
'kernel_params':
obj_fields.DictOfStringsField(nullable=True),
'primary_network':
obj_fields.StringField(nullable=True),
}
def __init__(self, **kwargs):
super(HostProfile, self).__init__(**kwargs)
def get_rack(self):
return self.rack
# HostProfile is keyed by name
def get_id(self):
return self.get_name()
def get_name(self):
return self.name
def has_tag(self, tag):
if tag in self.tags:
return True
return False
def apply_inheritance(self, site_design):
# No parent to inherit from, just apply design values
# and return
if self.source == hd_fields.ModelSource.Compiled:
return
if self.parent_profile is None:
self.source = hd_fields.ModelSource.Compiled
return
parent = site_design.get_host_profile(self.parent_profile)
if parent is None:
raise NameError("Cannot find parent profile %s for %s" %
(self.design['parent_profile'], self.name))
parent.apply_inheritance(site_design)
# First compute inheritance for simple fields
inheritable_field_list = [
'hardware_profile', 'oob_type', 'storage_layout',
'bootdisk_device', 'bootdisk_root_size', 'bootdisk_boot_size',
'rack', 'base_os', 'image', 'kernel', 'primary_network'
]
# Create applied data from self design values and parent
# applied values
for f in inheritable_field_list:
setattr(
self, f,
objects.Utils.apply_field_inheritance(
getattr(self, f, None), getattr(parent, f, None)))
# Now compute inheritance for complex types
self.oob_parameters = objects.Utils.merge_dicts(
self.oob_parameters, parent.oob_parameters)
self.tags = objects.Utils.merge_lists(self.tags, parent.tags)
self.owner_data = objects.Utils.merge_dicts(self.owner_data,
parent.owner_data)
self.kernel_params = objects.Utils.merge_dicts(self.kernel_params,
parent.kernel_params)
self.storage_devices = HostStorageDeviceList.from_basic_list(
HostStorageDevice.merge_lists(self.storage_devices,
parent.storage_devices))
self.volume_groups = HostVolumeGroupList.from_basic_list(
HostVolumeGroup.merge_lists(self.volume_groups,
parent.volume_groups))
self.interfaces = HostInterfaceList.from_basic_list(
HostInterface.merge_lists(self.interfaces, parent.interfaces))
self.source = hd_fields.ModelSource.Compiled
return
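# For example (hypothetical values): if a child profile sets rack='rack01' but leaves
# base_os unset while its parent sets base_os='xenial', the compiled child ends up with
# rack='rack01' and base_os='xenial'; dict fields (e.g. kernel_params) and list fields
# (e.g. tags) are combined via objects.Utils.merge_dicts / merge_lists as shown above.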
@base.DrydockObjectRegistry.register
class HostProfileList(base.DrydockObjectListBase, base.DrydockObject):
VERSION = '1.0'
fields = {'objects': obj_fields.ListOfObjectsField('HostProfile')}
@base.DrydockObjectRegistry.register
class HostInterface(base.DrydockObject):
VERSION = '1.0'
fields = {
'device_name':
obj_fields.StringField(),
'source':
hd_fields.ModelSourceField(),
'network_link':
obj_fields.StringField(nullable=True),
'hardware_slaves':
obj_fields.ListOfStringsField(nullable=True),
'slave_selectors':
obj_fields.ObjectField('HardwareDeviceSelectorList', nullable=True),
'networks':
obj_fields.ListOfStringsField(nullable=True),
'sriov':
obj_fields.BooleanField(default=False),
# SRIOV virtual functions
'vf_count':
obj_fields.IntegerField(nullable=True),
# SRIOV VF trusted mode
'trustedmode':
obj_fields.BooleanField(nullable=True),
}
def __init__(self, **kwargs):
super(HostInterface, self).__init__(**kwargs)
# HostInterface is keyed by device_name
def get_id(self):
return self.get_name()
def get_name(self):
return self.device_name
def get_hw_slaves(self):
return self.hardware_slaves
def get_slave_selectors(self):
return self.slave_selectors
# Return number of slaves for this interface
def get_slave_count(self):
return len(self.hardware_slaves)
# The device attribute may be hardware alias that translates to a
# physical device address. If the device attribute does not match an
# alias, we assume it directly identifies an OS device name. When the
# apply_hardware_profile method is called on the parent Node of this
# device, the selector will be decided and applied
def add_selector(self, slave_selector):
if self.slave_selectors is None:
self.slave_selectors = objects.HardwareDeviceSelectorList()
self.slave_selectors.append(slave_selector)
"""
Merge two lists of HostInterface models with child_list taking
priority when conflicts arise. If a member of child_list has a device_name
beginning with '!' it indicates that HostInterface should be
removed from the merged list
"""
@staticmethod
def merge_lists(child_list, parent_list):
if child_list is None:
return parent_list
if parent_list is None:
return child_list
effective_list = []
if len(child_list) == 0 and len(parent_list) > 0:
for p in parent_list:
pp = deepcopy(p)
pp.source = hd_fields.ModelSource.Compiled
effective_list.append(pp)
elif len(parent_list) == 0 and len(child_list) > 0:
for i in child_list:
if i.get_name().startswith('!'):
continue
else:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
elif len(parent_list) > 0 and len(child_list) > 0:
parent_interfaces = []
for i in parent_list:
parent_name = i.get_name()
parent_interfaces.append(parent_name)
add = True
for j in child_list:
if j.get_name() == ("!" + parent_name):
add = False
break
elif j.get_name() == parent_name:
m = objects.HostInterface()
m.device_name = j.get_name()
m.network_link = \
objects.Utils.apply_field_inheritance(
getattr(j, 'network_link', None),
getattr(i, 'network_link', None))
m.hardware_slaves = objects.Utils.merge_lists(
getattr(j, 'hardware_slaves', []),
getattr(i, 'hardware_slaves', []))
m.networks = objects.Utils.merge_lists(
getattr(j, 'networks', []),
getattr(i, 'networks', []))
m.source = hd_fields.ModelSource.Compiled
effective_list.append(m)
add = False
break
if add:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
for j in child_list:
if (j.device_name not in parent_interfaces
and not j.get_name().startswith("!")):
jj = deepcopy(j)
jj.source = hd_fields.ModelSource.Compiled
effective_list.append(jj)
return effective_list
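# A small illustration (hypothetical device names) of the merge semantics above:
# given parent interfaces [eth0, eth1] and child interfaces [eth0, !eth1, eth2],
# the compiled list contains eth0 (child fields taking precedence, parent filling
# the gaps), drops eth1 because of the '!' marker, and adds eth2 from the child.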
@base.DrydockObjectRegistry.register
class HostInterfaceList(base.DrydockObjectListBase, base.DrydockObject):
VERSION = '1.0'
fields = {'objects': obj_fields.ListOfObjectsField('HostInterface')}
@base.DrydockObjectRegistry.register
class HostVolumeGroup(base.DrydockObject):
"""Model representing a host volume group."""
VERSION = '1.0'
fields = {
'name': obj_fields.StringField(),
'vg_uuid': obj_fields.StringField(nullable=True),
'logical_volumes': obj_fields.ObjectField(
'HostVolumeList', nullable=True),
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.physical_devices = []
def get_name(self):
return self.name
def get_id(self):
return self.name
def add_pv(self, pv):
self.physical_devices.append(pv)
def is_sys(self):
"""Check if this is the VG for root and/or boot."""
for lv in getattr(self, 'logical_volumes', []):
if lv.is_sys():
return True
return False
@staticmethod
def merge_lists(child_list, parent_list):
if child_list is None:
return parent_list
if parent_list is None:
return child_list
effective_list = []
if len(child_list) == 0 and len(parent_list) > 0:
for p in parent_list:
pp = deepcopy(p)
pp.source = hd_fields.ModelSource.Compiled
effective_list.append(pp)
elif len(parent_list) == 0 and len(child_list) > 0:
for i in child_list:
if i.get_name().startswith('!'):
continue
else:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
elif len(parent_list) > 0 and len(child_list) > 0:
parent_devs = []
for i in parent_list:
parent_name = i.get_name()
parent_devs.append(parent_name)
add = True
for j in child_list:
if j.get_name() == ("!" + parent_name):
add = False
break
elif j.get_name() == parent_name:
p = objects.HostVolumeGroup()
p.name = j.get_name()
inheritable_field_list = ['vg_uuid']
for f in inheritable_field_list:
setattr(
p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(i, f, None)))
p.logical_volumes = HostVolumeList.from_basic_list(
HostVolume.merge_lists(
getattr(j, 'logical_volumes', None),
getattr(i, 'logical_volumes', None)))
add = False
p.source = hd_fields.ModelSource.Compiled
effective_list.append(p)
if add:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
for j in child_list:
if (j.get_name() not in parent_devs
and not j.get_name().startswith("!")):
jj = deepcopy(j)
jj.source = hd_fields.ModelSource.Compiled
effective_list.append(jj)
return effective_list
@base.DrydockObjectRegistry.register
class HostVolumeGroupList(base.DrydockObjectListBase, base.DrydockObject):
VERSION = '1.0'
fields = {'objects': obj_fields.ListOfObjectsField('HostVolumeGroup')}
def add_device_to_vg(self, vg_name, device_name):
for vg in self.objects:
if vg.name == vg_name:
vg.add_pv(device_name)
return
vg = objects.HostVolumeGroup(name=vg_name)
vg.add_pv(device_name)
self.objects.append(vg)
return
@base.DrydockObjectRegistry.register
class HostStorageDevice(base.DrydockObject):
"""Model representing a host physical storage device."""
VERSION = '1.0'
fields = {
'name': obj_fields.StringField(),
'volume_group': obj_fields.StringField(nullable=True),
'labels': obj_fields.DictOfStringsField(nullable=True),
'partitions': obj_fields.ObjectField(
'HostPartitionList', nullable=True),
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.physical_devices = []
def get_name(self):
return self.name
def get_id(self):
return self.name
def add_partition(self, partition):
self.partitions.append(partition)
@staticmethod
def merge_lists(child_list, parent_list):
if child_list is None:
return parent_list
if parent_list is None:
return child_list
effective_list = []
if len(child_list) == 0 and len(parent_list) > 0:
for p in parent_list:
pp = deepcopy(p)
pp.source = hd_fields.ModelSource.Compiled
effective_list.append(pp)
elif len(parent_list) == 0 and len(child_list) > 0:
for i in child_list:
if i.get_name().startswith('!'):
continue
else:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
elif len(parent_list) > 0 and len(child_list) > 0:
parent_devs = []
for i in parent_list:
parent_name = i.get_name()
parent_devs.append(parent_name)
add = True
for j in child_list:
if j.get_name() == ("!" + parent_name):
add = False
break
elif j.get_name() == parent_name:
p = objects.HostStorageDevice()
p.name = j.get_name()
inherit_field_list = ['volume_group']
for f in inherit_field_list:
setattr(
p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(i, f, None)))
p.labels = objects.Utils.merge_dicts(
getattr(j, 'labels', None),
getattr(i, 'labels', None))
p.partitions = HostPartitionList.from_basic_list(
HostPartition.merge_lists(
getattr(j, 'partitions', None),
getattr(i, 'partitions', None)))
add = False
p.source = hd_fields.ModelSource.Compiled
effective_list.append(p)
if add:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
for j in child_list:
if (j.get_name() not in parent_devs
and not j.get_name().startswith("!")):
jj = deepcopy(j)
jj.source = hd_fields.ModelSource.Compiled
effective_list.append(jj)
return effective_list
@base.DrydockObjectRegistry.register
class HostStorageDeviceList(base.DrydockObjectListBase, base.DrydockObject):
"""Model representing a list of host physical storage devices."""
VERSION = '1.0'
fields = {'objects': obj_fields.ListOfObjectsField('HostStorageDevice')}
@base.DrydockObjectRegistry.register
class HostPartition(base.DrydockObject):
"""Model representing a host GPT partition."""
VERSION = '1.0'
fields = {
'name':
obj_fields.StringField(),
'source':
hd_fields.ModelSourceField(),
'bootable':
obj_fields.BooleanField(default=False),
'volume_group':
obj_fields.StringField(nullable=True),
'part_uuid':
obj_fields.UUIDField(nullable=True),
'size':
obj_fields.StringField(nullable=True),
'mountpoint':
obj_fields.StringField(nullable=True),
'fstype':
obj_fields.StringField(nullable=True, default='ext4'),
'mount_options':
obj_fields.StringField(nullable=True, default='defaults'),
'fs_uuid':
obj_fields.UUIDField(nullable=True),
'fs_label':
obj_fields.StringField(nullable=True),
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
def get_device(self):
return self.device
# HostPartition keyed by name
def get_id(self):
return self.get_name()
def get_name(self):
return self.name
def is_sys(self):
"""Check if this is the partition for root and/or boot."""
if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:
return True
return False
"""
Merge two lists of HostPartition models with child_list taking
priority when conflicts arise. If a member of child_list has a name
beginning with '!' it indicates that HostPartition should be
removed from the merged list
"""
@staticmethod
def merge_lists(child_list, parent_list):
if child_list is None:
return parent_list
if parent_list is None:
return child_list
effective_list = []
if len(child_list) == 0 and len(parent_list) > 0:
for p in parent_list:
pp = deepcopy(p)
pp.source = hd_fields.ModelSource.Compiled
effective_list.append(pp)
elif len(parent_list) == 0 and len(child_list) > 0:
for i in child_list:
if i.get_name().startswith('!'):
continue
else:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
elif len(parent_list) > 0 and len(child_list) > 0:
inherit_field_list = [
"device",
"part_uuid",
"size",
"mountpoint",
"fstype",
"mount_options",
"fs_uuid",
"fs_label",
"volume_group",
"bootable",
]
parent_partitions = []
for i in parent_list:
parent_name = i.get_name()
parent_partitions.append(parent_name)
add = True
for j in child_list:
if j.get_name() == ("!" + parent_name):
add = False
break
elif j.get_name() == parent_name:
p = objects.HostPartition()
p.name = j.get_name()
for f in inherit_field_list:
setattr(
p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(i, f, None)))
add = False
p.source = hd_fields.ModelSource.Compiled
effective_list.append(p)
if add:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
for j in child_list:
if (j.get_name() not in parent_partitions
and not j.get_name().startswith("!")):
jj = deepcopy(j)
jj.source = hd_fields.ModelSource.Compiled
effective_list.append(jj)
return effective_list
@base.DrydockObjectRegistry.register
class HostPartitionList(base.DrydockObjectListBase, base.DrydockObject):
VERSION = '1.0'
fields = {'objects': obj_fields.ListOfObjectsField('HostPartition')}
@base.DrydockObjectRegistry.register
class HostVolume(base.DrydockObject):
"""Model representing a host logical volume."""
VERSION = '1.0'
fields = {
'name':
obj_fields.StringField(),
'source':
hd_fields.ModelSourceField(),
'lv_uuid':
obj_fields.UUIDField(nullable=True),
'size':
obj_fields.StringField(nullable=True),
'mountpoint':
obj_fields.StringField(nullable=True),
'fstype':
obj_fields.StringField(nullable=True, default='ext4'),
'mount_options':
obj_fields.StringField(nullable=True, default='defaults'),
'fs_uuid':
obj_fields.UUIDField(nullable=True),
'fs_label':
obj_fields.StringField(nullable=True),
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
# HostVolume keyed by name
def get_id(self):
return self.get_name()
def get_name(self):
return self.name
def is_sys(self):
"""Check if this is the LV for root and/or boot."""
if self.mountpoint is not None and self.mountpoint in ['/', '/boot']:
return True
return False
"""
Merge two lists of HostVolume models with child_list taking
priority when conflicts arise. If a member of child_list has a name
beginning with '!' it indicates that HostVolume should be
removed from the merged list
"""
@staticmethod
def merge_lists(child_list, parent_list):
if child_list is None:
return parent_list
if parent_list is None:
return child_list
effective_list = []
if len(child_list) == 0 and len(parent_list) > 0:
for p in parent_list:
pp = deepcopy(p)
pp.source = hd_fields.ModelSource.Compiled
effective_list.append(pp)
elif len(parent_list) == 0 and len(child_list) > 0:
for i in child_list:
if i.get_name().startswith('!'):
continue
else:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
elif len(parent_list) > 0 and len(child_list) > 0:
inherit_field_list = [
"lv_uuid",
"size",
"mountpoint",
"fstype",
"mount_options",
"fs_uuid",
"fs_label",
]
parent_volumes = []
for i in parent_list:
parent_name = i.get_name()
parent_volumes.append(parent_name)
add = True
for j in child_list:
if j.get_name() == ("!" + parent_name):
add = False
break
elif j.get_name() == parent_name:
p = objects.HostVolume()
p.name = j.get_name()
for f in inherit_field_list:
setattr(
p, f,
objects.Utils.apply_field_inheritance(
getattr(j, f, None), getattr(i, f, None)))
add = False
p.source = hd_fields.ModelSource.Compiled
effective_list.append(p)
if add:
ii = deepcopy(i)
ii.source = hd_fields.ModelSource.Compiled
effective_list.append(ii)
for j in child_list:
if (j.get_name() not in parent_volumes
and not j.get_name().startswith("!")):
jj = deepcopy(j)
jj.source = hd_fields.ModelSource.Compiled
effective_list.append(jj)
return effective_list
@base.DrydockObjectRegistry.register
class HostVolumeList(base.DrydockObjectListBase, base.DrydockObject):
VERSION = '1.0'
fields = {'objects': obj_fields.ListOfObjectsField('HostVolume')}
| python |
#!/usr/bin/env python3
#
# example_filtering.py: demonstrates how to use `topf` with automated
# peak filtering.
import topf
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
if __name__ == '__main__':
data = np.genfromtxt('example.txt')
# This will automatically instruct the transformer to filter peaks
# until only the 3 highest ones are kept.
transformer = topf.PersistenceTransformer(
n_peaks=3
)
peaks = transformer.fit_transform(data)
# First, let's plot the original data. We can see that there is
# quite a number of relatively small peaks.
plt.subplot(3, 1, 1)
sns.lineplot(x=data[:, 0], y=data[:, 1])
# Second, let's show the transformed data. Here, every non-zero
# point depicts the *prominence* of a peak.
plt.subplot(3, 1, 2)
sns.lineplot(x=peaks[:, 0], y=peaks[:, 1])
plt.subplot(3, 1, 3)
sns.lineplot(x=data[:, 0], y=data[:, 1], alpha=0.5)
sns.scatterplot(
x=data[peaks[:, 1] > 0][:, 0],
y=data[peaks[:, 1] > 0][:, 1],
)
plt.tight_layout()
plt.show()
| python |
#
# This script will allow you to authenticate using OpenID Connect with KeyCloak
# To get more background information on how to use this script, go to
# https://augment1security.com/authentication/how-to-authenticate-with-openid-connect-angular2-spa-zap-part-1/
#
import json
import time
import datetime
import random
import string
import urllib
import org.parosproxy.paros.network.HttpRequestHeader as HttpRequestHeader
import org.parosproxy.paros.network.HttpHeader as HttpHeader
import org.zaproxy.zap.extension.script.ScriptVars as GlobalVariables
import org.parosproxy.paros.network.HttpMessage as HttpMessage
import org.parosproxy.paros.network.HtmlParameter as HtmlParameter
import org.parosproxy.paros.network.HttpSender as HttpSender
import java.net.HttpCookie as HttpCookie
from org.apache.commons.httpclient import URI
from synchronize import make_synchronized
import org.openqa.selenium.By as By
import java.util.concurrent.TimeUnit as TimeUnit
import org.openqa.selenium.firefox.FirefoxDriver as FirefoxDriver;
import org.openqa.selenium.firefox.FirefoxOptions as FirefoxOptions;
import org.openqa.selenium.support.ui.WebDriverWait as WebDriverWait;
import org.openqa.selenium.support.ui.ExpectedConditions as ExpectedConditions;
import org.parosproxy.paros.model.Model as Model
import org.apache.http.client.utils.URLEncodedUtils as URLEncodedUtils
import java.nio.charset.Charset as Charset;
import java.net.URLEncoder as URLEncoder
import java.nio.charset.StandardCharsets as StandardCharsets
APP_ANGULAR_URL = 'http://localhost:8080/app-angular2';
ENCODED_APP_ANGULAR_URL=URLEncoder.encode(APP_ANGULAR_URL, StandardCharsets.UTF_8.toString());
KEYCLOAK_BASE_URL = 'http://localhost:8180/auth';
KEYCLOAK_REALM="master";
USERNAME = "myuser";
PASSWORD = "mypassword";
#constants of cookie names
AUTH_SESSION_ID_LEGACY_COOKIE_NAME="AUTH_SESSION_ID_LEGACY";
KC_RESTART_COOKIE_NAME="KC_RESTART";
KEYCLOAK_IDENTITY_LEGACY_COOKIE_NAME="KEYCLOAK_IDENTITY_LEGACY";
KEYCLOAK_SESSION_LEGACY_COOKIE_NAME="KEYCLOAK_SESSION_LEGACY";
ACCESS_TOKEN_KEY_NAME="ACCESS_TOKEN";
ACCESS_TOKEN_CREATION_TIMESTAMP_KEY_NAME="ACCESS_TOKEN_CREATE_TIMESTAMP";#needs to have a length < 30 for a key in GlobalVariables
ACCESS_TOKEN_EXPIRY_IN_SECONDS_KEY_NAME="ACCESS_TOKEN_EXPIRY_IN_SEC";#needs to have a length < 30 for a key in GlobalVariables
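# Rough flow of this script (illustrative summary, not executable):
#   sendingRequest() checks GlobalVariables for a cached access token; if the
#   token is missing or expired it calls refreshAccessToken(), which is
#   synchronized and drives a headless Firefox through the Keycloak login form
#   via doLogin(), then exchanges the returned authorization code for a token.
#   The token is injected as a Bearer Authorization header on each request.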
def sendingRequest(msg, initiator, helper):
print('sendingRequest called for url=' + msg.getRequestHeader().getURI().toString())
accessToken = GlobalVariables.getGlobalVar(ACCESS_TOKEN_KEY_NAME);
#checking if we already have an access token
if accessToken is not None:
print "we have access token, checking if token is valid";
if tokenHasNotExpired(accessToken) == True:
print "accessToken in valid";
setAccessTokenInHttpMessage(accessToken, msg);
return;
print "token is invalid or there is no token, need to relogin"
accessToken = refreshAccessToken(helper);
setAccessTokenInHttpMessage(accessToken, msg);
return;
# clear the access token variables from GlobalVariables
def clearAccessTokenFromGlobalVar():
GlobalVariables.setGlobalVar(ACCESS_TOKEN_KEY_NAME, None);
GlobalVariables.setGlobalCustomVar(ACCESS_TOKEN_CREATION_TIMESTAMP_KEY_NAME, None);
GlobalVariables.setGlobalCustomVar(ACCESS_TOKEN_EXPIRY_IN_SECONDS_KEY_NAME, None);
# as all 3 variables need to be set at the same time, better to have a function to do that
def setAccessTokenInGlobalVar(accessToken, expiryInSeconds):
GlobalVariables.setGlobalVar(ACCESS_TOKEN_KEY_NAME, str(accessToken));
GlobalVariables.setGlobalCustomVar(ACCESS_TOKEN_CREATION_TIMESTAMP_KEY_NAME, time.time());
GlobalVariables.setGlobalCustomVar(ACCESS_TOKEN_EXPIRY_IN_SECONDS_KEY_NAME, expiryInSeconds);
def generateRandomAlphanumericString(length):
seq = string.letters + string.digits
return ''.join(random.choice(seq) for _ in xrange(length))
# we have to make this function synchronized as we do not want to have duplicate concurrent attempts to login
@make_synchronized
def refreshAccessToken(helper):
print "refreshing access token and checking if it has already been refreshed"
accessToken = GlobalVariables.getGlobalVar(ACCESS_TOKEN_KEY_NAME);
if accessToken is not None and tokenHasNotExpired(accessToken) == True:
print "access token already refreshed, no need to relogin"
return accessToken;
clearAccessTokenFromGlobalVar();
accessTokenDict = doLogin(helper);
setAccessTokenInGlobalVar(accessTokenDict["accessToken"], accessTokenDict["accessTokenExpiryInSeconds"]);
print "access token refreshed"
return accessTokenDict["accessToken"];
def tokenHasNotExpired(accessToken):
accessTokenCreationTimestamp = GlobalVariables.getGlobalCustomVar(ACCESS_TOKEN_CREATION_TIMESTAMP_KEY_NAME);
#Return the time as a floating point number expressed in seconds since the epoch, in UTC
currentTime = time.time();
difference = currentTime - accessTokenCreationTimestamp;
print "difference in time in seconds:" + str(difference)
accessTokenExpiryInSeconds = GlobalVariables.getGlobalCustomVar(ACCESS_TOKEN_EXPIRY_IN_SECONDS_KEY_NAME);
if difference > accessTokenExpiryInSeconds:
print "token has expired"
return False;
print "token has NOT expired"
return True;
def doLogin(helper):
firefoxOptions = FirefoxOptions()
firefoxOptions.addArguments("--window-size=1920,1080");
firefoxOptions.addArguments("--disable-gpu");
firefoxOptions.addArguments("--disable-extensions");
firefoxOptions.addArguments("--proxy-server='direct://'");
firefoxOptions.addArguments("--proxy-bypass-list=*");
firefoxOptions.addArguments("--start-maximized");
firefoxOptions.addArguments("--headless");
webDriver = FirefoxDriver(firefoxOptions);
# generate state and nonce
state = generateRandomAlphanumericString(20);
nonce = generateRandomAlphanumericString(20);
print "state:"+state;
print "nonce:"+nonce;
#------------getting login page from keycloak------------
loginUrl = KEYCLOAK_BASE_URL+"/realms/"+KEYCLOAK_REALM+"/protocol/openid-connect/auth?client_id=app-angular2&redirect_uri="+ENCODED_APP_ANGULAR_URL+"%2F&state="+state+"&nonce="+nonce+"&response_mode=fragment&response_type=code&scope=openid";
print("loginUrl:"+loginUrl);
webDriver.get(loginUrl);
# we wait until the username element is visible
timeoutInSeconds = 10;
wait = WebDriverWait(webDriver, timeoutInSeconds);
wait.until(ExpectedConditions.visibilityOfElementLocated(By.name("username")));
loginEle = webDriver.findElement(By.name("username"));
formEle = webDriver.findElement(By.id("kc-form-login"));
# gathering all the information to make the next http request
formActionUrl = formEle.getAttribute("action");
formBody = "username="+USERNAME+"&password="+PASSWORD+"&credentialId="
authSessionIdLegacyCookieValue = webDriver.manage().getCookieNamed(AUTH_SESSION_ID_LEGACY_COOKIE_NAME).getValue();
print "authSessionIdLegacyCookieValue: " + authSessionIdLegacyCookieValue;
kcRestartCookieValue = webDriver.manage().getCookieNamed(KC_RESTART_COOKIE_NAME).getValue();
print "kcRestartCookieValue: " + kcRestartCookieValue;
authSessionIdLegacyCookie = HttpCookie(AUTH_SESSION_ID_LEGACY_COOKIE_NAME, authSessionIdLegacyCookieValue);
kcRestartCookie = HttpCookie(KC_RESTART_COOKIE_NAME, kcRestartCookieValue);
cookies = [authSessionIdLegacyCookie, kcRestartCookie];
#-----------------------------------------------------
#------------submitting login credentials to keycloak------------
returnedMsg = callPost(formActionUrl, formBody, {}, cookies, "application/x-www-form-urlencoded", helper);
keyCloakIdentityLegacyCookieValue = returnedMsg.getResponseHeader().getHeader(KEYCLOAK_IDENTITY_LEGACY_COOKIE_NAME)
keyCloakSessionLegacyCookieValue = returnedMsg.getResponseHeader().getHeader(KEYCLOAK_SESSION_LEGACY_COOKIE_NAME);
# we will get a redirect response whose url in the 'location' header we will need to call manually below to get the token
# we cannot use selenium at this stage as it will do auto redirect and we will miss the information returned by the redirect response
location = returnedMsg.getResponseHeader().getHeader("Location");
print "location: " + location;
codeQueryParamValue = getUrlQueryParamValue(location, "code");
print("code:" + codeQueryParamValue);
tokenUrl = KEYCLOAK_BASE_URL+"/realms/"+KEYCLOAK_REALM+"/protocol/openid-connect/token"
formBody = "code="+codeQueryParamValue+"&grant_type=authorization_code&client_id=app-angular2&redirect_uri="+ENCODED_APP_ANGULAR_URL+"%2F";
keyCloakIdentityLegacyCookie = HttpCookie(KEYCLOAK_IDENTITY_LEGACY_COOKIE_NAME, keyCloakIdentityLegacyCookieValue);
keyCloakSessionLegacyCookie = HttpCookie(KEYCLOAK_SESSION_LEGACY_COOKIE_NAME, keyCloakSessionLegacyCookieValue);
cookies = [authSessionIdLegacyCookie, keyCloakIdentityLegacyCookie, keyCloakSessionLegacyCookie];
#-----------------------------------------------------
#-----------calling the url in the 'location' header to get the access token-----------
returnedMsg = callPost(tokenUrl, formBody, {}, cookies, "application/x-www-form-urlencoded", helper);
authenticatedJsonResponseObject = json.loads(str(returnedMsg.getResponseBody()));
accessToken = authenticatedJsonResponseObject.get("access_token");
accessTokenExpiryInSeconds = authenticatedJsonResponseObject.get("expires_in");
print "accessToken:"+str(accessToken);
print "accessTokenExpiryInSeconds:"+str(accessTokenExpiryInSeconds);
return dict({"accessToken": accessToken, "accessTokenExpiryInSeconds": accessTokenExpiryInSeconds})
# function to set the token in Authorization header in request
def setAccessTokenInHttpMessage(accessToken, msg):
print "setting token in request"
msg.getRequestHeader().setHeader("Authorization", "Bearer " + accessToken);
# generic function to make a POST request
def callPost(requestUrl, requestBody, headers, cookies, contentType, helper):
print "-----start of callPost ("+requestUrl+")-------";
requestUri = URI(requestUrl, False);
msg = HttpMessage();
requestHeader = HttpRequestHeader(HttpRequestHeader.POST, requestUri, HttpHeader.HTTP10);
requestHeader.setHeader("content-type",contentType);
for name, value in headers.items():
requestHeader.setHeader(name, value);
requestHeader.setCookies(cookies)
msg.setRequestHeader(requestHeader);
msg.setRequestBody(requestBody);
print("Sending POST request header: " + str(requestHeader));
print("Sending POST request body: " + str(requestBody));
helper.getHttpSender().sendAndReceive(msg);
print("\nReceived response status code for authentication request: " + str(msg.getResponseHeader()));
print("\nResponseBody: " + str(msg.getResponseBody()));
print("------------------------------------");
return msg;
# generic function to get the value of a query parameter
def getUrlQueryParamValue(url, paramNameToLookFor):
urlParams = URLEncodedUtils.parse(url, Charset.forName("UTF-8"));
for param in urlParams:
if param.getName() == paramNameToLookFor:
return param.getValue();
return None;
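# e.g. (illustrative): getUrlQueryParamValue("code=abc&state=xyz", "code") returns "abc"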
# generic function to make a GET request
def callGet(requestUrl, headers, helper):
requestUri = URI(requestUrl, False);
print "-----start of callGet-------";
print "requestUrl:"+requestUrl;
msg = HttpMessage();
requestHeader = HttpRequestHeader(HttpRequestHeader.GET, requestUri, HttpHeader.HTTP10);
msg.setRequestHeader(requestHeader);
for name, value in headers.items():
requestHeader.setHeader(name, value);
print "Sending GET request: " + str(requestHeader);
helper.getHttpSender().sendAndReceive(msg)
print "Received response status code for authentication request: " + str(msg.getResponseHeader());
print("\nResponseBody: " + str(msg.getResponseBody()));
print "------------------------------------";
return msg;
# function called for every incoming server response from server (part of httpsender)
def responseReceived(msg, initiator, helper):
pass | python |
import pytest
from tournament_game import get_winner, Character
@pytest.mark.parametrize("name", ["текст", "42", "", 12, -345, 52.08, None, True])
def test_get_winner_solo(name):
character_sample = Character(name)
character_list = [character_sample]
assert str(name) == get_winner(character_list)
@pytest.mark.parametrize("name_list", [["текст", 42], ["", 1252.08]])
def test_get_winner_set(name_list):
character_list = [Character(name) for name in name_list]
assert name_list[0] == get_winner(character_list)
@pytest.mark.parametrize("name", ["текст", "42", "", 12, -345, 52.08, None, True])
def test_get_winner_type(name):
character_sample = Character(name)
character_list = [character_sample]
name = get_winner(character_list)
assert isinstance(name, str)
| python |
import numpy as np
from UncertainSCI.ttr import predict_correct_discrete, stieltjes_discrete, \
aPC, hankel_deter, mod_cheb, lanczos_stable
from UncertainSCI.utils.compute_moment import compute_moment_discrete
from UncertainSCI.families import JacobiPolynomials
import time
from tqdm import tqdm
"""
We use six methods
1. pc (Predictor-corrector method)
2. sp (Stieltjes procedure)
3. apc (Arbitrary polynomial chaos expansion method)
4. hd (Hankel determinants)
5. mc (Modified Chebyshev algorithm)
6. lz (Stabilized Lanczos algorithm)
to compute the recurrence coefficients for
the discrete Chebyshev transformed to [0,1).
"""
def discrete_chebyshev(N):
"""
Return the first N exact recurrence coefficients
"""
ab = np.zeros([N, 2])
ab[1:, 0] = (N-1) / (2*N)
ab[0, 1] = 1.
ab[1:, 1] = np.sqrt(1/4 * (1 - (np.arange(1, N)/N)**2)
/ (4 - (1/np.arange(1, N)**2)))
return ab
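# Quick sanity check for the helper above (illustrative; relies only on the
# construction in discrete_chebyshev):
#
#   ab = discrete_chebyshev(5)
#   assert ab.shape == (5, 2) and ab[0, 1] == 1.0 and np.all(ab[1:, 0] == 0.4)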
# N_array = [37, 38, 39, 40]
# N_quad = 40
# N_array = [56, 60, 64, 68]
# N_quad = 80
# N_array = [82, 89, 96, 103]
# N_quad = 160
N_array = [82, 89, 96, 103]
N_quad = 320
x = np.arange(N_quad) / N_quad
w = (1/N_quad) * np.ones(len(x))
t_pc = np.zeros(len(N_array))
t_sp = np.zeros(len(N_array))
t_apc = np.zeros(len(N_array))
t_hd = np.zeros(len(N_array))
t_mc = np.zeros(len(N_array))
t_lz = np.zeros(len(N_array))
e_pc = np.zeros(len(N_array))
e_sp = np.zeros(len(N_array))
e_apc = np.zeros(len(N_array))
e_hd = np.zeros(len(N_array))
e_mc = np.zeros(len(N_array))
e_lz = np.zeros(len(N_array))
iter_n = np.arange(100)
for k in tqdm(iter_n):
for ind, N in enumerate(N_array):
ab = discrete_chebyshev(N_quad)[:N, :]
m = compute_moment_discrete(x, w, N)
# Predict-correct
start = time.time()
ab_pc = predict_correct_discrete(x, w, N)
end = time.time()
t_pc[ind] += (end - start) / len(iter_n)
e_pc[ind] = np.linalg.norm(ab - ab_pc, None)
# Stieltjes
start = time.time()
ab_sp = stieltjes_discrete(x, w, N)
end = time.time()
t_sp[ind] += (end - start) / len(iter_n)
e_sp[ind] = np.linalg.norm(ab - ab_sp, None)
# Arbitrary Polynomial Chaos Expansion
start = time.time()
ab_apc = aPC(m, N)
end = time.time()
t_apc[ind] += (end - start) / len(iter_n)
e_apc[ind] = np.linalg.norm(ab - ab_apc, None)
# Hankel Determinant
start = time.time()
ab_hd = hankel_deter(N, m)
end = time.time()
t_hd[ind] += (end - start) / len(iter_n)
e_hd[ind] = np.linalg.norm(ab - ab_hd, None)
# Modified Chebyshev
J = JacobiPolynomials(probability_measure=False)
def peval(x, n):
return J.eval(x, n)
def integrand(x):
return peval(x, i).flatten()
mod_m = np.zeros(2*N - 1)
for i in range(2*N - 1):
mod_m[i] = np.sum(integrand(x) * w)
start = time.time()
ab_mc = mod_cheb(N, mod_m, J)
end = time.time()
t_mc[ind] += (end - start) / len(iter_n)
e_mc[ind] = np.linalg.norm(ab - ab_mc, None)
# Stabilized Lanczos
start = time.time()
ab_lz = lanczos_stable(x, w, N)
end = time.time()
t_lz[ind] += (end - start) / len(iter_n)
e_lz[ind] += np.linalg.norm(ab - ab_lz, None)
"""
N_array = [37, 38, 39, 40] with tol = 1e-12, N_quad = 40
--- Frobenius norm error ---
e_pc
array([5.83032276e-16, 7.88106850e-16, 1.31264360e-14, 6.81247807e-13])
e_sp
array([6.79107529e-15, 7.08424027e-15, 1.52208335e-14, 7.23359604e-13])
e_apc
array([nan, nan, nan, nan])
e_hd
array([nan, nan, nan, nan])
e_mc
array([nan, nan, nan, nan])
e_lz
array([8.26282134e-16, 8.75621328e-16, 8.78366402e-16, 8.80556299e-16])
--- elapsed time ---
t_pc
array([0.01866756, 0.01940269, 0.02026843, 0.02117965])
t_sp
array([0.01808646, 0.01872314, 0.01958155, 0.02055171])
t_apc
array([0.00344686, 0.00372854, 0.00387698, 0.00402875])
t_hd
array([0.00818913, 0.00850275, 0.00893114, 0.00921517])
t_mc
array([0.00544071, 0.00575021, 0.00612659, 0.00639981])
t_lz
array([0.00161063, 0.00168495, 0.00170782, 0.00174096])
N_array = [56, 60, 64, 68] with tol = 1e-12, N_quad = 80
e_pc
array([1.19606888e-15, 1.92721740e-13, 5.03366337e-10, 3.84167092e-06])
e_sp
array([3.81010361e-15, 7.60074466e-14, 2.02231318e-10, 1.57318802e-06])
e_apc
array([nan, nan, nan, nan, nan])
e_hd
array([nan, nan, nan, nan, nan])
e_mc
array([nan, nan, nan, nan, nan])
e_lz
array([1.15977130e-15, 1.21238184e-15, 1.36341761e-15, 1.49468349e-15])
t_pc
array([0.04124258, 0.0486698 , 0.05391277, 0.05956687])
t_sp
array([0.04043174, 0.04731631, 0.05250208, 0.05827137])
t_apc
array([0.00683582, 0.00755854, 0.00840556, 0.00946519])
t_hd
array([0.01683453, 0.01991775, 0.02230049, 0.02437497])
t_mc
array([0.01336397, 0.01488232, 0.01709907, 0.01894911])
t_lz
array([0.0028906 , 0.00300488, 0.00327993, 0.00346822])
N_array = [82, 89, 96, 103] with tol = 1e-12, N_quad = 160
e_pc
array([1.35320885e-15, 1.52422750e-12, 1.12490901e-08, 2.16713303e-04])
e_sp
array([6.44431630e-15, 3.66258846e-12, 2.71222200e-08, 5.23466153e-04])
e_apc
array([nan, nan, nan, nan])
e_hd
array([nan, nan, nan, nan])
e_mc
array([nan, nan, nan, nan])
e_lz
array([1.32966300e-15, 1.41362828e-15, 1.55629351e-15, 1.68556574e-15])
t_pc
array([0.10012377, 0.11433365, 0.13067236, 0.15082069])
t_sp
array([0.09506917, 0.11128752, 0.12852232, 0.1470592 ])
t_apc
array([0.01341118, 0.01552454, 0.01833375, 0.02090821])
t_hd
array([0.03509946, 0.04140449, 0.04904011, 0.05577155])
t_mc
array([0.02791258, 0.03276293, 0.03802878, 0.04396228])
t_lz
array([0.00592635, 0.00665268, 0.00714997, 0.00809739])
N_array = [82, 89, 96, 103] with tol = 1e-12, N_quad = 320
e_pc
array([1.19348975e-15, 1.33976368e-15, 1.57963123e-15, 1.73577787e-15])
e_sp
array([2.92199121e-15, 3.03780940e-15, 3.42385023e-15, 3.63905129e-15])
e_apc
array([nan, nan, nan, nan])
e_hd
array([nan, nan, nan, nan])
e_mc
array([nan, nan, nan, nan])
e_lz
array([1.18636824e-15, 1.35263944e-15, 1.65349634e-15, 1.79683860e-15])
t_pc
array([0.12287572, 0.13825425, 0.16237012, 0.18260074])
t_sp
array([0.11560148, 0.13418031, 0.15452703, 0.17811085])
t_apc
array([0.01396315, 0.01658385, 0.01925649, 0.02249643])
t_hd
array([0.03557385, 0.04164304, 0.04904677, 0.05764251])
t_mc
array([0.02806302, 0.03326251, 0.03876049, 0.04441474])
t_lz
array([0.01207455, 0.01389778, 0.0154752 , 0.01657487])
"""
| python |
"""The Test file for CLI Formatters."""
import re
from sqlfluff.rules.base import RuleGhost
from sqlfluff.parser import RawSegment
from sqlfluff.parser.markers import FilePositionMarker
from sqlfluff.errors import SQLLintError
from sqlfluff.cli.formatters import format_filename, format_violation, format_path_violations
def escape_ansi(line):
"""Remove ANSI color codes for testing."""
ansi_escape = re.compile(u'\u001b\\[[0-9]+(;[0-9]+)?m')
return ansi_escape.sub('', line)
def test__cli__formatters__filename_nocol():
"""Test formatting filenames."""
res = format_filename('blahblah', success=True, verbose=0)
assert escape_ansi(res) == "== [blahblah] PASS"
def test__cli__formatters__filename_col():
"""Explicity test color codes."""
res = format_filename('blah', success=False, verbose=0)
assert res == u"== [\u001b[30;1mblah\u001b[0m] \u001b[31mFAIL\u001b[0m"
def test__cli__formatters__violation():
"""Test formatting violations.
NB Position is 1 + start_pos.
"""
s = RawSegment('foobarbar', FilePositionMarker(0, 20, 11, 100))
r = RuleGhost('A', 'DESC')
v = SQLLintError(segment=s, rule=r)
f = format_violation(v)
assert escape_ansi(f) == "L: 20 | P: 11 | A | DESC"
def test__cli__formatters__violations():
"""Test formatting and ordering of violations."""
v = {
'foo': [
SQLLintError(
segment=RawSegment('blah', FilePositionMarker(0, 25, 2, 26)),
rule=RuleGhost('A', 'DESCR')),
# Here we check the optional description override
SQLLintError(
segment=RawSegment('blah', FilePositionMarker(0, 21, 3, 22)),
rule=RuleGhost('B', 'DESCR'), description='foo')],
'bar': [
SQLLintError(
segment=RawSegment('blah', FilePositionMarker(0, 2, 11, 3)),
rule=RuleGhost('C', 'DESCR'))]
}
f = format_path_violations(v)
k = sorted(['foo', 'bar'])
chk = {
'foo': ["L: 21 | P: 3 | B | foo", "L: 25 | P: 2 | A | DESCR"],
'bar': ["L: 2 | P: 11 | C | DESCR"]
}
chk2 = []
for elem in k:
chk2 = chk2 + [format_filename(elem)] + chk[elem]
chk2 = '\n'.join(chk2)
assert escape_ansi(f) == escape_ansi(chk2)
| python |
from twitter import Twitter, OAuth
class TwitterAPI:
ACCESS_TOKEN = "223212203-5n4o9eTcRmKaxoPxtAelhufNzkdOTCSjn1dpku6U"
ACCESS_SECRET = "kmqNtVCtlyxJ7tS9U0C4HjfjAtE3Djqb3CDrIhFHEoJQt"
CONSUMER_KEY = "h5csBXeGpJmLma9IgnoV3JWfn"
CONSUMER_SECRET = "2OVIV2H7kG1TLaNI7FFZ0Gn6odOda8UuojyVkh8emgRnlxB1wW"
oauth = OAuth(ACCESS_TOKEN, ACCESS_SECRET, CONSUMER_KEY, CONSUMER_SECRET)
def __init__(self):
# Initiate the connection to Twitter REST API
self.twitter = Twitter(auth=self.oauth)
def get_active_tweets(self, text, lat, lon, radius):
json = self.twitter.search.tweets(q=text, result_type='recent',
geocode="{},{},{}km".format(lat, lon, radius),
count=100)
return json
def get_passive_tweets(self, lat, lon, radius):
json = self.twitter.search.tweets(q='*',
geocode="{},{},{}km".format(lat, lon, radius),
count=100)
return json
def extract_image_tweets(self, tweets):
return [i for i in tweets if ('media' in i['entities'] and i['entities']['media'][0]['type'] == 'photo')]
| python |
"""
The customized image URL processing engine.
Author: Qing Wang
"""
import re
LC_LIST = ["a", "b", "c", "d", "e", "f", "g"]
CAP_LIST = ["A", "B", "C", "D", "E", "F", "G"]
NUM_LIST = ["0", "1", "2", "3", "4", "5", "6"]
class URLProcessor(object):
"""
Class for URLProcessor.
"""
def __init__(self, data_url, page_num):
"""
Constructor method.
"""
super(URLProcessor, self).__init__()
self.pnum = page_num
self.data_url = data_url
self.template = self._generate_template(self.data_url)
def _generate_template(self, url):
"""
Generate the template string from url.
"""
fn = url.split("/")[-1]
str_to_replaced = re.findall(r"\d+", fn)
self.num_vars = len(str_to_replaced)
self.n_digits = [len(s) for s in str_to_replaced]
rep = {}
for index, item in enumerate(str_to_replaced):
rep[item] = "{var%i}" % index
# use these three lines to do the replacement
rep = dict((re.escape(k), v) for k, v in rep.items())
pattern = re.compile("|".join(rep.keys()))
text = pattern.sub(lambda m: rep[re.escape(m.group(0))], url)
return text
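    # For example (illustrative), a data_url ending in ".../img012.jpg" yields
    # the template ".../img{var0}.jpg" with num_vars == 1 and n_digits == [3].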
def normal_url_list(self):
"""
Generate normal url list for iteration.
"""
for i in range(0, self.pnum + 1):
rep_dict = {
"var%i" % t: str(i).zfill(self.n_digits[t])
for t in range(self.num_vars)
}
yield self.template.format(**rep_dict)
def special_url_list(self, sep=""):
"""
Generate special urls for iteration.
"""
sp_c_list = LC_LIST + CAP_LIST + NUM_LIST
for c in sp_c_list:
if sep:
rep_dict = {
"var%i" % t: "0".zfill(self.n_digits[t])
if t < self.num_vars - 1
else "0".zfill(self.n_digits[t]) + sep + c
for t in range(self.num_vars)
}
else:
rep_dict = {
"var%i" % t: "0".zfill(self.n_digits[t])
if t < self.num_vars - 1
else "0".zfill(self.n_digits[t]) + c
for t in range(self.num_vars)
}
yield self.template.format(**rep_dict)
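    # Continuing the example above (illustrative): with page_num=2,
    # normal_url_list() yields ".../img000.jpg", ".../img001.jpg", ".../img002.jpg",
    # while special_url_list() yields ".../img000a.jpg", ".../img000b.jpg", ... for
    # each character in LC_LIST + CAP_LIST + NUM_LIST (or "000-a" etc. when sep="-").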
| python |
from collections import OrderedDict, defaultdict
from comet_ml import Experiment
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import os
from utils import *
from parse import get_parser
import pickle
from sklearn.manifold import TSNE
from math import sin, cos, sqrt, atan2, radians
class Model:
def __init__(self, batchsize, nuser, nitem, duser, ditem, dcateg=0, nlayer=0, nhidden=50):
demb = np.sqrt(duser + ditem + max(len(userattrs), len(itemattrs)) * dcateg)
userbatch, itembatch = [], []
with tf.variable_scope('useritem'):
self.userembs = tf.get_variable(name='userembs', shape=(nuser, duser), dtype=tf.float32, trainable=True,
initializer=tf.random_normal_initializer(stddev=1 / np.sqrt(demb)))
self.itemembs = tf.get_variable(name='itemembs', shape=(nitem, ditem), dtype=tf.float32, trainable=True,
initializer=tf.random_normal_initializer(stddev=1 / np.sqrt(demb)))
self.userids = tf.placeholder(tf.int32, shape=(batchsize,), name='userids')
self.itemids = tf.placeholder(tf.int32, shape=(batchsize,), name='itemids')
if duser > 0:
userbatch.append(tf.gather(self.userembs, self.userids, name='userbatch'))
itembatch.append(tf.gather(self.itemembs, self.itemids, name='itembatch'))
with tf.variable_scope('categorical'):
self.categembs = {}
categrefs = {}
self.categs = {}
self.usercategrefs = {}
self.itemcategrefs = {}
self.usercategids = {}
self.itemcategids = {}
usercategbatch = []
itemcategbatch = []
allattrs = set(userattrs).union(set(itemattrs))
print(f'attributes that we will use as covariates {allattrs}')
for attr in allattrs:
normattr = normalize_name(attr)
with tf.variable_scope(normattr):
categs = set(users.get(attr, [])).union(set(items.get(attr, [])))
categs = list(set(normalize_name(categ) for categ in categs))
self.categs[normattr] = categs
print(f'embedding all categories from attribute {attr}, {len(categs)} categories found')
self.categembs[normattr] = tf.get_variable(name=f'categembs', shape=(len(categs), dcateg), dtype=tf.float32, trainable=True,
initializer=tf.random_normal_initializer(stddev=1 / np.sqrt(demb)))
self.usercategids[normattr] = tf.placeholder(tf.int32, shape=(batchsize,), name=f'usercategids')
self.itemcategids[normattr] = tf.placeholder(tf.int32, shape=(batchsize,), name=f'itemcategids')
usercategbatch.append(tf.gather(self.categembs[normattr], self.usercategids[normattr], name=f'usercategbatch'))
itemcategbatch.append(tf.gather(self.categembs[normattr], self.itemcategids[normattr], name=f'itemcategbatch'))
categrefs[normattr] = {categ: i for i, categ in enumerate(categs)}
self.usercategrefs[normattr] = {userid: categrefs[normattr][normalize_name(categ)] for userid, categ in enumerate(users[attr] if attr in users else [])}
self.itemcategrefs[normattr] = {itemid: categrefs[normattr][normalize_name(categ)] for itemid, categ in enumerate(items[attr] if attr in items else [])}
if dcateg > 0:
userbatch.append(tf.concat(usercategbatch, axis=1, name='userconcat'))
itembatch.append(tf.concat(itemcategbatch, axis=1, name='itemconcat'))
userbatch = tf.concat(userbatch, axis=1, name='userconcat')
itembatch = tf.concat(itembatch, axis=1, name='itemconcat')
with tf.variable_scope('forward'):
def forward(x, scope):
with tf.variable_scope(scope):
for layer in range(nlayer):
x = tf.layers.dense(x, nhidden, activation=None if layer == nlayer - 1 else tf.nn.relu, use_bias=True, name=f'fc{layer}')
return x
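            # forward() stacks nlayer dense layers of width nhidden; the final
            # layer is left linear (activation=None) so the output embeddings
            # are unconstrained in sign.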
userbatch = forward(userbatch, 'usernet')
itembatch = forward(itembatch, 'itemnet')
self.userlogits = userbatch
self.itemlogits = itembatch
with tf.variable_scope('losses'):
self.predbatch = tf.reduce_sum(userbatch * itembatch, axis=1, name='preddist')
self.truebatch = tf.placeholder(dtype=tf.float32, shape=(batchsize), name='truedist')
self.loss = tf.reduce_sum((self.predbatch - self.truebatch) ** 2, name='loss')
self.l1mean = tf.reduce_mean(tf.abs(self.predbatch - self.truebatch))
self.lrnrate = tf.placeholder(tf.float32, shape=(), name='lrnrate')
self.trainop = tf.train.AdamOptimizer(learning_rate=self.lrnrate).minimize(self.loss)
def get_categids(self, userids, useritem='user'):
if useritem == 'user': categrefs = self.usercategrefs
else: categrefs = self.itemcategrefs
categids = defaultdict(list)
for attr in userattrs:
normattr = normalize_name(attr)
for userid in userids:
categids[normattr].append(categrefs[normattr][userid])
return categids
def make_feeddict(self, idsbatch, rtnorm):
userids, itemids = idsbatch[:, 0], idsbatch[:, 1]
usercategids = self.get_categids(userids, 'user')
itemcategids = self.get_categids(itemids, 'item')
truebatch = np.array([rtnorm[userid, itemid] for userid, itemid in idsbatch])
feeddict = {self.userids: userids, self.itemids: itemids, self.truebatch: truebatch,
**{self.usercategids[key]: val for key, val in usercategids.items()},
**{self.itemcategids[key]: val for key, val in itemcategids.items()}}
return feeddict
def get_truebatch(self, idsbatch, rtnorm):
truebatch = np.array([rtnorm[userid, itemid] for userid, itemid in idsbatch])
return truebatch
def valid(epoch, step):
losses, preds, trues = [], [], []
for i in range(0, len(validids) - args.batchsize + 1, args.batchsize):
idsbatch = validids[i: i + args.batchsize]
l1, predbatch = sess.run([model.l1mean, model.predbatch], model.make_feeddict(idsbatch, rtnorm))
losses.append(l1)
preds.extend(list(predbatch)[:20])
trues.extend(list(model.get_truebatch(idsbatch, rtnorm))[:20])
experiment.log_metric('l1V', l1, step=step)
trues, preds = np.array(trues), np.array(preds)
trues, preds = trues * std + mean, preds * std + mean
if epoch in [0, args.nepoch - 1]: plt.plot(trues, preds, '.r' if epoch == 0 else '.b', alpha=.3, markeredgewidth=0, label='untrained' if epoch == 0 else 'trained')
print(f'valid | epoch {epoch} | loss {np.mean(losses)}')
xlim = plt.gca().get_xlim()
plt.plot(xlim, xlim, '-g')
plt.xlabel('ground truth')
plt.ylabel('predicted')
plt.gca().axis('equal')
plt.title('log response time')
plt.legend()
plt.tight_layout()
experiment.log_figure(step=epoch)
def train(epoch, step):
for i in range(0, len(trainids) - args.batchsize + 1, args.batchsize):
feeddict = model.make_feeddict(trainids[i: i + args.batchsize], rtnorm)
feeddict.update({model.lrnrate: get_lrnrate(step, lrnrate=args.lrnrate)})
_, l1 = sess.run([model.trainop, model.l1mean], feeddict)
if not step % 10:
experiment.log_metric('l1', l1, step=step)
print(f'train | epoch {epoch} | step {step} | loss {l1}')
step += 1
return step
if __name__ == '__main__':
parser = get_parser()
args = parser.parse_args()
# data
userattrs = itemattrs = ['subnet', 'Country', 'AS']
rt, users, items, nuser, nitem = load_data()
users, items = extract_feats(users, items)
trainids, validids = extract_pair_ids(rt, nuser, nitem, splitratio=args.splitratio)
rtnorm, mean, std = inverse_standardized_log_latency(rt)
plt.hist(rtnorm.ravel(), 100)
plt.savefig('debug.png')
plt.close()
# model
model = Model(args.batchsize, nuser, nitem, args.duser, args.ditem, args.dcateg, args.nlayer, args.nhidden)
sess = tf.Session()
sess.run(tf.global_variables_initializer())
# writer = tf.summary.FileWriter('./', graph=sess.graph)
# begin training
experiment = Experiment(api_key="vPCPPZrcrUBitgoQkvzxdsh9k", project_name='wsdream', workspace='wronnyhuang', display_summary=False)
plt.figure(figsize=(5, 5))
step = 0
for epoch in range(args.nepoch):
valid(epoch, step)
step = train(epoch, step)
## embedding tsne visualizations
# country
categembs = sess.run(model.categembs)
with open('categembs.pkl', 'wb') as f: pickle.dump(categembs, f)
with open('categembs.pkl', 'rb') as f: categembs = pickle.load(f)
embs = categembs['country']
tsnes = TSNE(n_components=2).fit_transform(embs)
plt.figure(figsize=(8, 8))
plt.plot(*tsnes.T, '.')
for i, tsne in enumerate(tsnes):
plt.text(*tsne, ' ' + model.categs['country'][i], fontsize=8)
plt.gca().axis('equal')
plt.tight_layout()
print(experiment.log_figure(step=epoch))
# AS
embs = categembs['as'][:300]
tsnes = TSNE(n_components=2).fit_transform(embs)
plt.figure(figsize=(16, 16))
plt.plot(*tsnes.T, '.')
for i, tsne in enumerate(tsnes):
plt.text(*tsne, ' ' + model.categs['as'][i][3:23], fontsize=8)
plt.gca().axis('equal')
plt.tight_layout()
print(experiment.log_figure(step=epoch))
# subnet
embs = categembs['subnet']
tsnes = TSNE(n_components=2).fit_transform(embs)
plt.figure(figsize=(8, 8))
plt.plot(*tsnes.T, '.')
for i, tsne in enumerate(tsnes):
plt.text(*tsne, ' ' + model.categs['subnet'][i], fontsize=8)
plt.gca().axis('equal')
plt.tight_layout()
print(experiment.log_figure(step=epoch))
## correlation between latency and distance (hint: none)
    def latlondist(lat1, lon1, lat2, lon2):
        # haversine distance; lat/lon arrive in degrees, so convert to radians first
        lat1, lon1, lat2, lon2 = radians(lat1), radians(lon1), radians(lat2), radians(lon2)
        # approximate radius of earth in km
        R = 6373.0 * 1e-3
        dlon = lon2 - lon1
        dlat = lat2 - lat1
        a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
        c = 2 * atan2(sqrt(a), sqrt(1 - a))
        return R * c
lldists = []
latencies = []
for userid, itemid in trainids[:500]:
lat1, lon1 = users['Latitude'][userid], users['Longitude'][userid]
lat2, lon2 = items['Latitude'][itemid], items['Longitude'][itemid]
lldists.append(latlondist(lat1, lon1, lat2, lon2))
latencies.append(np.log10(rt[userid, itemid]))
plt.figure(figsize=(5, 5))
plt.plot(lldists, latencies, '.')
plt.title('relationship between physical distance and latency')
plt.xlabel('physical distance (km)')
plt.ylabel('log response time (s)')
print(experiment.log_figure())
print(f'time for light to circle the earth inside silica fiber: {40e3 / 3e8 * 1.5 * 1000} ms')
| python |
import rasiberryPiGPIOBaseController.RasiberryPiGPIO as RasiberryPiGPIO
import rasiberryPiGPIOBaseController.Pin as Pin
PI = RasiberryPiGPIO.RasiberryPiGPIO("3B+", "BCM") | python |
##############################################################################
#
# Copyright (c) 2014-2017, 2degrees Limited.
# All Rights Reserved.
#
# This file is part of hubspot-contacts
# <https://github.com/2degrees/hubspot-contacts>, which is subject to the
# provisions of the BSD at
# <http://dev.2degreesnetwork.com/p/2degrees-license.html>. A copy of the
# license should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS"
# AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST
# INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
from voluptuous import Any
from voluptuous import Schema
from hubspot.contacts.properties import PROPERTY_TYPE_BY_NAME
PROPERTY_RESPONSE_SCHEMA_DEFINITION = {
'name': unicode,
'type': Any(*PROPERTY_TYPE_BY_NAME.keys()),
'options': [],
}
CREATE_PROPERTY_RESPONSE_SCHEMA = Schema(
PROPERTY_RESPONSE_SCHEMA_DEFINITION,
required=True,
extra=True,
)
_GET_ALL_PROPERTIES_RESPONSE_SCHEMA_DEFINITION = [
PROPERTY_RESPONSE_SCHEMA_DEFINITION,
]
GET_ALL_PROPERTIES_RESPONSE_SCHEMA = Schema(
_GET_ALL_PROPERTIES_RESPONSE_SCHEMA_DEFINITION,
required=True,
extra=True,
)
| python |
# Solution 1
for e, n in enumerate(name):
print(e, n)
print(n, surname[e])
print("-----")
| python |
import util
import converter
class lexical:
def __init__(self, code, keys, keywords):
self.code = code
self.list = self.code.split('\n')
self.keys = keys
self.keywords = keywords
self.inter()
def list_str(self):
self.code = ''
for c in self.list:
self.code += f"{c}\n"
def inter(self):
for key, value in self.keys.items():
for c in range(0, len(self.list)):
string, lists = util.replace(self.list[c], key, value, True)
self.list[c] = string
self.list_str()
self.list = self.code.split('\n')
self.indentation()
def indentation(self):
indent = 0
for c in range(0, len(self.list)):
if not self.list[c].strip() == '':
indent = len(self.list[c]) - len(self.list[c].lstrip())
break
if indent == 0:
return
else:
for c in range(0, len(self.list)):
self.list[c] = self.list[c][indent:]
self.list_str()
self.elseC()
def elseC(self):
e = converter.elsif(self.list)
self.list = e.code
self.list_str() | python |
from .models import *
from decorator import *
from .views import *
from django.shortcuts import render
from django.http import HttpResponse
@service
def get_goods_info(param): # get goods info
interface_id = "2000"
goods_id = param.get('goods_id', None)
try:
goods = getGoodsByID(goods_id)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
resp = {
"goods": goods.toDict(),
}
return pack(interface_id, data=resp)
@service
def get_all_goods(param):
interface_id = "2003"
resp = {
"goods": [goods.toDict() for goods in getAllGoods()]
}
return pack(interface_id, data = resp)
@login
@service
def create_goods(param): # add goods (for well-behaved admins)
interface_id = "2001"
name = param.get("name", None)
category_id = param.get("category_id", None)
picture_id = param.get("picture_id", None)
unit = param.get("unit", None)
price = param.get("price", None)
remain = param.get("remain", None)
if not remain:
remain = 0
try:
goods = createGoods(name, category_id, unit, price, remain)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
resp = {
"goods": goods.toDict()
}
if picture_id:
try:
            picture = getPictureByID(picture_id)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"1", str(e))
picture.goods = goods
return pack(interface_id, data=resp)
@login
@service
def change_goods(param): # modify goods
interface_id = "2002"
goods_id = param.get("goods_id", None)
key = param.get("key", None)
value = param.get("value", None)
try:
goods = setGoodsInfo(goods_id, key, value)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
resp = {
"goods": goods.toDict(),
}
return pack(interface_id, data=resp)
@login
@service
def delete_goods(param): # delete goods
interface_id = "2004"
goods_id = param.get("goods_id", None)
try:
goods = deleteGoods(goods_id)
return pack(interface_id)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
@service
def get_category(param): # get all categories
interface_id = "2010"
try:
resp = {
"category": getAllCategory()
}
return pack(interface_id, data=resp)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
@login
@service
def create_category(param): # add a category
interface_id = "2011"
name = param.get("name", None)
superior = param.get("superior_id", None)
try:
category = createCategory(name, superior)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
resp = {
"category": category.toDict(),
}
return pack(interface_id, data=resp)
@login
@service
def change_category(param): # modify a category name
interface_id = "2012"
category_id = param.get("category_id", None)
name = param.get("name", None)
try:
category = setCategoryName(category_id, name)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
resp = {
"category": category.toDict(),
}
return pack(interface_id, data=resp)
@login
@service
def delete_category(param): # delete a category
interface_id = "2013"
category_id = param.get("category_id", None)
try:
deleteCategory(category_id)
return pack(interface_id)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
@service
def get_picture(param): # get goods pictures
interface_id = "2020"
goods_id = param.get("goods_id", None)
try:
gp = getPictureByGoods(goods_id)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
resp = {
"picture": [gp1.toDict for gp1 in gp]
}
return pack(interface_id, data=resp)
@login
@service
def append_picture(param):
interface_id = "2021"
goods_id = param.get("goods_id", None)
picture_id = param.get("picture_id", None)
try:
goods = getGoodsByID(goods_id)
picture = getPictureByID(picture_id)
picture.goods = goods
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
resp = {
"picture": picture.toDict(),
}
return pack(interface_id, data=resp)
@login
@service
def delete_picture(param): # delete a goods picture
interface_id = "2022"
picture_id = param.get("picture_id", None)
try:
deletePicture(picture_id)
return pack(interface_id)
except RFSException as e:
return pack(interface_id, e.ret, e.msg)
except Exception as e:
return pack(interface_id, interface_id+"0", str(e))
| python |
#!/usr/bin/env python3
import argparse
import nnabla as nn
import nnabla.functions as F # it crashes without this
import numpy.random as R
import itertools as IT
from nn_circle import *
from nn_smt2 import *
from shared import *
parser = argparse.ArgumentParser(description='Generate ReLU neural network for unit circle classifier.')
parser.add_argument('-s', '--seed', type=int,
help='random seed for training phase')
parser.add_argument('-t', '--test-seed', type=int,
help='random seed for test phase')
parser.add_argument('-L', '--layers', type=int, default=1,
help='number of hidden layers of neural network')
parser.add_argument('-S', '--size', type=int, default=8,
help='size of each hidden layer of neural network')
parser.add_argument('-B', '--batch', type=int, default=BATCH_SIZE,
help='batch size')
parser.add_argument('--plot', action='store_true',
help='plot test results')
parser.add_argument('--save-tests', nargs='?', type=int, const=BATCH_SIZE,
help='save test data to smt2 file - can optionally specify number of tests to save')
parser.add_argument('--eps', type=float, default=1e-6,
help='epsilon for test data assertion in smt2 file')
parser.add_argument('--include', type=str,
help='file to include in smt2 output, before (check-sat)')
parser.add_argument('--std', action='store_true',
help='output standard smt2')
args = parser.parse_args()
args.seed = seed(args.seed)
x, t, y, loss, hs = setup_network(args.layers, args.size, batch_size=args.batch)
train_network(loss, x, t)
args.test_seed = seed(args.test_seed) # reseed for test data
pq, label = random_data(args.batch)
preds, loss = predict(pq, label, x, t, y, loss)
#for name, param in nn.get_parameters().items():
# print(name, param.shape, param.g.flat[:20])
eprint("Test loss:", loss.d)
smt2 = nnabla_to_smt2(y, {x: 'x', y: 'y'},
save_test = x if args.save_tests is not None else None,
test_batch = args.save_tests,
seed = args.seed,
test_seed = args.test_seed,
test_eps = args.eps,
include=args.include,
std=args.std)
print(smt2, end='')
if args.plot:
plot_classified(x.d, t.d.reshape(t.shape[0]), preds)
| python |
# usage: python setup.py pydexe
from pyd.support import setup, Extension, pydexe_sanity_check
import platform
pydexe_sanity_check()
projName = "object_"
setup(
name=projName,
version='1.0',
ext_modules=[
Extension("object_", ['object_.d'],
build_deimos=True,
d_lump=True,
d_unittest=True
),
],
)
| python |
"""
Tester Suite:
**Purpose**
This one checks glglob (replaces glglob_test.py)
"""
import unittest, numpy
# get glbase
import sys, os
sys.path.append(os.path.realpath("../../"))
import glbase3
glbase3.config.SILENT = True
glbase3.config.set_log_level(None)
class Test_glglob(unittest.TestCase):
def setUp(self):
# get some data;
self.data1 = glbase3.genelist(filename="test_data/testA.csv", format={'loc': 0, 'name':1, 'score': 2, 'skiplines': 0})
self.data2 = glbase3.genelist(filename="test_data/testB.csv", format={'loc': 0, 'name':1})
self.data3 = glbase3.genelist(filename="test_data/testC.csv", format={'loc': 0, 'name':1})
#self.data4 = glbase3.genelist(filename="test_data/ccat_list.region", format=glbase3.format_ccat_output)
print(self.data1)
self.g = glbase3.glglob(self.data1, self.data2, self.data3, type="peaklist")
def test_chip_seq_cluster_heatmap_error(self):
no_loc_gl = glbase3.genelist()
no_loc_gl.load_list([{'name': 'missing'}, {'name': 'a'}, {'name': 'loc'}, {'name': 'key'}])
        self.assertRaises(glbase3.errors.AssertionError, self.g.chip_seq_cluster_heatmap, [self.data1, self.data2, self.data3], []) # Fails at a different stage, but passes the assertion
self.assertRaises(glbase3.errors.AssertionError, self.g.chip_seq_cluster_heatmap, [self.data1, self.data2, no_loc_gl], [])
self.assertRaises(glbase3.errors.AssertionError, self.g.chip_seq_cluster_heatmap, [self.data1, no_loc_gl, no_loc_gl], [])
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(Test_glglob)
unittest.TextTestRunner(verbosity=2).run(suite)
| python |
# coding=utf-8
"""
This module contains the tokenizer functions supported by py_entitymatching.
"""
import logging
import pandas as pd
import six
import py_stringmatching as sm
import py_entitymatching.utils.generic_helper as gh
logger = logging.getLogger(__name__)
# Initialize global tokenizers
_global_tokenizers = pd.DataFrame(
{'function_name': ['tok_qgram', 'tok_delim', 'tok_wspace'],
'short_name': ['qgm', 'dlm', 'wsp']})
def get_tokenizers_for_blocking(q=[2, 3], dlm_char=[' ']):
"""
This function returns the single argument tokenizers that can be used for
blocking purposes (typically in rule-based blocking).
Args:
q (list): The list of integers (i.e q value) for which the q-gram
tokenizer must be generated (defaults to [2, 3]).
dlm_char (list): The list of characters (i.e delimiter character) for
which the delimiter tokenizer must be generated (defaults to [` ']).
Returns:
A Python dictionary with tokenizer name as the key and tokenizer
function as the value.
Raises:
AssertionError: If both `q` and `dlm_char` are set to None.
Examples:
>>> import py_entitymatching as em
>>> block_t = em.get_tokenizers_for_blocking()
>>> block_t = em.get_tokenizers_for_blocking(q=[3], dlm_char=None)
>>> block_t = em.get_tokenizers_for_blocking(q=None, dlm_char=[' '])
"""
# Validate inputs
if q is None and dlm_char is None:
logger.error('Both q and dlm_char cannot be null')
raise AssertionError('Both q and dlm_char cannot be null')
else:
# Return single arg tokenizers for the given inputs.
return _get_single_arg_tokenizers(q, dlm_char)
def get_tokenizers_for_matching(q=[2, 3], dlm_char=[' ']):
"""
This function returns the single argument tokenizers that can be used for
matching purposes.
Args:
q (list): The list of integers (i.e q value) for which the q-gram
tokenizer must be generated (defaults to [2, 3]).
dlm_char (list): The list of characters (i.e delimiter character) for
which the delimiter tokenizer must be generated (defaults to [` ']).
Returns:
A Python dictionary with tokenizer name as the key and tokenizer
function as the value.
Raises:
AssertionError: If both `q` and `dlm_char` are set to None.
Examples:
>>> import py_entitymatching as em
        >>> match_t = em.get_tokenizers_for_matching()
        >>> match_t = em.get_tokenizers_for_matching(q=[3], dlm_char=None)
        >>> match_t = em.get_tokenizers_for_matching(q=None, dlm_char=[' '])
"""
if q is None and dlm_char is None:
logger.error('Both q and dlm_char cannot be null')
raise AssertionError('Both q and dlm_char cannot be null')
else:
# Return single arg tokenizers for the given inputs.
return _get_single_arg_tokenizers(q, dlm_char)
def _get_single_arg_tokenizers(q=[2, 3], dlm_char=[' ']):
"""
This function creates single argument tokenizers for the given input
parameters.
"""
# Validate the input parameters
if q is None and dlm_char is None:
logger.error('Both q and dlm_char cannot be null')
raise AssertionError('Both q and dlm_char cannot be null')
# Initialize the key (function names) and value dictionaries (tokenizer
# functions).
names = []
functions = []
if q is not None:
if not isinstance(q, list):
q = [q]
# Create a qgram function for the given list of q's
qgm_fn_list = [_make_tok_qgram(k) for k in q]
qgm_names = ['qgm_' + str(x) for x in q]
# Update the tokenizer name, function lists
names.extend(qgm_names)
functions.extend(qgm_fn_list)
names.append('wspace')
functions.append(tok_wspace)
names.append('alphabetic')
functions.append(tok_alphabetic)
names.append('alphanumeric')
functions.append(tok_alphanumeric)
if dlm_char is not None:
if not isinstance(dlm_char, list) and isinstance(dlm_char,
six.string_types):
dlm_char = [dlm_char]
# Create a delimiter function for the given list of q's
dlm_fn_list = [_make_tok_delim(k) for k in dlm_char]
# Update the tokenizer name, function lists
dlm_names = ['dlm_dc' + str(i) for i in range(len(dlm_char))]
names.extend(dlm_names)
functions.extend(dlm_fn_list)
if len(names) > 0 and len(functions) > 0:
return dict(zip(names, functions))
else:
        logger.warning('Did not create any tokenizers, returning empty dict.')
return dict()
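# Example of the mapping produced above (illustrative): for q=[2, 3] and
# dlm_char=[' '], the returned dict has keys
# {'qgm_2', 'qgm_3', 'wspace', 'alphabetic', 'alphanumeric', 'dlm_dc0'},
# each mapped to a single-argument tokenizer function.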
def _make_tok_delim(d):
"""
This function returns a delimiter-based tokenizer with a fixed delimiter
"""
def tok_delim(s):
# check if the input is of type base string
if pd.isnull(s):
return s
# Remove non ascii characters. Note: This should be fixed in the
# next version.
#s = remove_non_ascii(s)
s = gh.convert_to_str_unicode(s)
# Initialize the tokenizer measure object
measure = sm.DelimiterTokenizer(delim_set=[d])
# Call the function that will tokenize the input string.
return measure.tokenize(s)
return tok_delim
# return a qgram-based tokenizer with a fixed q
def _make_tok_qgram(q):
"""
    This function returns a qgram-based tokenizer with a fixed q value
"""
def tok_qgram(s):
# check if the input is of type base string
if pd.isnull(s):
return s
s = gh.convert_to_str_unicode(s)
measure = sm.QgramTokenizer(qval=q)
return measure.tokenize(s)
return tok_qgram
# q-gram tokenizer
def tok_qgram(input_string, q):
"""
This function splits the input string into a list of q-grams. Note that,
by default the input strings are padded and then tokenized.
Args:
input_string (string): Input string that should be tokenized.
q (int): q-val that should be used to tokenize the input string.
Returns:
A list of tokens, if the input string is not NaN,
else returns NaN.
Examples:
>>> import py_entitymatching as em
>>> em.tok_qgram('database', q=2)
['#d', 'da', 'at', 'ta', 'ab', 'ba', 'as', 'se', 'e$']
>>> em.tok_qgram('database', q=3)
['##d', '#da', 'dat', 'ata', 'tab', 'aba', 'bas', 'ase', 'se$', 'e$$']
>>> em.tok_qgram(None, q=2)
nan
"""
if pd.isnull(input_string):
return pd.np.NaN
input_string = gh.convert_to_str_unicode(input_string)
measure = sm.QgramTokenizer(qval=q)
return measure.tokenize(input_string)
def tok_delim(input_string, d):
"""
This function splits the input string into a list of tokens
(based on the delimiter).
Args:
input_string (string): Input string that should be tokenized.
d (string): Delimiter string.
Returns:
A list of tokens, if the input string is not NaN ,
else returns NaN.
Examples:
>>> import py_entitymatching as em
>>> em.tok_delim('data science', ' ')
['data', 'science']
>>> em.tok_delim('data$#$science', '$#$')
['data', 'science']
>>> em.tok_delim(None, ' ')
nan
"""
if pd.isnull(input_string):
return pd.np.NaN
input_string = gh.convert_to_str_unicode(input_string)
measure = sm.DelimiterTokenizer(delim_set=[d])
return measure.tokenize(input_string)
def tok_wspace(input_string):
"""
This function splits the input string into a list of tokens
(based on the white space).
Args:
input_string (string): Input string that should be tokenized.
Returns:
A list of tokens, if the input string is not NaN ,
else returns NaN.
Examples:
>>> import py_entitymatching as em
>>> em.tok_wspace('data science')
['data', 'science']
>>> em.tok_wspace('data science')
['data', 'science']
>>> em.tok_wspace(None)
nan
"""
if pd.isnull(input_string):
return pd.np.NaN
# input_string = remove_non_ascii(input_string)
input_string = gh.convert_to_str_unicode(input_string)
measure = sm.WhitespaceTokenizer()
return measure.tokenize(input_string)
def tok_alphabetic(input_string):
"""
This function returns a list of tokens that are maximal sequences of
consecutive alphabetical characters.
Args:
input_string (string): Input string that should be tokenized.
Returns:
A list of tokens, if the input string is not NaN ,
else returns NaN.
Examples:
>>> import py_entitymatching as em
>>> em.tok_alphabetic('data99science, data#integration.')
['data', 'science', 'data', 'integration']
>>> em.tok_alphabetic('99')
[]
>>> em.tok_alphabetic(None)
nan
"""
if pd.isnull(input_string):
return pd.np.NaN
measure = sm.AlphabeticTokenizer()
input_string = gh.convert_to_str_unicode(input_string)
return measure.tokenize(input_string)
def tok_alphanumeric(input_string):
"""
This function returns a list of tokens that are maximal sequences of
consecutive alphanumeric characters.
Args:
input_string (string): Input string that should be tokenized.
Returns:
A list of tokens, if the input string is not NaN ,
else returns NaN.
Examples:
>>> import py_entitymatching as em
>>> em.tok_alphanumeric('data9,(science), data9#.(integration).88')
['data9', 'science', 'data9', 'integration', '88']
>>> em.tok_alphanumeric('#.$')
[]
>>> em.tok_alphanumeric(None)
nan
"""
if pd.isnull(input_string):
return pd.np.NaN
input_string = gh.convert_to_str_unicode(input_string)
measure = sm.AlphanumericTokenizer()
return measure.tokenize(input_string)
| python |
from hu import ObjectDict
def test_old_import():
"Verify that a backwards-compatible import still works."
from hu.object_dict import ObjectDict as OD
assert OD is ObjectDict
| python |
#!/usr/bin/env python
# coding=utf-8
# Created Time: 2017-03-17 14:59:15
# Modified Time: 2017-03-17 14:59:18
| python |
#!/usr/bin/python3
# *****************************************************************************
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# ******************************************************************************
import argparse
import os
import sys
from datalab.actions_lib import *
from datalab.fab import *
from datalab.notebook_lib import *
parser = argparse.ArgumentParser()
parser.add_argument('--hostname', type=str, default='')
parser.add_argument('--keyfile', type=str, default='')
parser.add_argument('--region', type=str, default='')
parser.add_argument('--os_user', type=str, default='')
parser.add_argument('--datalab_path', type=str, default='')
parser.add_argument('--keycloak_auth_server_url', type=str, default='')
parser.add_argument('--keycloak_realm_name', type=str, default='')
parser.add_argument('--keycloak_client_id', type=str, default='')
parser.add_argument('--keycloak_client_secret', type=str, default='')
parser.add_argument('--edge_instance_private_ip', type=str, default='')
parser.add_argument('--edge_instance_public_ip', type=str, default='')
parser.add_argument('--superset_name', type=str, default='')
parser.add_argument('--ip_address', type=str, default='')
args = parser.parse_args()
gitlab_certfile = os.environ['conf_gitlab_certfile']
##############
# Run script #
##############
if __name__ == "__main__":
print("Configure connections")
global conn
conn = datalab.fab.init_datalab_connection(args.hostname, args.os_user, args.keyfile)
# PREPARE DISK
print("Prepare .ensure directory")
try:
if not exists(conn,'/home/' + args.os_user + '/.ensure_dir'):
conn.sudo('mkdir /home/' + args.os_user + '/.ensure_dir')
except:
sys.exit(1)
#print("Mount additional volume")
#prepare_disk(args.os_user)
# INSTALL DOCKER COMPOSE
print("Installing docker compose")
if not ensure_docker_compose(args.os_user):
sys.exit(1)
# INSTALL UNGIT
print("Install nodejs")
install_nodejs(args.os_user)
print("Install ungit")
install_ungit(args.os_user, args.superset_name, args.edge_instance_private_ip)
if exists(conn, '/home/{0}/{1}'.format(args.os_user, gitlab_certfile)):
install_gitlab_cert(args.os_user, gitlab_certfile)
# INSTALL INACTIVITY CHECKER
print("Install inactivity checker")
install_inactivity_checker(args.os_user, args.ip_address)
# PREPARE SUPERSET
try:
configure_superset(args.os_user, args.keycloak_auth_server_url, args.keycloak_realm_name,
args.keycloak_client_id, args.keycloak_client_secret, args.edge_instance_private_ip, args.edge_instance_public_ip, args.superset_name)
except:
sys.exit(1)
conn.close()
| python |
"""Implements the main DSE loop in spark."""
import maxbuild
import argparse
import itertools
import json
import os
import pprint
import re
import shutil
import subprocess
import sys
import pandas as pd
from tabulate import tabulate
from html import HTML
from bs4 import BeautifulSoup
from os import listdir
from os.path import isfile, join
from scipy import io, sparse
from subprocess import call
from termcolor import colored
import utils
PRJ = 'Spmv'
TARGET_DFE_MOCK = 'dfe_mock'
TARGET_DFE = 'dfe'
TARGET_SIM = 'sim'
BENCHMARK_NONE = 'none'
BENCHMARK_BEST = 'best'
BENCHMARK_ALL_TO_ALL = 'all'
REP_CSV = 'csv'
REP_HTML = 'html'
DIR_PATH_RESULTS = 'results'
DIR_PATH_LOG = 'logs'
DIR_PATH_RUNS = 'runs'
DSE_LOG_FILE = 'dse_run.log'
PATH_TO_CASK_FILE = os.path.dirname(os.path.abspath(__file__))
PATH_TO_ROOT = os.path.abspath(os.path.join(PATH_TO_CASK_FILE, '../../'))
WORKING_DIR = os.getcwd()
BUILD_DIR = os.path.join(PATH_TO_ROOT, 'build')
SOURCE_DIR = os.path.join(PATH_TO_ROOT, 'src')
OUTPUT_DIR = WORKING_DIR
pd.options.display.float_format = '{:.2f}'.format
def build_path(path=''):
print 'Build dir -->', BUILD_DIR
return os.path.join(BUILD_DIR, path)
def src_path(path=''):
return os.path.join(SOURCE_DIR, path)
def output_path(path=''):
return os.path.join(OUTPUT_DIR, path)
def preProcessBenchmark(benchDirPath):
entries = []
for f in os.listdir(benchDirPath):
info = io.mminfo(os.path.join(benchDirPath, f))
if info[0] == info[1]:
info = list(info[1:])
info.append(info[1] / info[0])
info.insert(0, f.replace(r'.mtx', ''))
info[1] = int(info[1])
info[2] = int(info[2])
entries.append(info)
return sorted(entries, key=lambda x : x[-1], reverse=True)
def print_from_iterator(lines_iterator, logfile=None):
output = ''
if logfile:
with open(logfile, 'w') as log:
for line in lines_iterator:
log.write(line)
log.flush()
output += line
else:
for line in lines_iterator:
print line
output += line
return output
def runDse(benchFile, paramsFile, target, skipExecution=False):
dseFile = "dse_out.json"
if not skipExecution:
utils.execute([build_path('main'), benchFile, paramsFile], DSE_LOG_FILE)
else:
print ' --> Skip DSE run, load results from', dseFile
params = []
prjs = []
architectures = []
with open(dseFile) as f:
data = json.load(f)
for arch in data['best_architectures']:
ps = arch['architecture_params']
est_impl_ps = arch['estimated_impl_params']
matrix = arch['matrices'][0]
params.append(ps)
# XXX Should check for identical architectures before assigning new ID
prj_id = len(prjs)
architectures.append(
[ os.path.basename(matrix).replace('.mtx', ''),
prj_id,
int(ps['cache_size']), int(ps['input_width']),
int(ps['num_pipes']), int(ps['num_controllers']),
int(ps['max_rows']),
# The model uses BRAM36, the McTools use BRAM18
int(est_impl_ps['BRAMs']),
int(est_impl_ps['LUTs']),
int(est_impl_ps['FFs']),
int(est_impl_ps['DSPs']),
float(est_impl_ps['memory_bandwidth']),
float(arch['estimated_gflops']), ])
prjs.append(maxbuild.PrjConfig(ps, target, PRJ, prj_id, src_path('spmv/build/')))
return prjs, architectures
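# For reference, the DSE output parsed above has roughly the following shape.
# This is an illustrative sketch inferred from the fields read in runDse();
# the real tool may emit additional fields:
#
#   {
#     "best_architectures": [
#       {
#         "matrices": ["path/to/matrix.mtx"],
#         "architecture_params": {"cache_size": ..., "input_width": ...,
#                                 "num_pipes": ..., "num_controllers": ...,
#                                 "max_rows": ...},
#         "estimated_impl_params": {"BRAMs": ..., "LUTs": ..., "FFs": ...,
#                                   "DSPs": ..., "memory_bandwidth": ...},
#         "estimated_gflops": ...
#       }
#     ]
#   }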
def buildClient(target):
print ' >> Building Client ----'
utils.execute(['make', '-C', build_path(), 'test_spmv_' + target])
def runClient(benchmark, target, prj=None):
print ' ---- Benchmarking Client ----'
for p in benchmark:
cmd = []
if target == TARGET_DFE:
cmd = ['bash', src_path('frontend/spark_dfe_run.sh'), p]
elif target == TARGET_SIM:
cmd = ['bash', src_path('frontend/simrunner'), build_path('test_spmv_sim'), p]
elif target == TARGET_DFE_MOCK:
cmd = ['bash', src_path('frontend/mockrunner'), build_path('test_spmv_dfe_mock'), p]
outF = 'runs/run_' + target + '_'
if prj:
cmd.append(str(prj.prj_id))
outF += prj.buildName()
else:
outF += 'benchmark_best'
outF += '_' + os.path.basename(p)
print ' -->', p, 'outFile =', outF
try:
out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
print ' ',e
out = e.output
mode = 'w'
if prj:
if os.path.exists(outF):
os.remove(outF)
mode = 'a'
with open(outF, mode) as f:
for line in out:
f.write(line)
class Spark:
def __init__(self, target, prjs, cppCompiler='g++'):
self.target = target
self.prjs =prjs
self.cppCompiler = cppCompiler
def runLibraryBuild(self, prjs, libName):
print ' >> Building Library'
interfaceFile = 'GeneratedImplementations.cpp'
deviceO = 'SmpvDeviceInterface.o'
maxfileO = 'maxfile.o'
prj_includes = []
obj_files = []
if self.target != TARGET_DFE_MOCK:
for p in prjs:
objFile = p.name + '.o'
utils.execute(
['sliccompile', p.maxFileLocation(), objFile],
logfile=p.logFile())
prj_includes.append('-I' + p.resultsDir())
obj_files.append(objFile)
cmd =[
self.cppCompiler,
'-c',
'-Wall',
'-std=c++11',
'-fPIC',
'-I' + src_path('runtime'),
]
# TODO move these checks in an earlier phase
mcdir = os.getenv('MAXCOMPILERDIR')
maxosdir = os.getenv('MAXELEROSDIR')
if mcdir and maxosdir and self.target != TARGET_DFE_MOCK:
cmd.extend([
'-I' + mcdir + '/include',
'-I' + mcdir + '/include/slic',
'-I' + maxosdir + '/include'])
cmd.extend(prj_includes)
cmd.extend([
interfaceFile,
'-o',
deviceO
])
out = subprocess.check_output(cmd)
cmd =[
self.cppCompiler,
'-fPIC',
'--std=c++11',
'-shared',
'-Wl,-soname,{0}.0'.format(libName),
'-o',
libName]
cmd.extend(obj_files + [deviceO])
if mcdir and maxosdir and self.target != TARGET_DFE_MOCK:
cmd.extend([
'-L' + os.path.join(mcdir, 'lib'),
'-L' + os.path.join(maxosdir, 'lib'),
'-lmaxeleros',
'-lslic',])
cmd.extend(['-lm', '-lpthread'])
utils.execute(cmd, 'lib_build.log')
# copy the generated library
libDir = 'lib-generated'
if not os.path.exists(libDir):
os.makedirs(libDir)
shutil.copy(libName, libDir + '/{}.0'.format(libName))
shutil.copy(libName, libDir)
def generateImplementationHeader(self, prjs):
genFilePath = output_path('GeneratedImplementations.cpp')
with open(genFilePath, 'w') as f:
# Include maxfile headers
if self.target != TARGET_DFE_MOCK:
for p in prjs:
f.write('#include <{0}.h>\n'.format(p.name))
# Defines struct formats
f.write('#include "{0}"\n'.format('GeneratedImplSupport.hpp'))
f.write('using namespace cask::runtime;\n')
f.write("""
cask::runtime::SpmvImplementationLoader::SpmvImplementationLoader() {
""")
for i in range(len(prjs)):
p = prjs[i]
f.write('this->impls.push_back(')
runFunction = p.name
writeFunction = p.name + '_dramWrite'
readFunction = p.name + '_dramRead'
dramReductionEnabled = p.name + '_dramReductionEnabled'
if self.target == TARGET_DFE_MOCK:
runFunction = 'cask::runtime::spmvRunMock'
writeFunction = 'cask::runtime::spmvWriteMock'
readFunction = 'cask::runtime::spmvReadMock'
dramReductionEnabled = 'false'
f.write(
'new GeneratedSpmvImplementation({0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}));'.format(
p.prj_id,
runFunction,
writeFunction,
readFunction,
p.getParam('max_rows'),
p.getParam('num_pipes'),
p.getParam('cache_size'),
p.getParam('input_width'),
dramReductionEnabled,
p.getParam('num_controllers')))
f.write('\n}')
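    # For reference, the generated GeneratedImplementations.cpp looks roughly
    # like the sketch below for a single (hypothetical) project "Spmv_dfe_0";
    # the parameter values come from the project's build parameters:
    #
    #   #include <Spmv_dfe_0.h>
    #   #include "GeneratedImplSupport.hpp"
    #   using namespace cask::runtime;
    #
    #   cask::runtime::SpmvImplementationLoader::SpmvImplementationLoader() {
    #   this->impls.push_back(new GeneratedSpmvImplementation(
    #       0, Spmv_dfe_0, Spmv_dfe_0_dramWrite, Spmv_dfe_0_dramRead,
    #       <max_rows>, <num_pipes>, <cache_size>, <input_width>,
    #       Spmv_dfe_0_dramReductionEnabled, <num_controllers>));
    #   }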
def runBuilds(self):
print ' >> Building Hardware Implementations'
if self.target != TARGET_DFE_MOCK:
b = maxbuild.MaxBuildRunner(poolSize=6)
b.runBuilds(self.prjs)
# library generation is sequential
self.generateImplementationHeader(self.prjs)
self.runLibraryBuild(self.prjs, 'libSpmv_' + self.target + '.so')
# buildClient(self.target)
def runBenchmark(self, benchmark, benchmark_mode):
if benchmark_mode == BENCHMARK_NONE:
return
if benchmark_mode == BENCHMARK_ALL_TO_ALL:
for p in self.prjs:
runClient(benchmark, self.target, p)
else:
runClient(benchmark, self.target)
def logTexTable(entries, fpath):
rows = []
float_prec = '.3f'
# find maximum length
length = 0
for e in itertools.chain.from_iterable(entries):
l = len(str(e))
if type(e) is float:
l = len(('{0:' + float_prec + '}').format(e))
length = max(length, l)
fmt = '{0:' + str(length) + '}'
float_fmt = '{0:' + str(length) + float_prec + '}'
for entry in entries:
row = fmt.format(entry[0])
for field in entry[1:]:
f = fmt
if type(field) is float:
f = float_fmt
row += ' &' + f.format(field)
rows.append(row)
    table = '\\begin{{tabular}}{{{0}}} \n{1}\n\\end{{tabular}}'.format(
'l' * len(entries[0]),
' \\\\\n'.join(rows) + r' \\' )
with open(fpath, 'w') as f:
f.write(table)
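# Illustrative example (hypothetical values): entries such as
#   [['small', 1024, 4.5], ['large', 65536, 12.25]]
# are rendered as fixed-width, '&'-separated rows (floats with 3 decimals)
# joined by LaTeX row separators inside a \begin{tabular}...\end{tabular}
# environment.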
def logDseResults(benchmark_df, arch_df):
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
df = pd.merge(benchmark_df, arch_df, left_on='Matrix', right_on='Matrix')
write_result('dse_matrix_arch.tex', df.to_latex())
write_result('dse_matrix_arch.html', df.to_html())
return df
def postProcessResults(prjs, benchmark, benchmark_df, arch_df, arch_build_df, dirpath):
utils.info('Post-processing results')
# need to reconstruct a (matrix, architecture) relation from run files;
# this relation also stores execution results (e.g. bwidth, gflops)
df = pd.DataFrame([], columns=['Id', 'Matrix', 'GFLOPs'])
for p in os.listdir(dirpath):
with open(os.path.join(dirpath, p)) as f:
            arch_id = None
            matrix = None
            gflops = None
            for l in f:
                m = re.match(r'Config ArchitectureId (\d*).*', l)
                if m:
                    arch_id = int(m.group(1))
                m = re.match(r'Param MatrixPath ([\w/-]*)', l)
                if m:
                    matrix = os.path.basename(m.group(1))
                m = re.match(r'Result Simple Gflops \(actual\)=(.*),', l)
                if m:
                    gflops = float(m.group(1))
                if gflops is not None and matrix is not None and arch_id is not None:
                    new_df = pd.DataFrame([[arch_id, matrix, gflops]], columns=['Id', 'Matrix', 'GFLOPs'])
df = df.append(new_df, ignore_index=True)
break
# build a table compare est and measured results
df1 = pd.merge(benchmark_df, df, left_on='Matrix', right_on='Matrix')
df2 = pd.merge(df1, arch_df, left_on='Id', right_on='Id')
df2 = pd.merge(df2, arch_build_df, left_on='Id', right_on='Id')
# keep only some interesting columns and reorderd them
df2 = df2[['Matrix_x', 'Order', 'Nonzeros', 'Nnz/row', 'Cx', 'k', 'Np', 'Cb', 'Logic %', 'DSP %', 'BRAM %', 'BWidth', 'GFLOPs_x', 'GFLOPs_y']]
write_result('matrix_arch_before_after.tex', df2.to_latex(index=False))
print arch_build_df
print df2
def check_make_dir(dirname):
if not os.path.exists(dirname):
os.makedirs(dirname)
def make_clean_dir(dirname):
if os.path.exists(dirname):
shutil.rmtree(dirname)
os.makedirs(dirname)
def write_result(fname, data):
with open(os.path.join(DIR_PATH_RESULTS, fname), 'w') as f:
f.write(data)
def build_html():
matrices = []
check_make_dir('matrices_html')
for root, dirs, files in os.walk('matrices'):
h = HTML()
matrix = os.path.basename(root)
if not dirs:
print root, dirs, files
h.p('Matrix: ' + matrix)
sparsity_plot = None
for f in files:
if not f.endswith('.png'):
with open(os.path.join(root, f)) as fin:
h.p(fin.read(), style='white-space: pre-wrap;')
else:
p = h.p()
p.img(src=matrix + '.png')
sparsity_plot = os.path.join(root, f)
path = 'matrices_html/' + matrix + '.html'
with open(path, 'w') as fout:
matrices.append(matrix + '.html')
fout.write(str(h))
shutil.copyfile(sparsity_plot, 'matrices_html/' + matrix + '.png')
with open('matrices_html/index.html', 'w') as fout:
h = HTML()
h.p('matrices: ')
l = h.ol
for m in matrices:
l.li.a(m, href=m)
fout.write(str(h))
def main():
parser = argparse.ArgumentParser(description='Run Spark DSE flow')
parser.add_argument('-d', '--dse', action='store_true', default=False)
parser.add_argument('-ds', '--dse-skip', action='store_true', default=False)
parser.add_argument('-t', '--target', choices=[TARGET_DFE, TARGET_SIM, TARGET_DFE_MOCK], required=True)
parser.add_argument('-p', '--param-file', required=True)
parser.add_argument('-b', '--benchmark-dir', required=True)
parser.add_argument('-st', '--build_start', type=int, default=None)
parser.add_argument('-en', '--build_end', type=int, default=None)
parser.add_argument('-bmst', '--benchmark_start', type=int, default=None)
parser.add_argument('-bmen', '--benchmark_end', type=int, default=None)
parser.add_argument('-cpp', '--cpp_compiler', default='g++')
parser.add_argument('-bm', '--benchmarking-mode',
choices=[BENCHMARK_BEST, BENCHMARK_ALL_TO_ALL, BENCHMARK_NONE],
default=BENCHMARK_NONE)
parser.add_argument('-rb', '--run-builds', default=False, action='store_true')
parser.add_argument('-rep', '--reporting',
choices=[REP_CSV, REP_HTML],
default=REP_CSV)
args = parser.parse_args()
buildName = PRJ + '_' + args.target
prjs = []
## Prepare some directories
check_make_dir('results')
check_make_dir('logs')
if args.benchmarking_mode != BENCHMARK_NONE:
make_clean_dir('runs')
## Run DSE pass
prjs = []
benchmark_df = pd.DataFrame(
preProcessBenchmark(args.benchmark_dir),
columns = ['Matrix', 'Order', 'Nonzeros', 'Format', 'Type', 'Pattern', 'Nnz/row'])
if args.dse:
utils.info('Running DSE flow')
# the DSE tool produces a JSON file with architectures to be built
prjs, log_archs = runDse(args.benchmark_dir, args.param_file, args.target, args.dse_skip)
    else:
        # load default parameter values from param_file
        with open(args.param_file) as f:
            data = json.load(f)
            ps = {}
            for k, v in data['dse_params'].iteritems():
                ps[k] = str(v['default'])
        # XXX no DSE results exist in this branch; build a single project from
        # the default values with project id 0 and leave the logged
        # architectures empty
        prjs = [maxbuild.PrjConfig(ps, args.target, PRJ, 0, '../spmv/build/')]
        log_archs = []
arch_df = pd.DataFrame(log_archs,
columns = ['Matrix', 'Id', 'Cx', 'k', 'Np', 'Nc', 'Cb', 'BRAMs', 'LUTs', 'FFs', 'DSPs', 'BWidth', 'GFLOPs'])
merged_df = logDseResults(benchmark_df, arch_df)
print merged_df
p = os.path.abspath(args.benchmark_dir)
benchmark = [ join(p, f) for f in listdir(p) if isfile(join(p,f)) ]
if args.benchmark_start != None and args.benchmark_end != None:
benchmark = benchmark[args.benchmark_start:args.benchmark_end]
ps = prjs
if args.build_start != None and args.build_end != None:
ps = prjs[args.build_start:args.build_end]
spark = Spark(args.target, ps, args.cpp_compiler)
if args.run_builds:
utils.info('Running builds')
spark.runBuilds()
if args.target == TARGET_DFE:
prj_info = []
header = ['Id', 'Logic', 'Logic %', 'DSP', 'DSP %', 'BRAM', 'BRAM %']
for p in ps:
resUsage = p.getBuildResourceUsage()
logic = resUsage['Logic utilization']
dsps = resUsage['DSP blocks']
# XXX: block memory type depends on the device
# brams = resUsage['Block memory (BRAM18)']
brams = resUsage['Block memory (M20K)']
prj_info.append([
p.prj_id,
logic[0], logic[0] / float(logic[1]) * 100,
dsps[0], dsps[0] / float(dsps[1]) * 100,
brams[0], brams[0] / float(brams[1]) * 100
])
arch_build_df = pd.DataFrame(prj_info, columns = header)
if args.benchmarking_mode != BENCHMARK_NONE:
utils.info('Running benchmark')
spark.runBenchmark(benchmark, args.benchmarking_mode)
# Post-process results
if args.target == TARGET_DFE:
postProcessResults(ps, benchmark,
benchmark_df, arch_df, arch_build_df,
DIR_PATH_RUNS)
# Reporting
if args.reporting == REP_HTML:
utils.info('Generating HTML reports')
for p in benchmark:
out, out_err = utils.execute(['python', src_path('frontend/sparsegrind.py'),
'-f', 'mm', '-a', 'summary', p], silent=False)
outputDir = os.path.join('matrices', os.path.basename(p).replace('.mtx', ''))
summaryFile = os.path.join(outputDir, 'summary.csv')
check_make_dir(outputDir)
with open(summaryFile, 'w') as f:
f.write(out)
utils.execute(['python', src_path('frontend/sparsegrind.py'),
'-f', 'mm', '-a', 'plot', p], silent=False)
shutil.copy('sparsity.png', outputDir)
build_html()
# TODO also need to add hardware / simulation results to report
# matrix_sim_run=${matrix_dir}/sim_run.csv
# cd scripts && bash simrunner ../build/test_spmv_sim ../${f} >> ../${matrix_sim_run} && cd ..
bs = BeautifulSoup(merged_df.to_html(), 'html.parser')
for row in bs.findAll('tr'):
cols = row.findAll('td')
if cols:
matrixName = cols[0].string
new_tag = bs.new_tag('a', href='matrices/' + matrixName + '.html')
new_tag.string = matrixName
cols[0].string = ''
cols[0].append(new_tag)
with open('matrices_html/matrix_index.html', 'w') as f:
f.write(str(bs))
if __name__ == '__main__':
main()
| python |
n,m = map(int,input().split())
l = list(map(int,input().split()))
l=sorted(l)
j = n-1
i=0
ans=0
while i <= j:
if l[i] + l[j] > m:
j-=1
else:
i+=1
j-=1
ans+=1
print(ans) | python |
from django.contrib.auth.models import User
from django.contrib.gis.db import models
from social_django.models import UserSocialAuth
from social_django.utils import load_strategy
from stravalib.client import Client as StravaClient
from homebytwo.importers.exceptions import StravaMissingCredentials
class Athlete(models.Model):
# Extend default user model
user = models.OneToOneField(User, on_delete=models.CASCADE)
# has the initial import of all Strava activities already taken place?
activities_imported = models.BooleanField(default=False)
def __str__(self):
return str(self.user.username)
@property
def strava_client(self):
"""
        The Strava API client instantiated with the athlete's
authorization token. Note that it only generates a hit to the Strava
API if the authorization token is expired.
"""
# retrieve the access token from the user with social auth
try:
social = self.user.social_auth.get(provider="strava")
except UserSocialAuth.DoesNotExist:
raise StravaMissingCredentials
strava_access_token = social.get_access_token(load_strategy())
# return the Strava client
return StravaClient(access_token=strava_access_token)
@property
def strava_id(self):
return self.user.social_auth.get(provider="strava").uid
"""
A snippet to create an athlete profile the first time it is accessed.
https://www.djangorocks.com/snippets/automatically-create-a-django-profile.html
"""
User.athlete = property(lambda u: Athlete.objects.get_or_create(user=u)[0])
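# Illustrative usage sketch: with the property above, `request.user.athlete`
# transparently creates the Athlete profile on first access instead of
# raising Athlete.DoesNotExist.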
| python |
# -*- coding: utf-8; -*-
from django.utils.translation import ugettext_lazy as _
from django.utils import timezone
from django.utils.http import urlquote
from django.db import models
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from django.db.models import Sum
from django.db.models.signals import post_init, post_save, pre_save
from django.core.exceptions import ValidationError
from django.conf import settings
from django.contrib.sites.models import Site
from hado.managers import HackDoUserManager
from dateutil.relativedelta import relativedelta
from utils import send_email
import urllib
import hashlib
import datetime
import calendar
import os
def get_image_path(instance, filename):
now = datetime.datetime.now()
newfilename = hashlib.md5(now.strftime("%I%M%S") + filename).hexdigest()\
+ os.path.splitext(filename)[1]
return 'user_avatars/%s/%s' % (instance.username, newfilename)
DISPATCH_UID_PREFIX = settings.DISPATCH_UID_PREFIX
EMAIL_SUBJECT_PREFIX = settings.EMAIL_SUBJECT_PREFIX
USER_TYPES = (
('MEM', 'Member'),
('SPO', 'Sponsor'),
('DON', 'Donation'),
)
CONTRACT_STATUSES = (
('ACT', 'Active'),
('LAP', 'Lapsed'),
('TER', 'Terminated'),
('PEN', 'Pending')
)
PAYMENT_METHODS = (
('EFT', 'Electronic Fund Transfer'),
('CHK', 'Cheque'),
('CSH', 'Cash'),
('OTH', 'Others')
)
PAYMENT_STATUSES = (
('VFD', 'Verified'),
('RJD', 'Rejected'),
('PEN', 'Pending')
)
TRANSACTION_TYPE = (
('DPT', 'Deposit'),
('WTD', 'Withdrawal'),
)
class HackDoUser(AbstractBaseUser, PermissionsMixin):
"""
Custom User model, extending Django's AbstractBaseUser
"""
# Django User required attribute
username = models.CharField(
_('username'),
max_length=40,
unique=True,
db_index=True,
help_text=_('primary index for user'),
)
email = models.EmailField(
_('email'),
max_length=255,
db_index=True,
help_text=_('email linked with user'),
)
first_name = models.CharField(
_('first name'),
max_length=30,
blank=True,
help_text=_('user first name'),
)
last_name = models.CharField(
_('last name'),
max_length=30,
blank=True,
help_text=_('user last name'),
)
date_joined = models.DateTimeField(
_('date joined'),
default=timezone.now,
help_text=_('user joined time'),
)
is_staff = models.BooleanField(
_('staff status'), default=False,
help_text=_('Designates whether the user \
can log into django admin site.')
)
is_active = models.BooleanField(
_('active'), default=False,
        help_text=_('Designates whether the user \
is a verified hackerspacesg member.')
)
# HackDo User required attribute
profile_image = models.ImageField(
_('profile image'),
upload_to=get_image_path,
blank=True,
help_text=_('user profile image'),
)
is_gravatar_enabled = models.BooleanField(
_('gravatar_enabled'), default=True,
        help_text=_('Designates whether the user \
uses gravatar as profile image.')
)
utype = models.CharField(
_('member type'),
max_length=3,
choices=USER_TYPES,
default='MEM',
help_text=_('user member type'),
)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['email']
objects = HackDoUserManager()
# Django User required method
def get_full_name(self):
"""
Returns the first_name plus the last_name, with a space in between
"""
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name.strip()
def get_short_name(self):
"""
Returns the username
"""
return self.get_username()
def get_absolute_url(self):
"""
Returns the user default url -- /users/username
"""
return "/users/%s/" % urlquote(self.get_username())
def __unicode__(self):
"""
Returns the user full name if any, else returns username
"""
if self.first_name and self.last_name:
return self.get_full_name()
return self.username
# HackDo method
@property
def user_avatar_url(self, size=20):
"""
Returns user avatar url
"""
default = "http://%s/static/img/default_avatar.png" % (
Site.objects.get_current().domain
)
if self.is_gravatar_enabled:
return "http://www.gravatar.com/avatar/%s?%s" % (
hashlib.md5(self.email.lower()).hexdigest(),
urllib.urlencode({'d': 'mm', 's': str(size)})
)
else:
if self.profile_image:
return self.profile_image.url
return default
@property
def most_recent_payment(self):
"""
Returns most recent payment if any
"""
p = self.payments_made.all().order_by('-date_paid')
return p[0] if p else None
def total_paid(self, ptype=None):
"""
Returns the total amount the User has paid either in total,
or for a specified Contract type
"""
# Construct the appropriate Queryset
if ptype is not None:
payments = self.payments_made.filter(contract__ctype__desc=ptype)
else:
payments = self.payments_made
return payments.aggregate(Sum('amount'))['amount__sum'] or 0.0
def membership_status(self, pretty=False):
"""
Returns string (see Contract::CONTRACT_STATUSES)
indicating latest Membership status of this User
"""
try:
if not hasattr(self, '__latest_membership'):
lm = self.contracts.filter(ctype__desc='Membership')\
.exclude(status='PEN').latest('start')
self.__latest_membership = lm
return self.__latest_membership.get_status_display() \
if pretty else self.__latest_membership.status
except Contract.DoesNotExist:
self.__latest_membership = None
return None
def member_since(self):
"""
Returns datetime object representing
start date of earliest Membership Contract if found, None otherwise
"""
try:
if not hasattr(self, '__member_since'):
ms = self.contracts.filter(ctype__desc='Membership')\
.order_by('start')[0:1]
if len(ms) > 0:
self.__member_since = ms[0].start
else:
self.__member_since = None
return self.__member_since
except Contract.DoesNotExist:
return None
class Meta:
verbose_name = _('user')
verbose_name_plural = _('users')
class ContractType(models.Model):
"""
Stores an contract type:
1. Membership
2. Locker
3. Registered Address
"""
desc = models.CharField(
_('description'),
max_length=128,
blank=False,
null=True,
help_text=_('contract type description')
)
def __unicode__(self):
"""
Returns contract type description
"""
return self.desc
class Tier(models.Model):
"""
Stores an tier related to :model:`hado.ContractType`
1. Trial
2. Youth
3. Regular
4. Hotdesk
5. Resident
"""
fee = models.FloatField(
_('tier fee'),
default=0.0,
help_text=_('tier fee'),
)
desc = models.CharField(
_('description'),
max_length=255,
help_text=_('tier description'),
)
ctype = models.ForeignKey(
ContractType,
blank=False,
null=True,
help_text=_('linked contract type'),
)
def __unicode__(self):
"""
Returns tier description
"""
return self.desc
class MembershipReview(models.Model):
"""
Stores an membership review request for model:`hado.HackDoUser`
"""
applicant = models.ForeignKey(
HackDoUser,
related_name=_('applicant'),
help_text=_('Membership applicant'),
)
referrer = models.ForeignKey(
HackDoUser,
related_name=_('referrer'),
help_text=_('Membership referrer'),
)
reviewed = models.BooleanField(
default=False,
blank=False,
help_text=_('Referrer reviewed?')
)
def __unicode__(self):
"""
Returns applicant and referrer
"""
return '%s requests Hackerspace membership with %s as referrer.' % (
self.applicant.username, self.referrer.username,)
class BankLog(models.Model):
"""
Stores a bank transaction log related to :model:`hado.Contract`
"""
date = models.DateField(
help_text=_('transaction log date'),
)
desc = models.CharField(
max_length=255,
help_text=_('transaction log description'),
)
currency = models.CharField(
max_length=5,
help_text=_('currency code'),
)
amount = models.FloatField(
        help_text=_('transaction amount')
)
t_type = models.CharField(
_('transaction type'),
max_length=3,
choices=TRANSACTION_TYPE,
help_text=_('transaction type: \
1. Deposit 2. Withdrawal'),
)
def __unicode__(self):
"""
Returns date and description
"""
return 'Bank log on %s for %s.' % (
self.date, self.desc,)
class Meta:
unique_together = ("date", "desc")
class Contract(models.Model):
"""
Stores an contract related to :model:`hado.ContractType`, \
:model:`hado.HackDoUser` and :model: `hado.Tier`
"""
start = models.DateField(
help_text=_('contract starting time'),
)
end = models.DateField(
blank=True, null=True,
help_text=_('contract ending time'),
)
valid_till = models.DateField(
editable=False,
help_text=_('contract valid until time'),
)
ctype = models.ForeignKey(
ContractType,
blank=False,
null=True,
verbose_name=_('Contract type'),
help_text=_('Locker and Address Use Contracts must use \
their respective Tiers.\
Membership contracts can accept all other Tiers'),
)
tier = models.ForeignKey(
Tier, blank=False, null=True,
help_text=_('Linked tier'),
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=False,
null=True,
related_name=_('contracts'),
)
status = models.CharField(
max_length=3, choices=CONTRACT_STATUSES,
help_text=_('contract status: \
1. Active 2. Lapsed \
3. Terminated 4.Pending'),
)
desc = models.CharField(
max_length=1024,
blank=True,
help_text=_('Enter company name if Contract is for Address Use.\
May use for general remarks for other Contract types')
)
def __extend_by(self, num_months):
"""
Extends the validity of this Contract by specified number of months.\
THIS METHOD DOES NOT save() AUTOMATICALLY
"""
# We subtract one day, such that if we start on the first of a month,
# eg. datetime.date(2011, 02, 01), extending the validity
# by 5 months, won't give us an end date of datetime.date(2011, 07, 01)
# [which is wrong], but datetime.date(2011, 06, 30) [which is right]
delta = {
'months': num_months,
'days': -1
}
self.valid_till = self.valid_till + relativedelta(**delta)
# Normalise date to end of that month
self.valid_till = datetime.date(self.valid_till.year,
self.valid_till.month,
calendar.monthrange(
self.valid_till.year,
self.valid_till.month)[1])
def __month_diff(self, end, start):
"""
Returns the months (inclusive of part thereof) between two dates
"""
r = relativedelta(end + relativedelta(days=+1), start)
return r.months + \
(r.years * 12 if r.years else 0) + (1 if r.days else 0)
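    # Illustrative example: __month_diff(date(2011, 3, 15), date(2011, 1, 1))
    # evaluates relativedelta(date(2011, 3, 16), date(2011, 1, 1)), i.e.
    # 2 months and 15 days, and returns 2 + 0 + 1 = 3: the partial month counts.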
@property
def total_paid(self):
"""
Returns total amount paid due to this :model:`hado.Contract`
"""
return self.payments.aggregate(Sum('amount'))['amount__sum'] or 0.0
def sync(self):
"""
Looks at the total amount paid to this :model:`hado.Contract` \
and recalculates its proper expiry (end) date, taking a month's \
deposit into account
"""
# Reset the clock
self.valid_till = self.start
months_paid = self.total_paid / self.tier.fee
if months_paid > 0:
self.__extend_by(int(months_paid))
self.save()
def balance(self, in_months=False):
"""
Looks at how much has been paid for this :model:`hado.Contract` \
and determines if there is any balance owed by (-ve) / \
owed to (+ve) the Member
"""
balance = 0
duration_in_months = 0
# Calculate number of months Contract has been in effect,
# ie. not Terminated
if self.status == 'TER':
duration_in_months += self.__month_diff(self.end, self.start)
else:
duration_in_months += self.__month_diff(datetime.date.today(),
self.start)
balance = self.total_paid - (self.tier.fee * duration_in_months)
if in_months:
return balance / self.tier.fee
else:
return balance
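    # Illustrative example (hypothetical numbers): an active contract with a
    # tier fee of 128.0 that has been in effect for 3 months and has received
    # 256.0 in payments gives balance() == 256.0 - 128.0 * 3 == -128.0, i.e.
    # one month's fee owed by the member (balance(in_months=True) == -1.0).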
def update_with_payment(self, p):
"""
Takes a :model:`hado.Payment`, \
calculates how many month's worth it is, \
and extends the contract end date accordingly
"""
if isinstance(p, Payment):
# Get number of multiples of Contract for this Payment
multiples = int(p.amount / self.tier.fee)
self.__extend_by(multiples)
self.save()
# sync() the Contract if this is the first Payment
# being made on this Contract
if self.payments.count() == 1:
self.sync()
else:
return False
def save(self, *args, **kwargs):
"""
Overridden save() forces the date of self.end \
to be the last day of that given month. \
Eg. if self.end is initially declared as 5 May 2010, \
we now force it to become 31 May 2010 \
before actually save()'ing the object.
"""
# But first, is self.end even specified?
if not self.valid_till:
self.valid_till = self.start
today = datetime.date.today()
last_day = calendar.monthrange(self.valid_till.year,
self.valid_till.month)[1]
self.valid_till = datetime.date(self.valid_till.year,
self.valid_till.month, last_day)
# Force start date to be normalised as 1st day of the month
if self.start.day != 1:
self.start = datetime.date(self.start.year, self.start.month, 1)
# If we notice the Contract is now Terminated,
# and the end date has not been set, set the end date
if self.status == 'TER' and self.end is None:
self.end = datetime.date(today.year,
today.month,
calendar.monthrange(today.year,
today.month)[1])
# If the model has been saved already,
# ie. has an id, force it to update
# otherwise, insert a new record
if self.id:
kwargs['force_update'] = True
kwargs['force_insert'] = False
else:
kwargs['force_insert'] = True
kwargs['force_update'] = False
if self.status == 'PEN':
return super(Contract, self).save(*args, **kwargs)
if self.valid_till > today:
self.status = u'ACT'
super(Contract, self).save(*args, **kwargs)
def clean(self):
"""
Model validation to ensure that \
validates that :model:`hado.ContractType` \
and :model:`hado.Tier` are allowed
"""
if self.ctype != self.tier.ctype:
raise ValidationError(_("Contract type and tier mismatched"))
def __unicode__(self):
"""
Returns :model:`hado.Tier` desc, :model:`hado.ContractType` desc \
start time and valid time
"""
return "%s %s | %s to %s" % (self.tier,
self.ctype,
self.start.strftime('%b %Y'),
self.valid_till.strftime('%b %Y'))
class Payment(models.Model):
"""
Stores a payment related to :model:`hado.Contract` \
and :model:`hado.HackDoUser`
"""
date_paid = models.DateField(
_('date of payment'),
help_text=_('date of payment'),
)
amount = models.FloatField(
default=0.0,
help_text=_('payment amount'),
)
method = models.CharField(
max_length=3,
choices=PAYMENT_METHODS,
default='EFT',
help_text=_('payment method: \
1. Electronic Fund Transfer 2. Cheque \
3. Cash 4. Others'),
)
contract = models.ForeignKey(
Contract,
blank=False,
null=True,
related_name=_('payments'),
)
desc = models.CharField(
max_length=255,
blank=True,
help_text=_('Eg. Cheque or transaction number,\
if applicable'),
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=False,
null=True,
related_name=_('payments_made'),
)
verified = models.CharField(
max_length=3,
choices=PAYMENT_STATUSES,
default='PEN',
help_text=_('payment status: \
1. Verified 2. Rejected 3. Pending'),
)
bank_log = models.OneToOneField(
BankLog,
blank=True,
null=True,
help_text=_('linked bank log')
)
def __unicode__(self):
"""
Returns :model:`hado.HackDoUser`, :model:`hado.Tier` desc, \
:model:`hado.ContractType` desc, amount and date of payment \
"""
return u"%s | %s %s | %s, %s" % (self.user,
self.contract.tier,
self.contract.ctype,
self.amount,
self.date_paid.strftime('%d %b %Y'))
class Locker(models.Model):
"""
Stores a locker related to :model:`hado.HackDoUser`
"""
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
blank=False,
null=True,
related_name=_('locker')
)
num = models.IntegerField(
help_text=_('locker number')
)
# Attaching a post_save signal handler to the Payment model
# to update the appropriate Contract
def update_contract_with_payments(sender, **kwargs):
payment = kwargs['instance']
c = payment.contract
c.update_with_payment(payment)
post_save.connect(
update_contract_with_payments,
sender=Payment,
dispatch_uid="%s.update_contract_with_payments"
% DISPATCH_UID_PREFIX)
# Attaching a pre_save signal handler to the Payment model
# to send out notification email when payment status changed
def send_payment_status_change_notification(sender, **kwargs):
new = kwargs['instance']
if not new.id:
return
old = Payment.objects.get(id=new.id)
if old.verified == "PEN" and (new.verified in ["VFD", "RJD"]):
if new.verified == "VFD":
status = "Verified"
elif new.verified == "RJD":
status = "Rejected"
else:
status = "Pending"
fields = {
"prefix": EMAIL_SUBJECT_PREFIX,
"user": old.user,
"date": old.date_paid,
"amount": old.amount,
"status": status,
}
send_email(
'email/payments/payment-notification-subject.txt',
'email/payments/payment-notification.txt',
'email/payments/payment-notification.html',
fields,
[old.user.email])
pre_save.connect(
send_payment_status_change_notification,
sender=Payment,
dispatch_uid="%s.send_payment_status_change_notification"
% DISPATCH_UID_PREFIX)
def lapsed_check(sender, **kwargs):
'''
Checks the end date of active contract and compares it with today.
If contract is lapsed, update the contract status to lapsed.
'''
contract = kwargs['instance']
# If this is a new Contract, check if we have a valid_till date set
if not contract.id and not contract.valid_till:
contract.valid_till = contract.start
if contract.status == u'ACT':
if contract.valid_till < datetime.date.today():
contract.status = u'LAP'
contract.save()
elif contract.status == u'LAP' and \
contract.valid_till > datetime.date.today():
contract.status = u'ACT'
contract.save()
post_init.connect(
lapsed_check,
sender=Contract,
dispatch_uid="%s.lapsed_check"
% DISPATCH_UID_PREFIX)
| python |
import os
import sys
import argparse
from cuttsum.event import read_events_xml
from cuttsum.nuggets import read_nuggets_tsv
from cuttsum.util import gen_dates
import cuttsum.wtmf
import streamcorpus as sc
from sklearn.metrics.pairwise import cosine_similarity
from collections import defaultdict
import numpy as np
def main():
event_file, rc_dir, event_title, nuggets_tsv, ss_params, ofile = parse_args()
ss_model, ss_vocab, ss_dims = ss_params
event = load_event(event_title, event_file)
nuggets = read_nuggets_tsv(nuggets_tsv, filter_query_id=event.query_id)
hours = [dth for dth in gen_dates(event.start, event.end)]
print u"Found", len(nuggets), u"nuggets."
print u"Loading sentence-sim model..."
wmat_model = cuttsum.wtmf.load_model(ss_model, ss_vocab, latent_dims=ss_dims)
nugget_lvecs = wmat_model.factor_unicode([n.text for n in nuggets])
meta_data = []
unicodes = []
print u"Loading sentence data..."
nhours = len(hours)
for h, hour in enumerate(hours, 1):
chunk = os.path.join(rc_dir, u'{}.sc.gz'.format(hour))
for si_idx, si in enumerate(sc.Chunk(path=chunk)):
if u'article-clf' not in si.body.sentences:
continue
sent_idx_map = {}
for idx, sent in enumerate(si.body.sentences[u'serif']):
sent_idx_map[sentence_uni(sent)] = idx
for sent in si.body.sentences[u'article-clf']:
uni = sentence_uni(sent)
meta_data.append((hour, si.stream_id, sent_idx_map[uni]))
unicodes.append(uni)
print u"Computing similarities..."
sent_lvecs = wmat_model.factor_unicode(unicodes)
S = cosine_similarity(sent_lvecs, nugget_lvecs)
S = np.ma.masked_array(S, np.isnan(S))
Szmuv = (S - S.mean(axis=0)) / S.std(axis=0)
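    # Per-sentence summary statistics of the column-standardised similarities:
    # max (M), min (m), mean (U) and total (T) over all nuggets.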
M = np.amax(Szmuv, axis=1)
m = np.amin(Szmuv, axis=1)
U = np.mean(Szmuv, axis=1)
T = np.sum(Szmuv, axis=1)
### WRITE TSV HEADER AND DATA ###
print u"Writing to", ofile
header = 'date-hour\tstream-id\tsent-id\tmax-sim\tmin-sim' + \
'\tmean-sim\ttotal-sim'
for i in range(ss_dims):
header += '\tlv{}'.format(i)
with open(ofile, 'w') as f:
f.write(header)
f.write('\n')
for idx, meta_datum in enumerate(meta_data):
            f.write('{}\t{}\t{}\t{}\t{}\t{}\t{}'.format(meta_datum[0], meta_datum[1],
                meta_datum[2], M[idx], m[idx], U[idx], T[idx]))
for c in range(ss_dims):
f.write('\t{}'.format(sent_lvecs[idx,c]))
f.write('\n')
f.flush()
def sentence_uni(sent):
return u' '.join(token.token.decode(u'utf-8') for token in sent.tokens)
def get_active_nuggets(hour, nuggets, lvecs):
act_nugs = []
idx = 0
for nugget in nuggets:
if nugget.timestamp.strftime("%Y-%m-%d-%H") <= hour:
idx += 1
else:
break
if idx > 0:
return lvecs[0:idx,:]
else:
return None
def load_event(event_title, event_xml):
events = read_events_xml(event_xml)
for event in events:
if event_title == event.title:
return event
raise ValueError(("No event title matches \"{}\" " \
+ "in file: {}").format(event_title, event_xml))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--event-file',
help=u'Event xml file.',
type=unicode, required=True)
parser.add_argument('-r', '--rel-chunks-dir',
help=u'Relevance Chunks dir',
type=str, required=True)
parser.add_argument('-n', '--nuggets-tsv',
help=u'Nuggets tsv file',
type=str, required=True)
parser.add_argument('-t', '--event-title',
help=u'Event title',
type=unicode, required=True)
parser.add_argument('-s', '--sent-sim-model',
help=u'Location of sentence sim model',
type=unicode, required=True)
parser.add_argument('-v', '--sent-sim-vocab',
help=u'Location of sentence sim vocab',
type=unicode, required=True)
parser.add_argument('-d', '--sent-sim-dims',
help=u'Sentence-sim model dimensions',
type=int, required=True)
parser.add_argument('-o', '--output-file',
help=u'Location to write sims',
type=unicode, required=True)
args = parser.parse_args()
event_file = args.event_file
rc_dir = args.rel_chunks_dir
event_title = args.event_title
nuggets_tsv = args.nuggets_tsv
ss_model = args.sent_sim_model
ss_vocab = args.sent_sim_vocab
dims = args.sent_sim_dims
ofile = args.output_file
odir = os.path.dirname(ofile)
if odir != u'' and not os.path.exists(odir):
os.makedirs(odir)
if not os.path.exists(event_file) or os.path.isdir(event_file):
sys.stderr.write((u'--event-file argument {} either does not exist' \
+ u' or is a directory!\n').format(event_file))
sys.stderr.flush()
sys.exit()
if not os.path.exists(rc_dir) or not os.path.isdir(rc_dir):
sys.stderr.write((u'--rel-chunks-dir argument {} either does not' \
+ u' exist or is not a directory!\n').format(rc_dir))
sys.stderr.flush()
sys.exit()
if not os.path.exists(nuggets_tsv) or os.path.isdir(nuggets_tsv):
sys.stderr.write((u'--nuggets-tsv argument {} either does not' \
+ u' exist or is a directory!\n').format(
nuggets_tsv))
sys.stderr.flush()
sys.exit()
if not os.path.exists(ss_model) or os.path.isdir(ss_model):
sys.stderr.write((u'--sent-sim-model argument {} either does not' \
+ u' exist or is a directory!\n').format(
ss_model))
sys.stderr.flush()
sys.exit()
if not os.path.exists(ss_vocab) or os.path.isdir(ss_vocab):
sys.stderr.write((u'--sent-sim-vocab argument {} either does not' \
+ u' exist or is a directory!\n').format(
ss_vocab))
sys.stderr.flush()
sys.exit()
return (event_file, rc_dir, event_title, nuggets_tsv,
(ss_model, ss_vocab, dims), ofile)
if __name__ == '__main__':
main()
| python |
# Generated by Django 3.1.7 on 2021-06-01 15:58
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Saved_Recipe',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spoonacular_id', models.IntegerField(null=True)),
('title', models.CharField(max_length=100)),
('image', models.URLField()),
('source_name', models.CharField(max_length=100, null=True)),
('source_url', models.URLField(null=True)),
('servings', models.IntegerField(null=True)),
('ready_in_minutes', models.IntegerField(null=True)),
('summary', models.CharField(max_length=5000, null=True)),
('favorite', models.BooleanField()),
('edited', models.BooleanField()),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'saved_recipe',
'verbose_name_plural': 'saved_recipes',
},
),
migrations.CreateModel(
name='Meal',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spoonacular_id', models.IntegerField(null=True)),
('saved_recipe', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cookit_api.saved_recipe')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'meal',
'verbose_name_plural': 'meals',
},
),
migrations.CreateModel(
name='Instruction',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spoonacular_id', models.IntegerField(null=True)),
('step_number', models.IntegerField()),
('instruction', models.CharField(max_length=100, null=True)),
('saved_recipe', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cookit_api.saved_recipe')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'instruction',
'verbose_name_plural': 'instructions',
},
),
migrations.CreateModel(
name='Ingredient',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spoonacular_id', models.IntegerField(null=True)),
('spoon_ingredient_id', models.IntegerField(null=True)),
('amount', models.FloatField()),
('unit', models.CharField(max_length=100, null=True)),
('name', models.CharField(max_length=100, null=True)),
('original', models.CharField(max_length=100, null=True)),
('aisle', models.CharField(max_length=100, null=True)),
('aquired', models.BooleanField()),
('saved_recipe', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cookit_api.saved_recipe')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'verbose_name': 'ingredient',
'verbose_name_plural': 'ingredients',
},
),
migrations.CreateModel(
name='Equipment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('spoonacular_id', models.IntegerField(null=True)),
('name', models.CharField(max_length=50)),
('saved_recipe', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='cookit_api.saved_recipe')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| python |
import logging
from cached_property import cached_property
from pymobiledevice3.exceptions import PyMobileDevice3Exception
from pymobiledevice3.restore.img4 import stitch_component
from pymobiledevice3.restore.tss import TSSResponse
class Component:
def __init__(self, build_identity, name: str, tss: TSSResponse = None, data: bytes = None, path: str = None):
self.logger = logging.getLogger(__name__)
self._tss = tss
self.build_identity = build_identity
self.name = name
self._data = data
self._path = path
@cached_property
def path(self):
if self._path:
return self._path
path = None
if self._tss:
path = self._tss.get_path_by_entry(self.name)
if path is None:
self.logger.debug(f'NOTE: No path for component {self.name} in TSS, will fetch from build_identity')
if path is None:
path = self.build_identity.get_component_path(self.name)
if path is None:
raise PyMobileDevice3Exception(f'Failed to find component path for: {self.name}')
return path
@cached_property
def data(self):
if self._data is None:
return self.build_identity.build_manifest.ipsw.read(self.path)
return self._data
@cached_property
def personalized_data(self):
if self._tss is None:
raise PyMobileDevice3Exception(f'TSS ticket must be supplied for personalizing component: {self.name}')
# stitch ApImg4Ticket into IMG4 file
return stitch_component(self.name, self.data, self._tss.ap_img4_ticket)
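# Illustrative usage sketch (names are hypothetical):
#
#   component = Component(build_identity, 'OS', tss=tss_response)
#   signed_data = component.personalized_data  # IMG4 stitched with the ApImg4Ticket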
| python |
from setuptools import setup, find_packages
from vrpcd import __version__, __author__
# Package info
PACKAGE_NAME = "tabu_vrpcd"
SHORT_DESCRIPTION = ('Tabu Search Algorithm for solving Vehicle Routing'
'Problem with Cross-Docking')
PACKAGES_ROOT = '.'
PACKAGES = find_packages(PACKAGES_ROOT)
# Package meta
CLASSIFIERS = []
# Package requirements
INSTALL_REQUIRES = ['networkx']
EXTRAS_REQUIRES = {}
TESTS_REQUIRES = []
setup(
name=PACKAGE_NAME,
version=__version__,
author=__author__,
author_email='[email protected]',
    license='Apache v2',
description=SHORT_DESCRIPTION,
classifiers=CLASSIFIERS,
packages=PACKAGES,
package_dir={'': PACKAGES_ROOT},
include_package_data=True,
zip_safe=False,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRES,
tests_require=TESTS_REQUIRES,
)
| python |
import os
import pytest
from app import create_app, db
@pytest.fixture
def app():
os.environ['APP_SETTINGS'] = 'app.configs.TestingConfig'
app = create_app()
with app.app_context():
# TODO: create test database with geographic modules
db.create_all()
yield app
with app.app_context():
db.session.close()
db.drop_all()
@pytest.fixture
def client(app):
return app.test_client()
| python |
from itertools import combinations
# Define is_in_triangle()
def is_in_triangle(G, n):
"""
Checks whether a node `n` in graph `G` is in a triangle relationship or not.
Returns a boolean.
"""
in_triangle = False
# Iterate over all possible triangle relationship combinations
for n1, n2 in combinations(G.neighbors(n), 2):
# Check if an edge exists between n1 and n2
if G.has_edge(n1,n2):
in_triangle = True
break
return in_triangle
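# Illustrative usage sketch (hypothetical graph; assumes networkx is installed):
if __name__ == '__main__':
    import networkx as nx
    G = nx.Graph([(1, 2), (2, 3), (1, 3), (3, 4)])
    print(is_in_triangle(G, 1))  # True: nodes 1, 2 and 3 form a triangle
    print(is_in_triangle(G, 4))  # False: node 4 only neighbours node 3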
| python |
import threading
import unittest
import requests
from confident_metrics import record_event
from confident_metrics.metrics import ConfidentCounter, PreciseFloat
class MetricReader:
def __init__(self, port, addr="localhost"):
self.__is_running = False
self.__port = port
self.__addr = addr
self.__metrics = {}
self.__data = {}
@property
def port(self) -> int:
return self.__port
@property
def addr(self) -> str:
return self.__addr
@property
def metrics(self) -> dict:
return self.__metrics
@property
def data(self) -> dict:
return self.__data
def query_data(self, addr: str = None, port: int = None) -> str:
addr = self.addr if addr is None else addr
port = self.port if port is None else port
api_endpoint = "http://{}:{}".format(addr, port)
r = requests.get(url=api_endpoint)
if r.status_code == 200:
data = r.content.decode()
return data
raise ValueError(
"\nGot status code {} when querying the server."
" Reponse content: {}\n".format(r.status_code, r.content.decode()),
)
@staticmethod
def parse_response(data: str):
lines = data.split("\n")
def is_metric_line(line: str):
return not (line.startswith("#") or line.startswith("python") or line == "")
def parse_line(line):
try:
name, val = line.split(" ")
except ValueError:
return line, None
try:
val = float(val)
except ValueError:
pass
return name, val
raw_metrics = [l for l in lines if is_metric_line(l)]
metric_values = [parse_line(l) for l in raw_metrics]
metrics = {name: val for name, val in metric_values}
return metrics
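    # Illustrative example: the exposition line "my_metric_sum 10.0" parses to
    # the entry {"my_metric_sum": 10.0}; comment lines, empty lines and lines
    # starting with "python" are skipped.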
def parse_data(self, addr: str = None, port: int = None):
decoded_response = self.query_data(addr=addr, port=port)
self.__data = self.parse_response(decoded_response)
self.__metrics = {
name: val for name, val in self.__data.items() if not name.startswith("process_")
}
def query_metrics(self, name: str):
return {k: v for k, v in self.metrics.items() if name in k}
def dummy_server():
from confident_metrics.metrics import _prometheus_server as server
if server is None:
try:
record_event("start_server_hack", 8000)
except OSError as e:
raise e
from confident_metrics.metrics import _prometheus_server as server
assert server is not None
return server
class TestConfidentCounter(unittest.TestCase):
def test_kahan_algorithm(self):
metric = ConfidentCounter("test_data_kahan", "running counters")
# why this number? https://en.wikipedia.org/wiki/Double-precision_floating-point_format
origin = brute_sum = 4503599627370496 # 4_503_599_627_370_496
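        # At this magnitude (2**52) the spacing between consecutive doubles is
        # 1.0, so naively adding 0.001 at a time is lost to rounding; the
        # Kahan-compensated counter should still accumulate the full +1.0.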
metric += origin
val = 0.001
for _ in range(1000):
brute_sum += val
metric += val
metric_val = metric.collect()[0].samples[1].value
self.assertEqual(metric_val, origin + 1.)
self.assertNotEqual(brute_sum, origin + 1)
def test_get(self):
metric = ConfidentCounter("test_get_counter", "running counters")
metric += 10
self.assertEqual(metric._count.get(), 1)
self.assertEqual(metric._sum.get(), 10)
self.assertEqual(metric._sum_of_squares.get(), 100)
def test_set(self):
metric = ConfidentCounter("test_set_counter", "running counters")
metric._count.set(1)
metric._sum.set(10)
metric._sum_of_squares.set(100)
self.assertEqual(metric._count.get(), 1)
self.assertEqual(metric._sum.get(), 10)
self.assertEqual(metric._sum_of_squares.get(), 100)
def test_multithread(self):
x = PreciseFloat()
threads = []
def bump():
nonlocal x
for _ in range(1000):
x += 1
for _ in range(100):
t = threading.Thread(target=bump)
t.start()
threads.append(t)
for i in range(100):
threads[i].join()
self.assertEqual(x.get(), 100 * 1000)
class TestPrometheusServer(unittest.TestCase):
def setUp(self) -> None:
self.reader = MetricReader(8000)
self.server = dummy_server()
def test_attributes(self):
self.assertIsInstance(self.server.metrics, dict)
self.assertIsInstance(self.server.host, str)
self.assertIsInstance(self.server.port, int)
def test_filter_metric_name(self):
valid_name = "miau.gdb"
filtered = self.server._adjust_metric_name(name=valid_name)
self.assertEqual(filtered, "miau:gdb")
with self.assertRaises(ValueError):
invalid_name = "!AM!?wilto%."
self.server._adjust_metric_name(name=invalid_name)
# match = self.server._valid_name_regex.match(invalid_name)
# self.assertEqual(filtered, match)
def test_submit_rolling_stats(self):
name = "test_rolling_stats"
val = 4
self.server.submit_event(key=name, value=val)
val = 6
self.server.submit_event(key=name, value=val)
self.reader.parse_data()
self.assertTrue("{}_sum".format(name) in list(self.reader.metrics.keys()))
self.assertTrue("{}_count".format(name) in list(self.reader.metrics.keys()))
self.assertTrue(self.reader.metrics["{}_count".format(name)] == 2)
self.assertTrue(self.reader.metrics["{}_sum".format(name)] == 10)
self.assertTrue(self.reader.metrics["{}_sum_of_squares".format(name)] == 52)
class TestSubmitEvent(unittest.TestCase):
def setUp(self) -> None:
self.server = dummy_server()
self.reader = MetricReader(8000)
def test_send_new_scalar(self):
name = "a_float"
record_event(name, 3.1)
self.reader.parse_data()
self.assertTrue(self.reader.metrics["{}_sum".format(name)] == 3.1)
record_event(name, 5.1)
self.reader.parse_data()
self.assertTrue(self.reader.metrics["{}_sum".format(name)] == 8.2)
| python |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2019 Palo Alto Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = """
---
module: panos_http_profile
short_description: Manage http server profiles.
description:
- Manages http server profiles.
author: "Garfield Lee Freeman (@shinmog)"
version_added: '1.0.0'
requirements:
- pan-python
- pandevice >= 0.11.1
- PAN-OS >= 8.0
notes:
- Panorama is supported.
- Check mode is supported.
extends_documentation_fragment:
- paloaltonetworks.panos.fragments.transitional_provider
- paloaltonetworks.panos.fragments.vsys_shared
- paloaltonetworks.panos.fragments.device_group
- paloaltonetworks.panos.fragments.state
options:
name:
description:
- Name of the profile.
type: str
required: true
tag_registration:
description:
- The server should have user-ID agent running in order for tag
registration to work.
type: bool
config_name:
description:
- Name for custom config format.
type: str
config_uri_format:
description:
- URI format for custom config format.
type: str
config_payload:
description:
- Payload for custom config format.
type: str
system_name:
description:
- Name for custom config format.
type: str
system_uri_format:
description:
- URI format for custom config format.
type: str
system_payload:
description:
- Payload for custom config format.
type: str
threat_name:
description:
- Name for custom config format.
type: str
threat_uri_format:
description:
- URI format for custom config format.
type: str
threat_payload:
description:
- Payload for custom config format.
type: str
traffic_name:
description:
- Name for custom config format.
type: str
traffic_uri_format:
description:
- URI format for custom config format.
type: str
traffic_payload:
description:
- Payload for custom config format.
type: str
hip_match_name:
description:
- Name for custom config format.
type: str
hip_match_uri_format:
description:
- URI format for custom config format.
type: str
hip_match_payload:
description:
- Payload for custom config format.
type: str
url_name:
description:
- Name for custom config format.
type: str
url_uri_format:
description:
- URI format for custom config format.
type: str
url_payload:
description:
- Payload for custom config format.
type: str
data_name:
description:
- Name for custom config format.
type: str
data_uri_format:
description:
- URI format for custom config format.
type: str
data_payload:
description:
- Payload for custom config format.
type: str
wildfire_name:
description:
- Name for custom config format.
type: str
wildfire_uri_format:
description:
- URI format for custom config format.
type: str
wildfire_payload:
description:
- Payload for custom config format.
type: str
tunnel_name:
description:
- Name for custom config format.
type: str
tunnel_uri_format:
description:
- URI format for custom config format.
type: str
tunnel_payload:
description:
- Payload for custom config format.
type: str
user_id_name:
description:
- Name for custom config format.
type: str
user_id_uri_format:
description:
- URI format for custom config format.
type: str
user_id_payload:
description:
- Payload for custom config format.
type: str
gtp_name:
description:
- Name for custom config format.
type: str
gtp_uri_format:
description:
- URI format for custom config format.
type: str
gtp_payload:
description:
- Payload for custom config format.
type: str
auth_name:
description:
- Name for custom config format.
type: str
auth_uri_format:
description:
- URI format for custom config format.
type: str
auth_payload:
description:
- Payload for custom config format.
type: str
sctp_name:
description:
- PAN-OS 8.1+.
- Name for custom config format.
type: str
sctp_uri_format:
description:
- PAN-OS 8.1+.
- URI format for custom config format.
type: str
sctp_payload:
description:
- PAN-OS 8.1+.
- Payload for custom config format.
type: str
iptag_name:
description:
- PAN-OS 9.0+.
- Name for custom config format.
type: str
iptag_uri_format:
description:
- PAN-OS 9.0+.
- URI format for custom config format.
type: str
iptag_payload:
description:
- PAN-OS 9.0+.
- Payload for custom config format.
type: str
"""
EXAMPLES = """
# Create a profile
- name: Create http profile
panos_http_profile:
provider: '{{ provider }}'
name: 'my-profile'
tag_registration: true
"""
RETURN = """
# Default return values
"""
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.paloaltonetworks.panos.plugins.module_utils.panos import (
get_connection,
)
try:
from panos.device import HttpServerProfile
from panos.errors import PanDeviceError
except ImportError:
try:
from pandevice.device import HttpServerProfile
from pandevice.errors import PanDeviceError
except ImportError:
pass
def main():
helper = get_connection(
vsys_shared=True,
device_group=True,
with_state=True,
with_classic_provider_spec=True,
min_pandevice_version=(0, 11, 1),
min_panos_version=(8, 0, 0),
argument_spec=dict(
name=dict(required=True),
tag_registration=dict(type="bool"),
config_name=dict(),
config_uri_format=dict(),
config_payload=dict(),
system_name=dict(),
system_uri_format=dict(),
system_payload=dict(),
threat_name=dict(),
threat_uri_format=dict(),
threat_payload=dict(),
traffic_name=dict(),
traffic_uri_format=dict(),
traffic_payload=dict(),
hip_match_name=dict(),
hip_match_uri_format=dict(),
hip_match_payload=dict(),
url_name=dict(),
url_uri_format=dict(),
url_payload=dict(),
data_name=dict(),
data_uri_format=dict(),
data_payload=dict(),
wildfire_name=dict(),
wildfire_uri_format=dict(),
wildfire_payload=dict(),
tunnel_name=dict(),
tunnel_uri_format=dict(),
tunnel_payload=dict(),
user_id_name=dict(),
user_id_uri_format=dict(),
user_id_payload=dict(),
gtp_name=dict(),
gtp_uri_format=dict(),
gtp_payload=dict(),
auth_name=dict(),
auth_uri_format=dict(),
auth_payload=dict(),
sctp_name=dict(),
sctp_uri_format=dict(),
sctp_payload=dict(),
iptag_name=dict(),
iptag_uri_format=dict(),
iptag_payload=dict(),
),
)
module = AnsibleModule(
argument_spec=helper.argument_spec,
supports_check_mode=True,
required_one_of=helper.required_one_of,
)
# Verify imports, build pandevice object tree.
parent = helper.get_pandevice_parent(module)
try:
listing = HttpServerProfile.refreshall(parent)
except PanDeviceError as e:
module.fail_json(msg="Failed refresh: {0}".format(e))
spec = {
"name": module.params["name"],
"tag_registration": module.params["tag_registration"],
"config_name": module.params["config_name"],
"config_uri_format": module.params["config_uri_format"],
"config_payload": module.params["config_payload"],
"system_name": module.params["system_name"],
"system_uri_format": module.params["system_uri_format"],
"system_payload": module.params["system_payload"],
"threat_name": module.params["threat_name"],
"threat_uri_format": module.params["threat_uri_format"],
"threat_payload": module.params["threat_payload"],
"traffic_name": module.params["traffic_name"],
"traffic_uri_format": module.params["traffic_uri_format"],
"traffic_payload": module.params["traffic_payload"],
"hip_match_name": module.params["hip_match_name"],
"hip_match_uri_format": module.params["hip_match_uri_format"],
"hip_match_payload": module.params["hip_match_payload"],
"url_name": module.params["url_name"],
"url_uri_format": module.params["url_uri_format"],
"url_payload": module.params["url_payload"],
"data_name": module.params["data_name"],
"data_uri_format": module.params["data_uri_format"],
"data_payload": module.params["data_payload"],
"wildfire_name": module.params["wildfire_name"],
"wildfire_uri_format": module.params["wildfire_uri_format"],
"wildfire_payload": module.params["wildfire_payload"],
"tunnel_name": module.params["tunnel_name"],
"tunnel_uri_format": module.params["tunnel_uri_format"],
"tunnel_payload": module.params["tunnel_payload"],
"user_id_name": module.params["user_id_name"],
"user_id_uri_format": module.params["user_id_uri_format"],
"user_id_payload": module.params["user_id_payload"],
"gtp_name": module.params["gtp_name"],
"gtp_uri_format": module.params["gtp_uri_format"],
"gtp_payload": module.params["gtp_payload"],
"auth_name": module.params["auth_name"],
"auth_uri_format": module.params["auth_uri_format"],
"auth_payload": module.params["auth_payload"],
"sctp_name": module.params["sctp_name"],
"sctp_uri_format": module.params["sctp_uri_format"],
"sctp_payload": module.params["sctp_payload"],
"iptag_name": module.params["iptag_name"],
"iptag_uri_format": module.params["iptag_uri_format"],
"iptag_payload": module.params["iptag_payload"],
}
obj = HttpServerProfile(**spec)
parent.add(obj)
changed, diff = helper.apply_state(obj, listing, module)
module.exit_json(changed=changed, diff=diff, msg="Done")
if __name__ == "__main__":
main()
| python |
import torch
import torch.optim as optim
import torch.utils.data as data_utils
import os
import numpy as np
from pointnetae.model import PointNetAE
from pointnetae.config import *
from pointnetae.utils import *
from pointnetae.dataset import SceneDataset
# from torch.utils.data.dataloader import default_collate # for batching input scenes
REGRESS_UNMATCHED_DIM = True # regress dim of unmatched predictions to 0
NUM_EPOCHS = num_epochs
BATCH_SIZE = batch_size
LOAD_PATH = ''
SAVE_PATH = os.path.join("experiments", model_name, model_params_subdir)
LEARNING_RATE_INITIAL = learning_rate
STEP_SIZE = step_size
STEP_GAMMA = step_gamma
base_dir = os.path.join(data_dir, room_name)
rooms_dir = os.path.join(base_dir, rooms_subdir)
model = PointNetAE()
if LOAD_PATH != '':
model.load_state_dict(torch.load(LOAD_PATH))
if SAVE_PATH != '' and not os.path.exists(SAVE_PATH):
os.makedirs(SAVE_PATH)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE_INITIAL, betas=(0.9, 0.999))
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=STEP_SIZE, gamma=STEP_GAMMA)
model = model.train().cuda()
scene_dataset = SceneDataset(rooms_dir, max_num_points, load_ram=True)
def collate_fn(batch):
# return default_collate([t[0] for t in batch]), [t[1] for t in batch]
return [t[0] for t in batch], [t[1] for t in batch]
scene_loader = data_utils.DataLoader(
scene_dataset,
batch_size=BATCH_SIZE,
shuffle=True,
num_workers=1,
drop_last=True,
collate_fn=collate_fn
)
loss_log = []
geometric_loss_log = []
orientation_loss_log = []
categorical_loss_log = []
existence_loss_log = []
shape_loss_log = []
for epoch in range(NUM_EPOCHS):
epoch_losses = [0, 0, 0, 0, 0] # geometric, orientation, categorical, existence, shape
for i, scene_data in enumerate(scene_loader):
scenes, targets = scene_data # scenes and targets are both lists of 2D tensors
optimizer.zero_grad()
losses = [0, 0, 0, 0, 0] # geometric, orientation, categorical, existence, shape
for j in range(BATCH_SIZE):
scene = scenes[j].transpose(1, 0).cuda() # need to transpose for Conv1d
target = targets[j]
cats = target[:, geometry_size + orientation_size].numpy().astype(int) # category indices
target = target.cuda()
# use single-element batches due to differently-shaped batch elements
reconstruction_batch, latent_code_batch = model(scene.unsqueeze(0), np.expand_dims(cats, 0))
reconstruction = reconstruction_batch[0]
latent_code = latent_code_batch[0]
cost_mat_position = get_cost_matrix_2d(reconstruction[:, 0:2], target[:, 0:2])
cost_mat_dimension = get_cost_matrix_2d(reconstruction[:, 2:4], target[:, 2:4])
cost_mat = cost_mat_position + dimensions_matching_weight * cost_mat_dimension
cost_mat = cost_mat.detach().cpu()
target_ind, matched_ind, unmatched_ind = get_assignment_problem_matchings(cost_mat)
reconstruction_matched = reconstruction[matched_ind]
reconstruction_unmatched = reconstruction[unmatched_ind]
target_existence = torch.zeros(max_num_points)
target_existence[matched_ind] = 1
target = target[target_ind] # reorder target
target_category_idx = target[:, geometry_size+orientation_size].long()
# Geometry
losses[0] += geometric_weight * geometric_loss(
reconstruction_matched[:, 0:geometry_size],
target[:, 0:geometry_size]
)
if REGRESS_UNMATCHED_DIM and reconstruction_unmatched.shape[0] > 0: # regress dimension of unmatched to zero
losses[0] += geometric_weight * geometric_loss(
reconstruction_unmatched[:, 2:4],
torch.zeros_like(reconstruction_unmatched[:, 2:4])
)
# Orientation
losses[1] += orientation_weight * orientation_loss(
reconstruction_matched[:, geometry_size:geometry_size+orientation_size],
target[:, geometry_size:geometry_size+orientation_size]
)
# Category
losses[2] += categorical_weight * categorical_loss(
reconstruction_matched[:, geometry_size+orientation_size:geometry_size+orientation_size+num_categories],
target_category_idx
)
# Existence
losses[3] += existence_weight * existence_loss(
reconstruction[:, geometry_size+orientation_size+num_categories],
target_existence.cuda()
)
# Shape
shape_codes = torch.zeros(target.shape[0], shape_size).cuda()
for k in range(target.shape[0]):
x = torch.cat(
(
latent_code,
reconstruction_matched[k, 0:geometry_size+orientation_size]
)
)
shape_codes[k, :] = model.decode_shape(x, target_category_idx[k])
losses[4] += shape_weight * shape_loss(
shape_codes,
target[:, geometry_size+orientation_size+1:]
)
loss = 0
for li in range(len(losses)):
loss += losses[li]
epoch_losses[li] += losses[li].item()
# if opt.feature_transform:
# loss += feature_transform_regularizer(trans_feat) * 0.001
loss.backward()
optimizer.step()
print('[%d: %d] train loss: %f (%f, %f, %f, %f, %f)' % (
epoch + 1, i + 1, loss.item(), losses[0].item(), losses[1].item(), losses[2].item(), losses[3].item(), losses[4].item()
))
epoch_loss = 0
for li in range(len(epoch_losses)):
epoch_loss += epoch_losses[li]
print('EPOCH %d train loss: %f (%f, %f, %f, %f, %f)' % (
epoch + 1, epoch_loss, epoch_losses[0], epoch_losses[1], epoch_losses[2], epoch_losses[3], epoch_losses[4]
))
loss_log.append(epoch_loss)
geometric_loss_log.append(epoch_losses[0])
orientation_loss_log.append(epoch_losses[1])
categorical_loss_log.append(epoch_losses[2])
existence_loss_log.append(epoch_losses[3])
shape_loss_log.append(epoch_losses[4])
scheduler.step()
if (epoch + 1) % 100 == 0:
torch.save(model.state_dict(), '%s/%d.pth' % (SAVE_PATH, epoch + 1))
torch.save(
{
"loss": loss_log,
"geometric_loss": geometric_loss_log,
"orientation_loss": orientation_loss_log,
"categorical_loss": categorical_loss_log,
"existence_loss": existence_loss_log,
"shape_loss": shape_loss_log
},
os.path.join("experiments", model_name, "Logs.pth")
)
torch.save(model.state_dict(), '%s/latest.pth' % (SAVE_PATH)) | python |
"""OpenAPI core responses module"""
from functools import lru_cache
from six import iteritems
from openapi_core.exceptions import InvalidContentType
from openapi_core.media_types import MediaTypeGenerator
from openapi_core.parameters import ParametersGenerator
class Response(object):
def __init__(
self, http_status, description, headers=None, content=None,
links=None):
self.http_status = http_status
self.description = description
self.headers = headers and dict(headers) or {}
self.content = content and dict(content) or {}
self.links = links and dict(links) or {}
def __getitem__(self, mimetype):
try:
return self.content[mimetype]
except KeyError:
raise InvalidContentType(
"Invalid mime type `{0}`".format(mimetype))
class ResponsesGenerator(object):
def __init__(self, dereferencer, schemas_registry):
self.dereferencer = dereferencer
self.schemas_registry = schemas_registry
def generate(self, responses):
for http_status, response in iteritems(responses):
response_deref = self.dereferencer.dereference(response)
description = response_deref['description']
headers = response_deref.get('headers')
content = response_deref.get('content')
media_types = None
if content:
media_types = self.media_types_generator.generate(content)
parameters = None
if headers:
parameters = self.parameters_generator.generate(headers)
yield http_status, Response(
http_status, description,
content=media_types, headers=parameters)
@property
@lru_cache()
def media_types_generator(self):
return MediaTypeGenerator(self.dereferencer, self.schemas_registry)
@property
@lru_cache()
def parameters_generator(self):
return ParametersGenerator(self.dereferencer, self.schemas_registry)
| python |
# -*- coding: utf-8 -*-
"""
demo
~~~~
:copyright: (c) 2014 by Shipeng Feng.
:license: BSD, see LICENSE for more details.
"""
from plan import Plan
cron = Plan()
cron.command('ls /tmp', every='1.day', at='12:00')
cron.command('pwd', every='2.month')
cron.command('date', every='weekend')
if __name__ == "__main__":
cron.run()
| python |
import numpy as np
from pycocotools.mask import iou
def np_iou(A, B):
def to_xywh(box):
box = box.copy()
box[:, 2] -= box[:, 0]
box[:, 3] -= box[:, 1]
return box
ret = iou(
to_xywh(A), to_xywh(B),
np.zeros((len(B),), dtype=np.bool))
return ret
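# Illustrative usage (added for clarity; not part of the original module): A and B
# are assumed to be (N, 4) float arrays of [x1, y1, x2, y2] boxes; they are
# converted to [x, y, w, h] before calling pycocotools, and the result is a
# len(A) x len(B) matrix of pairwise IoU values, e.g.
#     np_iou(np.array([[0., 0., 10., 10.]]), np.array([[0., 0., 10., 10.]]))  # -> [[1.]]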
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
class BreakfastMenu(object):
def __init__(self):
self.items = []
def add_item(self, name, price):
self.items.append((name, price))
def __iter__(self):
""" return a Iterable object """
return iter(self.items)
class LaunchMenu(object):
def __init__(self):
self.items = set()
def add_item(self, name, price):
self.items.add((name, price))
def __iter__(self):
""" return a Iterable object """
return iter(self.items)
class DinnerMenu(object):
def __init__(self):
self.items = {}
def add_item(self, name, price):
self.items[name] = price
def __iter__(self):
""" return a Iterable object """
return iter(((name, price) for name, price in self.items.items()))
if __name__ == '__main__':
breakfast_menu = BreakfastMenu()
breakfast_menu.add_item('milk', 5)
breakfast_menu.add_item('bread', 6)
breakfast_menu.add_item('coffee', 7)
breakfast_menu.add_item('donuts', 3)
print('\nBreakfastMenu:')
for item in breakfast_menu:
print(item)
launch_menu = LaunchMenu()
launch_menu.add_item('milk', 5)
launch_menu.add_item('bread', 6)
launch_menu.add_item('coffee', 7)
launch_menu.add_item('donuts', 3)
print('\nLaunchMenu:')
for item in launch_menu:
print(item)
dinner_menu = DinnerMenu()
dinner_menu.add_item('milk', 5)
dinner_menu.add_item('bread', 6)
dinner_menu.add_item('coffee', 7)
dinner_menu.add_item('donuts', 3)
print('\nDinnerMenu:')
for item in dinner_menu:
print(item)
| python |
import os
WORKDIR = os.path.dirname(__file__)
SWAGGER_PATH = os.path.join(WORKDIR, 'swagger')
def get_number_of_pages(num_of_items: int, page_size: int) -> int:
"""
Get number of pages
:param num_of_items: number of items in database
:param page_size: size of one page
:return: number of pages
"""
return int((num_of_items / float(page_size)) + int(num_of_items % float(page_size) > 0))
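# Illustrative examples (added for clarity; not part of the original module):
#     get_number_of_pages(10, 3)  # -> 4 (three full pages plus one partial page)
#     get_number_of_pages(9, 3)   # -> 3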
| python |
from importlib.machinery import SourceFileLoader
import io
import os.path
from setuptools import setup
parquetry = SourceFileLoader(
"parquetry", "./parquetry/__init__.py"
).load_module()
with io.open(os.path.join(os.path.dirname(__file__), "README.md"), encoding="utf-8") as f:
long_description = f.read()
package_data = {"": ["README.md"]}
setup(
name="parquetry",
description="Dump parquet files to sql",
long_description=long_description,
long_description_content_type="text/markdown",
version=parquetry.__version__,
license="Apache 2.0",
author="source{d}",
author_email="[email protected]",
url="https://github.com/src-d/parquetry",
download_url="https://github.com/src-d/parquetry",
keywords=["dashboard_server"],
install_requires=[
"pandas",
"sqlalchemy",
"fastparquet",
"python-snappy",
"psycopg2-binary",
],
package_data=package_data,
python_requires=">=3.5",
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Operating System :: POSIX",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Libraries",
],
)
| python |
#!/usr/bin/env python3
import re
import json
prometheus_batchnum = 0
prometheus_batchsize = 1000
prometheus_batch = []
opentsdb_batchnum = 0
opentsdb_batchsize = 1000
opentsdb_batch = []
input = open("data", "r")
for line in input:
m = re.match(r"ctr,some=(tag-\w+) n=(\d+)i (\d+)", line)
if m:
tagvalue = m.group(1)
fieldvalue = int(m.group(2))
timestamp = int(m.group(3))
# ignoring timestamp for prometheus
prometheus_metric = 'ctr{some="%s",field="n"} %s\n' % (tagvalue, fieldvalue)
prometheus_batch.append(prometheus_metric)
opentsb_metric = {
"metric": "ctr",
# convert nanoseconds since epoch to seconds
"timestamp": round(timestamp / 1000000000),
"value": fieldvalue,
"tags": {"some": tagvalue, "field": "n"},
}
opentsdb_batch.append(opentsb_metric)
if len(prometheus_batch) == prometheus_batchsize:
print("Writing prometheus batch %s" % prometheus_batchnum)
batchfile = open("prometheus_data/%s" % prometheus_batchnum, "w")
batchfile.writelines(prometheus_batch)
prometheus_batch = []
prometheus_batchnum = prometheus_batchnum + 1
if len(opentsdb_batch) == opentsdb_batchsize:
print("Writing opentsdb batch %s" % opentsdb_batchnum)
batchfile = open("opentsdb_data/%s" % opentsdb_batchnum, "w")
batchfile.writelines(json.dumps(opentsdb_batch))
opentsdb_batch = []
opentsdb_batchnum = opentsdb_batchnum + 1
| python |
from httplib import OK
from unittest import SkipTest
from django.contrib.contenttypes.models import ContentType
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.contrib.auth.models import User, Group, Permission
from pycon.tests.factories import PyConTalkProposalFactory, PyConTutorialProposalFactory, \
ProposalResultFactory
from symposion.proposals.models import ProposalBase, ProposalKind
from symposion.proposals.tests.factories import init_kinds
from symposion.reviews.models import Review, ReviewAssignment, Votes
from symposion.reviews.views import is_voting_period_active
class login(object):
def __init__(self, testcase, user, password):
self.testcase = testcase
success = testcase.client.login(username=user, password=password)
self.testcase.assertTrue(
success,
"login with username=%r, password=%r failed" % (user, password)
)
def __enter__(self):
pass
def __exit__(self, *args):
self.testcase.client.logout()
class ReviewTestMixin(object):
def setUp(self):
super(ReviewTestMixin, self).setUp()
init_kinds()
def create_user(self, username="joe",
email=None,
password="snoopy",
first_name="Joe",
last_name="Smith"
):
if email is None:
email = "%[email protected]" % username
return User.objects.create_user(username,
email=email,
password=password,
first_name=first_name,
last_name=last_name)
def login(self, username="[email protected]", password="snoopy"):
# The auth backend that pycon is using is kind of gross. It expects
# username to contain the email address.
self.assertTrue(self.client.login(username=username,
password=password),
"Login failed")
class ReviewTests(TestCase):
def setUp(self):
raise SkipTest
def get(self, url_name, *args, **kwargs):
return self.client.get(reverse(url_name, args=args, kwargs=kwargs))
def post(self, url_name, *args, **kwargs):
data = kwargs.pop("data")
return self.client.post(reverse(url_name, args=args, kwargs=kwargs),
data)
def login(self, user, password):
return login(self, user, password)
def test_detail_perms(self):
guidos_proposal = ProposalBase.objects.all()[0]
response = self.get("review_detail", pk=guidos_proposal.pk)
# Not logged in
self.assertEqual(response.status_code, 302)
with self.login("guido", "pythonisawesome"):
response = self.get("review_detail", pk=guidos_proposal.pk)
# Guido can see his own proposal.
self.assertEqual(response.status_code, 200)
with self.login("matz", "pythonsucks"):
response = self.get("review_detail", pk=guidos_proposal.pk)
# Matz can't see guido's proposal
self.assertEqual(response.status_code, 302)
larry = User.objects.get(username="larryw")
# Larry is a trustworthy guy, he's a reviewer.
larry.groups.add(Group.objects.get(name="reviewers"))
with self.login("larryw", "linenoisehere"):
response = self.get("review_detail", pk=guidos_proposal.pk)
# Reviewers can see a review detail page.
self.assertEqual(response.status_code, 200)
def test_reviewing(self):
guidos_proposal = ProposalBase.objects.all()[0]
with self.login("guido", "pythonisawesome"):
response = self.post("review_review", pk=guidos_proposal.pk, data={
"vote": "+1",
})
# It redirects, but...
self.assertEqual(response.status_code, 302)
# ... no vote recorded
self.assertEqual(guidos_proposal.reviews.count(), 0)
larry = User.objects.get(username="larryw")
# Larry is a trustworthy guy, he's a reviewer.
larry.groups.add(Group.objects.get(name="reviewers"))
with self.login("larryw", "linenoisehere"):
response = self.post("review_review", pk=guidos_proposal.pk, data={
"vote": "+0",
"text": "Looks like a decent proposal, and Guido is a smart guy",
})
self.assertEqual(response.status_code, 302)
self.assertEqual(guidos_proposal.reviews.count(), 1)
self.assertEqual(ReviewAssignment.objects.count(), 1)
assignment = ReviewAssignment.objects.get()
self.assertEqual(assignment.proposal, guidos_proposal)
self.assertEqual(assignment.origin, ReviewAssignment.OPT_IN)
self.assertEqual(guidos_proposal.comments.count(), 1)
comment = guidos_proposal.comments.get()
self.assertFalse(comment.public)
response = self.post("review_review", pk=guidos_proposal.pk, data={
"vote": "+1",
"text": "Actually Perl is dead, we really need a talk on the future",
})
self.assertEqual(guidos_proposal.reviews.count(), 2)
self.assertEqual(ReviewAssignment.objects.count(), 1)
assignment = ReviewAssignment.objects.get()
self.assertEqual(assignment.review, Review.objects.order_by("-id")[0])
self.assertEqual(guidos_proposal.comments.count(), 2)
# Larry's a big fan...
response = self.post("review_review", pk=guidos_proposal.pk, data={
"vote": "+20",
})
self.assertEqual(guidos_proposal.reviews.count(), 2)
def test_speaker_commenting(self):
guidos_proposal = ProposalBase.objects.all()[0]
with self.login("guido", "pythonisawesome"):
response = self.get("review_comment", pk=guidos_proposal.pk)
# Guido can comment on his proposal.
self.assertEqual(response.status_code, 200)
response = self.post("review_comment", pk=guidos_proposal.pk, data={
"text": "FYI I can do this as a 30-minute or 45-minute talk.",
})
self.assertEqual(response.status_code, 302)
self.assertEqual(guidos_proposal.comments.count(), 1)
comment = guidos_proposal.comments.get()
self.assertTrue(comment.public)
larry = User.objects.get(username="larryw")
# Larry is a trustworthy guy, he's a reviewer.
larry.groups.add(Group.objects.get(name="reviewers"))
with self.login("larryw", "linenoisehere"):
response = self.get("review_comment", pk=guidos_proposal.pk)
# Larry can comment, since he's a reviewer
self.assertEqual(response.status_code, 200)
response = self.post("review_comment", pk=guidos_proposal.pk, data={
"text": "Thanks for the heads-up Guido."
})
self.assertEqual(response.status_code, 302)
self.assertEqual(guidos_proposal.comments.count(), 2)
with self.login("matz", "pythonsucks"):
response = self.get("review_comment", pk=guidos_proposal.pk)
# Matz can't comment.
self.assertEqual(response.status_code, 302)
class ReviewPageTest(ReviewTestMixin, TestCase):
def test_review_section(self):
talk = PyConTalkProposalFactory(
title="My talk",
description="Description of the talk",
category__name="My talk category"
)
# Make a few more talks to inflate the queries if we haven't optimized them properly
for __ in range(10):
ProposalResultFactory(proposal=PyConTalkProposalFactory())
tutorial = PyConTutorialProposalFactory(
title="My tutorial",
category__name="My tutorial category"
)
self.user = self.create_user()
self.login()
# If we go to the talk section, we only see talk data (not
# tutorial data).
kind = ProposalKind.objects.get(slug='talk')
section = kind.section
url = reverse('review_section', kwargs={'section_slug': section.slug})
ct = ContentType.objects.get_for_model(Review)
perm, __ = Permission.objects.get_or_create(
codename="can_review_%s" % section.slug,
content_type=ct,
)
self.user.user_permissions.add(perm)
# Run it once to force creation of result objects
rsp = self.client.get(url)
self.assertEqual(OK, rsp.status_code)
# Now run it for the test, making sure we don't need more queries than reasonable
with self.assertNumQueries(16):
rsp = self.client.get(url)
self.assertEqual(OK, rsp.status_code)
self.assertContains(rsp, talk.title)
self.assertContains(rsp, "My talk category")
self.assertNotContains(rsp, tutorial.title)
self.assertNotContains(rsp, "My tutorial category")
# Now make sure the tutorial section has tutorial data but not talk.
kind2 = ProposalKind.objects.get(slug='tutorial')
section = kind2.section
perm, __ = Permission.objects.get_or_create(
codename="can_review_%s" % section.slug,
content_type=ct,
)
self.user.user_permissions.add(perm)
url = reverse('review_section', kwargs={'section_slug': section.slug})
rsp = self.client.get(url)
self.assertEqual(OK, rsp.status_code)
self.assertNotContains(rsp, talk.title)
self.assertNotContains(rsp, "My talk category")
self.assertContains(rsp, tutorial.title)
self.assertContains(rsp, "My tutorial category")
class SubmitReviewTest(ReviewTestMixin, TestCase):
def submit_review(self, proposal, user, vote):
# Submit a vote and return the updated proposal object
assert is_voting_period_active(proposal)
self.login(username=user.username)
url = reverse('review_detail', kwargs={'pk': proposal.pk})
data = dict(
vote_submit="yep",
vote=vote,
comment="deep thoughts",
)
rsp = self.client.post(url, data)
self.assertRedirects(rsp, url)
return type(proposal).objects.get(pk=proposal.pk)
def test_submit_review(self):
# Reviewers can submit multiple reviews. Only their most recent vote counts.
talk = PyConTalkProposalFactory(title="talk", description="talk",
category__name="My talk category")
self.user = self.create_user()
perm, __ = Permission.objects.get_or_create(
codename="can_review_talks",
content_type=ContentType.objects.get_for_model(Review),
)
self.user.user_permissions.add(perm)
user2 = self.create_user(username="user2")
user2.user_permissions.add(perm)
# User submits first vote: +1
talk = self.submit_review(talk, self.user, Votes.PLUS_ONE)
# One +1 vote gives a score of 3
self.assertEqual(3, talk.result.score)
# Let's try adding another vote - because it's from the same
# user, it should supersede their previous vote in the score.
talk = self.submit_review(talk, self.user, Votes.MINUS_ZERO)
# A -0 vote is a score of -1
self.assertEqual(-1, talk.result.score)
# Now, add a vote from a different user, which should be counted
# separately and adjust the score
talk = self.submit_review(talk, user2, Votes.PLUS_ONE)
# Adding a new +1 vote adds 3 to the previous score
self.assertEqual(2, talk.result.score)
| python |
import datetime
from datetime import date
def from_external_date(s: str):
"""
    Translates a date string from the external source file into a datetime.date object
:param s: String representation of a date
:return: The datetime.date object
"""
if '/' in s:
year, month = [int(x) for x in s.split('/')]
return date(year=year, month=month, day=1)
else:
return date(year=int(s), month=1, day=1)
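# Illustrative examples (added for clarity; not part of the original source):
#     from_external_date("1998/06")  # -> date(1998, 6, 1)
#     from_external_date("2001")     # -> date(2001, 1, 1)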
base = date(year=1998, month=1, day=1)
def date_to_int(dt: date):
"""
Uniformly transforms any date in the file into the int
:param dt: the datetime.date object
:return: int representation of the date
"""
return (dt - base).days
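# Illustrative example (added for clarity; not part of the original source):
# dates are counted in whole days from the 1998-01-01 base, e.g.
#     date_to_int(date(1998, 1, 31))  # -> 30
# and int_to_date below is the inverse mapping.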
def int_to_date(i: int):
return base + datetime.timedelta(days=i) | python |
import logging
import warnings
from typing import Dict, Tuple, Union
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
import xarray as xr
from scipy import signal, spatial
import matlab.engine
# import pharedox_registration
# import matlab
from pharedox import utils
import pkgutil
def to_dataframe(data: xr.DataArray, *args, **kwargs) -> pd.DataFrame:
"""
Replacement for `xr.DataArray.to_dataframe` that adds the attrs for the given
DataArray into the resultant DataFrame.
Parameters
----------
data : xr.DataArray
the data to convert to DataFrame
Returns
-------
pd.DataFrame
a pandas DataFrame containing the data in the given DataArray, including the
global attributes
"""
df = data.to_dataframe(*args, **kwargs)
for k, v in data.attrs.items():
df[k] = v
return df
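# Illustrative usage (added for clarity; not part of the original module): if `da`
# is a DataArray carrying attrs such as {"strain": "N2"} (a hypothetical key),
#     df = to_dataframe(da)
# produces the usual long-format DataFrame plus a "strain" column set to "N2" in
# every row, so global metadata survives the conversion.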
def align_pa(
intensity_data: xr.DataArray,
reference_wavelength: str = "410",
reference_pair: int = 0,
reference_timepoint: int = 0,
) -> xr.DataArray:
"""
Given intensity profile data, flip each animal along their anterior-posterior axis
if necessary, so that all face the same direction
Parameters
----------
intensity_data
the data to align
reference_wavelength: optional
the wavelength to calculate the alignment for
reference_pair: optional
the pair to calculate the alignment for
reference_timepoint
the timepoint to calculate the alignment for
Returns
-------
aligned_intensity_data
the PA-aligned intensity data
Notes
-----
The alignments are calculated for a single wavelength and pair for each animal, then
applied to all wavelengths and pairs for that animal.
The algorithm works as follows:
- take the derivative of the (trimmed) intensity profiles (this accounts for
differences in absolute intensity between animals)
- use the first animal in the stack as the reference profile
- for all animals:
- compare a forward and reverse profile to the reference profile (using the
cosine-similarity metric)
- keep either the forward or reverse profile accordingly
- finally, determine the location of the peaks in the *average* profile
- reverse all profiles if necessary (this will be necessary if the first
animal happens to be reversed)
"""
data = intensity_data
ref_data = data.sel(
wavelength=reference_wavelength,
pair=reference_pair,
timepoint=reference_timepoint,
)
ref_profile = ref_data.isel(animal=0).data
ref_vecs = np.tile(ref_profile, (data.animal.size, 1))
unflipped = data.sel(
wavelength=reference_wavelength,
pair=reference_pair,
timepoint=reference_timepoint,
).data
flipped = np.fliplr(unflipped)
# cosine-similarity measurements
should_flip = (
spatial.distance.cdist(ref_vecs, unflipped, "cosine")[0, :]
> spatial.distance.cdist(ref_vecs, flipped, "cosine")[0, :]
)
# Do the actual flip
# position needs to be reindexed, otherwise xarray freaks out
intensity_data[should_flip] = np.flip(
intensity_data[should_flip].values, axis=intensity_data.get_axis_num("position")
)
intensity_data = intensity_data.reindex(
position=np.linspace(0, 1, intensity_data.position.size)
)
mean_intensity = trim_profile(
np.mean(
intensity_data.sel(
wavelength=reference_wavelength,
pair=reference_pair,
timepoint=reference_timepoint,
),
axis=0,
).data,
threshold=2000,
new_length=100,
)
# parameters found experimentally
# TODO these could use some tweaking
peaks, _ = signal.find_peaks(
mean_intensity, distance=0.2 * len(mean_intensity), prominence=200, wlen=10
)
if len(peaks) < 2:
return intensity_data
if peaks[0] < len(mean_intensity) - peaks[1]:
logging.warning("Skipping second data flip. Needs further investigation!")
return intensity_data
# intensity_data = np.flip(
# intensity_data, axis=intensity_data.get_axis_num("position")
# )
return intensity_data
def summarize_over_regions(
data: xr.DataArray,
regions: Dict,
eGFP_correction: Dict,
rescale: bool = True,
value_name: str = "value",
pointwise: Union[bool, str] = False,
**redox_params,
):
if pointwise == "both":
# recursively call this function for pointwise=T/F and concat the results
return pd.concat(
[
summarize_over_regions(
data, regions, rescale, value_name, pointwise=False
),
summarize_over_regions(
data, regions, rescale, value_name, pointwise=True
),
]
)
if rescale:
regions = utils.scale_region_boundaries(regions, data.shape[-1])
try:
# Ensure that derived wavelengths are present
data = utils.add_derived_wavelengths(data, **redox_params)
except ValueError:
pass
with warnings.catch_warnings():
warnings.simplefilter("ignore")
all_region_data = []
for _, bounds in regions.items():
if isinstance(bounds, (int, float)):
all_region_data.append(data.interp(position=bounds))
else:
all_region_data.append(
data.sel(position=slice(bounds[0], bounds[1])).mean(
dim="position", skipna=True
)
)
region_data = xr.concat(all_region_data, pd.Index(regions.keys(), name="region"))
region_data = region_data.assign_attrs(**data.attrs)
try:
region_data.loc[dict(wavelength="r")] = region_data.sel(
wavelength=redox_params["ratio_numerator"]
) / region_data.sel(wavelength=redox_params["ratio_denominator"])
region_data.loc[dict(wavelength="oxd")] = r_to_oxd(
region_data.sel(wavelength="r"),
r_min=redox_params["r_min"],
r_max=redox_params["r_max"],
instrument_factor=redox_params["instrument_factor"],
)
region_data.loc[dict(wavelength="e")] = oxd_to_redox_potential(
region_data.sel(wavelength="oxd"),
midpoint_potential=redox_params["midpoint_potential"],
z=redox_params["z"],
temperature=redox_params["temperature"],
)
except ValueError:
pass
# add corrections
if eGFP_correction["should_do_corrections"]:
# add data using xr.to_dataframe so correction values can be added directly next to value column
df = region_data.to_dataframe(value_name)
corrections = eGFP_corrections(df, eGFP_correction, **redox_params)
df["correction_ratio"] = corrections["correction_ratio"]
df["corrected_value"] = corrections["corrected_value"]
df["oxd"] = corrections["oxd"]
df["e"] = corrections["e"]
# add attributes
for k, v in region_data.attrs.items():
df[k] = v
for i in range(df.shape[0]):
x = i % 6
pd.options.mode.chained_assignment = None # default='warn'
# TODO fix chain indexing error warning. Will leave for now but may cause issues
if data["wavelength"][x] == "TL":
df["e"][i] = None
else:
df = to_dataframe(region_data, value_name)
df["pointwise"] = pointwise
try:
df.set_index(["experiment_id"], append=True, inplace=True)
except ValueError:
pass
return df
def eGFP_corrections(
data: DataFrame,
eGFP_correction: Dict,
**redox_params,
):
logging.info("Doing eGFP corrections")
# find the correction factor based of experiment specific eGFP number
correction_ratio = (
eGFP_correction["Cata_Number"] / eGFP_correction["Experiment_Number"]
)
# create empty lists that will contain column values
correction_ratio = [correction_ratio] * data.shape[0]
corrected_value = [None] * data.shape[0]
oxd = [None] * data.shape[0]
e = [None] * data.shape[0]
values = data["value"].tolist()
# loop through all the values
for i in range(data.shape[0]):
# find corrected value
corrected_value[i] = values[i] * correction_ratio[i]
# find oxd using formula
oxd[i] = r_to_oxd(
corrected_value[i],
redox_params["r_min"],
redox_params["r_max"],
redox_params["instrument_factor"],
)
# find e based on oxd
e[i] = oxd_to_redox_potential(oxd[i])
return {
"correction_ratio": correction_ratio,
"corrected_value": corrected_value,
"oxd": oxd,
"e": e,
}
def smooth_profile_data(
profile_data: Union[np.ndarray, xr.DataArray],
lambda_: float = 100.0,
order: float = 4.0,
n_basis: float = 100.0,
n_deriv=0.0,
eng=None,
):
"""
Smooth profile data by fitting smoothing B-splines
Implemented in MATLAB as smooth_profiles
"""
# eng = pharedox_registration.initialize()
try:
import matlab.engine
except ImportError:
        logging.warning("MATLAB engine not installed. Skipping smoothing.")
return profile_data
if eng is None:
eng = matlab.engine.start_matlab()
resample_resolution = profile_data.position.size
return xr.apply_ufunc(
lambda x: np.array(
eng.smooth_profiles(
matlab.double(x.tolist()),
resample_resolution,
n_basis,
order,
lambda_,
n_deriv,
)
).T,
profile_data,
input_core_dims=[["position"]],
output_core_dims=[["position"]],
vectorize=True,
)
def standardize_profiles(
profile_data: xr.DataArray,
redox_params,
template: Union[xr.DataArray, np.ndarray] = None,
eng=None,
**reg_kwargs,
) -> Tuple[xr.DataArray, xr.DataArray]:
"""
Standardize the A-P positions of the pharyngeal intensity profiles.
Parameters
----------
profile_data
The data to standardize. Must have the following dimensions:
``["animal", "timepoint", "pair", "wavelength"]``.
redox_params
the parameters used to map R -> OxD -> E
template
a 1D profile to register all intensity profiles to. If None, intensity profiles
are registered to the population mean of the ratio numerator.
eng
The MATLAB engine to use for registration. If ``None``, a new engine is started.
reg_kwargs
Keyword arguments to use for registration. See `registration kwargs` for more
information.
Returns
-------
standardized_data: xr.DataArray
the standardized data
warp_functions: xr.DataArray
the warp functions generated to standardize the data
"""
# eng = pharedox_registration.initialize()
if eng is None:
eng = matlab.engine.start_matlab()
std_profile_data = profile_data.copy()
std_warp_data = profile_data.copy().isel(wavelength=0)
if template is None:
template = profile_data.sel(wavelength=redox_params["ratio_numerator"]).mean(
dim=["animal", "pair"]
)
try:
template = matlab.double(template.values.tolist())
except AttributeError:
template = matlab.double(template.tolist())
for tp in profile_data.timepoint:
for pair in profile_data.pair:
data = std_profile_data.sel(timepoint=tp, pair=pair)
i_num = matlab.double(
data.sel(wavelength=redox_params["ratio_numerator"]).values.tolist()
)
i_denom = matlab.double(
data.sel(wavelength=redox_params["ratio_denominator"]).values.tolist()
)
resample_resolution = float(profile_data.position.size)
reg_num, reg_denom, warp_data = eng.standardize_profiles(
i_num,
i_denom,
template,
resample_resolution,
reg_kwargs["warp_n_basis"],
reg_kwargs["warp_order"],
reg_kwargs["warp_lambda"],
reg_kwargs["smooth_lambda"],
reg_kwargs["smooth_n_breaks"],
reg_kwargs["smooth_order"],
reg_kwargs["rough_lambda"],
reg_kwargs["rough_n_breaks"],
reg_kwargs["rough_order"],
reg_kwargs["n_deriv"],
nargout=3,
)
reg_num, reg_denom = np.array(reg_num).T, np.array(reg_denom).T
std_profile_data.loc[
dict(
timepoint=tp, pair=pair, wavelength=redox_params["ratio_numerator"]
)
] = reg_num
std_profile_data.loc[
dict(
timepoint=tp,
pair=pair,
wavelength=redox_params["ratio_denominator"],
)
] = reg_denom
std_warp_data.loc[dict(timepoint=tp, pair=pair)] = np.array(warp_data).T
std_profile_data = std_profile_data.assign_attrs(**reg_kwargs)
std_profile_data = utils.add_derived_wavelengths(std_profile_data, **redox_params)
return std_profile_data, std_warp_data
def channel_register(
profile_data: xr.DataArray,
redox_params: dict,
reg_params: dict,
eng: matlab.engine.MatlabEngine = None,
) -> Tuple[xr.DataArray, xr.DataArray]:
"""
Perform channel-registration on the given profile data
Parameters
----------
profile_data
the data to register
redox_params
the redox parameters
reg_params
the registration parameters
eng
the MATLAB engine (optional)
Returns
-------
reg_data: xr.DataArray
the registered data
warp_data: xr.DataArray
the warp functions used to register the data
"""
if eng is None:
eng = matlab.engine.start_matlab()
# eng = pharedox_registration.initialize()
reg_profile_data = profile_data.copy()
warp_data = profile_data.copy().isel(wavelength=0)
for p in profile_data.pair:
for tp in profile_data.timepoint:
i_num = matlab.double(
profile_data.sel(
timepoint=tp, pair=p, wavelength=redox_params["ratio_numerator"]
).values.tolist()
)
i_denom = matlab.double(
profile_data.sel(
timepoint=tp, pair=p, wavelength=redox_params["ratio_denominator"]
).values.tolist()
)
resample_resolution = float(profile_data.position.size)
reg_num, reg_denom, warps = eng.channel_register(
i_num,
i_denom,
resample_resolution,
reg_params["warp_n_basis"],
reg_params["warp_order"],
reg_params["warp_lambda"],
reg_params["smooth_lambda"],
reg_params["smooth_n_breaks"],
reg_params["smooth_order"],
reg_params["rough_lambda"],
reg_params["rough_n_breaks"],
reg_params["rough_order"],
reg_params["n_deriv"],
nargout=3,
)
reg_num, reg_denom = np.array(reg_num).T, np.array(reg_denom).T
reg_profile_data.loc[
dict(timepoint=tp, pair=p, wavelength=redox_params["ratio_numerator"])
] = reg_num
reg_profile_data.loc[
dict(timepoint=tp, pair=p, wavelength=redox_params["ratio_denominator"])
] = reg_denom
warp_data.loc[dict(pair=p, timepoint=tp)] = np.array(warps).T
reg_profile_data = utils.add_derived_wavelengths(reg_profile_data, **redox_params)
return reg_profile_data, warp_data
def trim_profile(
profile: Union[np.ndarray, xr.DataArray], threshold: float, new_length: int
):
"""
Trim the given profile data by finding the first/last values where the profile
crosses the specified threshold, then interpolating to fit the given new length.
.. note::
Uses linear interpolation
Parameters
----------
profile
the data to trim
threshold
the threshold
new_length
the length of the resultant interpolated profiles
Returns
-------
"""
first = np.argmax(profile > threshold)
last = len(profile) - np.argmax(np.flip(profile > threshold))
trimmed = profile[first : last + 1]
new_xs = np.linspace(0, len(trimmed), new_length)
old_xs = np.arange(0, len(trimmed))
return np.interp(new_xs, old_xs, trimmed)
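# Illustrative usage (added for clarity; not part of the original module): for a
# profile that only exceeds the threshold over some interior window,
#     trim_profile(profile, threshold=2000, new_length=100)
# keeps just that above-threshold window and linearly resamples it to 100 points
# (`profile` here is a placeholder 1D array).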
def get_trim_boundaries(
data: xr.DataArray, ref_wvl: str = "410", thresh: float = 2000.0
) -> Tuple[np.ndarray, np.ndarray]:
"""
Find the "left" and "right" indices to use to trim intensity profiles given a
threshold.
Essentially, we find the first index where the intensity profile crosses the given
threshold and call that the "left", then do the same on the reversed profile and
call that the "right".
Parameters
----------
data
the intensity profile data (potentially containing multiple wavelengths)
ref_wvl
the wavelength to use to calculate boundaries
thresh
the threshold
Returns
-------
(np.ndarray, np.ndarray)
the (left, right) bounds for each profile, where the index in the array
corresponds to the index of the animal in ``data``.
"""
prof_len = data.position.size
data_reversed = data.reindex(position=list(reversed(data.position)))
l_bound = (data.sel(wavelength=ref_wvl) >= thresh).argmax(dim="position").data - 1
r_bound = (
prof_len
- (data_reversed.sel(wavelength=ref_wvl) >= thresh).argmax(dim="position").data
) - 1
return l_bound, r_bound
def trim_profiles(
intensity_data: xr.DataArray, threshold: float, ref_wvl: str = "410"
) -> xr.DataArray:
"""
Trim the background away from the profiles.
Parameters
----------
intensity_data : xr.DataArray
the profile data to trim
threshold : float
the threshold under which data will be thrown away
ref_wvl : str, optional
the wavelength to be used to calculate trim boundaries. Other wavelengths will
be trimmed using these boundaries. By default "410"
Returns
-------
xr.DataArray
the trimmed profiles
"""
trimmed_intensity_data = intensity_data.copy()
l, r = get_trim_boundaries(intensity_data, ref_wvl=ref_wvl, thresh=threshold)
for i, img_idx in enumerate(intensity_data.animal):
for wvl_idx in range(intensity_data.wavelength.size):
wvl = intensity_data.wavelength.data[wvl_idx]
if "tl" not in wvl.lower():
for pair in range(intensity_data.pair.size):
for tp in intensity_data.timepoint.values:
selector = dict(
wavelength=wvl, pair=pair, animal=img_idx, timepoint=tp
)
data = intensity_data.sel(selector).data
l_i, r_i = l[i, tp, pair], r[i, tp, pair]
try:
trimmed = data[l_i:r_i]
new_xs = np.linspace(
0, len(trimmed), intensity_data.position.size
)
old_xs = np.arange(0, len(trimmed))
resized = np.interp(new_xs, old_xs, trimmed)
trimmed_intensity_data.loc[selector] = resized
except ValueError:
logging.warning(
f"trim boundaries close ({np.abs(r_i - l_i)}) for (animal: {i}, wvl: {wvl}, pair: {pair}) - skipping trimming this animal"
)
return trimmed_intensity_data
def r_to_oxd(
r: Union[np.ndarray, xr.DataArray, float],
r_min: float = 0.852,
r_max: float = 6.65,
instrument_factor: float = 0.171,
):
"""
Convert ratios to OxD
Parameters
----------
r
r_min
r_max
instrument_factor
Returns
-------
"""
return (r - r_min) / ((r - r_min) + instrument_factor * (r_max - r))
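# Illustrative sanity check (added for clarity; not part of the original module):
# with the default calibration values the mapping is anchored at the endpoints,
#     r_to_oxd(0.852)  # -> 0.0
#     r_to_oxd(6.65)   # -> 1.0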
def oxd_to_redox_potential(
oxd: Union[np.ndarray, xr.DataArray, float],
midpoint_potential: float = -265.0,
z: float = 2.0,
temperature: float = 22.0,
):
"""
Convert OxD to redox potential
.. warning::
May contain ``NaN`` values
Parameters
----------
oxd
midpoint_potential
z
temperature
Returns
-------
"""
# We can get NaN ratios because of background subtraction, this is expected
# so we suppress the warnings here
with np.errstate(invalid="ignore"):
return midpoint_potential - (
8314.462 * (273.15 + temperature) / (z * 96485.3415)
) * np.log((1 - oxd) / oxd)
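# Illustrative sanity check (added for clarity; not part of the original module):
# at OxD = 0.5 the log term vanishes, so the result is the midpoint potential,
#     oxd_to_redox_potential(0.5)  # -> -265.0 with the default parameters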
| python |
from Bio.Align import MultipleSeqAlignment, AlignInfo
from Bio.SeqRecord import SeqRecord
from Bio import SeqIO
import pandas as pd
import numpy as np
import subprocess
from scipy.spatial.distance import pdist
from scipy.cluster.hierarchy import linkage, fcluster, dendrogram
from Levenshtein import ratio, distance
import matplotlib.pyplot as plt
def remove_chimeras_from_umi_pairs(starcode1Path, starcode2Path, output, tdd = False):
s1UMI, s1Indices = gather_umis_and_corresponding_indices_from_starcode(starcode1Path, tdd = tdd)
s2UMI, s2Indices = gather_umis_and_corresponding_indices_from_starcode(starcode2Path, tdd = tdd)
umiMatch1, umiMatch2, sharedIndices = sort_umi_pairs_by_number_of_matching_indices(s1UMI, s1Indices, s2UMI, s2Indices)
umiMatch1, umiMatch2, sharedIndices = remove_duplicate_umis_from_pairs(umiMatch1, umiMatch2, sharedIndices)
data = []
for i in range(len(sharedIndices)): data.append([umiMatch1[i] + umiMatch2[i], len(sharedIndices[i]), ','.join([str(x) for x in sorted(sharedIndices[i])])])
df = pd.DataFrame(data)
df.to_csv(output, sep='\t', index=False, header=False)
def gather_umis_and_corresponding_indices_from_starcode(starcodePath, tdd = False):
s1 = pd.read_csv(starcodePath, sep='\t', header=None)
    if isinstance(list(s1.iloc[:,2])[0],int): raise Exception('Fewer than 5 UMI clusters found with more than a single sequence')
s1UMI = s1.iloc[:,0]
s1Indices = [set([int(y) for y in x.split(',')]) for x in list(s1.iloc[:,2])]
remove = []
    for i in range(len(s1Indices)):
        # drop UMI clusters supported by fewer than 10 reads
        if len(s1Indices[i]) < 10: remove.append(i)
if not tdd:
s1UMI, s1Indices = [np.delete(np.array(x),(remove)) for x in [s1UMI, s1Indices]]
        if len(s1Indices) < 5: raise Exception('Fewer than 5 UMI clusters found with more than a single sequence')
return s1UMI, s1Indices
def sort_umi_pairs_by_number_of_matching_indices(s1UMI, s1Indices, s2UMI, s2Indices):
umi1List = []
umi2List = []
indicesList = []
for i in range(len(s1UMI)):
umi1 = s1UMI[i]
indices1 = s1Indices[i]
for j in range(len(s2UMI)):
umi2 = s2UMI[j]
indices2 = s2Indices[j]
intersect = indices1.intersection(indices2)
if len(intersect) != 0:
umi1List.append(umi1)
umi2List.append(umi2)
indicesList.append(intersect)
lengths = [len(i) for i in indicesList]
lengths, indicesList, umi1List, umi2List = zip(*sorted(zip(lengths, indicesList, umi1List, umi2List), reverse=True))
return umi1List, umi2List, indicesList
def remove_duplicate_umis_from_pairs(umi1List, umi2List, indicesList):
umi1Set = set()
umi2Set = set()
remove = []
for i in range(len(indicesList)):
umi1 = umi1List[i]
umi2 = umi2List[i]
if umi1 in umi1Set or umi2 in umi2Set: remove.append(i)
else: umi1Set.add(umi1); umi2Set.add(umi2)
indicesList, umi1List, umi2List = [np.delete(np.array(x),(remove)) for x in [indicesList, umi1List, umi2List]]
return umi1List, umi2List, indicesList
def bin_sequences_by_umi_pair(seqPath, starcodePath):
index_recordID = {}
with open(seqPath) as handle:
count = 1
for record in SeqIO.parse(handle, "fastq"): index_recordID[count] = record.id; count += 1
starcode = pd.read_csv(starcodePath, sep='\t', header=None)
starcode = starcode[starcode.iloc[:,1] >= 50]
starcode = list(starcode.iloc[:,2])
fq = SeqIO.index(seqPath, "fastq")
for i in range(len(starcode)):
indices = [int(y) for y in starcode[i].split(',')]
records = [fq[index_recordID[j]] for j in indices]
outputPath = '.'.join(seqPath.split('.')[:-1]) + '_bin' + str(i) + '.fq'
with open(outputPath, "w") as output_handle:
SeqIO.write(records, output_handle, "fastq")
fq.close()
def make_hamming_distance_matrix(seqs):
array = np.array(seqs).reshape(-1,1)
return pdist(np.array(array), lambda x,y: 1-ratio(x[0],y[0]))
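# Illustrative note (added for clarity; not part of the original source): the
# metric is 1 - Levenshtein ratio, so identical sequences get distance 0.0 and
# the return value is a condensed distance matrix as produced by pdist, e.g.
#     make_hamming_distance_matrix(["ACGT", "ACGT", "TTTT"])  # first entry -> 0.0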
def cluster_longread_consensus_sequences(seqs, threshold = 1/20, dendrogramFile=None):
dist_matrix = make_hamming_distance_matrix(np.array(seqs))
link_matrix = linkage(dist_matrix, method = 'centroid')
labels = fcluster(link_matrix, threshold, criterion='distance')
if dendrogramFile:
plt.figure()
dn = dendrogram(link_matrix)
plt.savefig(dendrogramFile)
seqs = np.array(seqs)
for cluster_id in np.unique(labels):
yield labels==cluster_id
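# Illustrative usage (added for clarity; not part of the original source): the
# generator yields one boolean mask per cluster, which can be used to subset the
# input sequences, e.g.
#     for mask in cluster_longread_consensus_sequences(seqs):
#         cluster_seqs = [s for s, keep in zip(seqs, mask) if keep]
# (`seqs` here is a placeholder list of read strings).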
| python |
from fastapi import FastAPI, Response, status
from fastapi.middleware.cors import CORSMiddleware
import os
import requests
from dotenv import load_dotenv
load_dotenv()
from .models import FromForm
from .database import db
from .payment import Payment
app = FastAPI()
app.add_middleware(
CORSMiddleware,
allow_origin_regex=".*localhost.*",
allow_origins=[
"http://127.0.0.1:5500",
"https://felipe-e-wendy.github.io/",
"https://felipe-e-wendy.github.io",
],
allow_methods=["GET", "POST"],
allow_headers=["*"],
)
def format_msg(msg):
document = dict()
msg_dict = msg.dict()
document["name"] = msg_dict["name"]
document["wpp"] = msg_dict["wpp"]
document["msg"] = msg_dict["msg"]
document["price"] = msg_dict["price"]
document["paid"] = False
return document
@app.on_event("shutdown")
async def disconnect():
db.close()
@app.get("/")
async def read_root():
return {"status": "OK"}
@app.post("/msg", status_code=200)
async def create_item(msg: FromForm, response: Response):
SECRET_RECAPTCHAV2 = os.environ["SECRET_RECAPTCHAV2"]
data = {"secret": SECRET_RECAPTCHAV2, "response": msg.token}
response_captcha = requests.post(
"https://www.google.com/recaptcha/api/siteverify", data=data
).json()
if response_captcha["success"]:
document = format_msg(msg)
payment = Payment(document)
if payment.status == 201:
document["payment_id"] = payment.pay_id
document["payment_url"] = payment.url
db.msg.insert_one(document.copy())
return document
else:
response.status_code = payment.status
return {"error": "Mercado Pago Error"}
else:
response.status_code = status.HTTP_401_UNAUTHORIZED
return {"error": "Captcha Error"}
| python |
from django.contrib.auth.models import User
from project.models import Project
from django.test import TestCase, Client
from django.urls import reverse
from django.core import validators
import mongoengine
from decouple import config
import json
from faker import Faker
# def setUp(self):
# credentials = base64.b64encode('username:password')
# self.client.defaults['HTTP_AUTHORIZATION'] = 'Basic ' + credentials
def test_db_setup():
mongoengine.connection.disconnect()
mongoengine.connect(
db=config('MONGODB_TEST_DB'),
username=config('MONGODB_USER'),
password=config('MONGODB_PASSWORD'),
host='mongodb',
port=config('MONGODB_PORT', cast=int),
authentication_source='admin',
connect=False
)
def test_db_tearDown():
connection = mongoengine.connection.get_connection()
connection.drop_database(config('MONGODB_TEST_DB'))
mongoengine.connection.disconnect()
class TestProject(TestCase):
headers = {}
@classmethod
def setUpClass(self):
super().setUpClass()
test_db_setup()
self.fake = Faker()
self.client = Client()
@classmethod
def tearDownClass(self):
test_db_tearDown()
super().tearDownClass()
def setUp(self):
super().setUp()
# Valid user registered
body = {
'email': self.fake.first_name()+'@karpuz.ml',
'username': self.fake.first_name(),
'password': "karpuz123",
'full_name': self.fake.name()
}
response = self.client.post(reverse('register'), json.dumps(body), content_type='application/json')
token = response.json()['api_token']
self.headers = {
'HTTP_AUTHORIZATION': token
}
    # workaround to not raise settings.DATABASE error
def _post_teardown(self):
return
def test_project(self):
url = reverse('get_projects')
# GET Guest call
response = self.client.get(url)
self.assertTrue('projects' in response.json() and response.json()['response'] == True)
# Token missing
body = {
'title': "Project Title",
'description': "Simple Desc",
'project_deadline': "2018-10-10",
'budget': 200
}
response = self.client.post(url, json.dumps(body), content_type='application/json')
exp_data = {
'error': 'Unauthorized',
'response': False
}
self.assertEqual(exp_data, response.json())
# Title missing
body = {
'description': "Simple Desc",
'project_deadline': "2018-10-10",
'budget': 200
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
exp_data = {
'error': "'title'",
'response': False
}
self.assertEqual(exp_data, response.json())
# Description missing
body = {
'title': "Simple Title",
'project_deadline': "2018-10-10",
'budget': 200
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
exp_data = {
'error': "'description'",
'response': False
}
self.assertEqual(exp_data, response.json())
# project_deadline missing
body = {
'title': "Simple Title",
'description': "Simple Desc",
'budget': 200
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
exp_data = {
'error': "'project_deadline'",
'response': False
}
self.assertEqual(exp_data, response.json())
# budget missing
body = {
'title': "Simple Title",
'description': "Simple Desc",
'project_deadline': "2018-10-10",
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
exp_data = {
'error': "'budget'",
'response': False
}
self.assertEqual(exp_data, response.json())
# Valid Project
body = {
'title': "Project Title",
'description': "Simple Desc",
'project_deadline': "2018-10-10",
'budget': 200
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
self.assertTrue('project' in response.json() and response.json()['response'] == True)
project_id = response.json()['project']['project_id']
# GET projects and project
response = self.client.get(url, {'ids': project_id}, content_type='application/json')
self.assertTrue('projects' in response.json() and response.json()['response'] == True)
# PUT project
body = {
'project_id': project_id,
'title': "Project Title",
}
response = self.client.put(url, json.dumps(body), content_type='application/json', **self.headers)
self.assertTrue(body['title'] == response.json()['project']['title'] and response.json()['response'] == True)
def test_project_own(self):
# Create Freelancer
body = {
'email': self.fake.first_name() + '@karpuz.ml',
'username': self.fake.first_name(),
'password': "karpuz123",
'full_name': self.fake.name()
}
response = self.client.post(reverse('register'), json.dumps(body), content_type='application/json')
token = response.json()['api_token']
headers = {
'HTTP_AUTHORIZATION': token
}
url = reverse('get_own_projects')
response = self.client.get(url, content_type='application/json', **self.headers)
self.assertTrue('projects' in response.json() and response.json()['response'] == True)
def test_project_search(self):
# Create Freelancer
body = {
'email': self.fake.first_name() + '@karpuz.ml',
'username': self.fake.first_name(),
'password': "karpuz123",
'full_name': self.fake.name()
}
response = self.client.post(reverse('register'), json.dumps(body), content_type='application/json')
token = response.json()['api_token']
headers = {
'HTTP_AUTHORIZATION': token
}
url = reverse('search_projects')
query = "test"
response = self.client.get(url, {'query': query}, content_type='application/json')
self.assertTrue('projects' in response.json() and response.json()['response'] == True)
def test_bid_accept_and_finish(self):
# Create Freelancer
body = {
'email': self.fake.first_name()+'@karpuz.ml',
'username': self.fake.first_name(),
'password': "karpuz123",
'full_name': self.fake.name()
}
response = self.client.post(reverse('register'), json.dumps(body), content_type='application/json')
token = response.json()['api_token']
headers = {
'HTTP_AUTHORIZATION': token
}
url = reverse('get_user')
response = self.client.get(url, **headers)
self.assertTrue('user' in response.json() and response.json()['response'] == True)
freelancer = response.json()['user']['id']
url = reverse('get_projects')
# Create Project
body = {
'title': "Project Title",
'description': "Simple Desc",
'project_deadline': "2018-10-10",
'budget': 0
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
self.assertTrue('project' in response.json() and response.json()['response'] == True)
project_id = response.json()['project']['project_id']
# Add Bid to the project from freelancer
url = reverse('add_bid')
body = {
'project_id': project_id,
'freelancer': freelancer,
'note': "I am the best for this job.",
'offer': 0
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **headers)
self.assertTrue(response.json()['response'] == True)
# Get project
url = reverse('get_projects')
response = self.client.get(url, {'ids': project_id}, content_type='application/json')
self.assertTrue('projects' in response.json() and response.json()['response'] == True)
bid_id = response.json()['projects'][0]['bids'][0]['bid_id']
# Accept Bid from client
url = reverse('accept_bid')
body = {
'bid_id': bid_id,
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
self.assertTrue(response.json()['response'] == True)
# Finish Project
url = reverse('finish_project')
body = {
'project_id': project_id,
}
response = self.client.put(url, json.dumps(body), content_type='application/json', **self.headers)
self.assertTrue(2 == response.json()['project']['status'] and response.json()['response'] == True)
def test_bid_discard(self):
# Create Freelancer
body = {
'email': self.fake.first_name()+'@karpuz.ml',
'username': self.fake.first_name(),
'password': "karpuz123",
'full_name': self.fake.name()
}
response = self.client.post(reverse('register'), json.dumps(body), content_type='application/json')
token = response.json()['api_token']
headers = {
'HTTP_AUTHORIZATION': token
}
url = reverse('get_user')
response = self.client.get(url, **self.headers)
self.assertTrue('user' in response.json() and response.json()['response'] == True)
freelancer = response.json()['user']['id']
url = reverse('get_projects')
# Create Project
body = {
'title': "Project Title",
'description': "Simple Desc",
'project_deadline': "2018-10-10",
'budget': 0
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
self.assertTrue('project' in response.json() and response.json()['response'] == True)
project_id = response.json()['project']['project_id']
# Add Bid to the project from freelancer
url = reverse('add_bid')
body = {
'project_id': project_id,
'freelancer': freelancer,
'note': "I am the best for this job.",
'offer': 0
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **headers)
self.assertTrue(response.json()['response'] == True)
# Get project
url = reverse('get_projects')
response = self.client.get(url, {'ids': project_id}, content_type='application/json')
self.assertTrue('projects' in response.json() and response.json()['response'] == True)
bid_id = response.json()['projects'][0]['bids'][0]['bid_id']
# Discard Bid from client
url = reverse('discard_bid')
body = {
'bid_id': bid_id,
}
response = self.client.post(url, json.dumps(body), content_type='application/json', **self.headers)
self.assertTrue(response.json()['response'] == True)
| python |
import urllib.parse
from docutils import nodes, utils
arts_elements = ('group', 'variable', 'method', 'agenda')
arts_path = {el: el+'s' for el in arts_elements}
def make_arts_link(name, rawtext, text, lineno, inliner, options={}, content=[]):
parts = name.split(':')
if len(parts) < 2 or parts[1] not in arts_elements:
msg = inliner.reporter.error(
'Unknown arts role "{}".'.format(name), line=lineno)
prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg]
kind = parts[1]
env = inliner.document.settings.env
docserver_url = env.config.arts_docserver_url.strip('/')
    uri = '/'.join([docserver_url, arts_path[kind], text])
node = nodes.reference(rawtext, utils.unescape(text), refuri=uri, **options)
return [node], []
def setup(app):
"""Setup function to register the extension"""
app.add_config_value('arts_docserver_url',
'http://radiativetransfer.org/docserver-trunk',
'env')
for kind in arts_elements:
app.add_role('arts:'+kind, make_arts_link)
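# --- Usage sketch (illustrative, not part of the extension) ---
# Assuming this module is importable as "arts_links" (hypothetical name), a
# Sphinx project could enable it and point it at a docserver in conf.py:
#
#     extensions = ['arts_links']
#     arts_docserver_url = 'http://radiativetransfer.org/docserver-trunk'
#
# and then link to ARTS elements from reST source with the registered roles,
# for example :arts:variable:`f_grid` or :arts:method:`yCalc`.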
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import os
import argparse
musicConf = """CURRENTFILENAME="filename"
ELAPSED="0"
PLAYSTATUS="Stopped"
RESUME="OFF"
SHUFFLE="OFF"
LOOP="OFF"
SINGLE="OFF"
"""
audiobookConf = """CURRENTFILENAME="filename"
ELAPSED="0"
PLAYSTATUS="Stopped"
RESUME="ON"
SHUFFLE="OFF"
LOOP="OFF"
SINGLE="OFF"
"""
def readShortcuts(shortcutsDir):
result = {}
for f in os.listdir(shortcutsDir):
absf = os.path.join(shortcutsDir, f)
if os.path.isfile(absf):
val = []
with open(absf, "r") as fobj:
for line in fobj:
if len(line.strip()) != 0:
val.append(line.rstrip())
result[f] = val
return result
def readFolders(audioDir, relpath=None, isFirst=True):
result = {}
relpath = "" if relpath is None else relpath
hasAudioFiles = False
for f in os.listdir(audioDir):
absf = os.path.join(audioDir, f)
if os.path.isfile(absf):
if not isFirst:
hasAudioFiles = True
elif os.path.isdir(absf):
childResult = readFolders(audioDir=absf, relpath=os.path.join(relpath, f), isFirst=False)
for k, v in childResult.items():
assert(k not in result)
result[k] = v
if hasAudioFiles:
result[relpath] = os.path.exists(os.path.join(audioDir, "folder.conf"))
return result
def _deleteBrokenSymlink(shortcutsDir, cardid, d):
i = input("\ndelete broken symlink [" + cardid + " --> " + str(d) + "]? [y/N]")
if i == "y":
print("deleting symlink.")
os.remove(os.path.join(shortcutsDir, cardid))
else:
print("keeping broken symlink.")
def fixBrokenShortcuts(shortcutsDir, shortcuts, audioFolders):
for cardid, dirs in shortcuts.items():
if len(dirs) == 0 and cardid != "placeholder":
_deleteBrokenSymlink(shortcutsDir=shortcutsDir, cardid=cardid, d=None)
for d in dirs:
if d not in audioFolders and d != cardid:
_deleteBrokenSymlink(shortcutsDir=shortcutsDir, cardid=cardid, d=d)
def _writeFolderConf(audioDir, d, content):
with open(os.path.join(audioDir, d, "folder.conf"), "w") as f:
f.write(content)
def _askFolderType(audioDir, d):
i = input("\ntype of " + d + " ? [m]usic/[a]udiobook/[I]gnore: ")
if i == "m":
_writeFolderConf(audioDir=audioDir, d=d, content=musicConf)
elif i == "a":
_writeFolderConf(audioDir=audioDir, d=d, content=audiobookConf)
else:
print("ignoring folder.")
def linkLooseFolders(shortcutsDir, audioDir, shortcuts, audioFolders, latestRFIDFile):
allShortcutsDirs = []
looseFolders = {}
print("\n\n=== linking loose folders")
for cardid, dirs in shortcuts.items():
allShortcutsDirs.extend(dirs)
lc2 = 0
for d2, hasFolderConf2 in sorted(audioFolders.items()):
if d2 not in allShortcutsDirs:
looseFolders[lc2] = d2
lc2 = lc2 + 1
while len(looseFolders) != 0:
print("\n== loose folders:")
for lc, d in looseFolders.items():
print(str(lc) + ": " + d)
selectedOption = input("\nplease select folder: ")
if len(selectedOption.strip()) == 0:
print("cancel.")
break
if not selectedOption.isnumeric():
print("invalid input.")
continue
selectedOptionInt = int(selectedOption)
if selectedOptionInt < 0 or selectedOptionInt not in looseFolders:
print("invalid input.")
continue
with open(latestRFIDFile, "r") as rf:
latestRFID = rf.read().strip()
d = looseFolders[selectedOptionInt]
cardid = input("\ncardid for \"" + d + "\" [" + latestRFID + "] (enter \"c\" to cancel): ")
if cardid == "c":
print("ok, ignoring this folder.")
else:
if len(cardid) == 0:
cardid = latestRFID
doit = True
if cardid in shortcuts:
doit = False
yn = input("WARNING: cardid already assigned to " + str(shortcuts[cardid]) + ". Override? [y/N] ")
if yn == "y":
doit = True
if doit:
if not audioFolders[d]:
_askFolderType(audioDir=audioDir, d=d)
with open(os.path.join(shortcutsDir, cardid), "w") as f:
f.write(d)
looseFolders.pop(selectedOptionInt, None)
else:
print("skipping.")
print("done.")
def fixFoldersWithoutFolderConf(audioDir, audioFolders):
print("\n\n=== Fixing folders with missing folder.conf ...")
for d, hasFolderConf in audioFolders.items():
if not hasFolderConf:
_askFolderType(audioDir=audioDir, d=d)
print("=== done.")
def findDuplicateShortcuts(shortcuts):
print("\n\n=== Checking folders with multiple shortcuts ...")
linkedFolders = {}
for cardid, dirs in shortcuts.items():
for d in dirs:
if d not in linkedFolders:
linkedFolders[d] = []
linkedFolders[d].append(cardid)
for d, cardids in linkedFolders.items():
if len(cardids) > 1:
print("WARNING: multiple shortcuts for folder [" + d + "]: " + str(cardids))
print("=== done.")
if __name__ == "__main__":
baseDir = "/home/pi/RPi-Jukebox-RFID"
latestRFIDFile = os.path.join(baseDir, "settings", "Latest_RFID")
shortcutsDir = os.path.join(baseDir, "shared", "shortcuts")
audioDir = os.path.join(baseDir, "shared", "audiofolders")
parser = argparse.ArgumentParser()
parser.add_argument("--baseDir", help="directory containing the phoniebox code; defaults to " + baseDir)
parser.add_argument("--latestRFIDFile", help="file storing the latest RFID card id; defaults to " + latestRFIDFile)
parser.add_argument("--shortcutsDir", help="directory containing the RFID card id shortcuts; defaults to " + shortcutsDir)
parser.add_argument("--audioDir", help="directory containing the audio files; defaults to " + audioDir)
parser.add_argument("--printShortcuts", help="print list of available shortcuts", action="store_true")
parser.add_argument("--linkLooseFolders", help="iterate through list of folders that are currently unbound to any card id and ask user whether to link them", action="store_true")
parser.add_argument("--fixBrokenShortcuts", help="find and delete dangling shortcuts ", action="store_true")
parser.add_argument("--findDuplicateShortcuts", help="find and delete duplicate shortcuts ", action="store_true")
parser.add_argument("--fixFoldersWithoutFolderConf", help="ask user whether folders without a folder.conf file should be either treated as a music album or an audio book", action="store_true")
args = parser.parse_args()
if args.baseDir:
baseDir = args.baseDir
if args.latestRFIDFile:
latestRFIDFile = args.latestRFIDFile
if args.shortcutsDir:
shortcutsDir = args.shortcutsDir
if args.audioDir:
audioDir = args.audioDir
shortcuts = readShortcuts(shortcutsDir=shortcutsDir)
audioFolders = readFolders(audioDir=audioDir)
if args.printShortcuts:
print("===== shortcuts =====")
shortcutslist = []
for cardid, thefolders in sorted(shortcuts.items()):
for f in thefolders:
shortcutslist.append([cardid, f])
for e in sorted(shortcutslist, key=lambda x: x[1]):
print("\"" + e[1] + "\";\t\"" + e[0] + "\"")
print("==================================")
if args.linkLooseFolders:
linkLooseFolders(shortcutsDir=shortcutsDir, audioDir=audioDir, shortcuts=shortcuts, audioFolders=audioFolders, latestRFIDFile=latestRFIDFile)
if args.fixBrokenShortcuts:
fixBrokenShortcuts(shortcutsDir=shortcutsDir, shortcuts=shortcuts, audioFolders=audioFolders)
if args.findDuplicateShortcuts:
shortcuts2 = readShortcuts(shortcutsDir=shortcutsDir)
findDuplicateShortcuts(shortcuts=shortcuts2)
if args.fixFoldersWithoutFolderConf:
audioFolders2 = readFolders(audioDir=audioDir)
fixFoldersWithoutFolderConf(audioDir=audioDir, audioFolders=audioFolders2)
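# --- Example invocations (sketch; the script file name below is assumed) ---
# List all card-id -> folder shortcuts:
#     python3 organize_shortcuts.py --printShortcuts
# Interactively bind folders that have no card id yet, using a custom audio dir:
#     python3 organize_shortcuts.py --linkLooseFolders --audioDir /home/pi/RPi-Jukebox-RFID/shared/audiofolders
# Clean up dangling or duplicate shortcuts:
#     python3 organize_shortcuts.py --fixBrokenShortcuts --findDuplicateShortcuts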
| python |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
xosutil/autoversion_setup.py
This module exports a function, setup_with_auto_version(), that will automatically generate a version.py file
dynamically from the version option passed to the setup function. It does this without having to modify the
source copy of version.py.
It also automatically searches for VERSION files in the directory of the caller and its parent hierarchy, and will
automatically load the version number from the VERSION file, if one is detected.
"""
import os
from setuptools import setup
from setuptools.command.sdist import sdist
from setuptools.command.build_py import build_py
import inspect
from autodiscover_version import autodiscover_version
class SdistCommand(sdist):
def copy_file(self, infile, outfile, *args, **kwargs):
if kwargs.get("dry_run"):
return (outfile, 1)
if os.path.split(outfile)[1] == "version.py":
open(outfile, "w").write(
"# do not edit. Autogenerated file.\n"
"__version__ = '%s'\n" % self.distribution.metadata.version
)
return (outfile, 1)
else:
return sdist.copy_file(self, infile, outfile, *args, **kwargs)
class BuildPyCommand(build_py):
def copy_file(self, infile, outfile, *args, **kwargs):
if kwargs.get("dry_run"):
return (outfile, 1)
if os.path.split(outfile)[1] == "version.py":
open(outfile, "w").write(
"# do not edit. Autogenerated file.\n"
"__version__ = '%s'\n" % self.distribution.metadata.version
)
return (outfile, 1)
else:
return build_py.copy_file(self, infile, outfile, *args, **kwargs)
def setup_with_auto_version(*args, **kwargs):
# Learn the module that called this function, so we can search for any VERSION files in it.
frame = inspect.stack()[1]
caller_module = inspect.getmodule(frame[0])
# Search for a VERSION file and extract the version number from it.
version = autodiscover_version(caller_filename=caller_module.__file__)
if version:
kwargs["version"] = version
cmdclass = kwargs.get("cmdclass", {}).copy()
cmdclass.update({"sdist": SdistCommand, "build_py": BuildPyCommand})
kwargs["cmdclass"] = cmdclass
return setup(*args, **kwargs)
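# --- Usage sketch (the package name and layout below are illustrative) ---
# A project's setup.py can call setup_with_auto_version() exactly like
# setuptools.setup(); if a VERSION file is discovered next to setup.py (or in
# a parent directory), its contents override the version given here, and the
# sdist/build_py commands rewrite version.py accordingly.
#
#     from xosutil.autoversion_setup import setup_with_auto_version
#
#     setup_with_auto_version(
#         name='example-package',        # hypothetical package name
#         version='0.0.0',               # replaced by the discovered VERSION
#         packages=['example_package'],
#     )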
| python |
# Copyright 2020, Schuberg Philis B.V
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import time
from datetime import datetime
from dataclasses import dataclass
from enum import Enum, auto
from operator import itemgetter
from xml.etree import ElementTree
import click_spinner
import hpilo
import libvirt
import paramiko
from cs import CloudStackApiException
from fabric import Connection
from invoke import UnexpectedExit, CommandTimedOut
from cosmicops import get_config, logging
from .object import CosmicObject
from .router import CosmicRouter
from .vm import CosmicVM
FABRIC_PATCHED = False
class RebootAction(Enum):
REBOOT = auto()
HALT = auto()
FORCE_RESET = auto()
UPGRADE_FIRMWARE = auto()
PXE_REBOOT = auto()
SKIP = auto()
@dataclass(frozen=True, order=True)
class DomJobInfo:
jobType: int = libvirt.VIR_DOMAIN_JOB_NONE
operation: int = 0
timeElapsed: int = 0
timeRemaining: int = 0
dataTotal: int = 0
dataProcessed: int = 0
dataRemaining: int = 0
memTotal: int = 0
memProcessed: int = 0
memRemaining: int = 0
fileTotal: int = 0
fileProcessed: int = 0
    fileRemaining: int = 0
@classmethod
def from_list(cls, l: list):
return cls(*l)
@dataclass(frozen=True, order=True)
class BlkJobInfo:
jobType: int = 0
bandWidth: int = 0
current: int = 0
end: int = 0
# Patch Fabric connection to use different host policy (see https://github.com/fabric/fabric/issues/2071)
def unsafe_open(self): # pragma: no cover
self.client.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
Connection.open_orig(self)
class CosmicHost(CosmicObject):
def __init__(self, ops, data):
super().__init__(ops, data)
global FABRIC_PATCHED
if not FABRIC_PATCHED:
Connection.open_orig = Connection.open
Connection.open = unsafe_open
FABRIC_PATCHED = True
# Load configuration
config = get_config()
ssh_user = config.get('ssh', 'user', fallback=None)
ssh_key_file = config.get('ssh', 'ssh_key_file', fallback=None)
connect_kwargs = {'key_filename': ssh_key_file} if ssh_key_file else None
ilo_user = config.get('ilo', 'user', fallback=None)
ilo_password = config.get('ilo', 'password', fallback=None)
# Setup SSH connection
self._connection = Connection(self['name'], user=ssh_user, connect_kwargs=connect_kwargs)
# Setup ILO connection
ilo_address = self['name'].split('.')
ilo_address.insert(1, 'ilom')
ilo_address = '.'.join(ilo_address)
self._ilo = hpilo.Ilo(ilo_address, login=ilo_user, password=ilo_password)
self.vms_with_shutdown_policy = []
def refresh(self):
self._data = self._ops.get_host(id=self['id'], json=True)
def disable(self):
if self.dry_run:
logging.info(f"Would disable host '{self['name']}'")
return True
else:
logging.info(f"Disabling host '{self['name']}'", self.log_to_slack)
if not self._ops.cs.updateHost(id=self['id'], allocationstate='Disable').get('host'):
logging.error(f"Failed to disable host '{self['name']}'", self.log_to_slack)
return False
with click_spinner.spinner():
while True:
self.refresh()
if self['resourcestate'] == 'Disabled':
break
time.sleep(5)
return True
def enable(self):
if self.dry_run:
logging.info(f"Would enable host '{self['name']}'")
return True
else:
logging.info(f"Enabling host '{self['name']}'", self.log_to_slack)
if not self._ops.cs.updateHost(id=self['id'], allocationstate='Enable').get('host'):
logging.error(f"Failed to enable host '{self['name']}'", self.log_to_slack)
return False
with click_spinner.spinner():
while True:
self.refresh()
if self['resourcestate'] == 'Enabled':
break
time.sleep(5)
return True
def empty(self, target=None):
total = success = failed = 0
all_vms = self.get_all_vms() + self.get_all_project_vms() + self.get_all_routers() + self.get_all_project_routers() + self.get_all_system_vms()
if not all_vms:
logging.warning(f"No VMs found on host '{self['name']}'")
return total, success, failed
total = len(all_vms)
target_message = f" to target '{target['name']}'" if target else ''
if self.dry_run:
logging.info(f"Dry run of VM migration away from host '{self['name']}'" + target_message)
else:
logging.info(f"Migrating VMs away from host '{self['name']}'" + target_message)
for vm in all_vms:
if vm.get('maintenancepolicy') == 'ShutdownAndStart':
if not vm.stop():
failed += 1
continue
success += 1
# If the host is disabled, try to restart the VM. Will fail if the host is on NVMe.
if self['resourcestate'] == 'Disabled':
if vm.start():
continue
self.vms_with_shutdown_policy.append(vm)
continue
vm_on_dedicated_hv = False
dedicated_affinity_id = None
for affinity_group in vm.get_affinity_groups():
if affinity_group['type'] == 'ExplicitDedication':
vm_on_dedicated_hv = True
dedicated_affinity_id = affinity_group['id']
if target:
available_hosts = [target]
else:
try:
available_hosts = self._ops.cs.findHostsForMigration(virtualmachineid=vm['id']).get('host', [])
except CloudStackApiException as e:
logging.error(f"Encountered API exception while finding suitable host for migration: {e}")
failed += 1
continue
available_hosts.sort(key=itemgetter('memoryallocated'))
migration_host = None
for available_host in available_hosts:
if not target:
# Skip hosts that require storage migration
if available_host['requiresStorageMotion']:
logging.debug(
f"Skipping '{available_host['name']}' because migrating VM '{vm['name']}' requires a storage migration")
continue
# Ensure host is suitable for migration
if not available_host['suitableformigration']:
logging.debug(f"Skipping '{available_host['name']}' because it's not suitable for migration")
continue
# Only hosts in the same cluster
if available_host['clusterid'] != self['clusterid']:
logging.debug(f"Skipping '{available_host['name']}' because it's part of a different cluster")
continue
if vm_on_dedicated_hv:
# Ensure the dedication group matches
if available_host.get('affinitygroupid') != dedicated_affinity_id:
logging.info(
f"Skipping '{available_host['name']}' because host does not match the dedication group of VM '{vm['name']}'")
continue
else:
# If the user VM isn't dedicated, skip dedicated hosts
if vm.is_user_vm() and 'affinitygroupid' in available_host:
logging.info(
f"Skipping '{available_host['name']}' because host is dedicated and VM '{vm['name']}' is not")
continue
logging.debug(f"Selected '{available_host['name']}' for VM '{vm['name']}'")
migration_host = available_host
break
if not migration_host:
logging.error(
f"Failed to find host with capacity to migrate VM '{vm['name']}'. Please migrate manually to another cluster.")
failed += 1
continue
if not vm.migrate(migration_host):
failed += 1
else:
success += 1
return total, success, failed
def get_all_vms(self, domain=None, keyword_filter=None):
domain_id = domain['id'] if domain else None
vms = self._ops.cs.listVirtualMachines(fetch_list=True, hostid=self['id'], domainid=domain_id,
keyword=keyword_filter, listall='true')
return [CosmicVM(self._ops, vm) for vm in vms]
def get_all_project_vms(self, project=None):
if project:
project_id = project['id']
else:
project_id = '-1'
project_vms = self._ops.cs.listVirtualMachines(fetch_list=True, hostid=self['id'], listall='true',
projectid=project_id)
return [CosmicVM(self._ops, vm) for vm in project_vms]
def get_all_routers(self, domain=None):
domain_id = domain['id'] if domain else None
routers = self._ops.cs.listRouters(fetch_list=True, hostid=self['id'], domainid=domain_id, listall='true')
return [CosmicRouter(self._ops, router) for router in routers]
def get_all_project_routers(self, project=None):
if project:
project_id = project['id']
else:
project_id = '-1'
project_routers = self._ops.cs.listRouters(fetch_list=True, hostid=self['id'], listall='true',
projectid=project_id)
return [CosmicRouter(self._ops, router) for router in project_routers]
def get_all_system_vms(self):
system_vms = self._ops.cs.listSystemVms(fetch_list=True, hostid=self['id'])
return [CosmicVM(self._ops, vm) for vm in system_vms]
def copy_file(self, source, destination, mode=None):
if self.dry_run:
logging.info(f"Would copy '{source}' to '{destination}' on '{self['name']}")
return
self._connection.put(source, destination)
if mode:
self._connection.sudo(f'chmod {mode:o} {destination}')
def execute(self, command, sudo=False, hide_stdout=True, pty=False, always=False):
if self.dry_run and not always:
logging.info(f"Would execute '{command}' on '{self['name']}")
return
if sudo:
runner = self._connection.sudo
else:
runner = self._connection.run
return runner(command, hide=hide_stdout, pty=pty)
def reboot(self, action=RebootAction.REBOOT):
reboot_or_halt = 'halt' if action == RebootAction.HALT else 'reboot'
if self.dry_run:
logging.info(f"Would {reboot_or_halt} host '{self['name']}' with action '{action}'")
return True
if self.execute('virsh list | grep running | wc -l').stdout.strip() != '0':
logging.error(f"Host '{self['name']}' has running VMs, will not {reboot_or_halt}", self.log_to_slack)
return False
try:
if action == RebootAction.REBOOT:
logging.info(f"Rebooting '{self['name']}' in 60s", self.log_to_slack)
self.execute('shutdown -r 1', sudo=True)
elif action == RebootAction.HALT:
logging.info(
f"Halting '{self['name']}' in 60s, be sure to start it manually to continue the rolling reboot",
self.log_to_slack)
self.execute('shutdown -h 1', sudo=True)
elif action == RebootAction.FORCE_RESET:
logging.info(f"Force resetting '{self['name']}'", self.log_to_slack)
self.execute('sync', sudo=True)
self.execute('echo b > /proc/sysrq-trigger', sudo=True)
elif action == RebootAction.UPGRADE_FIRMWARE:
logging.info(f"Rebooting '{self['name']}' after firmware upgrade", self.log_to_slack)
self.execute("tmux new -d 'yes | sudo /usr/sbin/smartupdate upgrade && sudo reboot'", pty=True)
elif action == RebootAction.PXE_REBOOT:
logging.info(f"PXE Rebooting '{self['name']}' in 10s", self.log_to_slack)
self.execute("tmux new -d 'sleep 10 && sudo /usr/sbin/hp-reboot pxe'", pty=True)
elif action == RebootAction.SKIP:
logging.info(f"Skipping reboot for '{self['name']}'", self.log_to_slack)
except Exception as e:
logging.warning(f"Ignoring exception as it's likely related to the {reboot_or_halt}: {e}",
self.log_to_slack)
return True
def set_uid_led(self, state):
new_state = 'on' if state else 'off'
if self.dry_run:
logging.info(f"Would set UID led {new_state}")
else:
self.execute(f'hpasmcli -s "set uid {new_state}"', sudo=True)
def wait_until_offline(self):
if self.dry_run:
logging.info(f"Would wait for '{self['name']}' to complete it's reboot")
else:
logging.info(f"Waiting for '{self['name']}' to complete it's reboot", self.log_to_slack)
with click_spinner.spinner():
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(5)
result = s.connect_ex((self['name'], 22))
if result != 0:
break
time.sleep(5)
def wait_until_online(self):
if self.dry_run:
logging.info(f"Would wait for '{self['name']}' to come back online")
else:
logging.info(f"Waiting for '{self['name']}' to come back online", self.log_to_slack)
with click_spinner.spinner():
while True:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(5)
result = s.connect_ex((self['name'], 22))
if result == 0:
break
if self.dry_run:
logging.info(f"Would wait for libvirt on '{self['name']}'")
else:
logging.info(f"Waiting for libvirt on '{self['name']}'", self.log_to_slack)
with click_spinner.spinner():
while True:
try:
if self.execute('virsh list').return_code == 0:
break
except (ConnectionResetError, UnexpectedExit, CommandTimedOut):
pass
time.sleep(5)
def restart_vms_with_shutdown_policy(self):
if self.dry_run:
logging.info(f"Would restart VMs with 'ShutdownAndStart' policy on host '{self['name']}'")
else:
logging.info(f"Starting VMs with 'ShutdownAndStart' policy on host '{self['name']}'", self.log_to_slack)
for vm in self.vms_with_shutdown_policy:
vm.start()
def wait_for_agent(self):
if self.dry_run:
logging.info(f"Would wait for agent to became up on host '{self['name']}'")
return
else:
logging.info(f"Waiting for agent on host '{self['name']}'", self.log_to_slack)
with click_spinner.spinner():
while True:
self.refresh()
if self['state'] == 'Up':
break
time.sleep(5)
def get_disks(self, vm):
lv = libvirt.openReadOnly(f"qemu+tcp://{self['name']}/system")
domain = lv.lookupByName(vm['instancename'])
tree = ElementTree.fromstring(domain.XMLDesc())
block_devs = tree.findall('devices/disk')
disk_data = {}
for disk in block_devs:
if disk.get('device') != 'disk':
continue
dev = disk.find('target').get('dev')
full_path = disk.find('source').get('file')
_, _, pool, path = full_path.split('/')
size, _, _ = domain.blockInfo(dev)
disk_data[path] = {
'dev': dev,
'pool': pool,
'path': path,
'size': size
}
lv.close()
return disk_data
def get_domjobinfo(self, vm):
try:
lv = libvirt.openReadOnly(f"qemu+tcp://{self['name']}/system")
all_domains = lv.listAllDomains()
if any([x for x in all_domains if x.name() == vm]):
domain = lv.lookupByName(vm)
domjobinfo = domain.jobInfo()
return DomJobInfo.from_list(domjobinfo)
except libvirt.libvirtError as _:
pass # Ignore exception
return DomJobInfo()
def get_domjobstats(self, vm, correction=True):
try:
lv = libvirt.openReadOnly(f"qemu+tcp://{self['name']}/system")
all_domains = lv.listAllDomains()
if any([x for x in all_domains if x.name() == vm]):
domain = lv.lookupByName(vm)
domjobstats = domain.jobStats()
memory_total = domjobstats.get('memory_total', 0)
if correction:
if memory_total == 0:
c_add = domain.info()[0]
memory_total = memory_total + c_add
return DomJobInfo(
jobType=domjobstats.get('type', libvirt.VIR_DOMAIN_JOB_NONE),
operation=domjobstats.get('operation', 0),
timeElapsed=domjobstats.get('time_elapsed', 0),
timeRemaining=domjobstats.get('time_remaining', 0),
dataTotal=domjobstats.get('data_total', 0),
dataProcessed=domjobstats.get('data_processed', 0),
dataRemaining=domjobstats.get('data_remaining', 0),
memTotal=memory_total,
memProcessed=domjobstats.get('memory_processed', 0),
memRemaining=domjobstats.get('memory_remaining', 0),
fileTotal=domjobstats.get('disk_total', 0),
fileProcessed=domjobstats.get('disk_processed', 0),
                    fileRemaining=domjobstats.get('disk_remaining', 0)
)
except libvirt.libvirtError as _:
pass # Ignore exception
return DomJobInfo()
def get_blkjobinfo(self, vm, volume):
try:
disks = self.get_disks(vm)
disk = dict(filter(lambda x: x[0] == volume, disks.items()))
lv = libvirt.openReadOnly(f"qemu+tcp://{self['name']}/system")
all_domains = lv.listAllDomains()
if any([x for x in all_domains if x.name() == vm['instancename']]):
domain = lv.lookupByName(vm['instancename'])
blkjobinfo = domain.blockJobInfo(disk[volume]['dev'], 0)
return BlkJobInfo(
jobType=blkjobinfo.get('type', 0),
bandWidth=blkjobinfo.get('bandwidth', 0),
current=blkjobinfo.get('cur', 0),
end=blkjobinfo.get('end', 0)
)
except libvirt.libvirtError as _:
pass # Ignore exception
return BlkJobInfo()
def set_iops_limit(self, vm, max_iops):
command = f"""
for i in $(/usr/bin/virsh domblklist --details '{vm['name']}' | grep disk | grep file | /usr/bin/awk '{{print $3}}'); do
/usr/bin/virsh blkdeviotune '{vm['name']}' $i --total-iops-sec {max_iops} --live
done
"""
if not self.execute(command, sudo=True).return_code == 0:
logging.error(f"Failed to set IOPS limit for '{vm['name']}'")
return False
else:
return True
def merge_backing_files(self, vm):
command = f"""
for i in $(/usr/bin/virsh domblklist --details '{vm['name']}' | grep disk | grep file | /usr/bin/awk '{{print $3}}'); do
echo /usr/bin/virsh blockpull '{vm['name']}' $i --wait --verbose
done
"""
if not self.execute(command, sudo=True).return_code == 0:
logging.error(f"Failed to merge backing volumes for '{vm['name']}'")
return False
else:
return True
def power_on(self):
try:
self._ilo.set_host_power(True)
return True
except Exception as err:
logging.error(f"Failed to power on '{self['name']}': {err}")
return False
def file_exists(self, path):
try:
result = self.execute(f"/bin/ls -la \"{path}\"", always=True).stdout
return result.split()
except UnexpectedExit:
return []
def rename_file(self, source, destination):
try:
if not self.execute(f"/bin/mv \"{source}\" \"{destination}\"", True).return_code == 0:
return False
return True
except UnexpectedExit:
return False
def rename_existing_destination_file(self, path):
timestamp = datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
magweg = f"magweg-migration-{timestamp}"
logging.info(f"Renaming {path} to {path}.{magweg} on host {self['name']}")
if not self.rename_file(path, f"{path}.{magweg}"):
return False
return True
def __del__(self):
if self._connection:
self._connection.close()
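# --- Workflow sketch (assumption: "ops" is an already constructed CosmicOps
# client and "host_data" a host record from its API; both names are made up).
# The methods above are designed to be chained into a rolling-reboot flow:
#
#     host = CosmicHost(ops, host_data)
#     if host.disable():                          # stop new allocations
#         total, success, failed = host.empty()   # live-migrate VMs away
#         if failed == 0 and host.reboot(RebootAction.REBOOT):
#             host.wait_until_offline()
#             host.wait_until_online()
#             host.wait_for_agent()
#             host.restart_vms_with_shutdown_policy()
#             host.enable()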
| python |
from random import choice
from string import ascii_lowercase, digits
from django import forms
from django.contrib.auth.models import User
from django.db import models
from django.utils.importlib import import_module
from avocado.conf import settings
# 41 possible characters at 30 characters per username gives 41**30
# (roughly 2.5e48) distinct usernames -- more than enough to avoid collisions.
USERNAME_CHARS = ascii_lowercase + digits + '@.+-_'
def get_form_class(name):
    # Absolute import if a period exists, otherwise assume the
    # name refers to a built-in Django class
    if '.' in name:
        module_path, name = name.rsplit('.', 1)
        module = import_module(module_path)
    else:
        if not name.endswith('Field'):
            name = name + 'Field'
        module = forms
    return getattr(module, name)
def get_internal_type(field):
"Get model field internal type with 'field' off."
datatype = field.get_internal_type().lower()
if datatype.endswith('field'):
datatype = datatype[:-5]
return datatype
def get_simple_type(internal):
"""Returns a simple type mapped from the internal type."
By default, it will use the field's internal type, but can be
overridden by the ``SIMPLE_TYPES`` setting.
"""
if isinstance(internal, models.Field):
internal = get_internal_type(internal)
return settings.SIMPLE_TYPES.get(internal, internal)
def get_heuristic_flags(field):
# TODO add better conditions for determining how to set the
# flags for most appropriate interface.
# - Determine length of MAX value for string-based fields to rather
# than relying on the `max_length`. This will enable checking TextFields
# - Numerical fields may be enumerable, check the size of them if an
# option is set?
    # For strings and booleans, set the enumerable flag by default if the
    # field size is below the enumerable threshold. TextFields are excluded
    # since they are typically used for free text.
enumerable = False
if field.internal_type != 'text' \
and field.simple_type in ('string', 'boolean') \
and field.size() <= settings.ENUMERABLE_MAXIMUM:
enumerable = True
return {
'enumerable': enumerable,
}
def parse_field_key(key):
"Returns a field lookup based on a variety of key types."
if isinstance(key, int):
return {'pk': key}
keys = ('app_name', 'model_name', 'field_name')
if isinstance(key, models.Field):
opts = key.model._meta
toks = [opts.app_label, opts.module_name, key.name]
elif isinstance(key, basestring):
toks = key.split('.')
elif isinstance(key, (list, tuple)):
toks = key
offset = len(keys) - len(toks)
return dict(zip(keys[offset:], toks))
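# Illustrative inputs/outputs for parse_field_key (the app/model/field names
# are made up): an integer becomes a pk lookup, while a dotted string or a
# sequence is right-aligned against ('app_name', 'model_name', 'field_name'):
#
#     parse_field_key(7) -> {'pk': 7}
#     parse_field_key('library.book.title')
#         -> {'app_name': 'library', 'model_name': 'book', 'field_name': 'title'}
#     parse_field_key(('book', 'title'))
#         -> {'model_name': 'book', 'field_name': 'title'}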
def generate_random_username(length=30, max_attempts=100):
for i in xrange(max_attempts):
username = ''.join(choice(USERNAME_CHARS) for i in xrange(length))
if not User.objects.filter(username=username).exists():
return username
raise ValueError('Maximum attempts made to generate username')
def create_email_based_user(email):
"""
Creates an inactive user from the email address. These users are
placeholders for those users that do not have accounts. This is initially
planned for use in conjunction with adding users to DataQuery.shared_users.
"""
username = generate_random_username()
email = User.objects.normalize_email(email)
user = User(username=username, email=email, is_active=False)
user.set_unusable_password()
user.full_clean()
user.save()
return user
| python |
birth_year = input('Birth year: ')
print(type(birth_year))
age = 2019 - int(birth_year)
print(type(age))
print(age)
#exercise
weight_in_lbs = input('What is your weight (in pounds)? ')
weight_in_kg = float(weight_in_lbs) * 0.454
print('Your weight is (in kg): ' + str(weight_in_kg))
| python |
from typing import Any, Dict, Iterable, List, Optional, TypedDict
ActionPayload = Iterable[Dict[str, Any]]
ActionPayloadWithLabel = TypedDict(
"ActionPayloadWithLabel", {"action": str, "data": ActionPayload}
)
Payload = List[ActionPayloadWithLabel]
ActionResponseResultsElement = Dict[str, Any]
ActionResponseResults = List[Optional[List[Optional[ActionResponseResultsElement]]]]
ActionResponse = TypedDict(
"ActionResponse",
{"success": bool, "message": str, "results": ActionResponseResults},
)
ActionError = Any
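# Illustrative example (hypothetical action name and fields) of the shapes
# these aliases describe: a Payload is a list of labeled action payloads, and
# an ActionResponse reports per-action, per-element results.
#
#     example_payload: Payload = [
#         {"action": "topic.create", "data": [{"title": "Agenda item"}]},
#     ]
#     example_response: ActionResponse = {
#         "success": True,
#         "message": "Actions handled successfully",
#         "results": [[{"id": 42}]],
#     }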
| python |
from setuptools import setup
setup(
name='vertvideo',
version="1.0.1",
description='python package to help you convert video/audio files.',
url='https://github.com/ellipyhub/vertvideo',
author='Ellipyhub',
license='MIT License',
packages=['vertvideo'],
long_description=open('README.md', 'r').read(),
long_description_content_type='text/markdown',
keywords='audio video file convert',
install_requires=[
'autopep8==1.5.7',
'certifi==2021.5.30',
'charset-normalizer==2.0.3',
'decorator==4.4.2',
'idna==3.2',
'imageio==2.9.0',
'imageio-ffmpeg==0.4.4',
'moviepy==1.0.3',
'numpy==1.21.1',
'Pillow==8.3.1',
'proglog==0.1.9',
'pycodestyle==2.7.0',
'requests==2.26.0',
'toml==0.10.2',
'tqdm==4.61.2',
'urllib3==1.26.6',
],
include_package_data=True,
classifiers=[
'License :: OSI Approved :: MIT License',
'Intended Audience :: End Users/Desktop',
'Operating System :: OS Independent',
'Development Status :: 5 - Production/Stable',
'Programming Language :: Python :: 3',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Video :: Conversion',
],
entry_points={
"console_scripts": [
"vertvideo=vertvideo.__main__:main",
]
},
)
| python |
#! /usr/bin/env python3
import os, math
import requests
import sqlalchemy
from sqlalchemy import MetaData, create_engine, Column, BigInteger, DateTime, String, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker
# Environment variables
if os.path.exists('config.env'):
for line in open('config.env'):
var = line.strip().split('=')
if len(var) == 2:
os.environ[var[0]] = var[1].replace("\"", "")
# Metadata settings
convention = {
"ix": 'ix_%(column_0_label)s',
"uq": "uq_%(table_name)s_%(column_0_name)s",
"ck": "ck_%(table_name)s_%(constraint_name)s",
"fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
"pk": "pk_%(table_name)s"
}
Base = declarative_base()
Base.metadata = MetaData(naming_convention=convention)
# Models
class Product(Base):
__tablename__ = 'products'
id = Column(BigInteger, primary_key=True)
title = Column(String)
class Customer(Base):
__tablename__ = 'customers'
id = Column(BigInteger, primary_key=True)
first_name = Column(String)
last_name = Column(String)
email = Column(String)
orders = relationship('Order', back_populates='customer')
class Order(Base):
__tablename__ = 'orders'
id = Column(BigInteger, primary_key=True)
customer_id = Column(BigInteger, ForeignKey('customers.id', ondelete='cascade'))
currency = Column(String)
total_price = Column(String)
customer = relationship('Customer', back_populates='orders')
# Create tables
basedir = os.path.abspath(os.path.dirname(__file__))
SQLALCHEMY_DATABASE_URI = os.getenv('DATABASE_URL')
engine = create_engine(SQLALCHEMY_DATABASE_URI)
session = sessionmaker()
session.configure(bind=engine)
db = session()
Base.metadata.create_all(engine)
# Ingest data
s = requests.Session()
s.auth = (os.getenv('SHOPIFY_API_KEY'), os.getenv('SHOPIFY_API_PASSWORD'))
url = 'https://' + os.getenv('SHOPIFY_URL') + '/admin/'
params = {'limit': 250}
## Products
Model = Product
model = 'products'
field_values = ['title']
count = s.get(url + model + '/count.json').json().get('count')
pages = math.ceil(count/250)
print("Starting import for {}...".format(model))
num = 0
for page in range(1, pages+1):
r = s.get(url + model + '.json', params={'page': page, **params})
objs = [i for i in r.json().get(model)]
for i in objs:
fields = {k: i.get(k) for k in field_values}
obj = db.query(Model).filter_by(id=i['id'])
if obj.first() is not None:
obj.update(fields)
else:
obj = Model(id=i['id'], **fields)
db.add(obj)
num += 1
print("Imported {} {}.".format(num, model))
## Customers
Model = Customer
model = 'customers'
field_values = ['first_name', 'last_name', 'email']
count = s.get(url + model + '/count.json').json().get('count')
pages = math.ceil(count/250) # max 250 results per page
print("Starting import for {}...".format(model))
num = 0
for page in range(1, pages+1):
r = s.get(url + model + '.json', params={'page': page, **params})
objs = [i for i in r.json().get(model)]
for i in objs:
fields = {k: i.get(k) for k in field_values}
obj = db.query(Model).filter_by(id=i['id'])
if obj.first() is not None:
obj.update(fields)
else:
obj = Model(id=i['id'], **fields)
db.add(obj)
num += 1
print("Imported {} {}.".format(num, model))
## Store products and customers for orders later
db.commit()
## Orders
Model = Order
model = 'orders'
field_values = ['currency', 'total_price']
count = s.get(url + model + '/count.json', params={'status': 'any'}).json().get('count')
pages = math.ceil(count/250)
print("Starting import for {}...".format(model))
num = 0
for page in range(1, pages+1):
r = s.get(url + model + '.json', params={'page': page, 'status': 'any', **params})
objs = [i for i in r.json().get(model)]
for i in objs:
customer = db.query(Customer).get(i['customer']['id'])
if customer is None:
continue
fields = {k: i.get(k) for k in field_values}
obj = db.query(Model).filter_by(id=i['id'])
if obj.first() is not None:
obj.update(fields)
else:
obj = Model(id=i['id'], customer_id=customer.id, **fields)
customer.orders.append(obj)
db.add(obj)
num += 1
print("Imported {} {}.".format(num, model))
## Store orders
db.commit()
| python |
import os
import sys
import soundfile as sf
import numpy as np
pcm = sys.argv[1]
wav = os.path.splitext(pcm)[0] + '.wav'
sig = np.fromfile(pcm, dtype=np.int16)
sf.write(wav, sig, 16000)
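# Note: the conversion above assumes raw signed 16-bit little-endian PCM,
# mono, sampled at 16 kHz; adjust the dtype and sample rate for other inputs.
# Example invocation (sketch; the script name is assumed):
#     python3 pcm2wav.py utt1.pcm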
| python |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from trt_layer_auto_scan_test import TrtLayerAutoScanTest, SkipReasons
from program_config import TensorConfig, ProgramConfig
import unittest
import numpy as np
import paddle.inference as paddle_infer
from functools import partial
from typing import Optional, List, Callable, Dict, Any, Set
class TrtConvertMultiHeadMatmulTest(TrtLayerAutoScanTest):
def is_program_valid(self, program_config: ProgramConfig) -> bool:
return True
def sample_program_configs(self):
def generate_input1(batch, dim1):
return np.random.random((batch, dim1, 768)).astype(np.float32)
def generate_input2(shape):
return np.random.random(shape).astype(np.float32)
def generate_weight1():
return np.random.random((768, 768)).astype(np.float32)
def generate_weight2():
return np.random.random(768).astype(np.float32)
for batch in [1, 2, 4]:
self.batch = batch
for reshape_shape in [[0, 0, 12, 64]]:
for dim1 in [128]:
input2_shapes = [[batch, reshape_shape[2], dim1, dim1],
[batch, 1, 1, dim1]]
for input2_shape in input2_shapes:
for axis in [0]:
dics = [{
"x_num_col_dims": 2,
"y_num_col_dims": 1
}, {
"axis": 2
}, {
"shape": reshape_shape
}, {
"axis": [0, 2, 1, 3]
}, {
"x_num_col_dims": 2,
"y_num_col_dims": 1
}, {
"axis": 2
}, {
"shape": reshape_shape
}, {
"axis": [0, 2, 1, 3]
}, {
"x_num_col_dims": 2,
"y_num_col_dims": 1
}, {
"axis": 2
}, {
"shape": reshape_shape
}, {
"axis": [0, 2, 1, 3]
}, {
"scale": 0.125,
"bias": 0.0,
"bias_after_scale": True
}, {
"alpha": 1.0,
"transpose_X": False,
"transpose_Y": True,
"fused_reshape_X": [],
"fused_reshape_Y": [],
"fused_transpose_X": [],
"fused_transpose_Y": [],
"fused_reshape_Out": [],
"fused_transpose_Out": []
}, {
"axis": axis
}, {
"axis": -1,
"is_test": True
}, {
"seed": 0,
"dropout_prob": 0.10000000149011612,
"dropout_implementation": "upscale_in_train",
"fix_seed": False,
"is_test": True
}, {
"alpha": 1.0,
"transpose_X": False,
"transpose_Y": False,
"fused_reshape_X": [],
"fused_reshape_Y": [],
"fused_transpose_X": [],
"fused_transpose_Y": [],
"fused_reshape_Out": [],
"fused_transpose_Out": []
}, {
"axis": [0, 2, 1, 3]
}, {
"shape": [0, 0, 768]
}, {
"x_num_col_dims": 2,
"y_num_col_dims": 1
}]
ops_config = [
{
"op_type": "mul",
"op_inputs": {
"X": ["input_data1"],
"Y": ["mul1_weight"]
},
"op_outputs": {
"Out": ["mul1_output"]
},
"op_attrs": dics[0]
},
{
"op_type": "elementwise_add",
"op_inputs": {
"X": ["mul1_output"],
"Y": ["elementwise_add1_weight"]
},
"op_outputs": {
"Out": ["elementwise_add1_output"]
},
"op_attrs": dics[1]
},
{
"op_type": "reshape2",
"op_inputs": {
"X": ["elementwise_add1_output"],
},
"op_outputs": {
"Out": ["reshape21_output"],
"XShape": ["reshape21_output_xshape"]
},
"op_attrs": dics[2]
},
{
"op_type": "transpose2",
"op_inputs": {
"X": ["reshape21_output"]
},
"op_outputs": {
"Out": ["transpose21_output"],
"XShape":
["transpose21_output_xshape"]
},
"op_attrs": dics[3]
},
{
"op_type": "mul",
"op_inputs": {
"X": ["input_data1"],
"Y": ["mul2_weight"]
},
"op_outputs": {
"Out": ["mul2_output"]
},
"op_attrs": dics[4]
},
{
"op_type": "elementwise_add",
"op_inputs": {
"X": ["mul2_output"],
"Y": ["elementwise_add2_weight"]
},
"op_outputs": {
"Out": ["elementwise_add2_output"]
},
"op_attrs": dics[5]
},
{
"op_type": "reshape2",
"op_inputs": {
"X": ["elementwise_add2_output"]
},
"op_outputs": {
"Out": ["reshape22_output"],
"XShape": ["reshape22_output_xshape"]
},
"op_attrs": dics[6]
},
{
"op_type": "transpose2",
"op_inputs": {
"X": ["reshape22_output"]
},
"op_outputs": {
"Out": ["transpose22_output"],
"XShape":
["transpose22_output_xshape"]
},
"op_attrs": dics[7]
},
{
"op_type": "mul",
"op_inputs": {
"X": ["input_data1"],
"Y": ["mul3_weight"]
},
"op_outputs": {
"Out": ["mul3_output"]
},
"op_attrs": dics[8]
},
{
"op_type": "elementwise_add",
"op_inputs": {
"X": ["mul3_output"],
"Y": ["elementwise_add3_weight"]
},
"op_outputs": {
"Out": ["elementwise_add3_output"]
},
"op_attrs": dics[9]
},
{
"op_type": "reshape2",
"op_inputs": {
"X": ["elementwise_add3_output"]
},
"op_outputs": {
"Out": ["reshape23_output"],
"XShape": ["reshape23_output_xshape"]
},
"op_attrs": dics[10]
},
{
"op_type": "transpose2",
"op_inputs": {
"X": ["reshape23_output"]
},
"op_outputs": {
"Out": ["transpose23_output"],
"XShape":
["transpose23_output_xshape"]
},
"op_attrs": dics[11]
},
{
"op_type": "scale",
"op_inputs": {
"X": ["transpose23_output"],
},
"op_outputs": {
"Out": ["scale_output"]
},
"op_attrs": dics[12]
},
{
"op_type": "matmul",
"op_inputs": {
"X": ["scale_output"],
"Y": ["transpose22_output"],
},
"op_outputs": {
"Out": ["matmul1_output"]
},
"op_attrs": dics[13]
},
{
"op_type": "elementwise_add",
"op_inputs": {
"X": ["matmul1_output"],
"Y": ["input_data2"]
},
"op_outputs": {
"Out": ["elementwise_add4_output"]
},
"op_attrs": dics[14]
},
{
"op_type": "softmax",
"op_inputs": {
"X": ["elementwise_add4_output"]
},
"op_outputs": {
"Out": ["softmax_output"]
},
"op_attrs": dics[15]
},
{
"op_type": "dropout",
"op_inputs": {
"X": ["softmax_output"],
},
"op_outputs": {
"Out": ["dropout3_output"]
},
"op_attrs": dics[16]
},
{
"op_type": "matmul",
"op_inputs": {
"X": ["dropout3_output"],
"Y": ["transpose21_output"],
},
"op_outputs": {
"Out": ["matmul2_output"]
},
"op_attrs": dics[17]
},
{
"op_type": "transpose2",
"op_inputs": {
"X": ["matmul2_output"]
},
"op_outputs": {
"Out": ["transpose24_output"],
"XShape":
["transpose24_output_xshape"]
},
"op_attrs": dics[18]
},
{
"op_type": "reshape2",
"op_inputs": {
"X": ["transpose24_output"]
},
"op_outputs": {
"Out": ["reshape24_output"],
"XShape": ["reshape24_output_xshape"]
},
"op_attrs": dics[19]
},
# In order to fuse ops with
# multihead_matmul_fuse_pass_v2, the last op
# must be mul.
{
"op_type": "mul",
"op_inputs": {
"X": ["reshape24_output"],
"Y": ["mul4_weight"]
},
"op_outputs": {
"Out": ["mul4_output"]
},
"op_attrs": dics[20]
}
]
ops = self.generate_op_config(ops_config)
program_config = ProgramConfig(
ops=ops,
weights={
"mul1_weight": TensorConfig(
data_gen=partial(generate_weight1)),
"mul2_weight": TensorConfig(
data_gen=partial(generate_weight1)),
"mul3_weight": TensorConfig(
data_gen=partial(generate_weight1)),
"mul4_weight": TensorConfig(
data_gen=partial(generate_weight1)),
"elementwise_add1_weight": TensorConfig(
data_gen=partial(generate_weight2)),
"elementwise_add2_weight": TensorConfig(
data_gen=partial(generate_weight2)),
"elementwise_add3_weight": TensorConfig(
data_gen=partial(generate_weight2)),
},
inputs={
"input_data1": TensorConfig(
data_gen=partial(generate_input1, batch,
dim1)),
"input_data2": TensorConfig(
data_gen=partial(generate_input2,
input2_shape)),
},
outputs=["mul4_output"])
yield program_config
def sample_predictor_configs(
self, program_config) -> (paddle_infer.Config, List[int], float):
def generate_dynamic_shape(attrs):
# The last dim of input1 and input2 should be static.
self.dynamic_shape.min_input_shape = {
"input_data1": [1, 8, 768],
"input_data2": [1, 1, 1, 128],
"reshape24_output": [1, 128, 768]
}
self.dynamic_shape.max_input_shape = {
"input_data1": [16, 512, 768],
"input_data2": [16, 256, 512, 128],
"reshape24_output": [1, 128, 768]
}
self.dynamic_shape.opt_input_shape = {
"input_data1": [8, 128, 768],
"input_data2": [8, 32, 64, 128],
"reshape24_output": [1, 128, 768]
}
def clear_dynamic_shape():
self.dynamic_shape.max_input_shape = {}
self.dynamic_shape.min_input_shape = {}
self.dynamic_shape.opt_input_shape = {}
attrs = [
program_config.ops[i].attrs
for i in range(len(program_config.ops))
]
# for static_shape
clear_dynamic_shape()
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 4), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 4), (1e-5, 1e-5)
# for dynamic_shape
generate_dynamic_shape(attrs)
self.trt_param.precision = paddle_infer.PrecisionType.Float32
yield self.create_inference_config(), (1, 3), (1e-5, 1e-5)
self.trt_param.precision = paddle_infer.PrecisionType.Half
yield self.create_inference_config(), (1, 3), (1e-5, 1e-5)
def add_skip_trt_case(self):
def teller1(program_config, predictor_config):
if self.trt_param.precision == paddle_infer.PrecisionType.Half:
return True
return False
self.add_skip_case(
teller1, SkipReasons.TRT_NOT_IMPLEMENTED,
"The output has diff between gpu and trt in fp16 mode.")
def teller2(program_config, predictor_config):
if self.trt_param.precision == paddle_infer.PrecisionType.Float32 and len(
self.dynamic_shape.min_input_shape) != 0 and self.batch > 2:
return True
return False
self.add_skip_case(
teller2, SkipReasons.TRT_NOT_IMPLEMENTED,
"The output has diff between gpu and trt when dynamic fp32 mode and batch size > 2."
)
def test(self):
self.add_skip_trt_case()
self.run_test()
if __name__ == "__main__":
unittest.main()
| python |
from types import SimpleNamespace
import pytest
from syncx import rollback
from syncx import tag
from syncx import untag
from syncx.manager import Manager
from syncx.wrappers import CustomObjectWrapper
from syncx.wrappers import DictWrapper
from syncx.wrappers import ListWrapper
from syncx.wrappers import SetWrapper
def check_callback(wrapped, callback, expected_path=None):
assert len(callback.calls) == 1
details = callback.calls[0].args[0]
assert details.location is wrapped
assert details.path_to_location == (expected_path or [])
def test_dict(mock_simple):
wrapped = tag(dict(), mock_simple)
assert type(wrapped) is DictWrapper
wrapped['key'] = 'value'
check_callback(wrapped, mock_simple)
def test_list(mock_simple):
wrapped = tag(list(), mock_simple)
assert type(wrapped) is ListWrapper
wrapped.append('value')
check_callback(wrapped, mock_simple)
def test_set(mock_simple):
wrapped = tag(set(), mock_simple)
assert type(wrapped) is SetWrapper
wrapped.add('value')
check_callback(wrapped, mock_simple)
def test_inherited_from_list(mock_simple):
class CustomList(list):
pass
custom_list = CustomList()
assert hasattr(custom_list, '__dict__')
wrapped = tag(custom_list, mock_simple)
assert type(wrapped) is ListWrapper
wrapped.append('value')
check_callback(wrapped, mock_simple)
assert wrapped._manager.root_type is CustomList
def test_custom_object(mock_simple):
wrapped = tag(SimpleNamespace(test='initial value'), mock_simple)
assert type(wrapped) is CustomObjectWrapper
wrapped.test = 'value'
check_callback(wrapped.__dict__, mock_simple, ['__dict__'])
assert wrapped._manager.root_type is SimpleNamespace
def test_type(mock_simple):
wrapped = tag(SimpleNamespace, mock_simple)
wrapped.test = 'value'
check_callback(wrapped.__dict__, mock_simple, ['__dict__'])
assert wrapped._manager.root_type is SimpleNamespace
def test_multiple_levels(catcher):
wrapped = tag(SimpleNamespace(data={'key': ['value1']}), catcher.changed)
wrapped.data['key'].append(set())
wrapped.data['key'][1].add('value2')
assert catcher.paths == [[], ['key'], ['key', 1]]
assert catcher.function_names == ['__setitem__', 'append', 'add']
def test_same_object_different_paths(catcher):
root = tag({'a': {}}, catcher.changed)
root['b'] = root['a']
root['a']['aa'] = 1
root['b']['aa'] = 2
root['a']['aa'] = 3
assert catcher.paths == [[], ['a'], ['b'], ['a']] # Different paths preserved
assert root['a'] == root['b'] # But same object
assert root['b']['aa'] == 3 # Same values
def test_revert_to_regular(catcher):
wrapped = tag({'a': [{'b'}]}, catcher.changed)
original = untag(wrapped)
assert type(original) is dict
assert type(original['a']) is list
assert type(original['a'][0]) is set
@pytest.mark.parametrize('should_rollback', (False, True))
def test_context_manager(mock_func, should_rollback):
mock_start = mock_func(Manager, 'start_transaction')
mock_end = mock_func(Manager, 'end_transaction')
wrapped = tag([])
with wrapped:
if should_rollback:
rollback()
assert len(mock_start.calls) == 1
assert len(mock_end.calls) == 1
assert mock_end.kwargs == {'do_rollback': should_rollback}
| python |
# Copyright (c) 2021 Cloudification GmbH.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from keystoneauth1.exceptions import http as k_exc
from neutronclient.common import exceptions as n_client_exc
from oslo_config import cfg
from oslo_log import log
from networking_interconnection.common import clients
from networking_interconnection.common import constants
from networking_interconnection.db import interconnaction_db as intc_db
from networking_interconnection.extensions import interconnection as intc_exc
from networking_interconnection.neutronclient.osc.v2 import (
interconnection as osc_v2)
from networking_interconnection import opts
LOG = log.getLogger(__name__)
CONF = cfg.CONF
@registry.has_registry_receivers
class InterconnectionPlugin(intc_exc.InterconnectionPluginBase,
intc_db.InterconnectionPluginDb):
def __init__(self):
super(InterconnectionPlugin, self).__init__()
# Register config options
opts.register_interconnection_options(CONF)
# Save config
self.cfg = CONF.interconnection
self.mngr = clients.ClientManager(CONF.interconnection)
self.db = intc_db.InterconnectionPluginDb()
def create_interconnection(self, context, interconnection):
data = interconnection[constants.API_RESOURCE_NAME]
if not data['remote_interconnection_id']:
data['state'] = constants.STATE_WAITING
else:
data['state'] = constants.STATE_VALIDATING
if not self.cfg.allow_regions_coincidence:
self._validate_regions(data)
remote_neutron, remote_keystone = self.mngr.get_clients(
data['remote_region'])
local_neutron, local_keystone = self.mngr.get_clients(
self.cfg.region_name)
local, remote = self._validate_resources(
data, remote_neutron, remote_keystone, local_neutron,
local_keystone)
self._validate_remote_interconnection(
data, remote_neutron, remote_keystone, local_keystone)
data['local_parameters'] = self._get_parameters(local)
data['remote_parameters'] = self._get_parameters(remote)
db_obj = self.db.create_interconnection(context, data)
        # The Neutron callback system is the only way we can start validating
        # the interconnection in the background. This notification is caught
        # by the _sync_interconnections receiver below.
registry.notify(
constants.INVTERCONNECTION_RESOURCE,
events.AFTER_CREATE, self, context=context, interconnection=db_obj,
local_resource=local, remote_resource=remote,
remote_neutron=remote_neutron, local_neutron=local_neutron)
return db_obj
def get_interconnections(self, context, filters=None, fields=None):
return self.db.get_interconnections(context, filters, fields)
def get_interconnection(self, context, id, fields=None):
return self.db.get_interconnection(context, id, fields)
def update_interconnection(self, context, id, interconnection):
data = interconnection[constants.API_RESOURCE_NAME]
db_obj = self.db.update_interconnection(context, id, data)
# if state was changed to VALIDATED we have to synchronize resources
if data.get('state') and data['state'] == constants.STATE_VALIDATED:
            # The Neutron callback system is the only way we can start the
            # synchronization in the background.
registry.notify(
constants.INVTERCONNECTION_RESOURCE, events.AFTER_UPDATE, self,
context=context, interconnection=db_obj)
return db_obj
def delete_interconnection(self, context, id):
db_obj = self.db.delete_interconnection(context, id)
        # The Neutron callback system is the only way we can start the
        # synchronization in the background.
registry.notify(
constants.INVTERCONNECTION_RESOURCE, events.AFTER_DELETE, self,
context=context, interconnection=db_obj)
return db_obj
@registry.receives(
constants.INVTERCONNECTION_RESOURCE, [events.AFTER_CREATE])
def _sync_interconnections(self, resource, event, trigger, **kwargs):
intcn = kwargs.get('interconnection')
local_neutron = kwargs.get('local_neutron')
remote_neutron = kwargs.get('remote_neutron')
        # Nothing to validate if the remote interconnection is not ready yet.
if not intcn['remote_interconnection_id']:
return
        # Set state VALIDATED on each side to start the resource synchronization
        # (see the _sync_resources function). We update the local interconnection
        # via the API instead of the database so that the background action for
        # the AFTER_UPDATE event is triggered on each side in the same way.
self._update_interconnection(
remote_neutron, intcn['remote_interconnection_id'],
state=constants.STATE_VALIDATED,
remote_interconnection_id=intcn['id'])
self._update_interconnection(
local_neutron, intcn['id'],
state=constants.STATE_VALIDATED)
@registry.receives(
constants.INVTERCONNECTION_RESOURCE, [events.AFTER_UPDATE,
events.AFTER_DELETE])
def _sync_resources(self, resource, event, trigger, **kwargs):
intcn = kwargs.get('interconnection')
context = kwargs.get('context')
try:
# get local and remote clients
local_neutron, _ = self.mngr.get_clients(self.cfg.region_name)
remote_neutron, _ = self.mngr.get_clients(intcn['remote_region'])
# get local and remote resources
remote_res = self._get_bgpvpn(
remote_neutron, intcn['remote_resource_id'])
local_res = self._get_bgpvpn(
local_neutron, intcn['local_resource_id'])
if event == events.AFTER_UPDATE:
# import/export targets synchronization
imports = set(
local_res['import_targets'] + remote_res['export_targets'])
local_neutron.update_bgpvpn(
intcn['local_resource_id'],
body={'bgpvpn': {'import_targets': list(imports)}})
# update interconnection to ACTIVE
self.db.update_interconnection(
context, intcn['id'], {'state': constants.STATE_ACTIVE})
if event == events.AFTER_DELETE:
# import/export targets synchronization
imports = set(
local_res['import_targets']) - set(
remote_res['export_targets'])
local_neutron.update_bgpvpn(
intcn['local_resource_id'],
body={'bgpvpn': {'import_targets': list(imports)}})
except n_client_exc.NeutronClientException as err:
LOG.error('Could not synchronize targets for local resource bgpvpn'
' with ID %s. Details: request_ids=%s msg=%s'
% (intcn['local_resource_id'], err.request_ids, err))
if event != events.AFTER_DELETE:
self.db.update_interconnection(
context, intcn['id'],
{'state': constants.STATE_TEARDOWN})
def _update_interconnection(self, client, id, **kwargs):
client.put(
osc_v2.PATH_SINGLE + id,
body={constants.API_RESOURCE_NAME: kwargs})
def _validate_resources(self, data, remote_neutron, remote_keystone,
local_neutron, local_keystone):
# get local and remote resources
remote_res = self._get_bgpvpn(
remote_neutron, data['remote_resource_id'])
local_res = self._get_bgpvpn(local_neutron, data['local_resource_id'])
# validate owner of resources
remote_domain_name = self._get_domain_name(
remote_keystone, remote_res['project_id'])
local_domain_name = self._get_domain_name(
local_keystone, local_res['project_id'])
if remote_domain_name != local_domain_name:
raise intc_exc.ResourcesOwnedByDifferentDomains()
# validate targets
if not remote_res['export_targets']:
raise intc_exc.BgpvpnExportTargetsIsEpmty(bgpvpn=remote_res['id'])
if not local_res['export_targets']:
raise intc_exc.BgpvpnExportTargetsIsEpmty(bgpvpn=local_res['id'])
return local_res, remote_res
def _validate_remote_interconnection(self, data, remote_neutron,
remote_keystone, local_keystone):
if not data['remote_interconnection_id']:
return
# get remote interconnection
r_intcn = remote_neutron.get(
osc_v2.PATH_SINGLE + data['remote_interconnection_id']
)[constants.API_RESOURCE_NAME]
# check owner of remote interconnection
remote_domain_name = self._get_domain_name(
remote_keystone, r_intcn['project_id'])
local_domain_name = self._get_domain_name(
local_keystone, data['project_id'])
if remote_domain_name != local_domain_name:
raise intc_exc.InterconnectionOwnedByDifferentDomains(
local=data['project_id'], remote=r_intcn['project_id'])
# update remote interconnection to set state VALIDATING and remote
# interconnection ID
self._update_interconnection(
remote_neutron, data['remote_interconnection_id'],
state=constants.STATE_VALIDATING)
# check local and remote resources
if (r_intcn['remote_resource_id'] != data['local_resource_id']
or r_intcn['local_resource_id'] != data['remote_resource_id']):
LOG.error('Invalid resource settings in remote interconnection %s.'
% (data['remote_interconnection_id']))
raise intc_exc.InvalidRemoteInterconnection()
def _validate_regions(self, data):
if data['remote_region'] == self.cfg.region_name:
raise intc_exc.RegionConflict(
remote_region=data['remote_region'],
local_region=self.cfg.region_name)
def _get_parameters(self, bgpvpn):
params_to_copy = ['project_id']
params = {}
for key, val in bgpvpn.items():
if key in params_to_copy and val:
# all values in parameters should be a list for pretty format
params[key] = [val] if not isinstance(val, list) else val
return params
def _get_bgpvpn(self, neutron_client, bgpvpn_id):
try:
return neutron_client.show_bgpvpn(bgpvpn_id)['bgpvpn']
except n_client_exc.NotFound:
raise intc_exc.ResourceNotFound(
resource_type='bgpvpn',
remote_resource_id=bgpvpn_id)
def _get_domain_name(self, keystone_client, project_id):
try:
project = keystone_client.projects.get(project_id)
return keystone_client.domains.get(project.domain_id).name
except k_exc.NotFound:
raise intc_exc.ProjectOrDomainNotFound(
project_id=project_id)
| python |
# Copyright 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from numba.extending import typeof_impl, register_model
from numba_dppy.dppy_array_type import DPPYArray, DPPYArrayModel
import numba_dppy.target as dppy_target
from dpctl.tensor import usm_ndarray
from numba.np import numpy_support
class USMNdArrayType(DPPYArray):
"""
USMNdArrayType(dtype, ndim, layout, usm_type,
readonly=False, name=None,
aligned=True, addrspace=None)
creates Numba type to represent ``dpctl.tensor.usm_ndarray``.
"""
def __init__(
self,
dtype,
ndim,
layout,
usm_type,
readonly=False,
name=None,
aligned=True,
addrspace=None,
):
self.usm_type = usm_type
# This name defines how this type will be shown in Numba's type dumps.
name = "USM:ndarray(%s, %sd, %s)" % (dtype, ndim, layout)
super(USMNdArrayType, self).__init__(
dtype,
ndim,
layout,
py_type=usm_ndarray,
readonly=readonly,
name=name,
addrspace=addrspace,
)
def copy(self, *args, **kwargs):
return super(USMNdArrayType, self).copy(*args, **kwargs)
# This tells Numba to use the DPPYArray data layout for objects of type USMNdArrayType.
register_model(USMNdArrayType)(DPPYArrayModel)
dppy_target.spirv_data_model_manager.register(USMNdArrayType, DPPYArrayModel)
@typeof_impl.register(usm_ndarray)
def typeof_usm_ndarray(val, c):
"""
This function creates the Numba type (USMNdArrayType) when a usm_ndarray is passed.
"""
try:
dtype = numpy_support.from_dtype(val.dtype)
except NotImplementedError:
raise ValueError("Unsupported array dtype: %s" % (val.dtype,))
layout = "C"
readonly = False
return USMNdArrayType(dtype, val.ndim, layout, val.usm_type, readonly=readonly)
| python |
"""
Assignment No. 11 Part V
Name: Mohamed Gamal Zaid
ID: 201700399
"""
import numpy as np
from numpy import exp as E
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
import time
J=1
T=1
H=-5
n=20
total = np.power(n,2)
ts=1100
nCut = 100
plot = False
def interactingSpinsIndices(i,j,s):
"""
    Find the indices of the chosen spin and of all its nearest neighbours,
    so that Eflip can be calculated accurately while applying periodic
    boundary conditions.
    Indices are ordered as follows: 0:center, 1:left, 2:right, 3:up, 4:down;
    within each entry, 0:i, 1:j
"""
indices=np.array([(i,j),(i,j-s),(i,j+s),(i-s,j),(i+s,j)],dtype=int)
    #We have four corners and four edges; at the corners two indices need
    #wrapping, at the edges just one
right = n-j-s-1
down = n-i-s-1
left = j-s
up = i-s
if left<0: #left edge 1
indices[1,1] = (n+left) #j
elif right<0: #right edge 2
indices[2,1] = (-right-1) #j
if up<0: #upper edge 3
indices[3,0] = (n+up) #i
elif down<0: #lower edge 4
indices[4,0] = (-down-1) #i
#print(indices)
return indices
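# Illustrative example (values follow from the function above, with n = 20):
# interactingSpinsIndices(0, 0, s=1) wraps the out-of-range neighbours and
# returns [(0, 0), (0, 19), (0, 1), (19, 0), (1, 0)],
# i.e. centre, left, right, up, down under periodic boundary conditions.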
def MontCarlo(T, H, n, ts, sign=1):
spins = sign *np.ones([n,n])
Ms = np.zeros(ts-nCut) #Magnetization
for t in range(ts):
        #Loop over every spin; interactingSpinsIndices handles the periodic boundary conditions
for i in range(n):
for j in range(n):
inds = interactingSpinsIndices(i,j,s=1)
if (t!=0):
Eflip = 2*(J*np.sum( [np.product([spins[i,j], spins[ind1,ind2]]) for ind1,ind2 in inds[1:]])+spins[i,j]*H)
if Eflip <= 0:
spins[i,j]=-spins[i,j]
elif Eflip > 0:
r=np.random.rand()
BoltzFactor = E(-Eflip/T)
if(r <= BoltzFactor):
spins[i,j]=-spins[i,j]
if plot:
plt.matshow(spins,cmap = cm.viridis)
plt.savefig("run\\"+str(t)+".jpeg")
plt.close("all")
if t>=nCut:
Ms[t-nCut]=np.sum(spins)/total
return Ms
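# Minimal usage sketch (hypothetical parameter values): a single run such as
#   Ms = MontCarlo(T=2.0, H=0.01, n=20, ts=1100)
# returns an array of ts - nCut = 1000 per-sweep magnetisations per spin,
# sampled after the first nCut sweeps are discarded as equilibration.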
st = time.perf_counter()
Hs = np.linspace(0.01,0.05,5)
nH = np.size(Hs)
color=cm.rainbow(np.linspace(0, 1, nH))
TempRange = np.arange(1.5,3.1,0.1)
h_t_15_8 = np.zeros([nH,len(TempRange)])
m_t_1_8 = np.zeros_like(h_t_15_8)
signs = [-1,1]
lss=['-','--']
mss=['o','^','*','s','+']
for i,H in enumerate(Hs):
H=np.round(H,2)
M=np.zeros(len(TempRange))
st1 = time.perf_counter()
for q, T in enumerate(TempRange):
T=np.round(T,2)
print("for T= "+ str(T)+" ,H= " + str(H))
M[q]=np.mean(MontCarlo(T, H, n, ts, 1))
#t = 1- 4/T
t = (T-2.27)/2.27
m_t_1_8[i,q] = M[q]/(np.power(np.abs(t),(1/8)))
h_t_15_8[i,q] = H/(np.power(np.abs(t),(15/8)))
plt.scatter(TempRange ,M ,c=color[i].reshape(1,4),marker=mss[i]
,label="H="+str(Hs[i]),alpha=0.6)
en1 = time.perf_counter()
print("It took: "+str(np.round((en1-st1)/60,3))+" Mins")
print("In total it took: "+str(np.round((en1-st)/60,3))+" Mins")
Title = "Figure 8.15"
plt.ylabel("M")
plt.xlabel("T")
plt.title(Title)
plt.grid(alpha=0.2)
plt.legend()
plt.savefig(Title+".jpeg",dpi=300,pad_inches=0.5)
plt.close("all")
fig, ax = plt.subplots()
for r in range(nH):
plt.scatter(h_t_15_8[r], m_t_1_8[r], c=color[r].reshape(1,4),marker=mss[r]
,alpha=0.6,label="H="+str(Hs[r]))
plt.xlabel("h / |t| ^ 15/8")
plt.ylabel("m / |t| ^ 1/8")
#ax.set_yscale('log')
ax.set_xscale('log')
Title = "Figure 8.16_Log x"
plt.title(Title)
plt.legend()
plt.grid(alpha=0.2)
plt.savefig(Title+".jpeg",dpi=300,pad_inches=0.5)
#plt.close("all")
| python |
# Modified to store results in the same txt file every time
# prototype of vanilla LSTM for pedestrian modeling
# written by: Bryan Zhao and Ashish Roongta, Fall 2018
# Carnegie Mellon University
# import relevant libraries
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib
import numpy as np
import trajectories
import loader
import argparse
import gc
import logging
import os
import sys
import time
import matplotlib.pyplot as plt
# build argparser
parser = argparse.ArgumentParser()
parser.add_argument('--input_size', type=int, default=2)
parser.add_argument('--output_size', type=int, default=2)
# RNN size parameter (dimension of the output/hidden state)
parser.add_argument('--rnn_size', type=int, default=128,
help='size of RNN hidden state')
# size of each batch parameter
parser.add_argument('--batch_size', type=int, default=10,
help='minibatch size')
# Length of sequence to be considered parameter
parser.add_argument('--seq_length', type=int, default=20,
help='RNN sequence length')
parser.add_argument('--pred_length', type=int, default=12,
help='prediction length')
# number of epochs parameter
parser.add_argument('--num_epochs', type=int, default=20,
help='number of epochs')
# frequency at which the model should be saved parameter
parser.add_argument('--save_every', type=int, default=400,
help='save frequency')
# gradient value at which it should be clipped
parser.add_argument('--grad_clip', type=float, default=10.,
help='clip gradients at this value')
# learning rate parameter
parser.add_argument('--learning_rate', type=float, default=0.003,
help='learning rate')
# decay rate for the learning rate parameter
parser.add_argument('--decay_rate', type=float, default=0.95,
help='decay rate for rmsprop')
# dropout probability parameter
parser.add_argument('--dropout', type=float, default=0.5,
help='dropout probability')
# dimension of the embeddings parameter
parser.add_argument('--embedding_size', type=int, default=64,
help='Embedding dimension for the spatial coordinates')
# size of neighborhood to be considered parameter
parser.add_argument('--neighborhood_size', type=int, default=32,
help='Neighborhood size to be considered for social grid')
# size of the social grid parameter
parser.add_argument('--grid_size', type=int, default=4,
help='Grid size of the social grid')
# maximum number of pedestrians to be considered
parser.add_argument('--maxNumPeds', type=int, default=27,
help='Maximum Number of Pedestrians')
# lambda regularization parameter (L2)
parser.add_argument('--lambda_param', type=float, default=0.0005,
help='L2 regularization parameter')
# cuda parameter
parser.add_argument('--use_cuda', action="store_true", default=False,
help='Use GPU or not')
# GRU parameter
parser.add_argument('--gru', action="store_true", default=False,
help='True : GRU cell, False: LSTM cell')
# drive option
parser.add_argument('--drive', action="store_true", default=False,
help='Use Google drive or not')
# number of validation will be used
parser.add_argument('--num_validation', type=int, default=2,
help='Total number of validation dataset for validate accuracy')
# frequency of validation
parser.add_argument('--freq_validation', type=int, default=1,
help='Frequency number(epoch) of validation using validation data')
# frequency of optimizer learning decay
parser.add_argument('--freq_optimizer', type=int, default=8,
help='Frequency number(epoch) of learning decay for optimizer')
# store grids in epoch 0 and use further.2 times faster -> Intensive memory use around 12 GB
parser.add_argument('--grid', action="store_true", default=True,
help='Whether store grids and use further epoch')
# dataset options
parser.add_argument('--dataset_name', default='zara1', type=str)
parser.add_argument('--delim', default='\t')
parser.add_argument('--loader_num_workers', default=4, type=int)
parser.add_argument('--obs_len', default=8, type=int)
parser.add_argument('--pred_len', default=12, type=int)
parser.add_argument('--skip', default=1, type=int)
args = parser.parse_args()
data_dir = "/home/roongtaaahsih/ped_traj/sgan_ab/scripts/datasets/eth/train"
''' Class for defining the Vanilla LSTM Network '''
class VanillaLSTMNet(nn.Module):
def __init__(self):
super(VanillaLSTMNet, self).__init__()
''' Inputs to the LSTMCell's are (input, (h_0, c_0)):
1. input of shape (batch, input_size): tensor containing input
features
2a. h_0 of shape (batch, hidden_size): tensor containing the
initial hidden state for each element in the batch.
2b. c_0 of shape (batch, hidden_size): tensor containing the
initial cell state for each element in the batch.
Outputs: h_1, c_1
1. h_1 of shape (batch, hidden_size): tensor containing the next
hidden state for each element in the batch
2. c_1 of shape (batch, hidden_size): tensor containing the next
cell state for each element in the batch '''
# set parameters for network architecture
self.embedding_size = 64
self.input_size = 2
self.output_size = 2
self.dropout_prob = 0.5
# linear layer to embed the input position
self.input_embedding_layer = nn.Linear(self.input_size, self.embedding_size)
# define lstm cell
self.lstm_cell = nn.LSTMCell(self.embedding_size, self.embedding_size)
# linear layer to map the hidden state of LSTM to output
self.output_layer = nn.Linear(self.embedding_size, self.output_size)
# ReLU and dropout unit
self.relu = nn.ReLU()
self.dropout = nn.Dropout(self.dropout_prob)
pass
def forward(self, observed_batch, pred_len = 0):
''' this function takes the input sequence and predicts the output sequence.
args:
observed_batch (torch.Tensor) : input batch with shape <seq length x num pedestrians x number of dimensions>
pred_len (int) : length of the sequence to be predicted.
'''
output_seq = []
ht = torch.zeros(observed_batch.size(1), self.embedding_size, dtype=torch.float)
ct = torch.zeros(observed_batch.size(1), self.embedding_size, dtype=torch.float)
seq, peds, coords = observed_batch.shape
for step in range(seq):
observed_step = observed_batch[step, :, :]
lin_out = self.input_embedding_layer(observed_step.view(peds,2))
ht, ct = self.lstm_cell(lin_out, (ht, ct))
out = self.output_layer(ht)
# now, make predictions for future trajectories
# print("predicted length input taken by forward function---------------------",pred_len)
for i in range(pred_len):
lin_out = self.input_embedding_layer(out)
ht, ct = self.lstm_cell(lin_out, (ht,ct))
out = self.output_layer(ht)
output_seq += [out]
output_seq = torch.stack(output_seq).squeeze() # convert list to tensor
return output_seq
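# Shape sketch (illustrative, assuming the defaults above): for an observed_batch
# of shape <obs_len x num_peds x 2> and pred_len=12, forward() returns a tensor of
# shape <12 x num_peds x 2> (squeezed, so a single pedestrian yields <12 x 2>).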
#Defining the test function to calculate and return avg test loss after each epoch
def test(vanilla_lstm_net,args,pred_len=0):
test_data_dir = "/home/roongtaaahsih/ped_traj/sgan_ab/scripts/datasets/eth/test"
# retrieve dataloader
dataset, dataloader = loader.data_loader(args, test_data_dir)
# define parameters for training and testing loops
criterion = nn.MSELoss() # MSE works best for difference between predicted and actual coordinate paths
# initialize lists for capturing losses
test_loss = []
test_avgD_error=[]
test_finalD_error=[]
# now, test the model
for i, batch in enumerate(dataloader):
test_observed_batch = batch[0]
test_target_batch = batch[1]
out = vanilla_lstm_net(test_observed_batch, pred_len=pred_len) # forward pass of lstm network for training
# print("vnet out's shape",out.shape)
cur_test_loss = criterion(out, test_target_batch) # calculate MSE loss
test_loss.append(cur_test_loss.item())
out1=out
target_batch1=test_target_batch #making a copy of the tensors to convert them to array
        seq, peds, coords = test_target_batch.shape  # peds is the number of pedestrians
avgD_error=(np.sum(np.sqrt(np.square(out1[:,:,0].detach().numpy()-target_batch1[:,:,0].detach().numpy())+
np.square(out1[:,:,1].detach().numpy()-target_batch1[:,:,1].detach().numpy()))))/(pred_len*peds)
test_avgD_error.append(avgD_error)
# print("current avg Disp error:",avgD_error)
#calculating final displacement error
finalD_error=(np.sum(np.sqrt(np.square(out1[pred_len-1,:,0].detach().numpy()-target_batch1[pred_len-1,:,0].detach().numpy())+
np.square(out1[pred_len-1,:,1].detach().numpy()-target_batch1[pred_len-1,:,1].detach().numpy()))))/peds
test_finalD_error.append(finalD_error)
avg_testloss = sum(test_loss)/len(test_loss)
avg_testD_error=sum(test_avgD_error)/len(test_avgD_error)
avg_testfinalD_error=sum(test_finalD_error)/len(test_finalD_error)
print("============= Average test loss:", avg_testloss, "====================")
return avg_testloss, avg_testD_error,avg_testfinalD_error
def main(args):
'''define parameters for training and testing loops!'''
# num_epoch = 20
# pred_len = 12
# learning_rate = 0.001
num_epoch = args.num_epochs
pred_len = args.pred_len
learning_rate = args.learning_rate
batch_size = args.batch_size
# retrieve dataloader
dataset, dataloader = loader.data_loader(args, data_dir)
''' define the network, optimizer and criterion '''
vanilla_lstm_net = VanillaLSTMNet()
criterion = nn.MSELoss() # MSE works best for difference between predicted and actual coordinate paths
optimizer = optim.Adam(vanilla_lstm_net.parameters(), lr=learning_rate)
# initialize lists for capturing losses
train_loss = []
test_loss = []
avg_train_loss = []
avg_test_loss = []
train_avgD_error=[]
train_finalD_error=[]
avg_train_avgD_error=[]
avg_train_finalD_error=[]
test_finalD_error=[]
test_avgD_error=[]
std_train_loss = []
std_test_loss = []
'''training loop'''
for i in range(num_epoch):
print('======================= Epoch: {cur_epoch} / {total_epochs} =======================\n'.format(cur_epoch=i, total_epochs=num_epoch))
def closure():
for i, batch in enumerate(dataloader):
train_batch = batch[0]
target_batch = batch[1]
# print("train_batch's shape", train_batch.shape)
# print("target_batch's shape", target_batch.shape)
                seq, peds, coords = train_batch.shape  # peds is the number of pedestrians
out = vanilla_lstm_net(train_batch, pred_len=pred_len) # forward pass of lstm network for training
# print("out's shape:", out.shape)
optimizer.zero_grad() # zero out gradients
cur_train_loss = criterion(out, target_batch) # calculate MSE loss
# print('Current training loss: {}'.format(cur_train_loss.item())) # print current training loss
print('Current training loss: {}'.format(cur_train_loss.item())) # print current training loss
                #calculating average displacement error
out1=out
target_batch1=target_batch #making a copy of the tensors to convert them to array
avgD_error=(np.sum(np.sqrt(np.square(out1[:,:,0].detach().numpy()-target_batch1[:,:,0].detach().numpy())+
np.square(out1[:,:,1].detach().numpy()-target_batch1[:,:,1].detach().numpy()))))/(pred_len*peds)
train_avgD_error.append(avgD_error)
# print("current avg Disp error:",avgD_error)
#calculating final displacement error
finalD_error=(np.sum(np.sqrt(np.square(out1[pred_len-1,:,0].detach().numpy()-target_batch1[pred_len-1,:,0].detach().numpy())+
np.square(out1[pred_len-1,:,1].detach().numpy()-target_batch1[pred_len-1,:,1].detach().numpy()))))/peds
train_finalD_error.append(finalD_error)
# print("current final displacement error:",finalD_error)
train_loss.append(cur_train_loss.item())
cur_train_loss.backward() # backward prop
optimizer.step() # step like a mini-batch (after all pedestrians)
return cur_train_loss
optimizer.step(closure) # update weights
# save model at every epoch (uncomment)
# torch.save(vanilla_lstm_net, './saved_models/vanilla_lstm_model_lr0005.pt')
# print("Saved vanilla_lstm_net!")
avg_train_loss.append(np.sum(train_loss)/len(train_loss))
avg_train_avgD_error.append(np.sum(train_avgD_error)/len(train_avgD_error))
avg_train_finalD_error.append(np.sum(train_finalD_error)/len(train_finalD_error))
std_train_loss.append(np.std(np.asarray(train_loss)))
train_loss = [] # empty train loss
print("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
print("average train loss: {}".format(avg_train_loss))
print("average std loss: {}".format(std_train_loss))
avgTestLoss,avgD_test,finalD_test=test(vanilla_lstm_net,args,pred_len)
print("train_final_D:",train_finalD_error)
print("train avg_D: ",train_avgD_error)
print("test_finalD: ",finalD_test)
print("test_avgD",avgD_test)
avg_test_loss.append(avgTestLoss)
test_finalD_error.append(finalD_test)
test_avgD_error.append(avgD_test)
#avg_test_loss.append(test(vanilla_lstm_net,args,pred_len)) ##calliing test function to return avg test loss at each epoch
'''after running through epochs, save your model and visualize.
then, write your average losses and standard deviations of
losses to a text file for record keeping.'''
save_path = os.path.join('./saved_models/', 'vanilla_lstm_model_lr_' + str(learning_rate) + '_epoch_' + str(num_epoch) + '_predlen_' + str(pred_len) + '_batchsize_' + str(batch_size) + '.pt')
# torch.save(vanilla_lstm_net, './saved_models/vanilla_lstm_model_lr001_ep20.pt')
torch.save(vanilla_lstm_net, save_path)
print("saved vanilla_lstm_net! location: " + save_path)
''' visualize losses vs. epoch'''
plt.figure() # new figure
plt.title("Average train loss vs {} epochs".format(num_epoch))
plt.plot(avg_train_loss,label='avg train_loss')
plt.plot(avg_test_loss,color='red',label='avg test_loss')
plt.legend()
plt.savefig("./saved_figs/" + "avgtrainloss_lr_"+ str(learning_rate) + '_epochs_' + str(num_epoch) + '_predlen_' + str(pred_len) + '_batchsize_' + str(batch_size) + '.jpeg')
# plt.show()
# plt.show(block=True)
plt.figure() # new figure
plt.title("Average and final displacement error {} epochs".format(num_epoch))
plt.plot(avg_train_finalD_error,label='train:final displacement error')
plt.plot(avg_train_avgD_error,color='red',label='train:avg displacement error')
plt.plot(test_finalD_error,color='green',label='test:final displacement error')
plt.plot(test_avgD_error,color='black',label='test:avg displacement error')
plt.legend()
# plt.show()
plt.savefig("./saved_figs/" + "avg_final_displacement_lr_"+ str(learning_rate) + '_epochs_' + str(num_epoch) + '_predlen_' + str(pred_len) + '_batchsize_' + str(batch_size) + '.jpeg')
plt.figure()
plt.title("Std of train loss vs epoch{} epochs".format(num_epoch))
plt.plot(std_train_loss)
plt.savefig("./saved_figs/" + "stdtrainloss_lr_"+ str(learning_rate) + '_epochs_' + str(num_epoch) + '_predlen_' + str(pred_len) + '_batchsize_' + str(batch_size) + '.jpeg')
# plt.show(block=True)
print("saved images for avg training losses! location: " + "./saved_figs")
# save results to text file
txtfilename = os.path.join("./txtfiles/", r"Results_table_lr_"+ str(learning_rate) + '_epochs_' + str(num_epoch) + '_batchsize_' + str(batch_size) + ".txt")
os.makedirs(os.path.dirname("./txtfiles/"), exist_ok=True) # make directory if it doesn't exist
with open(txtfilename, "a+") as f: #will append to a file, create a new one if it doesn't exist
# if(pred_len==2): #To print the heading in the txt file
f.write("Pred_Len"+"\t"+"Avg_Train_Loss"+"\t"+"Std_Train_Loss"+"\t"+"Avg_Test_Loss"+"\t"+
"\t"+"Train_AvgD_Error"+"\t"+"Train_FinalD_Error"+"\t"+
"Test_AvgD_Error"+"\t"+"Test_FinalD_Error"+"\n")
f.write("\n==============Average train loss vs. epoch:===============")
f.write(str(pred_len)+"\n")
f.write(str(avg_train_loss)+"\n")
f.write("\nepochs: " + str(num_epoch))
f.write("\n==============Std train loss vs. epoch:===================")
f.write(str(std_train_loss)+"\n")
f.write("\n==============avg test loss vs. epoch:===================")
f.write(str(avg_test_loss)+"\n")
f.write("\n==============Avg train displacement error:===================")
f.write(str(avg_train_avgD_error)+"\n")
f.write("\n==============final train displacement error:===================")
f.write(str(avg_train_finalD_error)+"\n")
f.write("\n==============Avg test displacement error:===================")
f.write(str(test_avgD_error)+"\n")
f.write("\n==============final test displacement error:===================")
f.write(str(test_finalD_error)+"\n")
f.close()
print("saved average and std of training losses to text file in: ./txtfiles")
'''main function'''
if __name__ == '__main__':
main(args)
| python |
import os
import copy
import hashlib
import math
from typing import Union
from shapely.geometry import LineString
import pandas as pd
import geopandas as gpd
from geographiclib.geodesic import Geodesic
from .logger import WranglerLogger
def point_df_to_geojson(df: pd.DataFrame, properties: list):
"""
Author: Geoff Boeing:
https://geoffboeing.com/2015/10/exporting-python-data-geojson/
"""
from .roadwaynetwork import RoadwayNetwork
geojson = {"type": "FeatureCollection", "features": []}
for _, row in df.iterrows():
feature = {
"type": "Feature",
"properties": {},
"geometry": {"type": "Point", "coordinates": []},
}
feature["geometry"]["coordinates"] = [row["geometry"].x, row["geometry"].y]
feature["properties"][RoadwayNetwork.NODE_FOREIGN_KEY] = row.name
for prop in properties:
feature["properties"][prop] = row[prop]
geojson["features"].append(feature)
return geojson
def link_df_to_json(df: pd.DataFrame, properties: list):
""" Export pandas dataframe as a json object.
Modified from: Geoff Boeing:
https://geoffboeing.com/2015/10/exporting-python-data-geojson/
Args:
df: Dataframe to export
properties: list of properties to export
"""
# can't remember why we need this?
if "distance" in properties:
df["distance"].fillna(0)
json = []
for _, row in df.iterrows():
feature = {}
for prop in properties:
feature[prop] = row[prop]
json.append(feature)
return json
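# Illustrative output (hypothetical column names): link_df_to_json(df, ["A", "B"])
# returns a plain list of dicts such as [{"A": 1, "B": 2}, {"A": 3, "B": 4}],
# one dict per dataframe row, keeping only the requested properties.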
def topological_sort(adjacency_list, visited_list):
"""
Topological sorting for Acyclic Directed Graph
"""
output_stack = []
def _topology_sort_util(vertex):
if not visited_list[vertex]:
visited_list[vertex] = True
for neighbor in adjacency_list[vertex]:
_topology_sort_util(neighbor)
output_stack.insert(0, vertex)
for vertex in visited_list:
_topology_sort_util(vertex)
return output_stack
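# Illustrative usage (hypothetical vertices): with
#   adjacency_list = {1: [2], 2: [3], 3: []}
#   visited_list = {1: False, 2: False, 3: False}
# topological_sort(adjacency_list, visited_list) returns [1, 2, 3], since each
# vertex is pushed to the front of the stack only after all of its neighbours.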
def make_slug(text, delimiter: str = "_"):
"""
makes a slug from text
"""
import re
text = re.sub("[,.;@#?!&$']+", "", text.lower())
return re.sub("[\ ]+", delimiter, text)
def parse_time_spans(times):
"""
parse time spans into tuples of seconds from midnight
can also be used as an apply function for a pandas series
Parameters
-----------
times: tuple(string) or tuple(int) or list(string) or list(int)
returns
--------
tuple(integer)
time span as seconds from midnight
"""
try:
start_time, end_time = times
except:
msg = "ERROR: times should be a tuple or list of two, got: {}".format(times)
WranglerLogger.error(msg)
raise ValueError(msg)
# If times are strings, convert to int in seconds, else return as ints
if isinstance(start_time, str) and isinstance(end_time, str):
start_time = start_time.strip()
end_time = end_time.strip()
# If time is given without seconds, add 00
if len(start_time) <= 5:
start_time += ":00"
if len(end_time) <= 5:
end_time += ":00"
# Convert times to seconds from midnight (Partride's time storage)
h0, m0, s0 = start_time.split(":")
start_time_sec = int(h0) * 3600 + int(m0) * 60 + int(s0)
h1, m1, s1 = end_time.split(":")
end_time_sec = int(h1) * 3600 + int(m1) * 60 + int(s1)
return (start_time_sec, end_time_sec)
elif isinstance(start_time, int) and isinstance(end_time, int):
return times
else:
WranglerLogger.error("ERROR: times should be ints or strings")
raise ValueError()
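# Illustrative examples (values follow from the conversion above):
#   parse_time_spans(("7:00", "9:00")) -> (25200, 32400)
#   parse_time_spans((25200, 32400)) -> (25200, 32400)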
def get_bearing(lat1, lon1, lat2, lon2):
"""
calculate the bearing (forward azimuth) b/w the two points
returns: bearing in radians
"""
# bearing in degrees
brng = Geodesic.WGS84.Inverse(lat1, lon1, lat2, lon2)["azi1"]
# convert bearing to radians
brng = math.radians(brng)
return brng
def offset_point_with_distance_and_bearing(lat, lon, distance, bearing):
"""
Get the new lat long (in degrees) given current point (lat/lon), distance and bearing
returns: new lat/long
"""
# Earth's radius in meters
radius = 6378137
# convert the lat long from degree to radians
lat_radians = math.radians(lat)
lon_radians = math.radians(lon)
# calculate the new lat long in radians
out_lat_radians = math.asin(
math.sin(lat_radians) * math.cos(distance / radius)
+ math.cos(lat_radians) * math.sin(distance / radius) * math.cos(bearing)
)
out_lon_radians = lon_radians + math.atan2(
math.sin(bearing) * math.sin(distance / radius) * math.cos(lat_radians),
        math.cos(distance / radius) - math.sin(lat_radians) * math.sin(out_lat_radians),
)
# convert the new lat long back to degree
out_lat = math.degrees(out_lat_radians)
out_lon = math.degrees(out_lon_radians)
return (out_lat, out_lon)
def offset_location_reference(location_reference, offset_meters=10):
"""
Creates a new location reference
using the node a and node b of given location reference,
offseting it by 90 degree to the bearing of given location reference
and distance equals to offset_meters
returns: new location_reference with offset
"""
lon_1 = location_reference[0]["point"][0]
lat_1 = location_reference[0]["point"][1]
lon_2 = location_reference[1]["point"][0]
lat_2 = location_reference[1]["point"][1]
bearing = get_bearing(lat_1, lon_1, lat_2, lon_2)
# adding 90 degrees (1.57 radians) to the current bearing
bearing = bearing + 1.57
new_lat_1, new_lon_1 = offset_point_with_distance_and_bearing(
lat_1, lon_1, offset_meters, bearing
)
new_lat_2, new_lon_2 = offset_point_with_distance_and_bearing(
lat_2, lon_2, offset_meters, bearing
)
out_location_reference = [
{"sequence": 1, "point": [new_lon_1, new_lat_1]},
{"sequence": 2, "point": [new_lon_2, new_lat_2]},
]
return out_location_reference
def haversine_distance(origin: list, destination: list):
"""
Calculates haversine distance between two points
Args:
origin: lat/lon for point A
destination: lat/lon for point B
    Returns: float, the distance in miles
"""
lon1, lat1 = origin
lon2, lat2 = destination
radius = 6378137 # meter
dlat = math.radians(lat2 - lat1)
dlon = math.radians(lon2 - lon1)
a = math.sin(dlat / 2) * math.sin(dlat / 2) + math.cos(
math.radians(lat1)
) * math.cos(math.radians(lat2)) * math.sin(dlon / 2) * math.sin(dlon / 2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
d = radius * c # meters
d = d * 0.000621371 # miles
return d
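# Rough sanity check (illustrative coordinates): one degree of latitude is about
# 69 miles, so haversine_distance([-93.0, 45.0], [-93.0, 46.0]) returns ~69.2.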
def create_unique_shape_id(line_string: LineString):
"""
    Creates a unique hash id using the coordinates of the geometry
Args:
line_string: Line Geometry as a LineString
Returns: string
"""
x1, y1 = list(line_string.coords)[0] # first co-ordinate (A node)
x2, y2 = list(line_string.coords)[-1] # last co-ordinate (B node)
message = "Geometry {} {} {} {}".format(x1, y1, x2, y2)
unhashed = message.encode("utf-8")
hash = hashlib.md5(unhashed).hexdigest()
return hash
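# Note (illustrative): only the first and last coordinates feed the hash, so two
# different LineStrings that share both endpoints get the same 32-character hex
# id, e.g. create_unique_shape_id(LineString([(0, 0), (0.5, 2), (1, 1)])) equals
# create_unique_shape_id(LineString([(0, 0), (1, 1)])).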
def create_location_reference_from_nodes(node_a, node_b):
"""
Creates a location reference using the node a and node b coordinates
Args:
node_a: Node A as Series
node_b: Node B as Series
"""
out_location_reference = [
{"sequence": 1, "point": [node_a["X"], node_a["Y"]]},
{"sequence": 2, "point": [node_b["X"], node_b["Y"]]},
]
return out_location_reference
def create_line_string(location_reference: list):
"""
Creates a geometry as a LineString using location reference
"""
return LineString([location_reference[0]["point"], location_reference[1]["point"]])
| python |
import logging
from django.db import models
from jsonfield import JSONField
from django.conf import settings
from model_utils.models import TimeStampedModel
from .constants import LOG_LEVELS, LOG_TYPES
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.utils.timezone import now
logger = logging.getLogger(__name__)
class Log(TimeStampedModel):
case = models.ForeignKey("legalaid.Case")
timer = models.ForeignKey("timer.Timer", null=True, blank=True)
code = models.CharField(db_index=True, max_length=50)
type = models.CharField(db_index=True, choices=LOG_TYPES.CHOICES, max_length=20)
level = models.PositiveSmallIntegerField(db_index=True, choices=LOG_LEVELS.CHOICES)
created_by = models.ForeignKey(settings.AUTH_USER_MODEL)
notes = models.TextField(null=True, blank=True)
# patch is a json field with the following structure:
# {
# "serializer": "<...serializerClass...>"
# "forwards": <...jsonpatch...>,
# "backwards": <...jsonpatch...>
# }
    # where <...jsonpatch...> is a RFC 6902 json patch obj
    # and <...serializerClass...> is the serializer used
    # to create this pair of patches.
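    # A hypothetical value for illustration only:
    # {
    #     "serializer": "CaseSerializer",
    #     "forwards": [{"op": "replace", "path": "/notes", "value": "new"}],
    #     "backwards": [{"op": "replace", "path": "/notes", "value": "old"}]
    # }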
patch = JSONField(null=True, blank=True)
context = JSONField(null=True, blank=True, help_text="Field to store extra event data for reporting")
content_type = models.ForeignKey(ContentType, null=True, blank=True)
object_id = models.PositiveIntegerField(null=True, blank=True)
content_object = GenericForeignKey()
class Meta(object):
ordering = ("-created",)
def __unicode__(self):
return u"%s - %s:%s" % (self.case, self.type, self.code)
def is_consecutive_outcome_today(self):
"""LGA-125 Debounce consecutive outcome codes since start of today"""
case_outcome_codes = Log.objects.filter(case=self.case, level__gte=LOG_LEVELS.HIGH, type=LOG_TYPES.OUTCOME)
start_of_today = now().replace(hour=0, minute=0, second=0, microsecond=0)
try:
latest_outcome_code_today = case_outcome_codes.filter(created__gte=start_of_today).latest("created")
except Log.DoesNotExist:
logger.debug("LGA-125 No outcome codes exist for case today")
else:
codes_match = latest_outcome_code_today.code == self.code
notes_match = latest_outcome_code_today.notes == self.notes
return codes_match and notes_match
return False
def save(self, *args, **kwargs):
if kwargs.pop("save_model_only", False):
return super(Log, self).save(*args, **kwargs)
if self.is_consecutive_outcome_today():
logger.warning("LGA-125 Preventing save of consecutive duplicate outcome code on same day")
return
super(Log, self).save(*args, **kwargs)
if self.type == LOG_TYPES.OUTCOME:
logger.info(
"LGA-293 Saved outcome code {} (Log id: {}, Case ref:{})".format(
self.case.outcome_code, self.id, self.case.reference
)
)
if self.type == LOG_TYPES.OUTCOME and self.level >= LOG_LEVELS.HIGH:
logger.info("LGA-275 Denormalizing outcome event fields to Case (ref:{})".format(self.case.reference))
self.case.outcome_code = self.code
self.case.level = self.level
self.case.outcome_code_id = self.pk
self.case.save(update_fields=["level", "outcome_code_id", "outcome_code", "modified"])
self.case.log_denormalized_outcome_fields()
if self.code == "CASE_VIEWED" and hasattr(self.created_by, "staff"):
self.case.view_by_provider(self.created_by.staff.provider)
class ComplaintLog(Log):
class Meta(Log.Meta):
proxy = True
def __unicode__(self):
return u"%s: %s - %s:%s" % (self.complaint, self.case, self.type, self.code)
@property
def complaint(self):
return self.content_object
| python |
# example = lambda: 'example'
| python |
import os.path
from crds.core import reftypes
HERE = os.path.abspath(os.path.dirname(__file__) or ".")
TYPES = reftypes.from_package_file("roman", __file__)
OBSERVATORY = TYPES.observatory
INSTRUMENTS = TYPES.instruments
EXTENSIONS = TYPES.extensions
TEXT_DESCR = TYPES.text_descr
FILEKINDS = TYPES.filekinds
INSTRUMENT_FIXERS = {
}
TYPE_FIXERS = {
}
# List of keywords whose values are logged in certifier output when "dump provenance" is enabled:
PROVENANCE_KEYWORDS = ("ROMAN.META.DESCRIPTION", "ROMAN.META.PEDIGREE", "ROMAN.META.USEAFTER", "HISTORY", "ROMAN.META.AUTHOR")
USEAFTER_KEYWORDS = ("ROMAN.META.OBSERVATION.DATE", "ROMAN.META.OBSERVATION.TIME") # Dataset keywords matching in UseAfter selectors
DEFAULT_SELECTORS = ("Match", "UseAfter") # Normal selector hierarchy in rmap
| python |
import re
from discord import AuditLogAction, Colour, Embed, Member
from discord.ext.commands import Bot, Cog, Context, command, has_any_role
from cdbot.constants import (
ADMIN_MENTOR_ROLE_ID,
ADMIN_ROLES,
CD_BOT_ROLE_ID,
LOGGING_CHANNEL_ID,
NICKNAME_PATTERNS,
PLACEHOLDER_NICKNAME,
ROOT_ROLE_ID,
STATIC_NICKNAME_ROLE_ID,
SUDO_ROLE_ID
)
def check_bad_name(nick):
for i in NICKNAME_PATTERNS:
if re.match(i, nick, re.IGNORECASE):
return True
return False
class Admin(Cog):
"""
Admin functionality
"""
def __init__(self, bot: Bot):
self.bot = bot
@Cog.listener() # triggered on new/removed nickname
async def on_member_update(self, member_before: Member, member_after: Member):
# get corresponding audit log entry to find who initiated member change
corresponding_audit_entry = None
# get all audit log entries for Member Updated
async for entry in self.bot.guilds[0].audit_logs(
action=AuditLogAction.member_update
):
# if this entry was to the user in question, and was this specific nickname change
if entry.target == member_before and entry.after.nick == member_after.nick:
corresponding_audit_entry = entry
break
if (
corresponding_audit_entry is not None
): # successfully found audit log entry before
# user changed their own nickname; ignore if admin/bot changed it
admin_role_check = (
corresponding_audit_entry.user.top_role.name in ADMIN_ROLES
)
bot_role_check = (
corresponding_audit_entry.user.top_role.id == CD_BOT_ROLE_ID
)
mentor_role_check = (
corresponding_audit_entry.user.top_role.id == ADMIN_MENTOR_ROLE_ID
)
if not (admin_role_check or bot_role_check or mentor_role_check):
for i in member_after.roles:
if i.id == STATIC_NICKNAME_ROLE_ID: # user has Static Name role
await member_after.edit(
nick=member_before.display_name
) # revert nickname
return
else: # check for bad words
new_nickname = member_after.display_name
if check_bad_name(new_nickname): # bad display name
if not check_bad_name(
member_after.name
): # username is okay
await member_after.edit(nick=None) # reset nickname
else:
# assign placeholder nickname
await member_after.edit(nick=PLACEHOLDER_NICKNAME)
@Cog.listener() # triggered on username change
async def on_user_update(self, member_before: Member, member_after: Member):
new_username = member_after.name
if check_bad_name(new_username): # bad username
# assign placeholder nickname
await member_after.edit(nick=PLACEHOLDER_NICKNAME)
@Cog.listener()
async def on_member_join(self, member: Member):
username = member.name
if check_bad_name(username): # bad username
# assign placeholder nickname
await member.edit(nick=PLACEHOLDER_NICKNAME)
@command()
@has_any_role(ROOT_ROLE_ID, SUDO_ROLE_ID)
async def raid(
self,
ctx: Context,
operand: str = ""
):
"""
Allows an admin user to lock down the server in case of a raid.
This command toggles invite link generation for @everyone and
revokes all existing invite links.
"""
everyone = ctx.channel.guild.default_role
perms = everyone.permissions
enabled = not perms.create_instant_invite
logs_channel = self.bot.get_channel(LOGGING_CHANNEL_ID)
operand = operand.lower()
boolonoff = ("OFF", "ON")
action = True
embed = None
if not operand: # status query
await ctx.send(f"Raid protection currently {boolonoff[enabled]}. Use `:raid [on/off]` to toggle.")
action = False
elif operand in ("on", "yes") and not enabled: # need to turn it on
enabled = True
perms.update(create_instant_invite=False)
embed = Embed(
color=Colour.blue(),
title="Raid Protection ON.",
description=("Raid protection now ON - All invite links were"
" deleted and members may not create new ones")
)
for invite in await ctx.channel.guild.invites(): # delete links
await invite.delete()
elif operand in ("off", "no") and enabled:
enabled = False
perms.update(create_instant_invite=True)
embed = Embed(
color=Colour.blue(),
title="Raid Protection OFF.",
description=("Raid protection now OFF - Members can now create"
" new invite links")
)
else: # no changes
await ctx.send(f"Raid protection {boolonoff[enabled]}, nothing was changed.")
action = False
if action: # if we toggled it
msg = f"{ctx.author.name} toggled raid protection {boolonoff[enabled]}."
await everyone.edit(reason=msg, permissions=perms) # make the perm change
await ctx.send(msg) # direct response to invocation
embed.set_author(name=ctx.author.name, icon_url=ctx.author.avatar_url)
await logs_channel.send(embed=embed) # log the event
def setup(bot):
bot.add_cog(Admin(bot))
| python |
class Position:
def __init__(self, index, lineno, column):
# This is for tracking the position of the
# Lexer in the whole source
self.index = index
# This is for tracking new lines
self.lineno = lineno
# This is for tracking the position of the
# Lexer in the current line
self.column = column
def advance(self, current_char = None):
# Increment the position of the lexer by one in the whole file
self.index += 1
# Increment the position of the lexer by one in the current line
self.column += 1
# Increment the lineno and reset the column back
# To zero if its encounters newline
if current_char == "\n":
self.lineno += 1
self.column = 1
return self
def copy(self):
return Position(self.index, self.lineno, self.column) | python |
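# Illustrative usage (hypothetical source text):
#   pos = Position(0, 1, 1)
#   pos.advance("a")   # index=1, lineno=1, column=2
#   pos.advance("\n")  # index=2, lineno=2, column=1 (newline resets the column)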
from __future__ import print_function
import argparse
import os
import matplotlib.pyplot as plt
import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import transforms
from datasets.breeds import BREEDSFactory
from models.util import create_model, load_model
def parse_option():
parser = argparse.ArgumentParser('argument for training')
# load pretrained model
parser.add_argument('--model', type=str, default='resnet12')
parser.add_argument('--model_path', type=str, default=None, help='absolute path to .pth model')
# dataset
    parser.add_argument('--dataset', type=str, default='miniImageNet')
# parser.add_argument('--transform', type=str, default='A', choices=transforms_list)
# specify data_root
parser.add_argument('--data_root', type=str, default='', help='path to data root')
# meta setting
parser.add_argument('--n_test_runs', type=int, default=1000, metavar='N',
help='Number of test runs')
parser.add_argument('--n_ways', type=int, default=5, metavar='N',
help='Number of classes for doing each classification run')
parser.add_argument('--n_shots', type=int, default=1, metavar='N',
help='Number of shots in test')
parser.add_argument('--n_queries', type=int, default=15, metavar='N',
help='Number of query in test')
parser.add_argument('--n_aug_support_samples', default=5, type=int,
help='The number of augmented samples for each meta test sample')
parser.add_argument('--num_workers', type=int, default=3, metavar='N',
help='Number of workers for dataloader')
parser.add_argument('--test_batch_size', type=int, default=1, metavar='test_batch_size',
help='Size of test batch)')
parser.add_argument('-b', dest='batch_size', type=int)
parser.add_argument('--mode', type=str, required=True, choices=['coarse', 'fine'])
parser.add_argument('--only-base', action='store_true')
parser.add_argument('--partition', type=str, required=True, choices=['train', 'test', 'validation'])
parser.add_argument('--gpu', default=0, type=int,
help='GPU id to use.')
# ===========IRRELEVANT===============
parser.add_argument('--dim', type=int, default=128)
parser.add_argument('--head', default=None)
parser.add_argument('--fg', action='store_true')
parser.add_argument('--simclr', action='store_true')
parser.add_argument('--cascade', action='store_true')
opt = parser.parse_args()
if 'trainval' in opt.model_path:
opt.use_trainval = True
else:
opt.use_trainval = False
opt.data_aug = True
return opt
def main():
args = parse_option()
train_dataset, n_cls = get_datasets(args)
train_loader = DataLoader(train_dataset,
batch_size=args.batch_size, shuffle=True, drop_last=False,
num_workers=args.num_workers)
model = create_model(args.model, n_cls, args.only_base, args.head, args.dim)
load_model(model, args.model_path, not args.only_base)
if torch.cuda.is_available():
torch.cuda.set_device(args.gpu)
model = model.cuda()
cudnn.benchmark = True
for i, (images, labels) in enumerate(train_loader):
if args.gpu is not None:
images = images.cuda(args.gpu)
def attention_forward(encoder, imgs):
# hard-coded forward because we need the feature-map and not the finalized feature
x = encoder.conv1(imgs)
x = encoder.bn1(x)
x = encoder.relu(x)
x = encoder.maxpool(x)
x = encoder.layer1(x)
x = encoder.layer2(x)
x = encoder.layer3(x)
feats = encoder.layer4(x)
feats_as_batch = feats.permute((0, 2, 3, 1)).contiguous().view((-1, feats.shape[1]))
# reminder: "fc" layer outputs: (feature, class logits)
feats_as_batch = encoder.fc(feats_as_batch)[0]
feats_as_batch = feats_as_batch.view(
(feats.shape[0], feats.shape[2], feats.shape[3], feats_as_batch.shape[1]))
feats_as_batch = feats_as_batch.permute((0, 3, 1, 2))
return feats_as_batch
f_q = attention_forward(model, images)
localization(images, f_q, args.batch_size, batch_id=i, img_size=448)
if i == 10:
break
def get_datasets(args):
augs = [
transforms.RandomResizedCrop(448, scale=(0.2, 1.)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4717, 0.4499, 0.3837], std=[0.2600, 0.2516, 0.2575])
]
if args.dataset in ['living17', 'nonliving26', 'entity30', 'entity13']:
breeds_factory = BREEDSFactory(info_dir=os.path.join(args.data_root, "BREEDS"),
data_dir=os.path.join(args.data_root, "Data", "CLS-LOC"))
train_dataset = breeds_factory.get_breeds(ds_name=args.dataset, partition=args.partition, mode=args.mode,
transforms=transforms.Compose(augs))
n_cls = int(args.dataset[-2:])
else:
raise NotImplementedError(args.dataset)
return train_dataset, n_cls
def localization(im_q, f_q, batch_size, batch_id, img_size):
os.makedirs('imgs', exist_ok=True)
for idd in range(batch_size):
aa = torch.norm(f_q, dim=1)
imgg = im_q[idd] * torch.Tensor([[[0.229, 0.224, 0.225]]]).view(
(1, 3, 1, 1)).cuda() + torch.Tensor(
[[[0.485, 0.456, 0.406]]]).view((1, 3, 1, 1)).cuda()
heatmap = F.interpolate((aa[idd] / aa[0].max()).detach().unsqueeze(0).unsqueeze(0).repeat((1, 3, 1, 1)),
[img_size, img_size])
thresh = 0
heatmap[heatmap < thresh] = 0
plt.imsave(f'imgs/bImg_{idd}_batch_{batch_id}.png',
torch.cat((imgg, heatmap * imgg), dim=3).squeeze(0).cpu().permute(
(1, 2, 0)).clamp(0, 1).numpy().astype(float))
if __name__ == '__main__':
main()
| python |
import os
from datetime import datetime
from polyaxon_client.tracking import get_outputs_path
def define_prepare_tb_path():
logdir_tb = os.path.join(".", "tf_logs", "scalars") # ".\\tf_logs\\scalars\\"
outputs_path = get_outputs_path()
if outputs_path is not None: # polyaxon behavior
logdir_tb = outputs_path + "/" + logdir_tb
else: # local behavior
logdir_tb = logdir_tb + datetime.now().strftime("%Y%m%d-%H%M%S")
return logdir_tb
def define_prepare_mdl_path(plx):
logdir_mdl = "mdl_chkpts/"
outputs_path = get_outputs_path()
if outputs_path is not None: # polyaxon behavior
logdir_mdl = outputs_path + "/" + logdir_mdl
if not os.path.exists(logdir_mdl):
try:
os.mkdir(logdir_mdl)
except OSError:
print("Creation of the directory %s failed" % logdir_mdl)
else:
print("Successfully created the directory %s " % logdir_mdl)
file_path_mdl = logdir_mdl + plx.get('mdl_architecture') + '_' + plx.get('eng_kind') + ".hdf5"
# >>> @sp - add untrained model path
file_path_raw_mdl = logdir_mdl + plx.get('mdl_architecture') + '_' + 'untrained' + ".hdf5"
return file_path_mdl, file_path_raw_mdl
# <<< @sp
| python |
# This Python file uses the following encoding: utf-8
# !/usr/local/bin/python3.4
####################################################
# <Copyright (C) 2012, 2013, 2014, 2015 Yeray Alvarez Romero>
# This file is part of MULLPY.
####################################################
import numpy as np
from mullpy.patterns import Pattern
from mullpy.auxiliar import AutoVivification, path_exists
class PreProcess():
"""
Scheduler of the PreProcess execution.
"""
def schedule(self, context):
#TODO: connect to other libraries with a strong preprocessing library
for preprocess_function in context["preprocess"].keys():
if context["preprocess"][preprocess_function]["activate"]:
getattr(self, preprocess_function)(context)
#########################################################################
@staticmethod
def transform_multilabel_to_n_classes(context):
for classifier_name in context["classifiers"].keys():
Pattern(context).transform_multilabel_to_N_classes(context, classifier_name)
#########################################################################
@staticmethod
def bagging(context, filters, lengths, total_length):
learning_length = lengths["learning"]
for i in range(context["preprocess"]["random_distribution"]["number_base_classifiers"]):
temp = []
while len(set(temp)) != learning_length:
temp.append(np.random.randint(0, total_length))
filters["learning"].append(temp)
filters["validation"].append([x for x in range(total_length) if x not in set(filters["learning"][i])])
#########################################################################
@staticmethod
def pasting_rvotes(context, filters, lengths, total_length):
learning_length = lengths["learning"]
for i in range(context["preprocess"]["random_distribution"]["number_base_classifiers"]):
temp = []
while len(temp) != learning_length:
value = np.random.randint(0, total_length)
if value not in temp:
temp.append(value)
filters["learning"].append(temp)
filters["validation"].append([x for x in range(total_length) if x not in temp])
#########################################################################
@staticmethod
def all_features_combination(context, filters, characteristics_length):
import itertools
min_characteristics = context["preprocess"]["random_distribution"]["all_features_combination"][
"min_characteristics"]
max_characteristics = context["preprocess"]["random_distribution"]["all_features_combination"][
"max_characteristics"]
for characteristics_amount in range(min_characteristics, max_characteristics + 1):
temporal = list(itertools.combinations(range(characteristics_length), characteristics_amount))
for t in temporal:
filters["learning"].append(list(t))
# filters["test"] = filters["learning"]
filters["validation"] = filters["learning"]
#########################################################################
@staticmethod
def random_subspaces(context, filters, characteristics_length):
for i in range(context["preprocess"]["random_distribution"]["number_base_classifiers"]):
temp = []
characteristics_amount = np.random.randint(
context["preprocess"]["random_distribution"]["random_subspaces"]["min_characteristics"],
characteristics_length)
while len(temp) != characteristics_amount:
temp.append(np.random.randint(0, characteristics_length))
filters["learning"].append(temp)
# filters["test"] = filters["learning"]
filters["validation"] = filters["learning"]
#########################################################################
@staticmethod
def classes_indexes(context, data_set):
classes_indexes = AutoVivification()
classes_texts = context["classifiers"][context["classifier_list"][0]]["classes_names"]
len_inputs = len(data_set[0]) - len(classes_texts)
for class_text in classes_texts:
column = [data_set[i][len_inputs + classes_texts.index(class_text)] for i in range(len(data_set))]
classes_indexes[class_text] = column
return classes_indexes
#########################################################################
@staticmethod
def classes_counter_indexes(context, data_set):
classes_counter = AutoVivification()
classes_indexes = AutoVivification()
classes_texts = context["classifiers"][context["classifier_list"][0]]["classes_names"]
len_inputs = len(data_set[0]) - len(classes_texts)
for class_text in classes_texts:
column = [data_set[i][len_inputs + classes_texts.index(class_text)] for i in range(len(data_set))]
classes_counter[class_text] = np.sum(column)
classes_indexes[class_text] = column
return classes_counter, classes_indexes
#########################################################################
@staticmethod
def forecasting_distribution(context, filters):
data_set = context["patterns"].patterns[context["classifier_list"][0]]["learning"]
validation_size = context["preprocess"]["random_distribution"]["forecasting_distribution"]["validation_size"]
activate = context["preprocess"]["random_distribution"]["forecasting_distribution"]["walking_forward"]
folds = context["preprocess"]["random_distribution"]["number_base_classifiers"]
filters["learning"] = []
filters["validation"] = []
        if activate != 0:
for fold in range(folds):
filters["learning"].append([i for i in range(fold * validation_size,
len(data_set) - validation_size * (folds - fold))])
filters["validation"].append([i for i in range(len(data_set) - validation_size * (folds - fold),
len(data_set) - validation_size * (folds - fold) +
validation_size)])
else:
filters["learning"].append([i for i in range(0, len(data_set) - validation_size)])
filters["validation"].append([i for i in range(len(data_set) - validation_size, len(data_set))])
return filters
#########################################################################
def binarize_data(self, context):
from sklearn.preprocessing import LabelBinarizer
pattern_kind = "learning"
lb = LabelBinarizer()
inputs_len = len(context["patterns"].patterns[context["classifier_list"][0]][pattern_kind][0]) - 1
inputs = [x[:, range(inputs_len)] for x in
context["patterns"].patterns[context["classifier_list"][0]]["learning"]]
outputs = context["patterns"].patterns[context["classifier_list"][0]][pattern_kind][:, -1]
multilabel_outputs = [(x,) for x in outputs]
lb.fit(multilabel_outputs)
new_outputs = lb.transform(multilabel_outputs)
context["patterns"].patterns[context["classifier_list"][0]][pattern_kind] = []
for i, input in enumerate(inputs):
temp = [x for x in inputs[i]]
temp.extend(new_outputs[i])
context["patterns"].patterns[context["classifier_list"][0]]["learning"].append(temp)
dir_name = context["general_path"] + "patterns/" + context["classifiers"][context["classifier_list"][0]]["set"]
file_name = dir_name + "/" + pattern_kind + "_binarized" + ".pat"
context["patterns"].create_new_patterns(context, context["classifier_list"][0], pattern_kind, file_name)
#########################################################################
def k_fold(self, context, filters):
classes_texts = context["classifiers"][context["classifier_list"][0]]["classes_names"]
num_instances = sum([len(context["patterns"].patterns[context["classifier_list"][0]][x])
for x in context["patterns"].patterns[context["classifier_list"][0]]])
data_set = None
for i, filter_name in enumerate(context["patterns"].patterns[context["classifier_list"][0]].keys()):
if i == 0:
data_set = context["patterns"].patterns[context["classifier_list"][0]][filter_name]
else:
                data_set = np.concatenate(
                    (data_set, context["patterns"].patterns[context["classifier_list"][0]][filter_name]))
total_classes_counter, classes_indexes = self.classes_counter_indexes(context, data_set)
classes_counter = AutoVivification()
min_limit_classes = np.min([total_classes_counter[class_counter] for class_counter in total_classes_counter])
for i in range(context["preprocess"]["random_distribution"]["number_base_classifiers"]):
total_indexes = []
for j, filter_name in enumerate(["learning", "validation"]):
aux_list = []
aux_percent = context["preprocess"]["random_distribution"]["k_fold"]["percents"][filter_name]
if j == len(context["preprocess"]["random_distribution"]["k_fold"]["percents"]) - 1:
filters[filter_name].append([x for x in range(len(data_set)) if x not in total_indexes])
break
else:
if context["preprocess"]["random_distribution"]["k_fold"]["balanced"]:
total_instances = 0
for class_text in context["classifiers"][context["classifier_list"][0]]["classes_names"]:
classes_counter[filter_name][class_text] = np.ceil(aux_percent * min_limit_classes)
total_instances += classes_counter[filter_name][class_text]
else:
total_instances = np.ceil(aux_percent * num_instances)
len_inputs = len(data_set[0]) - len(classes_texts)
while len(aux_list) != total_instances:
value = np.random.randint(0, len(data_set))
if value not in total_indexes:
if context["preprocess"]["random_distribution"]["k_fold"]["balanced"]:
if classes_counter[filter_name][
classes_texts[list(data_set[value][len_inputs:]).index(1)]] > 0:
total_indexes.append(value)
aux_list.append(value)
classes_counter[filter_name][
classes_texts[list(data_set[value][len_inputs:]).index(1)]] -= 1
else:
total_indexes.append(value)
aux_list.append(value)
filters[filter_name].append(aux_list)
#########################################################################
@staticmethod
def check_features_amount(context):
classes_texts = context["classifiers"][context["classifier_list"][0]]["classes_names"]
data_set = context["patterns"].patterns[context["classifier_list"][0]]["learning"]
features_amount = len(data_set[0]) - len(classes_texts)
for classifier_name in context["classifier_list"]:
if features_amount != (len(context["patterns"].patterns[classifier_name]["learning"][0]) -
len(classes_texts)):
raise ValueError("Different lengths in learning patterns of classifier %s and %s" % (
context["classifier_list"][0], classifier_name))
return features_amount
#########################################################################
def random_distribution(self, context):
"""
Bagging methods come in many flavours but mostly differ from each other by the way they draw random subsets
of the training set:
-When random subsets of the dataset are drawn as random subsets of the samples, then this algorithm is known
as Pasting Rvotes.
-When samples are drawn with replacement, then the method is known as Bagging.
-When random subsets of the dataset are drawn as random subsets of the features, then the method is known as
Random Subspaces.
-When base estimators are built on subsets of both samples and features, then the method is known as Random
Patches.
        The group_successive variable groups every X consecutive instances; each of
        these groups has to stay together during the sampling process.
"""
total_length = 0
lengths = AutoVivification()
for pattern_kind in context["patterns"].patterns[context["classifier_list"][0]]:
lengths[pattern_kind] = len(context["patterns"].patterns[context["classifier_list"][0]][pattern_kind])
total_length += lengths[pattern_kind]
        #Check that every pattern kind has the same length for all classifiers
for classifier_name in context["classifier_list"]:
for pattern_kind in context["patterns"].patterns[classifier_name]:
if len(context["patterns"].patterns[classifier_name][pattern_kind]) != lengths[pattern_kind]:
                    raise ValueError(
                        'The length of the %s pattern of classifier %s has different size from others'
                        % (pattern_kind, classifier_name))
if context["preprocess"]["random_distribution"]["group_successive"]:
total_length = int(total_length / context["preprocess"]["random_distribution"]["group_successive"])
for pattern_kind in lengths:
lengths[pattern_kind] = int(
lengths[pattern_kind] / context["preprocess"]["random_distribution"]["group_successive"])
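            # Illustrative sketch of group_successive (the value below is an assumption, not a default):
            # with group_successive = 2, the lengths above are halved and indices are drawn over pairs,
            # so a sampled index 3 stands for original rows 6 and 7; the "Common functions" block at
            # the end of this method expands each drawn index back into its consecutive group.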
dir_name = context["general_path"] + "patterns/" + context["classifiers"][context["classifier_list"][0]]["set"]
filters = AutoVivification()
###Specific kind of sampling###
#############
######BAGGING
#############
if "bagging" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["bagging"]["activate"]:
for pattern_kind in context["patterns_texts"]:
filters[pattern_kind] = []
self.bagging(context, filters, lengths, total_length)
dir_name += "_bagging/"
#############
######PASTING
#############
elif "pasting_Rvotes" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["pasting_Rvotes"]["activate"]:
for pattern_kind in context["patterns_texts"]:
filters[pattern_kind] = []
self.pasting_rvotes(context, filters, lengths, total_length)
dir_name += "_pasting_Rvotes/"
#################
#RANDOM SUBSPACES
#################
elif "random_subspaces" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["random_subspaces"]["activate"]:
features_amount = self.check_features_amount(context)
for pattern_kind in context["patterns_texts"]:
filters[pattern_kind] = []
self.random_subspaces(context, filters, features_amount)
dir_name += "_random_subspaces/"
#############
#COMBINATIONS
#############
elif "all_features_combination" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["all_features_combination"]["activate"]:
features_amount = self.check_features_amount(context)
for pattern_kind in context["patterns_texts"]:
filters[pattern_kind] = []
self.all_features_combination(context, filters, features_amount)
dir_name += "_features_combination/"
context["preprocess"]["random_distribution"]["number_base_classifiers"] = len(filters["learning"])
###############
#RANDOM PATCHES
###############
elif "random_patches" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["random_patches"]["activate"]:
dir_name += "_random_patches/"
###############
#K-FOLD
###############
elif "k_fold" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["k_fold"]["activate"]:
for pattern_kind in context["preprocess"]["random_distribution"]["k_fold"]["percents"]:
filters[pattern_kind] = []
self.k_fold(context, filters)
dir_name += "_k_fold/"
###############
#Forecasting distribution
###############
elif "forecasting_distribution" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["forecasting_distribution"]["activate"]:
self.forecasting_distribution(context, filters)
dir_name += "_walking_forward/"
###Common functions###
        if "bagging" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["bagging"]["activate"] \
or "pasting_Rvotes" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["pasting_Rvotes"]["activate"]:
if context["preprocess"]["random_distribution"]["group_successive"]:
for kind_of in filters:
for filter in filters[kind_of]:
for i in range(len(filter)):
filter[i] = (
filter[i] * context["preprocess"]["random_distribution"]["group_successive"])
for j in range(1, context["preprocess"]["random_distribution"]["group_successive"]):
filter.append(filter[i] + j)
path_exists(dir_name)
self._generate_new_patterns_random_distribution(context, filters, dir_name)
#########################################################################
@staticmethod
def _generate_new_patterns_random_distribution(context, filters, dir_name):
for classifier_name in context["classifiers"].keys():
all_patterns = [context["patterns"].patterns[classifier_name][pattern_kind][i]
for pattern_kind in context["patterns"].patterns[classifier_name].keys()
for i in range(len(context["patterns"].patterns[classifier_name][pattern_kind]))]
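            # Flatten every pattern kind of this classifier into a single indexable list so the
            # integer indices produced by the sampling filters can address any instance directly.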
for pattern_kind in filters:
for number in range(context["preprocess"]["random_distribution"]["number_base_classifiers"]):
file_name = dir_name + "/" + pattern_kind + "_" + str(number) + ".pat"
if "random_subspaces" in context["preprocess"]["random_distribution"] and \
context["preprocess"]["random_distribution"]["random_subspaces"]["activate"] or \
"all_features_combination" in context["preprocess"][
"random_distribution"] and \
context["preprocess"]["random_distribution"]["all_features_combination"][
"activate"]:
temporal_pattern = context["patterns"].patterns[classifier_name][pattern_kind]
new_pattern = context["patterns"]. \
filter_characteristics(classifier_name, pattern_kind, filters[pattern_kind][number])
context["patterns"].modify_patterns_temporally(classifier_name, pattern_kind, new_pattern)
context["patterns"].create_new_patterns(context, classifier_name, pattern_kind, file_name)
context["patterns"].modify_patterns_temporally(classifier_name, pattern_kind,
temporal_pattern)
else:
new_pattern = np.asarray([all_patterns[i] for i in filters[pattern_kind][number]])
context["patterns"].modify_patterns_temporally(classifier_name, pattern_kind, new_pattern)
context["patterns"].create_new_patterns(context, classifier_name, pattern_kind, file_name)
#########################################################################
@staticmethod
def create_data_transformer(classifier_name, context, list_divided):
from mullpy.auxiliar import check_equal_classifier_patterns
for pattern_kind in context["patterns_texts"]:
for classifier_name_2 in list_divided:
if check_equal_classifier_patterns(context, classifier_name, classifier_name_2, pattern_kind):
context["classifiers"][classifier_name]["transformer"] = \
context["classifiers"][classifier_name_2]["transformer"]
return
from sklearn import preprocessing
if "learning" not in context["patterns_texts"]:
raise ValueError("Learning set is not defined in patterns_texts")
learning_set = context["patterns"].patterns[classifier_name]["learning"]
classes_texts = context["classifiers"][classifier_name]["classes_names"]
len_inputs = len(learning_set[0]) - len(classes_texts)
# classes_texts = context["classifiers"][classifier_name]["classes_names"]
# if "deployment" in context["execution_kind"]:
# len_inputs = len(learning_set[0])
# else:
# len_inputs = len(learning_set[0]) - len(classes_texts)
#
# #Check regression or classification type, to get all the features with class included or not
# if context["ml_paradigm"] == "regression":
# inputs_learning = learning_set
# elif context["ml_paradigm"] == "classification":
# inputs_learning = learning_set[:, range(len_inputs)]
# else:
# raise Exception("bad definition of variable ml_paradigm")
if "args" in context["classifiers"][classifier_name]["data_transformation"]:
args = context["classifiers"][classifier_name]["data_transformation"]["args"]
else:
args = {}
context["classifiers"][classifier_name]["transformer"] = \
getattr(preprocessing, context["classifiers"][classifier_name]["data_transformation"]["kind"])(
**args).fit(learning_set[:, range(len_inputs)])
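        # Hedged sketch: with a data_transformation kind of "StandardScaler" and no args (an assumed
        # configuration, not a project default), the statement above is roughly equivalent to
        #     preprocessing.StandardScaler().fit(learning_set[:, :len_inputs])
        # and the fitted transformer is later applied by apply_data_transformation().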
#########################################################################
@staticmethod
def apply_data_transformation(classifier_name, context, pattern_kind):
"""
        Performs the data transformation of a classifier, copying it from another classifier if one exists and corresponds.
        :param classifier_name:
        :param context:
        :param pattern_kind:
        :return:
"""
if "deployment" in context["execution_kind"]:
if context["ml_paradigm"] == "classification":
len_inputs = len(context["patterns"].patterns[classifier_name][pattern_kind][0])
else:
if context["ml_paradigm"] == "classification":
len_classes = len(context["classifiers"][classifier_name]["classes_names"])
len_inputs = len(context["patterns"].patterns[classifier_name]["learning"][0]) - len_classes
for i, instance in enumerate(context["patterns"].patterns[classifier_name][pattern_kind]):
if context["ml_paradigm"] == "regression":
context["patterns"].patterns[classifier_name][pattern_kind] = \
context["classifiers"][classifier_name]["transformer"].transform(instance)
elif context["ml_paradigm"] == "classification":
instance[:len_inputs] = \
context["classifiers"][classifier_name]["transformer"].transform(instance[:len_inputs])
else:
raise NameError("ml_paradigm not valid")
#########################################################################
def create_data_transformation(self, classifier_name, list_divided, out_q, context):
self.create_data_transformer(classifier_name[0], context, list_divided)
if out_q is not None:
out_q.put([context["patterns"].patterns, context["classifiers"]])
out_q.close()
#########################################################################
@staticmethod
def points2series(context):
import pandas as pd
from mullpy.auxiliar import csv2pat
import sys
import os
serie_points_amount = context["preprocess"]["points2series"]["serie_size"]
input_file = context["preprocess"]["points2series"]["input_file"]
output_file = context["preprocess"]["points2series"]["output_file"]
class_variable = context["preprocess"]["points2series"]["class_variable"]
series_limit = context["preprocess"]["points2series"]["series_limit"]
# TODO: Add support for multiple class variables. Now classes_len = 1
classes_len = 1
defined_features_list = context["preprocess"]["points2series"]["columns"]
if defined_features_list == "all":
input_df = pd.read_csv(input_file)
defined_features_list = input_df.columns
else:
defined_features_list.append(class_variable)
input_df = pd.read_csv(input_file, usecols=defined_features_list)
# We have to take only the (series_limit + series_size) last points of input_df
input_df_last = input_df.iloc[len(input_df) - (series_limit + serie_points_amount):].reset_index(drop=True)
# Building output columns list defined_features_list
features_list = []
for i in range(serie_points_amount):
for j in range(len(defined_features_list)):
features_list.append("%s_%d" % (defined_features_list[j].upper(), i))
# Adding last column, that is class variable.
if "deployment" not in context["execution_kind"]:
features_list.append("%s_%s" % (class_variable.upper(), "CLASS"))
output_df = pd.DataFrame(columns=features_list, dtype=np.float32)
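        # Illustrative layout (assuming columns == "all" and a csv whose only column is "close",
        # which is also class_variable): with serie_size=3 the output columns become
        # CLOSE_0, CLOSE_1, CLOSE_2, CLOSE_CLASS, i.e. a sliding window of three consecutive
        # points plus the following point as the target.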
if "deployment" not in context["execution_kind"]:
iteration = range(len(input_df_last) - serie_points_amount)
else:
iteration = range(1, len(input_df_last) - serie_points_amount + 1)
for i in iteration:
# Percentage completed
if "deployment" not in context["execution_kind"]:
sys.stdout.write("\r{0}".format("Loaded:%f%%" % (i * 100 / (len(input_df_last) - serie_points_amount))))
sys.stdout.flush()
#Iterate over a numpy row in order to optimize the performance
row = np.zeros((1, len(features_list)), dtype=np.float32)
j, z = 0, 0
for j in range(serie_points_amount):
for column in defined_features_list:
# We have to test if the exchange value was correctly given (between 1 and 2 in those dates)
row[0, z] = input_df_last.iloc[i + j][column]
z += 1
if "deployment" not in context["execution_kind"]:
row[0, z] = PreProcess.check_eurusd_values(input_df_last[class_variable][i + serie_points_amount])
output_df.loc[i] = row
#Check the variable series_limit and break the for if the amount of rows was reached
if series_limit is not None and i + 1 >= series_limit:
break
#Create the dataFrame to output the csv
# output_df = pd.DataFrame(matrix, columns=features_list)
# Building csv and pat files
file_name = output_file + ".csv"
path_exists(os.path.dirname(file_name))
output_df.to_csv(file_name, index=False)
if context["preprocess"]["points2series"]["to_pat"]:
csv2pat(file_name, classes_len)
if not context["preprocess"]["points2series"]["to_csv"]:
os.remove(file_name)
# Displaying info
serie_name = output_file[output_file.rfind("/") + 1:]
serie_path = output_file[:output_file.rfind("/")]
if "deployment" not in context["execution_kind"]:
print("\n%s pattern files built at %s" % (serie_name, serie_path))
#########################################################################
@staticmethod
def check_eurusd_values(value):
        # Sanity check for EUR/USD quotes (expected to lie between 1 and 2 in those dates).
        # NOTE: the normalization below is currently disabled; the raw value is returned
        # as-is and the remaining branch is unreachable.
        return value
if value > 1000:
return value / 1000.
else:
return value
| python |
import functools
import sys
__all__ = ('NiceDecorator',)
def available_attrs(fn):
"""
Return the list of functools-wrappable attributes on a callable.
This is required as a workaround for http://bugs.python.org/issue3445
under Python 2.
"""
if sys.version > '3.':
return functools.WRAPPER_ASSIGNMENTS
else:
return tuple(a for a in functools.WRAPPER_ASSIGNMENTS if hasattr(fn, a))
def wraps(fn, **kwargs):
"""
Wraps plain functools.wraps to workaround http://bugs.python.org/issue3445 which
means __call__ methods make it explode.
"""
return functools.wraps(fn, assigned=available_attrs(fn), **kwargs)
def with_metaclass(meta, base=object):
"""
Create a base class with a metaclass.
Required to support both the Python 2 and 3 ways of doing metaclasses.
"""
return meta("NewBase", (base,), {})
class NiceDecoratorMeta(type):
def __call__(self, *args, **kwargs):
# yeah, this is confusing...
# `self`: a NiceDecoratorMeta *instance*, ie NiceDecorator or a subclass
# `args`, `kwargs`: arguments that we're going to pass to
# NiceDecorator.__init__ eventually (i.e. decorator arguments)
args = list(args)
def decorate(func):
decorated = super(NiceDecoratorMeta, self).__call__(func, *args, **kwargs)
return wraps(func, updated=())(decorated)
is_decorator_factory = self.is_decorator_factory
if is_decorator_factory is None:
# auto-detect whether this is a decorator factory.
is_decorator_factory = not (len(args) == 1 and callable(args[0]) and not kwargs)
if is_decorator_factory:
# decorator factory, like @dec()
return decorate
else:
# plain decorator, like @dec
func = args.pop(0)
return decorate(func)
class NiceDecorator(with_metaclass(NiceDecoratorMeta, base=object)):
"""
Base class for class-based decorators.
Subclasses should define a `__call__` method which takes the same args
as the function. It may call `self.func` which is the original function.
If the decorator takes arguments, you should also override __init__()
to accept them.
Example:
class debug_call(NiceDecorator):
def __init__(self, func, a_decorator_kwarg=None):
super(debug_call, self).__init__(func)
self.a_decorator_kwarg = a_decorator_kwarg
def __call__(self, *args, **kwargs):
print "decorated with a_decorator_kwarg=%s" % self.a_decorator_kwarg
print "calling func", args, kwargs
self.func(*args, **kwargs)
print "returning"
Notes:
* Works with functions, no worries.
* When used with instance methods, the instance is passed as the
second argument to the decorator's __call__ method.
That's fine if you're just dumbly passing (*args, **kwargs) to the decorated
function, but otherwise you should use something like
django.utils.decorators.method_decorator to prevent this from happening.
* Works with classmethods, but same caveat as instance methods, and also this
decorator must be inside the @classmethod decorator. i.e.:
@classmethod
@mydecorator
def foo(cls):
pass
"""
__metaclass__ = NiceDecoratorMeta
# if this is set to None, the decorator will try to detect
# whether it has been called as @decorator or @decorator().
# Set this to True if your decorator-factory needs to accept a
# single callable argument, since that will muck up the detection.
is_decorator_factory = None
def __init__(self, func):
if isinstance(func, classmethod):
raise TypeError(
"@classmethod must be outside %s decorator" %
self.__class__.__name__
)
self.func = func
def __get__(self, instance, klass):
"""Support instance methods."""
func = functools.partial(self.__call__, instance)
return wraps(self.func)(func)
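# Minimal usage sketch (illustrative addition; the `trace` decorator and `add` function are
# hypothetical examples, not part of the original module).
if __name__ == "__main__":
    class trace(NiceDecorator):
        def __call__(self, *args, **kwargs):
            print("calling %s" % self.func.__name__)
            return self.func(*args, **kwargs)

    @trace            # plain @dec form: detected automatically, no factory call needed
    def add(a, b):
        return a + b

    print(add(1, 2))  # prints "calling add", then 3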
| python |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import *
from nnunet.paths import network_training_output_dir
if __name__ == "__main__":
# run collect_all_fold0_results_and_summarize_in_one_csv.py first
summary_files_dir = join(network_training_output_dir, "summary_jsons_fold0_new")
output_file = join(network_training_output_dir, "summary_cascade.csv")
folds = (0, )
folds_str = ""
for f in folds:
folds_str += str(f)
plans = "nnUNetPlansv2.1"
overwrite_plans = {
'nnUNetTrainerCascadeFullRes': ['nnUNetPlans'],
}
trainers = [
'nnUNetTrainerCascadeFullRes',
'nnUNetTrainerV2CascadeFullRes_EducatedGuess',
'nnUNetTrainerV2CascadeFullRes_EducatedGuess2',
'nnUNetTrainerV2CascadeFullRes_EducatedGuess3',
'nnUNetTrainerV2CascadeFullRes_lowerLR',
'nnUNetTrainerV2CascadeFullRes',
'nnUNetTrainerV2CascadeFullRes_noConnComp',
'nnUNetTrainerV2CascadeFullRes_shorter_lowerLR',
'nnUNetTrainerV2CascadeFullRes_shorter',
'nnUNetTrainerV2CascadeFullRes_smallerBinStrel',
#'',
#'',
#'',
#'',
#'',
#'',
]
datasets = \
{
"Task003_Liver": ("3d_cascade_fullres", ),
"Task006_Lung": ("3d_cascade_fullres", ),
"Task007_Pancreas": ("3d_cascade_fullres", ),
"Task008_HepaticVessel": ("3d_cascade_fullres", ),
"Task009_Spleen": ("3d_cascade_fullres", ),
"Task010_Colon": ("3d_cascade_fullres", ),
"Task017_AbdominalOrganSegmentation": ("3d_cascade_fullres", ),
#"Task029_LITS": ("3d_cascade_fullres", ),
"Task048_KiTS_clean": ("3d_cascade_fullres", ),
"Task055_SegTHOR": ("3d_cascade_fullres", ),
"Task056_VerSe": ("3d_cascade_fullres", ),
#"": ("3d_cascade_fullres", ),
}
expected_validation_folder = "validation_raw"
alternative_validation_folder = "validation"
alternative_alternative_validation_folder = "validation_tiledTrue_doMirror_True"
interested_in = "mean"
result_per_dataset = {}
for d in datasets:
result_per_dataset[d] = {}
for c in datasets[d]:
result_per_dataset[d][c] = []
valid_trainers = []
all_trainers = []
with open(output_file, 'w') as f:
f.write("trainer,")
for t in datasets.keys():
s = t[4:7]
for c in datasets[t]:
s1 = s + "_" + c[3]
f.write("%s," % s1)
f.write("\n")
for trainer in trainers:
trainer_plans = [plans]
if trainer in overwrite_plans.keys():
trainer_plans = overwrite_plans[trainer]
result_per_dataset_here = {}
for d in datasets:
result_per_dataset_here[d] = {}
for p in trainer_plans:
name = "%s__%s" % (trainer, p)
all_present = True
all_trainers.append(name)
f.write("%s," % name)
for dataset in datasets.keys():
for configuration in datasets[dataset]:
summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (dataset, configuration, trainer, p, expected_validation_folder, folds_str))
if not isfile(summary_file):
summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (dataset, configuration, trainer, p, alternative_validation_folder, folds_str))
if not isfile(summary_file):
summary_file = join(summary_files_dir, "%s__%s__%s__%s__%s__%s.json" % (
dataset, configuration, trainer, p, alternative_alternative_validation_folder, folds_str))
if not isfile(summary_file):
all_present = False
print(name, dataset, configuration, "has missing summary file")
if isfile(summary_file):
result = load_json(summary_file)['results'][interested_in]['mean']['Dice']
result_per_dataset_here[dataset][configuration] = result
f.write("%02.4f," % result)
else:
f.write("NA,")
result_per_dataset_here[dataset][configuration] = 0
f.write("\n")
if True:
valid_trainers.append(name)
for d in datasets:
for c in datasets[d]:
result_per_dataset[d][c].append(result_per_dataset_here[d][c])
invalid_trainers = [i for i in all_trainers if i not in valid_trainers]
num_valid = len(valid_trainers)
num_datasets = len(datasets.keys())
# create an array that is trainer x dataset. If more than one configuration is there then use the best metric across the two
all_res = np.zeros((num_valid, num_datasets))
for j, d in enumerate(datasets.keys()):
ks = list(result_per_dataset[d].keys())
tmp = result_per_dataset[d][ks[0]]
for k in ks[1:]:
for i in range(len(tmp)):
tmp[i] = max(tmp[i], result_per_dataset[d][k][i])
all_res[:, j] = tmp
ranks_arr = np.zeros_like(all_res)
for d in range(ranks_arr.shape[1]):
temp = np.argsort(all_res[:, d])[::-1] # inverse because we want the highest dice to be rank0
ranks = np.empty_like(temp)
ranks[temp] = np.arange(len(temp))
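        # e.g. Dice scores [0.7, 0.9, 0.8] -> descending argsort gives [1, 2, 0], so the
        # resulting ranks are [2, 0, 1]; rank 0 marks the best trainer for this dataset.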
ranks_arr[:, d] = ranks
mn = np.mean(ranks_arr, 1)
for i in np.argsort(mn):
print(mn[i], valid_trainers[i])
print()
print(valid_trainers[np.argmin(mn)])
| python |
"""
Authors: Elena Vasileva, Zoran Ivanovski
E-mail: [email protected], [email protected]
Course: Mashinski vid (Machine Vision), FEEIT, Spring 2021
Date: 09.03.2021
Description: function library
model operations: construction, loading, saving
Python version: 3.6
"""
# python imports
from keras.models import Sequential, Model, model_from_json
from keras.layers import Dense, Dropout, Flatten, Conv2D, Conv2DTranspose, MaxPool2D, UpSampling2D, BatchNormalization, Input, ZeroPadding2D, Concatenate
def load_model(model_path, weights_path):
"""
loads a pre-trained model configuration and calculated weights
:param model_path: path of the serialized model configuration file (.json) [string]
:param weights_path: path of the serialized model weights file (.h5) [string]
:return: model - keras model object
"""
# --- load model configuration ---
json_file = open(model_path, 'r')
model_json = json_file.read()
json_file.close()
model = model_from_json(model_json) # load model architecture
model.load_weights(weights_path) # load weights
return model
def construct_model(num_classes):
"""
construct model architecture
:param num_classes: number of output classes of the model [int]
:return: model - Keras model object
"""
model = Sequential()
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(num_classes, activation='softmax')) # softmax for multi-class classification
return model
def construct_model_cnn(num_classes):
"""
construct model architecture
:param num_classes: number of output classes of the model [int]
:return: model - Keras model object
"""
model = Sequential()
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_normal'))
# model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu', kernel_initializer='he_normal'))
model.add(MaxPool2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dense(num_classes, activation='softmax')) # softmax for multi-class classification
return model
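# Hedged usage sketch (the input shape and compile settings below are assumptions for
# MNIST-sized grayscale images, not taken from the course material):
#   model = construct_model_cnn(num_classes=10)
#   model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
#   model.build(input_shape=(None, 28, 28, 1))
#   model.summary()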
| python |
"""
Created on Jan 1, 2019
@author: CyberiaResurrection
"""
import unittest
import re
import sys
sys.path.append('../PyRoute')
from Star import Nobles
class TestNobles(unittest.TestCase):
def testDefaultString(self):
nobles = Nobles()
expected = ''
self.assertEqual(expected, nobles.__str__())
def testStringWithOneViscount(self):
nobles = Nobles()
nobles.nobles['Viscounts'] = 1
expected = 'e'
self.assertEqual(expected, nobles.__str__())
def testCountWithViscount(self):
nobles = Nobles()
nobles.count(['e'])
expected = 1
actual = nobles.nobles['Viscounts']
self.assertEqual(expected, actual)
def testAccumulateSelf(self):
nobles = Nobles()
nobles.nobles['Viscounts'] = 1
nobles.accumulate(nobles)
expected = 2
actual = nobles.nobles['Viscounts']
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| python |
MMO_USER_ALREADY_ENABLED = "MMO features for your account are already enabled."
MMO_USER_ENABLE = "MMO features for your account are now enabled."
MMO_USER_ALREADY_DISABLED = "MMO features for your account are already disabled."
MMO_USER_DISABLE = "MMO features for your account are now disabled."
MMO_CURRENTLY_DISABLED = "MMO features are currently disabled for your account, enable it via `mmo enable` to use " \
"this command."
MMO_CLASS_NOT_FOUND = "No class with that name found."
MMO_CLASS_DONT_MEET_LEVEL = "You do not meet the minimum level requirement for that class."
MMO_CLASS_CHOSEN = "{} is now a {}!"
MMO_CLASS_ON_COOLDOWN = "This ability is on cooldown for {}"
MMO_NAME_SET = "Your characters name is now \"{}\""
MMO_DEFAULT_SPELL_SET = "Your default spell is now {}"
MMO_DEFAULT_SPELL_BAD = "Could not find spell called {}, are you the wrong class?"
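# Usage sketch (assuming the calling code fills these templates with str.format), e.g.
#   MMO_CLASS_CHOSEN.format("Alice", "Paladin")  ->  "Alice is now a Paladin!"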
| python |
import datetime
import os
import re
from dateutil import tz
import sqlalchemy as sa
from sqlalchemy.engine.reflection import Inspector
from alembic import autogenerate
from alembic import command
from alembic import util
from alembic.environment import EnvironmentContext
from alembic.operations import ops
from alembic.script import ScriptDirectory
from alembic.testing import assert_raises_message
from alembic.testing import assertions
from alembic.testing import eq_
from alembic.testing import is_
from alembic.testing import mock
from alembic.testing import ne_
from alembic.testing.env import _get_staging_directory
from alembic.testing.env import _multi_dir_testing_config
from alembic.testing.env import _multidb_testing_config
from alembic.testing.env import _no_sql_testing_config
from alembic.testing.env import _sqlite_file_db
from alembic.testing.env import _sqlite_testing_config
from alembic.testing.env import _testing_config
from alembic.testing.env import clear_staging_env
from alembic.testing.env import env_file_fixture
from alembic.testing.env import script_file_fixture
from alembic.testing.env import staging_env
from alembic.testing.env import three_rev_fixture
from alembic.testing.env import write_script
from alembic.testing.fixtures import TestBase
from alembic.util import CommandError
env, abc, def_ = None, None, None
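# Module-level fixtures shared by the ordered steps of GeneralOrderedTests below.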
class GeneralOrderedTests(TestBase):
def setUp(self):
global env
env = staging_env()
def tearDown(self):
clear_staging_env()
def test_steps(self):
self._test_001_environment()
self._test_002_rev_ids()
self._test_003_api_methods_clean()
self._test_004_rev()
self._test_005_nextrev()
self._test_006_from_clean_env()
self._test_007_long_name()
self._test_008_long_name_configurable()
def _test_001_environment(self):
assert_set = set(["env.py", "script.py.mako", "README"])
eq_(assert_set.intersection(os.listdir(env.dir)), assert_set)
def _test_002_rev_ids(self):
global abc, def_
abc = util.rev_id()
def_ = util.rev_id()
ne_(abc, def_)
def _test_003_api_methods_clean(self):
eq_(env.get_heads(), [])
eq_(env.get_base(), None)
def _test_004_rev(self):
script = env.generate_revision(abc, "this is a message", refresh=True)
eq_(script.doc, "this is a message")
eq_(script.revision, abc)
eq_(script.down_revision, None)
assert os.access(
os.path.join(env.dir, "versions", "%s_this_is_a_message.py" % abc),
os.F_OK,
)
assert callable(script.module.upgrade)
eq_(env.get_heads(), [abc])
eq_(env.get_base(), abc)
def _test_005_nextrev(self):
script = env.generate_revision(
def_, "this is the next rev", refresh=True
)
assert os.access(
os.path.join(
env.dir, "versions", "%s_this_is_the_next_rev.py" % def_
),
os.F_OK,
)
eq_(script.revision, def_)
eq_(script.down_revision, abc)
eq_(env.get_revision(abc).nextrev, set([def_]))
assert script.module.down_revision == abc
assert callable(script.module.upgrade)
assert callable(script.module.downgrade)
eq_(env.get_heads(), [def_])
eq_(env.get_base(), abc)
def _test_006_from_clean_env(self):
# test the environment so far with a
# new ScriptDirectory instance.
env = staging_env(create=False)
abc_rev = env.get_revision(abc)
def_rev = env.get_revision(def_)
eq_(abc_rev.nextrev, set([def_]))
eq_(abc_rev.revision, abc)
eq_(def_rev.down_revision, abc)
eq_(env.get_heads(), [def_])
eq_(env.get_base(), abc)
def _test_007_long_name(self):
rid = util.rev_id()
env.generate_revision(
rid,
"this is a really long name with "
"lots of characters and also "
"I'd like it to\nhave\nnewlines",
)
assert os.access(
os.path.join(
env.dir,
"versions",
"%s_this_is_a_really_long_name_with_lots_of_.py" % rid,
),
os.F_OK,
)
def _test_008_long_name_configurable(self):
env.truncate_slug_length = 60
rid = util.rev_id()
env.generate_revision(
rid,
"this is a really long name with "
"lots of characters and also "
"I'd like it to\nhave\nnewlines",
)
assert os.access(
os.path.join(
env.dir,
"versions",
"%s_this_is_a_really_long_name_with_lots_"
"of_characters_and_also_.py" % rid,
),
os.F_OK,
)
class ScriptNamingTest(TestBase):
@classmethod
def setup_class(cls):
_testing_config()
@classmethod
def teardown_class(cls):
clear_staging_env()
def test_args(self):
script = ScriptDirectory(
_get_staging_directory(),
file_template="%(rev)s_%(slug)s_"
"%(year)s_%(month)s_"
"%(day)s_%(hour)s_"
"%(minute)s_%(second)s",
)
create_date = datetime.datetime(2012, 7, 25, 15, 8, 5)
eq_(
script._rev_path(
script.versions, "12345", "this is a message", create_date
),
os.path.abspath(
"%s/versions/12345_this_is_a_"
"message_2012_7_25_15_8_5.py" % _get_staging_directory()
),
)
def _test_tz(self, timezone_arg, given, expected):
script = ScriptDirectory(
_get_staging_directory(),
file_template="%(rev)s_%(slug)s_"
"%(year)s_%(month)s_"
"%(day)s_%(hour)s_"
"%(minute)s_%(second)s",
timezone=timezone_arg,
)
with mock.patch(
"alembic.script.base.datetime",
mock.Mock(
datetime=mock.Mock(utcnow=lambda: given, now=lambda: given)
),
):
create_date = script._generate_create_date()
eq_(create_date, expected)
def test_custom_tz(self):
self._test_tz(
"EST5EDT",
datetime.datetime(2012, 7, 25, 15, 8, 5),
datetime.datetime(
2012, 7, 25, 11, 8, 5, tzinfo=tz.gettz("EST5EDT")
),
)
def test_custom_tz_lowercase(self):
self._test_tz(
"est5edt",
datetime.datetime(2012, 7, 25, 15, 8, 5),
datetime.datetime(
2012, 7, 25, 11, 8, 5, tzinfo=tz.gettz("EST5EDT")
),
)
def test_custom_tz_utc(self):
self._test_tz(
"utc",
datetime.datetime(2012, 7, 25, 15, 8, 5),
datetime.datetime(2012, 7, 25, 15, 8, 5, tzinfo=tz.gettz("UTC")),
)
def test_custom_tzdata_tz(self):
self._test_tz(
"Europe/Berlin",
datetime.datetime(2012, 7, 25, 15, 8, 5),
datetime.datetime(
2012, 7, 25, 17, 8, 5, tzinfo=tz.gettz("Europe/Berlin")
),
)
def test_default_tz(self):
self._test_tz(
None,
datetime.datetime(2012, 7, 25, 15, 8, 5),
datetime.datetime(2012, 7, 25, 15, 8, 5),
)
def test_tz_cant_locate(self):
assert_raises_message(
CommandError,
"Can't locate timezone: fake",
self._test_tz,
"fake",
datetime.datetime(2012, 7, 25, 15, 8, 5),
datetime.datetime(2012, 7, 25, 15, 8, 5),
)
class RevisionCommandTest(TestBase):
def setUp(self):
self.env = staging_env()
self.cfg = _sqlite_testing_config()
self.a, self.b, self.c = three_rev_fixture(self.cfg)
def tearDown(self):
clear_staging_env()
def test_create_script_basic(self):
rev = command.revision(self.cfg, message="some message")
script = ScriptDirectory.from_config(self.cfg)
rev = script.get_revision(rev.revision)
eq_(rev.down_revision, self.c)
assert "some message" in rev.doc
def test_create_script_splice(self):
rev = command.revision(
self.cfg, message="some message", head=self.b, splice=True
)
script = ScriptDirectory.from_config(self.cfg)
rev = script.get_revision(rev.revision)
eq_(rev.down_revision, self.b)
assert "some message" in rev.doc
eq_(set(script.get_heads()), set([rev.revision, self.c]))
def test_create_script_missing_splice(self):
assert_raises_message(
util.CommandError,
"Revision %s is not a head revision; please specify --splice "
"to create a new branch from this revision" % self.b,
command.revision,
self.cfg,
message="some message",
head=self.b,
)
def test_illegal_revision_chars(self):
assert_raises_message(
util.CommandError,
r"Character\(s\) '-' not allowed in "
"revision identifier 'no-dashes'",
command.revision,
self.cfg,
message="some message",
rev_id="no-dashes",
)
assert not os.path.exists(
os.path.join(self.env.dir, "versions", "no-dashes_some_message.py")
)
assert_raises_message(
util.CommandError,
r"Character\(s\) '@' not allowed in "
"revision identifier 'no@atsigns'",
command.revision,
self.cfg,
message="some message",
rev_id="no@atsigns",
)
assert_raises_message(
util.CommandError,
r"Character\(s\) '-, @' not allowed in revision "
"identifier 'no@atsigns-ordashes'",
command.revision,
self.cfg,
message="some message",
rev_id="no@atsigns-ordashes",
)
assert_raises_message(
util.CommandError,
r"Character\(s\) '\+' not allowed in revision "
r"identifier 'no\+plussignseither'",
command.revision,
self.cfg,
message="some message",
rev_id="no+plussignseither",
)
def test_create_script_branches(self):
rev = command.revision(
self.cfg, message="some message", branch_label="foobar"
)
script = ScriptDirectory.from_config(self.cfg)
rev = script.get_revision(rev.revision)
eq_(script.get_revision("foobar"), rev)
def test_create_script_branches_old_template(self):
script = ScriptDirectory.from_config(self.cfg)
with open(os.path.join(script.dir, "script.py.mako"), "w") as file_:
file_.write(
"<%text>#</%text> ${message}\n"
"revision = ${repr(up_revision)}\n"
"down_revision = ${repr(down_revision)}\n\n"
"def upgrade():\n"
" ${upgrades if upgrades else 'pass'}\n\n"
"def downgrade():\n"
" ${downgrade if downgrades else 'pass'}\n\n"
)
# works OK if no branch names
command.revision(self.cfg, message="some message")
assert_raises_message(
util.CommandError,
r"Version \w+ specified branch_labels foobar, "
r"however the migration file .+?\b does not have them; have you "
"upgraded your script.py.mako to include the 'branch_labels' "
r"section\?",
command.revision,
self.cfg,
message="some message",
branch_label="foobar",
)
class CustomizeRevisionTest(TestBase):
def setUp(self):
self.env = staging_env()
self.cfg = _multi_dir_testing_config()
self.cfg.set_main_option("revision_environment", "true")
script = ScriptDirectory.from_config(self.cfg)
self.model1 = util.rev_id()
self.model2 = util.rev_id()
self.model3 = util.rev_id()
for model, name in [
(self.model1, "model1"),
(self.model2, "model2"),
(self.model3, "model3"),
]:
script.generate_revision(
model,
name,
refresh=True,
version_path=os.path.join(_get_staging_directory(), name),
head="base",
)
write_script(
script,
model,
"""\
"%s"
revision = '%s'
down_revision = None
branch_labels = ['%s']
from alembic import op
def upgrade():
pass
def downgrade():
pass
"""
% (name, model, name),
)
def tearDown(self):
clear_staging_env()
def _env_fixture(self, fn, target_metadata):
self.engine = engine = _sqlite_file_db()
def run_env(self):
from alembic import context
with engine.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
process_revision_directives=fn,
)
with context.begin_transaction():
context.run_migrations()
return mock.patch(
"alembic.script.base.ScriptDirectory.run_env", run_env
)
def test_new_locations_no_autogen(self):
m = sa.MetaData()
def process_revision_directives(context, rev, generate_revisions):
generate_revisions[:] = [
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(),
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model1"
),
head="model1@head",
),
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(),
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model2"
),
head="model2@head",
),
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(),
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model3"
),
head="model3@head",
),
]
with self._env_fixture(process_revision_directives, m):
revs = command.revision(self.cfg, message="some message")
script = ScriptDirectory.from_config(self.cfg)
for rev, model in [
(revs[0], "model1"),
(revs[1], "model2"),
(revs[2], "model3"),
]:
rev_script = script.get_revision(rev.revision)
eq_(
rev_script.path,
os.path.abspath(
os.path.join(
_get_staging_directory(),
model,
"%s_.py" % (rev_script.revision,),
)
),
)
assert os.path.exists(rev_script.path)
def test_renders_added_directives_no_autogen(self):
m = sa.MetaData()
def process_revision_directives(context, rev, generate_revisions):
generate_revisions[0].upgrade_ops.ops.append(
ops.CreateIndexOp("some_index", "some_table", ["a", "b"])
)
with self._env_fixture(process_revision_directives, m):
rev = command.revision(
self.cfg, message="some message", head="model1@head", sql=True
)
with mock.patch.object(rev.module, "op") as op_mock:
rev.module.upgrade()
eq_(
op_mock.mock_calls,
[
mock.call.create_index(
"some_index", "some_table", ["a", "b"], unique=False
)
],
)
def test_autogen(self):
m = sa.MetaData()
sa.Table("t", m, sa.Column("x", sa.Integer))
def process_revision_directives(context, rev, generate_revisions):
existing_upgrades = generate_revisions[0].upgrade_ops
existing_downgrades = generate_revisions[0].downgrade_ops
# model1 will run the upgrades, e.g. create the table,
# model2 will run the downgrades as upgrades, e.g. drop
# the table again
generate_revisions[:] = [
ops.MigrationScript(
util.rev_id(),
existing_upgrades,
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model1"
),
head="model1@head",
),
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(ops=existing_downgrades.ops),
ops.DowngradeOps(),
version_path=os.path.join(
_get_staging_directory(), "model2"
),
head="model2@head",
),
]
with self._env_fixture(process_revision_directives, m):
command.upgrade(self.cfg, "heads")
eq_(
Inspector.from_engine(self.engine).get_table_names(),
["alembic_version"],
)
command.revision(
self.cfg, message="some message", autogenerate=True
)
command.upgrade(self.cfg, "model1@head")
eq_(
Inspector.from_engine(self.engine).get_table_names(),
["alembic_version", "t"],
)
command.upgrade(self.cfg, "model2@head")
eq_(
Inspector.from_engine(self.engine).get_table_names(),
["alembic_version"],
)
def test_programmatic_command_option(self):
def process_revision_directives(context, rev, generate_revisions):
generate_revisions[0].message = "test programatic"
generate_revisions[0].upgrade_ops = ops.UpgradeOps(
ops=[
ops.CreateTableOp(
"test_table",
[
sa.Column("id", sa.Integer(), primary_key=True),
sa.Column("name", sa.String(50), nullable=False),
],
)
]
)
generate_revisions[0].downgrade_ops = ops.DowngradeOps(
ops=[ops.DropTableOp("test_table")]
)
with self._env_fixture(None, None):
rev = command.revision(
self.cfg,
head="model1@head",
process_revision_directives=process_revision_directives,
)
with open(rev.path) as handle:
result = handle.read()
assert (
(
"""
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('test_table',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=50), nullable=False),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
"""
)
in result
)
class ScriptAccessorTest(TestBase):
def test_upgrade_downgrade_ops_list_accessors(self):
u1 = ops.UpgradeOps(ops=[])
d1 = ops.DowngradeOps(ops=[])
m1 = ops.MigrationScript("somerev", u1, d1)
is_(m1.upgrade_ops, u1)
is_(m1.downgrade_ops, d1)
u2 = ops.UpgradeOps(ops=[])
d2 = ops.DowngradeOps(ops=[])
m1._upgrade_ops.append(u2)
m1._downgrade_ops.append(d2)
assert_raises_message(
ValueError,
"This MigrationScript instance has a multiple-entry list for "
"UpgradeOps; please use the upgrade_ops_list attribute.",
getattr,
m1,
"upgrade_ops",
)
assert_raises_message(
ValueError,
"This MigrationScript instance has a multiple-entry list for "
"DowngradeOps; please use the downgrade_ops_list attribute.",
getattr,
m1,
"downgrade_ops",
)
eq_(m1.upgrade_ops_list, [u1, u2])
eq_(m1.downgrade_ops_list, [d1, d2])
class ImportsTest(TestBase):
def setUp(self):
self.env = staging_env()
self.cfg = _sqlite_testing_config()
def tearDown(self):
clear_staging_env()
def _env_fixture(self, target_metadata, **kw):
self.engine = engine = _sqlite_file_db()
def run_env(self):
from alembic import context
with engine.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
**kw
)
with context.begin_transaction():
context.run_migrations()
return mock.patch(
"alembic.script.base.ScriptDirectory.run_env", run_env
)
def test_imports_in_script(self):
from sqlalchemy import MetaData, Table, Column
from sqlalchemy.dialects.mysql import VARCHAR
type_ = VARCHAR(20, charset="utf8", national=True)
m = MetaData()
Table("t", m, Column("x", type_))
def process_revision_directives(context, rev, generate_revisions):
generate_revisions[0].imports.add(
"from sqlalchemy.dialects.mysql import TINYINT"
)
with self._env_fixture(
m, process_revision_directives=process_revision_directives
):
rev = command.revision(
self.cfg, message="some message", autogenerate=True
)
with open(rev.path) as file_:
contents = file_.read()
assert "from sqlalchemy.dialects import mysql" in contents
assert "from sqlalchemy.dialects.mysql import TINYINT" in contents
class MultiContextTest(TestBase):
"""test the multidb template for autogenerate front-to-back"""
def setUp(self):
self.engine1 = _sqlite_file_db(tempname="eng1.db")
self.engine2 = _sqlite_file_db(tempname="eng2.db")
self.engine3 = _sqlite_file_db(tempname="eng3.db")
self.env = staging_env(template="multidb")
self.cfg = _multidb_testing_config(
{
"engine1": self.engine1,
"engine2": self.engine2,
"engine3": self.engine3,
}
)
def _write_metadata(self, meta):
path = os.path.join(_get_staging_directory(), "scripts", "env.py")
with open(path) as env_:
existing_env = env_.read()
existing_env = existing_env.replace("target_metadata = {}", meta)
with open(path, "w") as env_:
env_.write(existing_env)
def tearDown(self):
clear_staging_env()
def test_autogen(self):
self._write_metadata(
"""
import sqlalchemy as sa
m1 = sa.MetaData()
m2 = sa.MetaData()
m3 = sa.MetaData()
target_metadata = {"engine1": m1, "engine2": m2, "engine3": m3}
sa.Table('e1t1', m1, sa.Column('x', sa.Integer))
sa.Table('e2t1', m2, sa.Column('y', sa.Integer))
sa.Table('e3t1', m3, sa.Column('z', sa.Integer))
"""
)
rev = command.revision(
self.cfg, message="some message", autogenerate=True
)
with mock.patch.object(rev.module, "op") as op_mock:
rev.module.upgrade_engine1()
eq_(
op_mock.mock_calls[-1],
mock.call.create_table("e1t1", mock.ANY),
)
rev.module.upgrade_engine2()
eq_(
op_mock.mock_calls[-1],
mock.call.create_table("e2t1", mock.ANY),
)
rev.module.upgrade_engine3()
eq_(
op_mock.mock_calls[-1],
mock.call.create_table("e3t1", mock.ANY),
)
rev.module.downgrade_engine1()
eq_(op_mock.mock_calls[-1], mock.call.drop_table("e1t1"))
rev.module.downgrade_engine2()
eq_(op_mock.mock_calls[-1], mock.call.drop_table("e2t1"))
rev.module.downgrade_engine3()
eq_(op_mock.mock_calls[-1], mock.call.drop_table("e3t1"))
class RewriterTest(TestBase):
def test_all_traverse(self):
writer = autogenerate.Rewriter()
mocker = mock.Mock(side_effect=lambda context, revision, op: op)
writer.rewrites(ops.MigrateOperation)(mocker)
addcolop = ops.AddColumnOp("t1", sa.Column("x", sa.Integer()))
directives = [
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(ops=[ops.ModifyTableOps("t1", ops=[addcolop])]),
ops.DowngradeOps(ops=[]),
)
]
ctx, rev = mock.Mock(), mock.Mock()
writer(ctx, rev, directives)
eq_(
mocker.mock_calls,
[
mock.call(ctx, rev, directives[0]),
mock.call(ctx, rev, directives[0].upgrade_ops),
mock.call(ctx, rev, directives[0].upgrade_ops.ops[0]),
mock.call(ctx, rev, addcolop),
mock.call(ctx, rev, directives[0].downgrade_ops),
],
)
def test_double_migrate_table(self):
writer = autogenerate.Rewriter()
idx_ops = []
@writer.rewrites(ops.ModifyTableOps)
def second_table(context, revision, op):
return [
op,
ops.ModifyTableOps(
"t2",
ops=[ops.AddColumnOp("t2", sa.Column("x", sa.Integer()))],
),
]
@writer.rewrites(ops.AddColumnOp)
def add_column(context, revision, op):
idx_op = ops.CreateIndexOp("ixt", op.table_name, [op.column.name])
idx_ops.append(idx_op)
return [op, idx_op]
directives = [
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(
ops=[
ops.ModifyTableOps(
"t1",
ops=[
ops.AddColumnOp(
"t1", sa.Column("x", sa.Integer())
)
],
)
]
),
ops.DowngradeOps(ops=[]),
)
]
ctx, rev = mock.Mock(), mock.Mock()
writer(ctx, rev, directives)
eq_(
[d.table_name for d in directives[0].upgrade_ops.ops], ["t1", "t2"]
)
is_(directives[0].upgrade_ops.ops[0].ops[1], idx_ops[0])
is_(directives[0].upgrade_ops.ops[1].ops[1], idx_ops[1])
def test_chained_ops(self):
writer1 = autogenerate.Rewriter()
writer2 = autogenerate.Rewriter()
@writer1.rewrites(ops.AddColumnOp)
def add_column_nullable(context, revision, op):
if op.column.nullable:
return op
else:
op.column.nullable = True
return [
op,
ops.AlterColumnOp(
op.table_name,
op.column.name,
modify_nullable=False,
existing_type=op.column.type,
),
]
@writer2.rewrites(ops.AddColumnOp)
def add_column_idx(context, revision, op):
idx_op = ops.CreateIndexOp("ixt", op.table_name, [op.column.name])
return [op, idx_op]
directives = [
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(
ops=[
ops.ModifyTableOps(
"t1",
ops=[
ops.AddColumnOp(
"t1",
sa.Column(
"x", sa.Integer(), nullable=False
),
)
],
)
]
),
ops.DowngradeOps(ops=[]),
)
]
ctx, rev = mock.Mock(), mock.Mock()
writer1.chain(writer2)(ctx, rev, directives)
eq_(
autogenerate.render_python_code(directives[0].upgrade_ops),
"# ### commands auto generated by Alembic - please adjust! ###\n"
" op.add_column('t1', "
"sa.Column('x', sa.Integer(), nullable=True))\n"
" op.create_index('ixt', 't1', ['x'], unique=False)\n"
" op.alter_column('t1', 'x',\n"
" existing_type=sa.Integer(),\n"
" nullable=False)\n"
" # ### end Alembic commands ###",
)
def test_no_needless_pass(self):
writer1 = autogenerate.Rewriter()
@writer1.rewrites(ops.AlterColumnOp)
def rewrite_alter_column(context, revision, op):
return []
directives = [
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(
ops=[
ops.ModifyTableOps(
"t1",
ops=[
ops.AlterColumnOp(
"foo",
"bar",
modify_nullable=False,
existing_type=sa.Integer(),
),
ops.AlterColumnOp(
"foo",
"bar",
modify_nullable=False,
existing_type=sa.Integer(),
),
],
),
ops.ModifyTableOps(
"t1",
ops=[
ops.AlterColumnOp(
"foo",
"bar",
modify_nullable=False,
existing_type=sa.Integer(),
)
],
),
]
),
ops.DowngradeOps(ops=[]),
)
]
ctx, rev = mock.Mock(), mock.Mock()
writer1(ctx, rev, directives)
eq_(
autogenerate.render_python_code(directives[0].upgrade_ops),
"# ### commands auto generated by Alembic - please adjust! ###\n"
" pass\n"
" # ### end Alembic commands ###",
)
def test_multiple_passes_with_mutations(self):
writer1 = autogenerate.Rewriter()
@writer1.rewrites(ops.CreateTableOp)
def rewrite_alter_column(context, revision, op):
op.table_name += "_pass"
return op
directives = [
ops.MigrationScript(
util.rev_id(),
ops.UpgradeOps(
ops=[
ops.CreateTableOp(
"test_table",
[sa.Column("id", sa.Integer(), primary_key=True)],
)
]
),
ops.DowngradeOps(ops=[]),
)
]
ctx, rev = mock.Mock(), mock.Mock()
writer1(ctx, rev, directives)
directives[0].upgrade_ops_list.extend(
[
ops.UpgradeOps(
ops=[
ops.CreateTableOp(
"another_test_table",
[sa.Column("id", sa.Integer(), primary_key=True)],
)
]
),
ops.UpgradeOps(
ops=[
ops.CreateTableOp(
"third_test_table",
[sa.Column("id", sa.Integer(), primary_key=True)],
)
]
),
]
)
writer1(ctx, rev, directives)
eq_(
autogenerate.render_python_code(directives[0].upgrade_ops_list[0]),
"# ### commands auto generated by Alembic - please adjust! ###\n"
" op.create_table('test_table_pass',\n"
" sa.Column('id', sa.Integer(), nullable=False),\n"
" sa.PrimaryKeyConstraint('id')\n"
" )\n"
" # ### end Alembic commands ###",
)
eq_(
autogenerate.render_python_code(directives[0].upgrade_ops_list[1]),
"# ### commands auto generated by Alembic - please adjust! ###\n"
" op.create_table('another_test_table_pass',\n"
" sa.Column('id', sa.Integer(), nullable=False),\n"
" sa.PrimaryKeyConstraint('id')\n"
" )\n"
" # ### end Alembic commands ###",
)
eq_(
autogenerate.render_python_code(directives[0].upgrade_ops_list[2]),
"# ### commands auto generated by Alembic - please adjust! ###\n"
" op.create_table('third_test_table_pass',\n"
" sa.Column('id', sa.Integer(), nullable=False),\n"
" sa.PrimaryKeyConstraint('id')\n"
" )\n"
" # ### end Alembic commands ###",
)
class MultiDirRevisionCommandTest(TestBase):
def setUp(self):
self.env = staging_env()
self.cfg = _multi_dir_testing_config()
def tearDown(self):
clear_staging_env()
def test_multiple_dir_no_bases(self):
assert_raises_message(
util.CommandError,
"Multiple version locations present, please specify "
"--version-path",
command.revision,
self.cfg,
message="some message",
)
def test_multiple_dir_no_bases_invalid_version_path(self):
assert_raises_message(
util.CommandError,
"Path foo/bar/ is not represented in current version locations",
command.revision,
self.cfg,
message="x",
version_path=os.path.join("foo/bar/"),
)
def test_multiple_dir_no_bases_version_path(self):
script = command.revision(
self.cfg,
message="x",
version_path=os.path.join(_get_staging_directory(), "model1"),
)
assert os.access(script.path, os.F_OK)
def test_multiple_dir_chooses_base(self):
command.revision(
self.cfg,
message="x",
head="base",
version_path=os.path.join(_get_staging_directory(), "model1"),
)
script2 = command.revision(
self.cfg,
message="y",
head="base",
version_path=os.path.join(_get_staging_directory(), "model2"),
)
script3 = command.revision(
self.cfg, message="y2", head=script2.revision
)
eq_(
os.path.dirname(script3.path),
os.path.abspath(os.path.join(_get_staging_directory(), "model2")),
)
assert os.access(script3.path, os.F_OK)
class TemplateArgsTest(TestBase):
def setUp(self):
staging_env()
self.cfg = _no_sql_testing_config(
directives="\nrevision_environment=true\n"
)
def tearDown(self):
clear_staging_env()
def test_args_propagate(self):
config = _no_sql_testing_config()
script = ScriptDirectory.from_config(config)
template_args = {"x": "x1", "y": "y1", "z": "z1"}
env = EnvironmentContext(config, script, template_args=template_args)
env.configure(
dialect_name="sqlite", template_args={"y": "y2", "q": "q1"}
)
eq_(template_args, {"x": "x1", "y": "y2", "z": "z1", "q": "q1"})
def test_tmpl_args_revision(self):
env_file_fixture(
"""
context.configure(dialect_name='sqlite', template_args={"somearg":"somevalue"})
"""
)
script_file_fixture(
"""
# somearg: ${somearg}
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
"""
)
command.revision(self.cfg, message="some rev")
script = ScriptDirectory.from_config(self.cfg)
rev = script.get_revision("head")
with open(rev.path) as f:
text = f.read()
assert "somearg: somevalue" in text
def test_bad_render(self):
env_file_fixture(
"""
context.configure(dialect_name='sqlite', template_args={"somearg":"somevalue"})
"""
)
script_file_fixture(
"""
<% z = x + y %>
"""
)
try:
command.revision(self.cfg, message="some rev")
except CommandError as ce:
m = re.match(
r"^Template rendering failed; see (.+?) "
"for a template-oriented",
str(ce),
)
assert m, "Command error did not produce a file"
with open(m.group(1)) as handle:
contents = handle.read()
os.remove(m.group(1))
assert "<% z = x + y %>" in contents
class DuplicateVersionLocationsTest(TestBase):
def setUp(self):
self.env = staging_env()
self.cfg = _multi_dir_testing_config(
# this is a duplicate of one of the paths
# already present in this fixture
extra_version_location="%(here)s/model1"
)
script = ScriptDirectory.from_config(self.cfg)
self.model1 = util.rev_id()
self.model2 = util.rev_id()
self.model3 = util.rev_id()
for model, name in [
(self.model1, "model1"),
(self.model2, "model2"),
(self.model3, "model3"),
]:
script.generate_revision(
model,
name,
refresh=True,
version_path=os.path.join(_get_staging_directory(), name),
head="base",
)
write_script(
script,
model,
"""\
"%s"
revision = '%s'
down_revision = None
branch_labels = ['%s']
from alembic import op
def upgrade():
pass
def downgrade():
pass
"""
% (name, model, name),
)
def tearDown(self):
clear_staging_env()
def test_env_emits_warning(self):
with assertions.expect_warnings(
"File %s loaded twice! ignoring. "
"Please ensure version_locations is unique"
% (
os.path.realpath(
os.path.join(
_get_staging_directory(),
"model1",
"%s_model1.py" % self.model1,
)
)
)
):
script = ScriptDirectory.from_config(self.cfg)
script.revision_map.heads
eq_(
[rev.revision for rev in script.walk_revisions()],
[self.model1, self.model2, self.model3],
)
class NormPathTest(TestBase):
def setUp(self):
self.env = staging_env()
def test_script_location(self):
config = _no_sql_testing_config()
script = ScriptDirectory.from_config(config)
def normpath(path):
return path.replace("/", ":NORM:")
normpath = mock.Mock(side_effect=normpath)
with mock.patch("os.path.normpath", normpath):
eq_(
script._version_locations,
(
os.path.abspath(
os.path.join(
_get_staging_directory(), "scripts", "versions"
)
).replace("/", ":NORM:"),
),
)
eq_(
script.versions,
os.path.abspath(
os.path.join(
_get_staging_directory(), "scripts", "versions"
)
).replace("/", ":NORM:"),
)
def test_script_location_muliple(self):
config = _multi_dir_testing_config()
script = ScriptDirectory.from_config(config)
def normpath(path):
return path.replace("/", ":NORM:")
normpath = mock.Mock(side_effect=normpath)
with mock.patch("os.path.normpath", normpath):
eq_(
script._version_locations,
[
os.path.abspath(
os.path.join(_get_staging_directory(), "model1/")
).replace("/", ":NORM:"),
os.path.abspath(
os.path.join(_get_staging_directory(), "model2/")
).replace("/", ":NORM:"),
os.path.abspath(
os.path.join(_get_staging_directory(), "model3/")
).replace("/", ":NORM:"),
],
)
| python |
print("Hello World")  # noqa: E902 | python
from setuptools import setup, find_packages
setup(
name="mediafire-dl",
version="0.1.0",
    description="A simple script to download mediafire links based on gdown",
url="https://github.com/fernandocaleo/mediafired-dlink",
author="Fernando Caleo",
author_email="[email protected]",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Intended Audience :: Developers",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
    ],
keywords="audo ai",
py_modules=['mediafire_dl'],
install_requires=[
"requests",
"tqdm",
],
entry_points={
"console_scripts": ["mediafire-dl=mediafire_dl:main"],
},
)
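# Usage sketch (illustrative; the URL is a placeholder):
#   pip install .
#   mediafire-dl "https://www.mediafire.com/file/<id>/<name>"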
| python |
#!/usr/bin/env python
# coding: utf-8
# In[2]:
import sys
sys.path.append('../../framework')
sys.path.append('../../application')
from NetworkClass import Network
# In[9]:
model_dict = {
"network": {
'input_layer': {
"units": 784,
},
'hidden_layer': [{
"units": 168,
"activation": "relu",
"type": "Linear"
},
{
"units": 168,
"activation": "relu",
"type": "Linear"
},
{
"units": 168,
"activation": "relu",
"type": "Linear"
}
],
'output_layer': {
"units": 10,
"activation": "softmax",
"type": "Linear"
}
}
}
model = Network(model_dict)
model
# In[ ]:
import logging
import string
import random
import os
import torch
import torchvision
from sklearn.model_selection import KFold
from Experiment import Experiment
from train_utils import ReshapeTransform
def randomString(stringLength=10):
"""Generate a random string of fixed length """
letters = string.ascii_lowercase
return ''.join(random.choice(letters) for i in range(stringLength))
logging.basicConfig(level=logging.INFO)
if not(os.path.isdir('models')):
os.mkdir('models')
params_dict = {
"batch_size_train": 100,
"learning_rate": 0.01,
"batch_size_test": 1000,
"n_epochs": 200
}
seed = 42
uid = randomString(stringLength=6)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
else:
torch.manual_seed(seed)
params_dict["model"] = model_dict
train_dataset = torchvision.datasets.FashionMNIST('../data/', train=True, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
ReshapeTransform(
(-1,))
]))
test_dataset = torchvision.datasets.FashionMNIST('../data/', train=False, download=True,
transform=torchvision.transforms.Compose([
torchvision.transforms.ToTensor(),
ReshapeTransform(
(-1,))
]))
dataset = torch.utils.data.ConcatDataset(
[train_dataset, test_dataset])
kf = KFold(n_splits=5, shuffle=True, random_state=seed)
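# FashionMNIST provides 60,000 training + 10,000 test images; after concatenation each of the
# 5 folds therefore holds 14,000 held-out samples per iteration.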
for i_fold, (train_index, test_index) in enumerate(kf.split(dataset)):
print("Fold: {}".format(i_fold+1))
# new fold - network from scratch
experiment = Experiment(device)
model = Network(model_dict)
params_dict["fold"] = i_fold+1
# set the dataloaders for the fold
train = torch.utils.data.Subset(dataset, train_index)
test = torch.utils.data.Subset(dataset, test_index)
train_loader = torch.utils.data.DataLoader(
train, batch_size=params_dict["batch_size_train"], shuffle=True)
test_loader = torch.utils.data.DataLoader(
test, batch_size=params_dict["batch_size_test"], shuffle=True)
# set up the experiment
experiment.set_metadata(params_dict)
experiment.set_network(model_dict)
experiment.set_loaders(train_loader, test_loader)
experiment.set_loss(torch.nn.CrossEntropyLoss())
# training loop
    for epoch in range(params_dict["n_epochs"]):
print("Epoch: {}".format(epoch))
epoch_vals = experiment.train_epoch(epoch)
logging.info(epoch_vals)
logging.info(experiment.network)
experiment.save_weights({
'epoch': epoch,
'state_dict': experiment.network.state_dict(),
'train_acc': experiment.tacc,
'val_acc': experiment.acc,
'train_loss': experiment.trainLoss,
'val_loss': experiment.testLoss,
'optimizer': experiment.optimizer.state_dict(),
'traint': experiment.traint,
'traini': experiment.traini,
'params': experiment.params_dict
}, 'models/{}_{}.pth.tar'.format(uid, epoch,))
# In[ ]:
| python |
# core.py
#
# Copyright (c) 2007 Stephen Day
#
# This module is part of Creoleparser and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
#
import re
import genshi.builder as bldr
__docformat__ = 'restructuredtext en'
escape_char = '~'
esc_neg_look = '(?<!' + re.escape(escape_char) + ')'
esc_to_remove = re.compile(''.join([r'(?<!',re.escape(escape_char),')',re.escape(escape_char),r'(?!([ \n]|$))']))
place_holder_re = re.compile(r'<<<(-?\d+?)>>>')
max_blank_lines = 250
def fill_from_store(text,element_store):
frags = []
mo = place_holder_re.search(text)
while mo:
if mo.start():
frags.append(text[:mo.start()])
frags.append(element_store.get(mo.group(1),
mo.group(1).join(['<<<','>>>'])))
if mo.end() < len(text):
text = text[mo.end():]
else:
break
mo = place_holder_re.search(text)
else:
frags.append(text)
return frags
def fragmentize(text,wiki_elements, element_store,remove_escapes=True):
"""Takes a string of wiki markup and outputs a list of genshi
Fragments (Elements and strings).
This recursive function, with help from the WikiElement objects,
does almost all the parsing.
    When no WikiElement objects are supplied, escapes are removed from
    ``text`` (unless ``remove_escapes`` is False) and it is
    returned as-is. This is the only way for recursion to stop.
:parameters:
text
the text to be parsed
wiki_elements
list of WikiElement objects to be searched for
remove_escapes
If False, escapes will not be removed
"""
while wiki_elements:
        # If the first supplied wiki_element is actually a list of elements,
        # search for all of them and match the closest one only.
if isinstance(wiki_elements[0],(list,tuple)):
x = None
mo = None
for element in wiki_elements[0]:
m = element.regexp.search(text)
if m:
if x is None:
x,wiki_element,mo = m.start(),element,m
elif m.start() < x:
x,wiki_element,mo = m.start(),element,m
else:
wiki_element = wiki_elements[0]
mo = wiki_element.regexp.search(text)
if mo:
frags = wiki_element._process(mo, text, wiki_elements, element_store)
break
else:
wiki_elements = wiki_elements[1:]
# remove escape characters
else:
if remove_escapes:
text = esc_to_remove.sub('',text)
frags = fill_from_store(text,element_store)
return frags
class Parser(object):
"""Instantiates a parser with specified behaviour"""
def __init__(self,dialect, method='xhtml', strip_whitespace=False, encoding='utf-8'):
"""Constructor for Parser objects.
:parameters:
dialect
A Creole instance
method
            This value is passed to Genshi's Stream.render(). Possible values
            include ``xhtml``, ``html``, and ``xml``.
        strip_whitespace
            This value is passed to Genshi's Stream.render().
        encoding
            This value is passed to Genshi's Stream.render().
"""
self.dialect = dialect
self.method = method
self.strip_whitespace = strip_whitespace
self.encoding=encoding
def generate(self,text,element_store=None,context='block'):
"""Returns a Genshi Stream.
:parameters:
text
The text to be parsed.
context
            This is useful for macro development where (for example) suppression
of paragraph tags is desired. Can be 'inline', 'block', or a list
of WikiElement objects (use with caution).
element_store
Internal dictionary that's passed around a lot ;)
See Genshi documentation for additional keyword arguments.
"""
if element_store is None:
element_store = {}
if not isinstance(context,list):
if context == 'block':
top_level_elements = self.dialect.block_elements
do_preprocess = True
elif context == 'inline':
top_level_elements = self.dialect.inline_elements
do_preprocess = False
else:
top_level_elements = context
do_preprocess = False
if do_preprocess:
chunks = preprocess(text,self.dialect)
else:
chunks = [text]
return bldr.tag(*[fragmentize(text,top_level_elements,element_store) for text in chunks]).generate()
def render(self,text,element_store=None,context='block',**kwargs):
"""Returns final output string (e.g., xhtml)
See generate() (above) and Genshi documentation for keyword arguments.
"""
if element_store is None:
element_store = {}
return self.generate(text,element_store,context).render(method=self.method,strip_whitespace=self.strip_whitespace,
encoding=self.encoding,**kwargs)
def __call__(self,text,element_store=None,context='block'):
"""Wrapper for the render method. Returns final output string.
See generate() (above) and Genshi documentation for keyword arguments.
"""
if element_store is None:
element_store = {}
return self.render(text,element_store,context)
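# Usage sketch (assumption: a Creole dialect object such as
# ``creoleparser.dialects.Creole10`` is available; the exact dialect factory
# differs between versions of this package). The helper below is illustrative
# only and is never called from within this module.
def _example_parser_usage(dialect):
    """Build a parser for the given dialect and render a snippet of markup."""
    text2html = Parser(dialect=dialect, method='xhtml')
    # ``//`` marks italics and ``**`` marks bold in Creole markup.
    return text2html("//Hello// **World**!")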
def preprocess(text, dialect):
"""This should generally be called before fragmentize().
:parameters:
text
        text to be processed.
dialect
a ``Creole`` object.
"""
text = text.replace("\r\n", "\n")
text = text.replace("\r", "\n")
text = ''.join([text.rstrip(),'\n'])
blank_lines = list(dialect.blank_line.regexp.finditer(text))
if len(blank_lines) > max_blank_lines:
return chunk(text,blank_lines,[dialect.pre,dialect.bodied_block_macro],max_blank_lines)
return [text]
def chunk(text, blank_lines, hard_elements, limit):
"""Safely breaks large Creole documents into a list of smaller
ones (strings)
"""
hard_spans = []
for e in hard_elements:
for mo in e.regexp.finditer(text):
hard_spans.append(mo.span())
hard_chars = []
for x,y in hard_spans:
hard_chars.extend(range(x,y))
hard_chars = set(hard_chars)
chunks = []
start = 0
    for i in range(len(blank_lines) // limit):
        for mo in blank_lines[limit // 2 + i * limit:limit * 3 // 2 + i * limit:10]:
if mo.start() not in hard_chars:
chunks.append(text[start:mo.start()])
start = mo.end()
break
chunks.append(text[start:])
return chunks
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
| python |
""" List of trading instruments and strategy portfolio """
from app_head import get_head
from app_body import get_body
from app_page import set_page
from app_ogp import set_ogp
from app_metatags import get_metatags
from app_title import get_title
from app_footer import get_page_footer
from bootstrap import get_bootstrap
from app_loading import get_loading_head, get_loading_body
from app_stylesheet import get_stylesheet
from app_navbar import navbar
from font_awesome import get_font_awesome
from app_cookie import get_sa_theme, theme_return_this
from googleanalytics import get_googleanalytics
from googleadsense import get_googleadsense
from list_instr_n_portf import get_box_list_instr_n_portf
from print_google_ads import print_google_ads
from purechat import get_purechat
def get_top_instr_n_portf_list():
""" xxx """
box_content = '<div class="box-top">' +\
' <div class="row">'+\
' <div class="col-lg-12 col-md-12 col-sm-12 col-xs-12">'+\
' <div class="box-part rounded sa-center-content">'+\
' </div>'+\
' </div>'+\
' </div>'+\
'</div>'
return box_content
def gen_view_list_instr_n_portf(appname, burl, what, sel, terminal):
""" xxx """
#what = 'instr', what = 'portf'
#sel = market or asset class
return_data = ''
if what == 'instr':
numrow = 10000
else:
numrow = 200
page_title = 'Top Performing Trades of the Week'
page_desc = 'Access to thousands of financial instruments, '+\
'stocks, forex, commodities & cryptos. '+\
'Create your trading signals portfolio powered by Artificial intelligence.'
return_data = get_head(get_loading_head() +\
get_googleanalytics() +\
get_googleadsense() +\
get_title(appname) +\
get_metatags(burl) +\
set_ogp(burl, 2, page_title, page_desc) +\
get_bootstrap(get_sa_theme(), burl) +\
get_font_awesome() + get_stylesheet(burl))
return_data = return_data + get_body(get_loading_body(),
navbar(burl, 0, terminal) +\
get_top_instr_n_portf_list() +\
get_box_list_instr_n_portf(burl,
'view',
what,
1,
numrow,
sel) +\
get_page_footer(burl, False) +\
get_purechat(0),
'')
return_data = set_page(return_data)
return return_data
| python |
#-*- coding=utf-8 -*-
import cv2
import numpy as np
# Line detection with the probabilistic Hough transform
img = cv2.imread('lines.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 120)
minLineLength = 20
maxLineGap = 5
# Pass the thresholds as keyword arguments: positionally, the fifth argument
# of cv2.HoughLinesP is the optional output array, not minLineLength.
lines = cv2.HoughLinesP(edges, 1, np.pi / 180, 20,
                        minLineLength=minLineLength, maxLineGap=maxLineGap)
# HoughLinesP returns an array of shape (N, 1, 4); draw every detected segment.
if lines is not None:
    for line in lines:
        x1, y1, x2, y2 = line[0]
        cv2.line(img, (x1, y1), (x2, y2), (0, 255, 0), 2)
cv2.imwrite("edges.jpeg",edges)
cv2.imwrite("lines.jpeg",img)
cv2.imshow("edges", edges)
cv2.imshow("lines", img)
cv2.waitKey()
cv2.destroyAllWindows()
| python |
import unittest
import os
import logging
import datetime
from cryptoxlib.CryptoXLib import CryptoXLib
from cryptoxlib.clients.bitpanda import enums
from cryptoxlib.clients.bitpanda.BitpandaWebsocket import PricesSubscription, AccountSubscription, OrderbookSubscription, \
CandlesticksSubscription, CandlesticksSubscriptionParams, MarketTickerSubscription
from cryptoxlib.Pair import Pair
from cryptoxlib.clients.bitpanda.exceptions import BitpandaRestException
from CryptoXLibTest import CryptoXLibTest, WsMessageCounter
api_key = os.environ['BITPANDAAPIKEY']
class BitpandaRestApi(CryptoXLibTest):
@classmethod
def initialize(cls) -> None:
cls.print_logs = True
cls.log_level = logging.DEBUG
def check_positive_response(self, response):
return str(response['status_code'])[0] == '2'
async def init_test(self):
self.client = CryptoXLib.create_bitpanda_client(api_key)
async def clean_test(self):
await self.client.close()
async def test_get_time(self):
response = await self.client.get_time()
self.assertTrue(self.check_positive_response(response))
async def test_get_account_balances(self):
response = await self.client.get_account_balances()
self.assertTrue(self.check_positive_response(response))
async def test_get_account_orders(self):
response = await self.client.get_account_orders()
self.assertTrue(self.check_positive_response(response))
async def test_get_account_order(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.get_account_order("1")
e = cm.exception
self.assertEqual(e.status_code, 400)
async def test_create_market_order(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.create_market_order(Pair("BTC", "EUR"), enums.OrderSide.BUY, "100000")
e = cm.exception
self.assertEqual(e.status_code, 422)
async def test_create_limit_order(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.create_limit_order(Pair("BTC", "EUR"), enums.OrderSide.BUY, "10000", "1")
e = cm.exception
self.assertEqual(e.status_code, 422)
async def test_create_stop_limit_order(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.create_stop_limit_order(Pair("BTC", "EUR"), enums.OrderSide.BUY, "10000", "1", "1")
e = cm.exception
self.assertEqual(e.status_code, 422)
async def test_get_account_order_trades(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.get_account_order_trades("1")
e = cm.exception
self.assertEqual(e.status_code, 400)
async def test_get_account_trades(self):
response = await self.client.get_account_trades()
self.assertTrue(self.check_positive_response(response))
async def test_get_account_trade(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.get_account_trade("1")
e = cm.exception
self.assertEqual(e.status_code, 400)
async def test_get_account_trading_volume(self):
response = await self.client.get_account_trading_volume()
self.assertTrue(self.check_positive_response(response))
async def test_get_currencies(self):
response = await self.client.get_currencies()
self.assertTrue(self.check_positive_response(response))
    async def test_get_candlesticks(self):
response = await self.client.get_candlesticks(Pair("BTC", "EUR"), enums.TimeUnit.DAYS, "1",
datetime.datetime.now() - datetime.timedelta(days = 7),
datetime.datetime.now())
self.assertTrue(self.check_positive_response(response))
async def test_get_account_fees(self):
response = await self.client.get_account_fees()
self.assertTrue(self.check_positive_response(response))
async def test_get_instruments(self):
response = await self.client.get_instruments()
self.assertTrue(self.check_positive_response(response))
async def test_get_order_book(self):
response = await self.client.get_order_book(Pair("BTC", "EUR"))
self.assertTrue(self.check_positive_response(response))
async def test_get_fee_groups(self):
response = await self.client.get_fee_groups()
self.assertTrue(self.check_positive_response(response))
async def test_get_order_book2(self):
response = await self.client.get_order_book(Pair("BTC", "EUR"), level = "3", depth = "1")
self.assertTrue(self.check_positive_response(response))
async def test_get_market_tickers(self):
response = await self.client.get_market_tickers()
self.assertTrue(self.check_positive_response(response))
async def test_get_market_ticker(self):
response = await self.client.get_market_ticker(Pair('ETH', 'EUR'))
self.assertTrue(self.check_positive_response(response))
async def test_get_price_ticks(self):
response = await self.client.get_price_tick(Pair('ETH', 'EUR'))
self.assertTrue(self.check_positive_response(response))
async def test_get_price_ticks2(self):
response = await self.client.get_price_tick(Pair('ETH', 'EUR'),
from_timestamp = datetime.datetime.now() - datetime.timedelta(hours = 2),
to_timestamp = datetime.datetime.now())
self.assertTrue(self.check_positive_response(response))
async def test_create_deposit_crypto_address(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.create_deposit_crypto_address("ABC")
e = cm.exception
self.assertEqual(e.status_code, 404)
self.assertTrue(e.body['error'] == 'CURRENCY_NOT_FOUND')
async def test_get_deposit_crypto_address(self):
response = await self.client.get_deposit_crypto_address("BTC")
self.assertTrue(self.check_positive_response(response))
async def test_get_fiat_deposit_info(self):
response = await self.client.get_fiat_deposit_info()
self.assertTrue(self.check_positive_response(response))
async def test_withdraw_crypto(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.withdraw_crypto('ABC', '1.0', 'ABC')
e = cm.exception
self.assertEqual(e.status_code, 404)
self.assertTrue(e.body['error'] == 'CURRENCY_NOT_FOUND')
async def test_delete_auto_cancel_all_orders(self):
response = await self.client.delete_auto_cancel_all_orders()
self.assertTrue(self.check_positive_response(response))
    @unittest.skip("SERVICE_UNAVAILABLE")
async def test_withdraw_fiat(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.withdraw_fiat('ABC', '1.0', 'ABC')
e = cm.exception
self.assertEqual(e.status_code, 404)
async def test_get_deposits(self):
response = await self.client.get_deposits()
self.assertTrue(self.check_positive_response(response))
async def test_get_deposits2(self):
response = await self.client.get_deposits(currency = 'CHF')
self.assertTrue(self.check_positive_response(response))
async def test_get_bitpanda_deposits(self):
response = await self.client.get_bitpanda_deposits()
self.assertTrue(self.check_positive_response(response))
async def test_get_bitpanda_deposits2(self):
response = await self.client.get_bitpanda_deposits(currency = 'CHF')
self.assertTrue(self.check_positive_response(response))
async def test_get_withdrawals(self):
response = await self.client.get_withdrawals()
self.assertTrue(self.check_positive_response(response))
async def test_get_withdrawals2(self):
response = await self.client.get_withdrawals(currency = 'CHF')
self.assertTrue(self.check_positive_response(response))
async def test_get_bitpanda_withdrawals(self):
response = await self.client.get_bitpanda_withdrawals()
self.assertTrue(self.check_positive_response(response))
async def test_get_bitpanda_withdrawals2(self):
response = await self.client.get_bitpanda_withdrawals(currency = 'CHF')
self.assertTrue(self.check_positive_response(response))
    @unittest.skip("updates account settings")
async def test_toggle_best_fee_collection(self):
response = await self.client.toggle_best_fee_collection(True)
self.assertTrue(self.check_positive_response(response))
async def test_delete_account_order(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.delete_account_order(order_id = "1")
e = cm.exception
self.assertEqual(e.status_code, 404)
async def test_delete_account_order2(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.delete_account_order(client_id = "1")
e = cm.exception
self.assertEqual(e.status_code, 404)
async def test_order_update_order_id(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.update_order(amount = "10", order_id = "1")
e = cm.exception
self.assertEqual(e.status_code, 404)
async def test_order_update_client_id(self):
with self.assertRaises(BitpandaRestException) as cm:
await self.client.update_order(amount = "10", client_id = "1")
e = cm.exception
self.assertEqual(e.status_code, 404)
class BitpandaWs(CryptoXLibTest):
@classmethod
def initialize(cls) -> None:
cls.print_logs = True
cls.log_level = logging.DEBUG
async def init_test(self):
self.client = CryptoXLib.create_bitpanda_client(api_key)
async def test_price_subscription(self):
message_counter = WsMessageCounter()
self.client.compose_subscriptions([
PricesSubscription([Pair("BTC", "EUR")], callbacks = [message_counter.generate_callback(1)])
])
await self.assertWsMessageCount(message_counter)
async def test_account_subscription(self):
message_counter = WsMessageCounter()
self.client.compose_subscriptions([
AccountSubscription(callbacks = [message_counter.generate_callback(3)])
])
await self.assertWsMessageCount(message_counter)
async def test_order_book_subscription(self):
message_counter = WsMessageCounter()
self.client.compose_subscriptions([
OrderbookSubscription([Pair("BTC", "EUR")], "1", [message_counter.generate_callback(1)]),
])
await self.assertWsMessageCount(message_counter)
@unittest.skip
async def test_candlesticks_subscription(self):
message_counter = WsMessageCounter()
self.client.compose_subscriptions([
CandlesticksSubscription([CandlesticksSubscriptionParams(Pair("BTC", "EUR"), enums.TimeUnit.MINUTES, 1)],
callbacks = [message_counter.generate_callback(1)]),
])
await self.assertWsMessageCount(message_counter)
async def test_market_ticker_subscription(self):
message_counter = WsMessageCounter()
self.client.compose_subscriptions([
MarketTickerSubscription([Pair("BTC", "EUR")], callbacks = [message_counter.generate_callback(2)])
])
await self.assertWsMessageCount(message_counter)
async def test_multiple_subscription(self):
message_counter = WsMessageCounter()
self.client.compose_subscriptions([
MarketTickerSubscription([Pair("BTC", "EUR")], callbacks = [message_counter.generate_callback(2, name = "MarketTicker")]),
OrderbookSubscription([Pair("BTC", "EUR")], "1", callbacks = [message_counter.generate_callback(1, name = "Orderbook")])
])
await self.assertWsMessageCount(message_counter)
if __name__ == '__main__':
unittest.main() | python |
from django.contrib import admin
from django.contrib.gis.db import models as gis_models
from django.db import models as django_models
from mapwidgets.widgets import GooglePointFieldWidget
from . import models
class MyDate(admin.widgets.AdminSplitDateTime):
def __init__(self, attrs=None):
super().__init__(attrs)
def value_from_datadict(self, data, files, name):
value = super().value_from_datadict(data, files, name)
if not value[1]:
value[1] = '00:00:00' # default to 00:00:00
return value
class GenericAdmin(admin.ModelAdmin):
exclude = ('slug',)
formfield_overrides = {
django_models.DateTimeField: {'widget': MyDate},
gis_models.PointField: {"widget": GooglePointFieldWidget(attrs={'autocomplete': 'off'})},
}
class LocationNeedsModerationFilter(admin.SimpleListFilter):
title = 'needs moderation'
parameter_name = 'needs_moderation'
def lookups(self, request, model_admin):
return (
('address', 'Empty address'),
)
def queryset(self, request, queryset):
value = self.value()
if value == 'address':
return queryset.filter(address=None)
return queryset
@admin.register(models.Location)
class LocationAdmin(GenericAdmin):
ordering = ('name',)
list_display = ('name', 'address')
search_fields = ('name', 'address')
list_filter = (LocationNeedsModerationFilter,)
@admin.register(models.Organization)
class OrganizationAdmin(GenericAdmin):
    list_display = ('name',)
    ordering = ('name',)
    search_fields = ('name',)
class EventNeedsModerationFilter(admin.SimpleListFilter):
title = 'needs moderation'
parameter_name = 'needs_moderation'
def lookups(self, request, model_admin):
return (
('cost', 'Unknown cost'),
('location', 'No location given'),
('address', 'Inaccurate address'),
)
def queryset(self, request, queryset):
value = self.value()
if value == 'cost':
return queryset.filter(price=None)
if value == 'location':
return queryset.filter(location=None)
if value == 'address':
return queryset.filter(
location__isnull=False,
location__address=None,
)
return queryset
@admin.register(models.Event)
class EventAdmin(GenericAdmin):
ordering = ('-start',)
list_display = ('name', 'organization', 'location', 'start', 'event_format')
list_display_links = ('name', )
search_fields = ('name',)
list_filter = (EventNeedsModerationFilter, 'event_format', 'created_by',)
readonly_fields = ('created_by',)
filter_horizontal = ('tags', 'languages_spoken')
autocomplete_fields = ('organization', 'location')
def save_model(self, request, obj, form, change):
if getattr(obj, 'created_by', None) is None:
obj.created_by = request.user
obj.save()
admin.site.register(models.EventLike)
| python |
# coding: utf-8
import sys
import random
from hpopt.datasets.uci.car import load_corpus
from ..sklearn import SklearnClassifier, SklearnGrammar
from sklearn.model_selection import train_test_split
def main():
X, y = load_corpus(representation='onehot')
Xtrain, Xtest, ytrain, ytest = train_test_split(X, y, test_size=0.3)
random.seed(0)
for i in range(20):
classifier = SklearnClassifier(popsize=20, select=5, iters=100, timeout=300, global_timeout=3600, fitness_evaluations=5, verbose=True)
classifier.fit(Xtrain, ytrain)
with open("cars.log", "a") as fp:
fp.write("%.5f\n" % classifier.score(Xtest, ytest))
if __name__ == "__main__":
main()
| python |
from __future__ import print_function
from contextlib import contextmanager
from selecta.errors import NotSupportedError
from selecta.terminal import Keycodes
from selecta.renderers import MatchRenderer
from selecta.utils import is_printable, safeint
import re
__all__ = ["UI", "DumbTerminalUI", "SmartTerminalUI"]
class UI(object):
"""Abstract superclass for the different variants of the user interface
that we offer to the user."""
def __init__(self):
self.index = None
def dispose(self):
"""Notifies the user interface that it will not be needed any more."""
pass
def setup(self, index):
"""Prepares the user interface to be used with the given search index.
Args:
index (selecta.indexing.Index): the search index to be used by the
UI to search for hits matching a given query string
"""
self.index = index
def choose_item(self, initial_query=None):
"""Shows the user interface and lets the user choose an item.
Args:
initial_query (str or None): the initial search query to submit
automatically, or ``None`` if no such query should be
submitted
Returns:
Match: a match representing the item that the user has chosen,
or ``None`` if the user cancelled the selection.
"""
raise NotImplementedError
@contextmanager
def use(self, *args, **kwds):
try:
self.setup(*args, **kwds)
yield
finally:
self.dispose()
class TerminalUI(UI):
"""Abstract superclass for terminal-based UIs."""
def __init__(self, terminal, prompt="> ", renderer=None):
"""Constructor.
Args:
terminal (Terminal): the terminal that the UI will be created on
prompt (str): prompt to use before lines that require user input
renderer (Renderer or None): renderer to use for showing matches
on the UI. ``None`` means to use a default renderer created
by ``create_default_renderer()``..
"""
super(TerminalUI, self).__init__()
# If you are thinking about importing readline to add support for
# fancy editing, don't. Doing so might add extra ANSI escape
# sequences on some terminals with some versions of readline, which
# will screw up the output of selecta. This is apparently a readline
# bug:
#
# https://bugs.python.org/issue19884
self.hit_list_limit = 9
self.prompt = prompt
self.renderer = renderer or self.create_default_renderer()
self.terminal = terminal
def create_default_renderer(self):
"""Creates a default MatchRenderer_ that is used to show matches on
the console."""
return MatchRenderer()
class DumbTerminalUI(TerminalUI):
"""Dumb terminal-based UI class for ``selecta``. This UI class does not
require any special capabilities from the terminal (e.g., raw terminal
access)."""
def choose_item(self, initial_query=None):
matches = self.index.search(initial_query) if initial_query else None
while True:
self.show_matches(matches)
query = self.read_query()
if query is None:
return None
match_index = safeint(query, 0)
if match_index > 0 and match_index <= len(matches):
return matches[match_index-1]
matches = self.index.search(query)
def read_query(self):
"""Reads the query string or the index of the match chosen by the
user from the standard input.
Returns:
the query string or the index of the match chosen by the user,
or ``None`` if the user cancelled the selection by submitting EOF
"""
try:
return raw_input(self.prompt)
except KeyboardInterrupt:
return None
except EOFError:
return None
def show_matches(self, matches):
"""Shows the given list of matches on the standard output."""
matches = matches or []
limit = self.hit_list_limit
self.renderer.attach_to_terminal(self.terminal)
for index, match in enumerate(matches[:limit], 1):
print("{index}: {rendered_match}".format(
index=index,
rendered_match=self.renderer.render(match)
))
if len(matches) > limit:
print("...and {0} more".format(len(matches) - limit))
class SmartTerminalUI(TerminalUI):
"""Smart terminal-based UI class for ``selecta`` that provides a snappier
user experience but requires raw access to the terminal (which might not
be available on all platforms)."""
def __init__(self, terminal, prompt="> ", renderer=None):
super(SmartTerminalUI, self).__init__(terminal, prompt, renderer)
if not terminal.supports("LEFT", "RIGHT", "UP", "DOWN"):
raise NotSupportedError("SmartTerminalUI requires a terminal that "
"supports cursor movement")
self._query = None
self._ui_shown = False
self.reset()
def choose_item(self, initial_query=None):
self.query = initial_query or ''
while True:
try:
char = self.terminal.getch()
except KeyboardInterrupt:
return None
except EOFError:
return None
if Keycodes.is_enter_like(char):
return self.selected_item
elif Keycodes.is_backspace_like(char):
self.query = self.query[:-1]
elif char == Keycodes.CTRL_N or char == Keycodes.DOWN:
self.adjust_selected_index_by(1)
elif char == Keycodes.CTRL_P or char == Keycodes.UP:
self.adjust_selected_index_by(-1)
elif char == Keycodes.CTRL_U:
self.query = ''
elif char == Keycodes.CTRL_W:
self.query = re.sub("[^ ]* *$", "", self.query)
elif char == Keycodes.ESCAPE:
return None
elif is_printable(char):
self.query += char
else:
print("Unhandled char: {0!r}".format(char))
def dispose(self):
self.hide()
def hide(self):
"""Hides the UI. This function assumes that the cursor is currently
in the first row of the UI."""
if not self._ui_shown:
return
self._hide()
self._ui_shown = False
def _hide(self):
self.terminal.move_cursor(x=0)
self.terminal.clear_to_eos()
def adjust_selected_index_by(self, offset, wrap=True):
"""Adjusts the selected index with the given offset, optionally wrapping
around the result list.
Args:
offset (int): the offset to add to the selected index
wrap (bool): whether to wrap around the result list
"""
if self.selected_index is None:
return
new_index = int(self.selected_index) + offset
if wrap:
new_index = new_index % self.num_visible_matches
self.selected_index = new_index
@property
def num_visible_matches(self):
"""The number of matches currently visible on the UI."""
return min(len(self._best_matches), self.hit_list_limit)
@property
def query(self):
"""The current query string shown on the UI."""
return self._query
@query.setter
def query(self, value):
"""Sets the current query string shown on the UI."""
# TODO: optimize if the new query string has the old as a prefix
if value == self._query:
return
self._query = value
self.refresh()
def refresh(self):
"""Redraws the UI. Assumes that the cursor is in the row where the
drawing should start."""
num_lines = self.hit_list_limit + 1
if not self._ui_shown:
# Ensure that there are enough empty lines at the bottom of the
# terminal to show the UI
self.terminal.write("\n" * num_lines)
self.terminal.move_cursor(dy=-num_lines)
self._ui_shown = True
query = self.query
self._best_matches = self.index.search(query) if self.index else []
if self._best_matches and self._selected_index is None:
self._selected_index = 0
self._fix_selected_index()
with self.terminal.hidden_cursor():
# Draw the matches first
self.terminal.move_cursor(x=0, dy=1)
num_lines_printed = self._show_matches(self._best_matches)
self.terminal.clear_to_eos()
# Now draw the prompt and the query
self.terminal.move_cursor(x=0, dy=-num_lines_printed-1)
self.terminal.write(self.prompt, raw=True)
# TODO: truncate the query from the front if too wide
self.terminal.write(query, raw=True)
self.terminal.clear_to_eol()
def reset(self):
"""Resets the UI to the initial state (no query, no matches, no
selection)."""
self._best_matches = []
self._selected_index = None
self.query = ''
@property
def selected_index(self):
"""Returns the index of the currently selected item on the UI."""
return self._selected_index
@selected_index.setter
def selected_index(self, value):
if self._selected_index == value:
return
self._selected_index = value
self._fix_selected_index()
self.refresh()
@property
def selected_item(self):
"""The currently selected item on the UI."""
if self._selected_index is None or self._selected_index < 0:
return None
else:
return self._best_matches[self._selected_index]
def _fix_selected_index(self):
"""Ensures that the index of the selected item is within valid
bounds."""
if not self._best_matches:
self._selected_index = None
elif self._selected_index is not None:
            self._selected_index = max(
                0, min(self._selected_index, self.num_visible_matches - 1)
            )
def _show_matches(self, matches):
"""Shows the given list of matches on the terminal.
Returns:
int: the number of lines printed on the terminal
"""
matches = matches or []
limit = self.hit_list_limit
self.renderer.attach_to_terminal(self.terminal)
for index, match in enumerate(matches[:limit]):
selected = (index == self._selected_index)
rendered_match = self.renderer.render(match, selected=selected)
self.terminal.write(rendered_match, raw=True)
self.terminal.write("\n")
return min(len(matches), limit)
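# Usage sketch (assumption: ``terminal`` is a selecta terminal object and
# ``index`` is a selecta.indexing.Index instance; constructing these objects
# is outside the scope of this module, so they are simply passed in here).
def _example_choose_item(terminal, index, initial_query=None):
    """Pick an item from ``index`` using the dumb (line-based) UI."""
    ui = DumbTerminalUI(terminal)
    with ui.use(index):
        return ui.choose_item(initial_query)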
| python |