from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import numpy as np
from numpy import array, arange, float32, uint8
from numpy.random import rand
import os
import sys
import time
from BVchunker import *
from BVchunker.ND2Reader import ReadFromND2Vid
from BVchunker.TIFReader import ReadFrom2DTIFVid
from BVchunker.OMETIFReader import ReadFromOMETIFVid
from BVchunker.PIMSReader import ReadFromPIMSVid
import pandas as pd
import argparse
import apache_beam as beam
from apache_beam.io import ReadFromText
from apache_beam.io import WriteToText
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default='inputFolder/')
parser.add_argument('--output', type=str, default='outputFolder/')
known_args, pipeline_args = parser.parse_known_args()
pipeline_args.extend([
'--runner=DirectRunner',
'--setup_file=./setup.py',
'--max_num_workers=1'
])
pipeline_options = PipelineOptions(pipeline_args)
class ReduceVideosStats(beam.PTransform):
def __init__(self, kind, output):
super(ReduceVideosStats, self).__init__()
self.kind = kind
self.output = output
def expand(self, pvalue):
return (
pvalue
| 'strip chunk keys' >> beam.ParDo(stripChunks())
| 'recombine video' >> beam.CombinePerKey(combineStats())
| 'to JSON' >> beam.ParDo(toJSON())
| 'WriteFullOutput' >> WriteToText(
self.output,
shard_name_template='',
file_name_suffix='--'+self.kind+'.txt'))
with beam.Pipeline(options=pipeline_options) as pipeline:
testPIMS = (
pipeline
| 'Read PIMS' >> ReadFromPIMSVid(
os.path.join(known_args.input, '**.*'))
| 'PIMS Pipeline' >> ReduceVideosStats('pims', known_args.output))
testND2 = (
pipeline
| 'Read ND2' >> ReadFromND2Vid(
os.path.join(known_args.input, '**.nd2'))
| 'ND2 Pipeline' >> ReduceVideosStats('nd2', known_args.output))
test2DTIF = (
pipeline
| 'Read 2D TIF' >> ReadFrom2DTIFVid(
os.path.join(known_args.input, '**.tif'))
| '2D TIF Pipeline' >> ReduceVideosStats('tif', known_args.output))
testOMETIF = (
pipeline
        | 'Read OME TIF' >> ReadFromOMETIFVid(
os.path.join(known_args.input, '**.ome.tif'))
| 'OME TIF Pipeline' >> ReduceVideosStats('ome.tif', known_args.output))
| python |
"""Tests for the models of the careers app."""
from django.test import TestCase
from django.utils.text import slugify
from mixer.backend.django import mixer
class CareerPositionTestCase(TestCase):
"""Tests for the ``CareerPosition`` model."""
longMessage = True
def test_model(self):
instance = mixer.blend(
'careers.CareerPosition', title='Career 1', position=1)
self.assertTrue(instance.pk, msg='Should be able to save the obj')
def test_str(self):
testTitle = 'Test Career'
instance = mixer.blend(
'careers.CareerPosition', title=testTitle, position=1)
self.assertEqual(str(instance), testTitle, msg='Should return title')
def test_slug(self):
testTitle = 'test title'
instance = mixer.blend(
'careers.CareerPosition', title=testTitle, position=1)
slug_value = slugify(
u'{} {}'.format(instance.pk, testTitle))
self.assertEqual(
instance.slug(), slug_value, msg=(
'slug_value should match instance.slug()'))
| python |
import json
from app.main.model.database import User
from sanic.log import logger
from bson import ObjectId, json_util
from ..service.blacklist_service import save_token
from ..util.response import *
class Auth:
@staticmethod
async def login_user(data):
try:
# fetch the user data
user = await User.find_one({'email': data.get('email')})
if user:
if user.check_password(data.get('password')):
auth_token = User.encode_auth_token(str(user.pk))
if auth_token:
return response_message(SUCCESS, token=auth_token.decode())
return response_message(UNKNOWN_ERROR)
return response_message(PASSWORD_INCORRECT)
return response_message(USER_NOT_EXIST)
except Exception as e:
logger.exception(e)
return response_message(EAGAIN)
@staticmethod
async def logout_user(data):
auth_token = data
if auth_token:
payload = await User.decode_auth_token(auth_token)
if not isinstance(payload, str):
# mark the token as blacklisted
return await save_token(token=auth_token)
return response_message(TOKEN_ILLEGAL, payload)
return response_message(TOKEN_REQUIRED)
@staticmethod
async def get_logged_in_user(token):
if token:
payload = await User.decode_auth_token(token)
if not isinstance(payload, str):
user = await User.find_one({'_id': ObjectId(payload['sub'])})
if user:
return response_message(SUCCESS,
user_id=str(user.pk),
email=user.email,
username=user.name,
roles=user.roles,
registered_on=user.registered_on.timestamp() * 1000,
avatar=user.avatar,
introduction=user.introduction,
region=user.region
)
return response_message(USER_NOT_EXIST)
return response_message(TOKEN_ILLEGAL, payload)
return response_message(TOKEN_REQUIRED)
@staticmethod
async def is_user_authenticated(token):
ret = await Auth.get_logged_in_user(token)
if ret['code'] == SUCCESS.code:
return True
return False
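# --- Usage sketch (not part of the original module) ---
# A minimal, hypothetical Sanic route wiring these helpers up; the app object,
# route path, and request body shape below are assumptions.
#
# from sanic import Sanic, response
# app = Sanic("auth-demo")
#
# @app.post("/auth/login")
# async def login(request):
#     return response.json(await Auth.login_user(request.json))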
| python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Author: Hamilton Kibbe <[email protected]>
import pytest
from ..gerber_statements import *
from ..cam import FileSettings
def test_Statement_smoketest():
stmt = Statement("Test")
assert stmt.type == "Test"
stmt.to_metric()
assert "units=metric" in str(stmt)
stmt.to_inch()
assert "units=inch" in str(stmt)
stmt.to_metric()
stmt.offset(1, 1)
assert "type=Test" in str(stmt)
def test_FSParamStmt_factory():
""" Test FSParamStruct factory
"""
stmt = {"param": "FS", "zero": "L", "notation": "A", "x": "27"}
fs = FSParamStmt.from_dict(stmt)
assert fs.param == "FS"
assert fs.zero_suppression == "leading"
assert fs.notation == "absolute"
assert fs.format == (2, 7)
stmt = {"param": "FS", "zero": "T", "notation": "I", "x": "27"}
fs = FSParamStmt.from_dict(stmt)
assert fs.param == "FS"
assert fs.zero_suppression == "trailing"
assert fs.notation == "incremental"
assert fs.format == (2, 7)
def test_FSParamStmt():
""" Test FSParamStmt initialization
"""
param = "FS"
zeros = "trailing"
notation = "absolute"
fmt = (2, 5)
stmt = FSParamStmt(param, zeros, notation, fmt)
assert stmt.param == param
assert stmt.zero_suppression == zeros
assert stmt.notation == notation
assert stmt.format == fmt
def test_FSParamStmt_dump():
""" Test FSParamStmt to_gerber()
"""
stmt = {"param": "FS", "zero": "L", "notation": "A", "x": "27"}
fs = FSParamStmt.from_dict(stmt)
assert fs.to_gerber() == "%FSLAX27Y27*%"
stmt = {"param": "FS", "zero": "T", "notation": "I", "x": "25"}
fs = FSParamStmt.from_dict(stmt)
assert fs.to_gerber() == "%FSTIX25Y25*%"
settings = FileSettings(zero_suppression="leading", notation="absolute")
assert fs.to_gerber(settings) == "%FSLAX25Y25*%"
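# For reference (not part of the original tests): a dumped format spec such as
# "%FSLAX25Y25*%" reads as FS (format specification), L (leading zero
# suppression), A (absolute notation), and X25/Y25 (coordinates with 2 integer
# and 5 decimal digits).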
def test_FSParamStmt_string():
""" Test FSParamStmt.__str__()
"""
stmt = {"param": "FS", "zero": "L", "notation": "A", "x": "27"}
fs = FSParamStmt.from_dict(stmt)
assert str(fs) == "<Format Spec: 2:7 leading zero suppression absolute notation>"
stmt = {"param": "FS", "zero": "T", "notation": "I", "x": "25"}
fs = FSParamStmt.from_dict(stmt)
assert (
str(fs) == "<Format Spec: 2:5 trailing zero suppression incremental notation>"
)
def test_MOParamStmt_factory():
""" Test MOParamStruct factory
"""
stmts = [{"param": "MO", "mo": "IN"}, {"param": "MO", "mo": "in"}]
for stmt in stmts:
mo = MOParamStmt.from_dict(stmt)
assert mo.param == "MO"
assert mo.mode == "inch"
stmts = [{"param": "MO", "mo": "MM"}, {"param": "MO", "mo": "mm"}]
for stmt in stmts:
mo = MOParamStmt.from_dict(stmt)
assert mo.param == "MO"
assert mo.mode == "metric"
stmt = {"param": "MO"}
mo = MOParamStmt.from_dict(stmt)
assert mo.mode == None
stmt = {"param": "MO", "mo": "degrees kelvin"}
pytest.raises(ValueError, MOParamStmt.from_dict, stmt)
def test_MOParamStmt():
""" Test MOParamStmt initialization
"""
param = "MO"
mode = "inch"
stmt = MOParamStmt(param, mode)
assert stmt.param == param
for mode in ["inch", "metric"]:
stmt = MOParamStmt(param, mode)
assert stmt.mode == mode
def test_MOParamStmt_dump():
""" Test MOParamStmt to_gerber()
"""
stmt = {"param": "MO", "mo": "IN"}
mo = MOParamStmt.from_dict(stmt)
assert mo.to_gerber() == "%MOIN*%"
stmt = {"param": "MO", "mo": "MM"}
mo = MOParamStmt.from_dict(stmt)
assert mo.to_gerber() == "%MOMM*%"
def test_MOParamStmt_conversion():
stmt = {"param": "MO", "mo": "MM"}
mo = MOParamStmt.from_dict(stmt)
mo.to_inch()
assert mo.mode == "inch"
stmt = {"param": "MO", "mo": "IN"}
mo = MOParamStmt.from_dict(stmt)
mo.to_metric()
assert mo.mode == "metric"
def test_MOParamStmt_string():
""" Test MOParamStmt.__str__()
"""
stmt = {"param": "MO", "mo": "IN"}
mo = MOParamStmt.from_dict(stmt)
assert str(mo) == "<Mode: inches>"
stmt = {"param": "MO", "mo": "MM"}
mo = MOParamStmt.from_dict(stmt)
assert str(mo) == "<Mode: millimeters>"
def test_IPParamStmt_factory():
""" Test IPParamStruct factory
"""
stmt = {"param": "IP", "ip": "POS"}
ip = IPParamStmt.from_dict(stmt)
assert ip.ip == "positive"
stmt = {"param": "IP", "ip": "NEG"}
ip = IPParamStmt.from_dict(stmt)
assert ip.ip == "negative"
def test_IPParamStmt():
""" Test IPParamStmt initialization
"""
param = "IP"
for ip in ["positive", "negative"]:
stmt = IPParamStmt(param, ip)
assert stmt.param == param
assert stmt.ip == ip
def test_IPParamStmt_dump():
""" Test IPParamStmt to_gerber()
"""
stmt = {"param": "IP", "ip": "POS"}
ip = IPParamStmt.from_dict(stmt)
assert ip.to_gerber() == "%IPPOS*%"
stmt = {"param": "IP", "ip": "NEG"}
ip = IPParamStmt.from_dict(stmt)
assert ip.to_gerber() == "%IPNEG*%"
def test_IPParamStmt_string():
stmt = {"param": "IP", "ip": "POS"}
ip = IPParamStmt.from_dict(stmt)
assert str(ip) == "<Image Polarity: positive>"
stmt = {"param": "IP", "ip": "NEG"}
ip = IPParamStmt.from_dict(stmt)
assert str(ip) == "<Image Polarity: negative>"
def test_IRParamStmt_factory():
stmt = {"param": "IR", "angle": "45"}
ir = IRParamStmt.from_dict(stmt)
assert ir.param == "IR"
assert ir.angle == 45
def test_IRParamStmt_dump():
stmt = {"param": "IR", "angle": "45"}
ir = IRParamStmt.from_dict(stmt)
assert ir.to_gerber() == "%IR45*%"
def test_IRParamStmt_string():
stmt = {"param": "IR", "angle": "45"}
ir = IRParamStmt.from_dict(stmt)
assert str(ir) == "<Image Angle: 45>"
def test_OFParamStmt_factory():
""" Test OFParamStmt factory
"""
stmt = {"param": "OF", "a": "0.1234567", "b": "0.1234567"}
of = OFParamStmt.from_dict(stmt)
assert of.a == 0.1234567
assert of.b == 0.1234567
def test_OFParamStmt():
""" Test IPParamStmt initialization
"""
param = "OF"
for val in [0.0, -3.4567]:
stmt = OFParamStmt(param, val, val)
assert stmt.param == param
assert stmt.a == val
assert stmt.b == val
def test_OFParamStmt_dump():
""" Test OFParamStmt to_gerber()
"""
stmt = {"param": "OF", "a": "0.123456", "b": "0.123456"}
of = OFParamStmt.from_dict(stmt)
assert of.to_gerber() == "%OFA0.12345B0.12345*%"
def test_OFParamStmt_conversion():
stmt = {"param": "OF", "a": "2.54", "b": "25.4"}
of = OFParamStmt.from_dict(stmt)
of.units = "metric"
# No effect
of.to_metric()
assert of.a == 2.54
assert of.b == 25.4
of.to_inch()
assert of.units == "inch"
assert of.a == 0.1
assert of.b == 1.0
# No effect
of.to_inch()
assert of.a == 0.1
assert of.b == 1.0
stmt = {"param": "OF", "a": "0.1", "b": "1.0"}
of = OFParamStmt.from_dict(stmt)
of.units = "inch"
# No effect
of.to_inch()
assert of.a == 0.1
assert of.b == 1.0
of.to_metric()
assert of.units == "metric"
assert of.a == 2.54
assert of.b == 25.4
# No effect
of.to_metric()
assert of.a == 2.54
assert of.b == 25.4
def test_OFParamStmt_offset():
s = OFParamStmt("OF", 0, 0)
s.offset(1, 0)
assert s.a == 1.0
assert s.b == 0.0
s.offset(0, 1)
assert s.a == 1.0
assert s.b == 1.0
def test_OFParamStmt_string():
""" Test OFParamStmt __str__
"""
stmt = {"param": "OF", "a": "0.123456", "b": "0.123456"}
of = OFParamStmt.from_dict(stmt)
assert str(of) == "<Offset: X: 0.123456 Y: 0.123456 >"
def test_SFParamStmt_factory():
stmt = {"param": "SF", "a": "1.4", "b": "0.9"}
sf = SFParamStmt.from_dict(stmt)
assert sf.param == "SF"
assert sf.a == 1.4
assert sf.b == 0.9
def test_SFParamStmt_dump():
stmt = {"param": "SF", "a": "1.4", "b": "0.9"}
sf = SFParamStmt.from_dict(stmt)
assert sf.to_gerber() == "%SFA1.4B0.9*%"
def test_SFParamStmt_conversion():
stmt = {"param": "OF", "a": "2.54", "b": "25.4"}
of = SFParamStmt.from_dict(stmt)
of.units = "metric"
of.to_metric()
# No effect
assert of.a == 2.54
assert of.b == 25.4
of.to_inch()
assert of.units == "inch"
assert of.a == 0.1
assert of.b == 1.0
# No effect
of.to_inch()
assert of.a == 0.1
assert of.b == 1.0
stmt = {"param": "OF", "a": "0.1", "b": "1.0"}
of = SFParamStmt.from_dict(stmt)
of.units = "inch"
# No effect
of.to_inch()
assert of.a == 0.1
assert of.b == 1.0
of.to_metric()
assert of.units == "metric"
assert of.a == 2.54
assert of.b == 25.4
# No effect
of.to_metric()
assert of.a == 2.54
assert of.b == 25.4
def test_SFParamStmt_offset():
s = SFParamStmt("OF", 0, 0)
s.offset(1, 0)
assert s.a == 1.0
assert s.b == 0.0
s.offset(0, 1)
assert s.a == 1.0
assert s.b == 1.0
def test_SFParamStmt_string():
stmt = {"param": "SF", "a": "1.4", "b": "0.9"}
sf = SFParamStmt.from_dict(stmt)
assert str(sf) == "<Scale Factor: X: 1.4 Y: 0.9>"
def test_LPParamStmt_factory():
""" Test LPParamStmt factory
"""
stmt = {"param": "LP", "lp": "C"}
lp = LPParamStmt.from_dict(stmt)
assert lp.lp == "clear"
stmt = {"param": "LP", "lp": "D"}
lp = LPParamStmt.from_dict(stmt)
assert lp.lp == "dark"
def test_LPParamStmt_dump():
""" Test LPParamStmt to_gerber()
"""
stmt = {"param": "LP", "lp": "C"}
lp = LPParamStmt.from_dict(stmt)
assert lp.to_gerber() == "%LPC*%"
stmt = {"param": "LP", "lp": "D"}
lp = LPParamStmt.from_dict(stmt)
assert lp.to_gerber() == "%LPD*%"
def test_LPParamStmt_string():
""" Test LPParamStmt.__str__()
"""
stmt = {"param": "LP", "lp": "D"}
lp = LPParamStmt.from_dict(stmt)
assert str(lp) == "<Level Polarity: dark>"
stmt = {"param": "LP", "lp": "C"}
lp = LPParamStmt.from_dict(stmt)
assert str(lp) == "<Level Polarity: clear>"
def test_AMParamStmt_factory():
name = "DONUTVAR"
macro = """0 Test Macro. *
1,1,1.5,0,0*
20,1,0.9,0,0.45,12,0.45,0*
21,1,6.8,1.2,3.4,0.6,0*
22,1,6.8,1.2,0,0,0*
4,1,4,0.1,0.1,0.5,0.1,0.5,0.5,0.1,0.5,0.1,0.1,0*
5,1,8,0,0,8,0*
6,0,0,5,0.5,0.5,2,0.1,6,0*
7,0,0,7,6,0.2,0*
8,THIS IS AN UNSUPPORTED PRIMITIVE*
"""
s = AMParamStmt.from_dict({"param": "AM", "name": name, "macro": macro})
s.build()
assert len(s.primitives) == 10
assert isinstance(s.primitives[0], AMCommentPrimitive)
assert isinstance(s.primitives[1], AMCirclePrimitive)
assert isinstance(s.primitives[2], AMVectorLinePrimitive)
assert isinstance(s.primitives[3], AMCenterLinePrimitive)
assert isinstance(s.primitives[4], AMLowerLeftLinePrimitive)
assert isinstance(s.primitives[5], AMOutlinePrimitive)
assert isinstance(s.primitives[6], AMPolygonPrimitive)
assert isinstance(s.primitives[7], AMMoirePrimitive)
assert isinstance(s.primitives[8], AMThermalPrimitive)
assert isinstance(s.primitives[9], AMUnsupportPrimitive)
def test_AMParamStmt_conversion():
name = "POLYGON"
macro = "5,1,8,25.4,25.4,25.4,0*"
s = AMParamStmt.from_dict({"param": "AM", "name": name, "macro": macro})
s.build()
s.units = "metric"
# No effect
s.to_metric()
assert s.primitives[0].position == (25.4, 25.4)
assert s.primitives[0].diameter == 25.4
s.to_inch()
assert s.units == "inch"
assert s.primitives[0].position == (1.0, 1.0)
assert s.primitives[0].diameter == 1.0
# No effect
s.to_inch()
assert s.primitives[0].position == (1.0, 1.0)
assert s.primitives[0].diameter == 1.0
macro = "5,1,8,1,1,1,0*"
s = AMParamStmt.from_dict({"param": "AM", "name": name, "macro": macro})
s.build()
s.units = "inch"
# No effect
s.to_inch()
assert s.primitives[0].position == (1.0, 1.0)
assert s.primitives[0].diameter == 1.0
s.to_metric()
assert s.units == "metric"
assert s.primitives[0].position == (25.4, 25.4)
assert s.primitives[0].diameter == 25.4
# No effect
s.to_metric()
assert s.primitives[0].position == (25.4, 25.4)
assert s.primitives[0].diameter == 25.4
def test_AMParamStmt_dump():
name = "POLYGON"
macro = "5,1,8,25.4,25.4,25.4,0.0"
s = AMParamStmt.from_dict({"param": "AM", "name": name, "macro": macro})
s.build()
assert s.to_gerber() == "%AMPOLYGON*5,1,8,25.4,25.4,25.4,0.0*%"
# TODO - Store Equations and update on unit change...
s = AMParamStmt.from_dict(
{"param": "AM", "name": "OC8", "macro": "5,1,8,0,0,1.08239X$1,22.5"}
)
s.build()
# assert_equal(s.to_gerber(), '%AMOC8*5,1,8,0,0,1.08239X$1,22.5*%')
assert s.to_gerber() == "%AMOC8*5,1,8,0,0,0,22.5*%"
def test_AMParamStmt_string():
name = "POLYGON"
macro = "5,1,8,25.4,25.4,25.4,0*"
s = AMParamStmt.from_dict({"param": "AM", "name": name, "macro": macro})
s.build()
assert str(s) == "<Aperture Macro POLYGON: 5,1,8,25.4,25.4,25.4,0*>"
def test_ASParamStmt_factory():
stmt = {"param": "AS", "mode": "AXBY"}
s = ASParamStmt.from_dict(stmt)
assert s.param == "AS"
assert s.mode == "AXBY"
def test_ASParamStmt_dump():
stmt = {"param": "AS", "mode": "AXBY"}
s = ASParamStmt.from_dict(stmt)
assert s.to_gerber() == "%ASAXBY*%"
def test_ASParamStmt_string():
stmt = {"param": "AS", "mode": "AXBY"}
s = ASParamStmt.from_dict(stmt)
assert str(s) == "<Axis Select: AXBY>"
def test_INParamStmt_factory():
""" Test INParamStmt factory
"""
stmt = {"param": "IN", "name": "test"}
inp = INParamStmt.from_dict(stmt)
assert inp.name == "test"
def test_INParamStmt_dump():
""" Test INParamStmt to_gerber()
"""
stmt = {"param": "IN", "name": "test"}
inp = INParamStmt.from_dict(stmt)
assert inp.to_gerber() == "%INtest*%"
def test_INParamStmt_string():
stmt = {"param": "IN", "name": "test"}
inp = INParamStmt.from_dict(stmt)
assert str(inp) == "<Image Name: test>"
def test_LNParamStmt_factory():
""" Test LNParamStmt factory
"""
stmt = {"param": "LN", "name": "test"}
lnp = LNParamStmt.from_dict(stmt)
assert lnp.name == "test"
def test_LNParamStmt_dump():
""" Test LNParamStmt to_gerber()
"""
stmt = {"param": "LN", "name": "test"}
lnp = LNParamStmt.from_dict(stmt)
assert lnp.to_gerber() == "%LNtest*%"
def test_LNParamStmt_string():
stmt = {"param": "LN", "name": "test"}
lnp = LNParamStmt.from_dict(stmt)
assert str(lnp) == "<Level Name: test>"
def test_comment_stmt():
""" Test comment statement
"""
stmt = CommentStmt("A comment")
assert stmt.type == "COMMENT"
assert stmt.comment == "A comment"
def test_comment_stmt_dump():
""" Test CommentStmt to_gerber()
"""
stmt = CommentStmt("A comment")
assert stmt.to_gerber() == "G04A comment*"
def test_comment_stmt_string():
stmt = CommentStmt("A comment")
assert str(stmt) == "<Comment: A comment>"
def test_eofstmt():
""" Test EofStmt
"""
stmt = EofStmt()
assert stmt.type == "EOF"
def test_eofstmt_dump():
""" Test EofStmt to_gerber()
"""
stmt = EofStmt()
assert stmt.to_gerber() == "M02*"
def test_eofstmt_string():
assert str(EofStmt()) == "<EOF Statement>"
def test_quadmodestmt_factory():
""" Test QuadrantModeStmt.from_gerber()
"""
line = "G74*"
stmt = QuadrantModeStmt.from_gerber(line)
assert stmt.type == "QuadrantMode"
assert stmt.mode == "single-quadrant"
line = "G75*"
stmt = QuadrantModeStmt.from_gerber(line)
assert stmt.mode == "multi-quadrant"
def test_quadmodestmt_validation():
""" Test QuadrantModeStmt input validation
"""
line = "G76*"
pytest.raises(ValueError, QuadrantModeStmt.from_gerber, line)
pytest.raises(ValueError, QuadrantModeStmt, "quadrant-ful")
def test_quadmodestmt_dump():
""" Test QuadrantModeStmt.to_gerber()
"""
for line in ("G74*", "G75*"):
stmt = QuadrantModeStmt.from_gerber(line)
assert stmt.to_gerber() == line
def test_regionmodestmt_factory():
""" Test RegionModeStmt.from_gerber()
"""
line = "G36*"
stmt = RegionModeStmt.from_gerber(line)
assert stmt.type == "RegionMode"
assert stmt.mode == "on"
line = "G37*"
stmt = RegionModeStmt.from_gerber(line)
assert stmt.mode == "off"
def test_regionmodestmt_validation():
""" Test RegionModeStmt input validation
"""
line = "G38*"
pytest.raises(ValueError, RegionModeStmt.from_gerber, line)
pytest.raises(ValueError, RegionModeStmt, "off-ish")
def test_regionmodestmt_dump():
""" Test RegionModeStmt.to_gerber()
"""
for line in ("G36*", "G37*"):
stmt = RegionModeStmt.from_gerber(line)
assert stmt.to_gerber() == line
def test_unknownstmt():
""" Test UnknownStmt
"""
line = "G696969*"
stmt = UnknownStmt(line)
assert stmt.type == "UNKNOWN"
assert stmt.line == line
def test_unknownstmt_dump():
""" Test UnknownStmt.to_gerber()
"""
lines = ("G696969*", "M03*")
for line in lines:
stmt = UnknownStmt(line)
assert stmt.to_gerber() == line
def test_statement_string():
""" Test Statement.__str__()
"""
stmt = Statement("PARAM")
assert "type=PARAM" in str(stmt)
stmt.test = "PASS"
assert "test=PASS" in str(stmt)
assert "type=PARAM" in str(stmt)
def test_ADParamStmt_factory():
""" Test ADParamStmt factory
"""
stmt = {"param": "AD", "d": 0, "shape": "C"}
ad = ADParamStmt.from_dict(stmt)
assert ad.d == 0
assert ad.shape == "C"
stmt = {"param": "AD", "d": 1, "shape": "R"}
ad = ADParamStmt.from_dict(stmt)
assert ad.d == 1
assert ad.shape == "R"
stmt = {"param": "AD", "d": 1, "shape": "C", "modifiers": "1.42"}
ad = ADParamStmt.from_dict(stmt)
assert ad.d == 1
assert ad.shape == "C"
assert ad.modifiers == [(1.42,)]
stmt = {"param": "AD", "d": 1, "shape": "C", "modifiers": "1.42X"}
ad = ADParamStmt.from_dict(stmt)
assert ad.d == 1
assert ad.shape == "C"
assert ad.modifiers == [(1.42,)]
stmt = {"param": "AD", "d": 1, "shape": "R", "modifiers": "1.42X1.24"}
ad = ADParamStmt.from_dict(stmt)
assert ad.d == 1
assert ad.shape == "R"
assert ad.modifiers == [(1.42, 1.24)]
def test_ADParamStmt_conversion():
stmt = {"param": "AD", "d": 0, "shape": "C", "modifiers": "25.4X25.4,25.4X25.4"}
ad = ADParamStmt.from_dict(stmt)
ad.units = "metric"
# No effect
ad.to_metric()
assert ad.modifiers[0] == (25.4, 25.4)
assert ad.modifiers[1] == (25.4, 25.4)
ad.to_inch()
assert ad.units == "inch"
assert ad.modifiers[0] == (1.0, 1.0)
assert ad.modifiers[1] == (1.0, 1.0)
# No effect
ad.to_inch()
assert ad.modifiers[0] == (1.0, 1.0)
assert ad.modifiers[1] == (1.0, 1.0)
stmt = {"param": "AD", "d": 0, "shape": "C", "modifiers": "1X1,1X1"}
ad = ADParamStmt.from_dict(stmt)
ad.units = "inch"
# No effect
ad.to_inch()
assert ad.modifiers[0] == (1.0, 1.0)
assert ad.modifiers[1] == (1.0, 1.0)
ad.to_metric()
assert ad.modifiers[0] == (25.4, 25.4)
assert ad.modifiers[1] == (25.4, 25.4)
# No effect
ad.to_metric()
assert ad.modifiers[0] == (25.4, 25.4)
assert ad.modifiers[1] == (25.4, 25.4)
def test_ADParamStmt_dump():
stmt = {"param": "AD", "d": 0, "shape": "C"}
ad = ADParamStmt.from_dict(stmt)
assert ad.to_gerber() == "%ADD0C*%"
stmt = {"param": "AD", "d": 0, "shape": "C", "modifiers": "1X1,1X1"}
ad = ADParamStmt.from_dict(stmt)
assert ad.to_gerber() == "%ADD0C,1X1,1X1*%"
def test_ADParamStmt_string():
stmt = {"param": "AD", "d": 0, "shape": "C"}
ad = ADParamStmt.from_dict(stmt)
assert str(ad) == "<Aperture Definition: 0: circle>"
stmt = {"param": "AD", "d": 0, "shape": "R"}
ad = ADParamStmt.from_dict(stmt)
assert str(ad) == "<Aperture Definition: 0: rectangle>"
stmt = {"param": "AD", "d": 0, "shape": "O"}
ad = ADParamStmt.from_dict(stmt)
assert str(ad) == "<Aperture Definition: 0: obround>"
stmt = {"param": "AD", "d": 0, "shape": "test"}
ad = ADParamStmt.from_dict(stmt)
assert str(ad) == "<Aperture Definition: 0: test>"
def test_MIParamStmt_factory():
stmt = {"param": "MI", "a": 1, "b": 1}
mi = MIParamStmt.from_dict(stmt)
assert mi.a == 1
assert mi.b == 1
def test_MIParamStmt_dump():
stmt = {"param": "MI", "a": 1, "b": 1}
mi = MIParamStmt.from_dict(stmt)
assert mi.to_gerber() == "%MIA1B1*%"
stmt = {"param": "MI", "a": 1}
mi = MIParamStmt.from_dict(stmt)
assert mi.to_gerber() == "%MIA1B0*%"
stmt = {"param": "MI", "b": 1}
mi = MIParamStmt.from_dict(stmt)
assert mi.to_gerber() == "%MIA0B1*%"
def test_MIParamStmt_string():
stmt = {"param": "MI", "a": 1, "b": 1}
mi = MIParamStmt.from_dict(stmt)
assert str(mi) == "<Image Mirror: A=1 B=1>"
stmt = {"param": "MI", "b": 1}
mi = MIParamStmt.from_dict(stmt)
assert str(mi) == "<Image Mirror: A=0 B=1>"
stmt = {"param": "MI", "a": 1}
mi = MIParamStmt.from_dict(stmt)
assert str(mi) == "<Image Mirror: A=1 B=0>"
def test_coordstmt_ctor():
cs = CoordStmt("G04", 0.0, 0.1, 0.2, 0.3, "D01", FileSettings())
assert cs.function == "G04"
assert cs.x == 0.0
assert cs.y == 0.1
assert cs.i == 0.2
assert cs.j == 0.3
assert cs.op == "D01"
def test_coordstmt_factory():
stmt = {
"function": "G04",
"x": "0",
"y": "001",
"i": "002",
"j": "003",
"op": "D01",
}
cs = CoordStmt.from_dict(stmt, FileSettings())
assert cs.function == "G04"
assert cs.x == 0.0
assert cs.y == 0.1
assert cs.i == 0.2
assert cs.j == 0.3
assert cs.op == "D01"
def test_coordstmt_dump():
cs = CoordStmt("G04", 0.0, 0.1, 0.2, 0.3, "D01", FileSettings())
assert cs.to_gerber(FileSettings()) == "G04X0Y001I002J003D01*"
def test_coordstmt_conversion():
cs = CoordStmt("G71", 25.4, 25.4, 25.4, 25.4, "D01", FileSettings())
cs.units = "metric"
# No effect
cs.to_metric()
assert cs.x == 25.4
assert cs.y == 25.4
assert cs.i == 25.4
assert cs.j == 25.4
assert cs.function == "G71"
cs.to_inch()
assert cs.units == "inch"
assert cs.x == 1.0
assert cs.y == 1.0
assert cs.i == 1.0
assert cs.j == 1.0
assert cs.function == "G70"
# No effect
cs.to_inch()
assert cs.x == 1.0
assert cs.y == 1.0
assert cs.i == 1.0
assert cs.j == 1.0
assert cs.function == "G70"
cs = CoordStmt("G70", 1.0, 1.0, 1.0, 1.0, "D01", FileSettings())
cs.units = "inch"
# No effect
cs.to_inch()
assert cs.x == 1.0
assert cs.y == 1.0
assert cs.i == 1.0
assert cs.j == 1.0
assert cs.function == "G70"
cs.to_metric()
assert cs.x == 25.4
assert cs.y == 25.4
assert cs.i == 25.4
assert cs.j == 25.4
assert cs.function == "G71"
# No effect
cs.to_metric()
assert cs.x == 25.4
assert cs.y == 25.4
assert cs.i == 25.4
assert cs.j == 25.4
assert cs.function == "G71"
def test_coordstmt_offset():
c = CoordStmt("G71", 0, 0, 0, 0, "D01", FileSettings())
c.offset(1, 0)
assert c.x == 1.0
assert c.y == 0.0
assert c.i == 1.0
assert c.j == 0.0
c.offset(0, 1)
assert c.x == 1.0
assert c.y == 1.0
assert c.i == 1.0
assert c.j == 1.0
def test_coordstmt_string():
cs = CoordStmt("G04", 0, 1, 2, 3, "D01", FileSettings())
assert (
str(cs) == "<Coordinate Statement: Fn: G04 X: 0 Y: 1 I: 2 J: 3 Op: Lights On>"
)
cs = CoordStmt("G04", None, None, None, None, "D02", FileSettings())
assert str(cs) == "<Coordinate Statement: Fn: G04 Op: Lights Off>"
cs = CoordStmt("G04", None, None, None, None, "D03", FileSettings())
assert str(cs) == "<Coordinate Statement: Fn: G04 Op: Flash>"
cs = CoordStmt("G04", None, None, None, None, "TEST", FileSettings())
assert str(cs) == "<Coordinate Statement: Fn: G04 Op: TEST>"
def test_aperturestmt_ctor():
ast = ApertureStmt(3, False)
assert ast.d == 3
assert ast.deprecated == False
ast = ApertureStmt(4, True)
assert ast.d == 4
assert ast.deprecated == True
ast = ApertureStmt(4, 1)
assert ast.d == 4
assert ast.deprecated == True
ast = ApertureStmt(3)
assert ast.d == 3
assert ast.deprecated == False
def test_aperturestmt_dump():
ast = ApertureStmt(3, False)
assert ast.to_gerber() == "D3*"
ast = ApertureStmt(3, True)
assert ast.to_gerber() == "G54D3*"
assert str(ast) == "<Aperture: 3>"
| python |
import torch.nn as nn
import torch.nn.functional as F
import curves
__all__ = ['WideResNet28x10']
def conv3x3(in_planes, out_planes, stride=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=True)
def conv3x3curve(in_planes, out_planes, fix_points, stride=1):
return curves.Conv2d(in_planes, out_planes, kernel_size=3, fix_points=fix_points, stride=stride,
padding=1, bias=True)
class WideBasic(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, stride=1):
super(WideBasic, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=True)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True),
)
def forward(self, x):
out = self.dropout(self.conv1(F.relu(self.bn1(x))))
out = self.conv2(F.relu(self.bn2(out)))
out += self.shortcut(x)
return out
class WideBasicCurve(nn.Module):
def __init__(self, in_planes, planes, dropout_rate, fix_points, stride=1):
super(WideBasicCurve, self).__init__()
self.bn1 = curves.BatchNorm2d(in_planes, fix_points=fix_points)
self.conv1 = curves.Conv2d(in_planes, planes, kernel_size=3, padding=1, bias=True,
fix_points=fix_points)
self.dropout = nn.Dropout(p=dropout_rate)
self.bn2 = curves.BatchNorm2d(planes, fix_points=fix_points)
self.conv2 = curves.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1,
bias=True, fix_points=fix_points)
self.shortcut = None
if stride != 1 or in_planes != planes:
self.shortcut = curves.Conv2d(in_planes, planes, kernel_size=1, stride=stride,
bias=True, fix_points=fix_points)
def forward(self, x, coeffs_t):
out = self.dropout(self.conv1(F.relu(self.bn1(x, coeffs_t)), coeffs_t))
out = self.conv2(F.relu(self.bn2(out, coeffs_t)), coeffs_t)
residual = x
if self.shortcut is not None:
residual = self.shortcut(x, coeffs_t)
out += residual
return out
class WideResNetBase(nn.Module):
def __init__(self, num_classes, depth=28, widen_factor=10, dropout_rate=0.):
super(WideResNetBase, self).__init__()
self.in_planes = 16
assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
n = (depth - 4) / 6
k = widen_factor
nstages = [16, 16 * k, 32 * k, 64 * k]
self.conv1 = conv3x3(3, nstages[0])
self.layer1 = self._wide_layer(WideBasic, nstages[1], n, dropout_rate, stride=1)
self.layer2 = self._wide_layer(WideBasic, nstages[2], n, dropout_rate, stride=2)
self.layer3 = self._wide_layer(WideBasic, nstages[3], n, dropout_rate, stride=2)
self.bn1 = nn.BatchNorm2d(nstages[3], momentum=0.9)
self.linear = nn.Linear(nstages[3], num_classes)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride):
strides = [stride] + [1] * int(num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, dropout_rate, stride))
self.in_planes = planes
return nn.Sequential(*layers)
def forward(self, x):
out = self.conv1(x)
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.relu(self.bn1(out))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out)
return out
class WideResNetCurve(nn.Module):
def __init__(self, num_classes, fix_points, depth=28, widen_factor=10, dropout_rate=0.):
super(WideResNetCurve, self).__init__()
self.in_planes = 16
assert ((depth - 4) % 6 == 0), 'Wide-resnet depth should be 6n+4'
n = (depth - 4) / 6
k = widen_factor
nstages = [16, 16 * k, 32 * k, 64 * k]
self.conv1 = conv3x3curve(3, nstages[0], fix_points=fix_points)
self.layer1 = self._wide_layer(WideBasicCurve, nstages[1], n, dropout_rate, stride=1,
fix_points=fix_points)
self.layer2 = self._wide_layer(WideBasicCurve, nstages[2], n, dropout_rate, stride=2,
fix_points=fix_points)
self.layer3 = self._wide_layer(WideBasicCurve, nstages[3], n, dropout_rate, stride=2,
fix_points=fix_points)
self.bn1 = curves.BatchNorm2d(nstages[3], momentum=0.9, fix_points=fix_points)
self.linear = curves.Linear(nstages[3], num_classes, fix_points=fix_points)
def _wide_layer(self, block, planes, num_blocks, dropout_rate, stride, fix_points):
strides = [stride] + [1] * int(num_blocks - 1)
layers = []
for stride in strides:
layers.append(
block(self.in_planes, planes, dropout_rate, fix_points=fix_points, stride=stride)
)
self.in_planes = planes
return nn.ModuleList(layers)
def forward(self, x, coeffs_t):
out = self.conv1(x, coeffs_t)
for block in self.layer1:
out = block(out, coeffs_t)
for block in self.layer2:
out = block(out, coeffs_t)
for block in self.layer3:
out = block(out, coeffs_t)
out = F.relu(self.bn1(out, coeffs_t))
out = F.avg_pool2d(out, 8)
out = out.view(out.size(0), -1)
out = self.linear(out, coeffs_t)
return out
class WideResNet28x10:
base = WideResNetBase
curve = WideResNetCurve
kwargs = {'depth': 28, 'widen_factor': 10}
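# --- Usage sketch (not part of the original module) ---
# A minimal smoke test of the base (non-curve) network on a CIFAR-sized input;
# the batch size and class count below are assumptions.
if __name__ == '__main__':
    import torch
    model = WideResNet28x10.base(num_classes=10, **WideResNet28x10.kwargs)
    x = torch.randn(2, 3, 32, 32)
    print(model(x).shape)  # expected: torch.Size([2, 10])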
| python |
import numpy as np
from sklearn.preprocessing import RobustScaler
def normalize(_A, mask=None, norm_0mean=False):
"""Norm A (MRI-T2): filtering top 0.1% values by assigning them to the top_thr (the value at the 99th percentage)
then map values to [0 1] range by dividing by the max intensity within the prostate for each slide"""
thr = .01 # .01
mask = np.ones_like(_A) if mask is None else mask
if not norm_0mean:
x = np.zeros_like(_A)
for c in range(_A.shape[-1]):
for i in range(_A.shape[0]):
tmp = _A[i, ..., c][mask[i, ..., 0] > 0].reshape((-1, 1))
tmp_n = RobustScaler().fit_transform(X=tmp)[..., 0]
tmp_n1 = x[i, ..., c]
tmp_n1[np.where(mask[i, ..., 0] == 1)] = tmp_n
x[i, ..., c] = tmp_n1
_A = x.copy()
else:
x = np.zeros_like(_A)
for c in range(_A.shape[-1]):
mu = np.asarray([_A[i, ..., c][mask[i, ..., 0] == 1].mean() for i in range(_A.shape[0])])
sigma = np.asarray([_A[i, ..., c][mask[i, ..., 0] == 1].std() for i in range(_A.shape[0])])
_A[..., c] = ((_A[..., c] - mu[..., np.newaxis, np.newaxis]) / sigma[..., np.newaxis, np.newaxis]) * \
mask[..., 0]
    return _A
| python |
"""
data:{coauthorship, coauthor}
dataset:{cora, citeseer, pubmed}
"""
problem = 'coauthorship'
dataset = 'cora'
datasetroot = '../data/' + problem + '/' + dataset + '/'
"""
Configuration of the Network
num_class = {cora: 7, citeseer: }
"""
hidden_dim = 400
out_dim = 200
num_class = 7
"""
For training
"""
update_ratio = 0.004
seed = None
refit = 0
| python |
import torch
import torch.nn.functional as F
from torch.distributions import Categorical
import numpy as np
from Gym.models.QLearningBase import QLearningBase
class QLearning(QLearningBase):
def __init__(
self,
device,
n_actions,
n_features,
learning_rate=0.01,
gamma=0.9,
tau=0.001,
updateTargetFreq=10000,
epsilonStart=1,
epsilonEnd=0.2,
epsilonDecayFreq=1000,
mSize=10000,
batchSize=200,
startTrainSize=100,
transforms=None,
):
netEval = Net(n_features, n_actions)
netTarget = Net(n_features, n_actions)
        # The optimizer is the training tool:
        # pass in all of the eval network's parameters and the learning rate.
optimizer = torch.optim.Adam(netEval.parameters(), lr=learning_rate)
super().__init__(
device=device,
netEval=netEval,
netTarget=netTarget,
optimizer=optimizer,
n_actions=n_actions,
learning_rate=learning_rate,
gamma=gamma,
tau=tau,
updateTargetFreq=updateTargetFreq,
epsilonStart=epsilonStart,
epsilonEnd=epsilonEnd,
epsilonDecayFreq=epsilonDecayFreq,
mSize=mSize,
batchSize=batchSize,
startTrainSize=startTrainSize,
transforms=transforms,
)
def choose_action(self, state):
action = super().choose_action(state)
return action, action
class Net(torch.nn.Module):
def __init__(self, img_shape, n_actions):
super(Net, self).__init__()
        # Define the form of each layer.
in_channels = img_shape[2]
h = img_shape[0]
w = img_shape[1]
kernel_size = 8
stride = 4
padding = 0
self.conv1 = torch.nn.Conv2d(
in_channels, 32, kernel_size=kernel_size, stride=stride, padding=padding
)
h = (h + padding * 2 - kernel_size) // stride + 1
w = (w + padding * 2 - kernel_size) // stride + 1
# self.pool1 = torch.nn.MaxPool2d(2) # 32 x (h-2)//2 x (w-2)//2
# h //= 2
# w //= 2
kernel_size = 4
stride = 2
padding = 0
self.conv2 = torch.nn.Conv2d(
32, 64, kernel_size=kernel_size, stride=stride, padding=padding
)
h = (h + padding * 2 - kernel_size) // stride + 1
w = (w + padding * 2 - kernel_size) // stride + 1
kernel_size = 3
stride = 1
padding = 0
self.conv3 = torch.nn.Conv2d(
64, 64, kernel_size=kernel_size, stride=stride, padding=padding
)
h = (h + padding * 2 - kernel_size) // stride + 1
w = (w + padding * 2 - kernel_size) // stride + 1
# self.pool2 = torch.nn.MaxPool2d(2) # 64 x ((h-2)//2-2)//2 x ((w-2)//2-2)//2
# h //= 2
# w //= 2
self.fc1 = torch.nn.Linear(64 * h * w, 512)
self.fc2 = torch.nn.Linear(512, n_actions)
# self.dropout = torch.nn.Dropout(p=0.5)
    def forward(self, x):  # this also serves as the Module's forward pass
        # Forward-propagate the input; the network produces the output values.
# x = self.pool1(F.relu(self.conv1(x)))
# x = self.pool2(F.relu(self.conv2(x)))
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = x.view(x.shape[0], -1)
# x = self.dropout(x)
x = F.relu(self.fc1(x))
# x = self.dropout(x)
x = self.fc2(x)
return x
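# --- Shape walk-through (not part of the original module) ---
# For a hypothetical 84x84 stacked-frame input (img_shape=(84, 84, 4), a common
# Atari preprocessing size), the formula h = (h + 2*padding - kernel) // stride + 1
# used above gives 84 -> 20 (conv1), 20 -> 9 (conv2), 9 -> 7 (conv3), so fc1
# sees 64 * 7 * 7 = 3136 input features.
if __name__ == "__main__":
    net = Net((84, 84, 4), n_actions=6)  # n_actions=6 is an arbitrary example
    frames = torch.randn(1, 4, 84, 84)
    print(net(frames).shape)  # expected: torch.Size([1, 6])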
| python |
'''
Author: jianzhnie
Date: 2021-12-28 10:13:05
LastEditTime: 2021-12-28 10:20:24
LastEditors: jianzhnie
Description:
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Optimizer
KD_loss = nn.KLDivLoss(reduction='batchmean')  # KLDivLoss expects log-probs as input and probs as target
def kd_step(teacher: nn.Module, student: nn.Module, temperature: float,
            inputs: torch.Tensor, optimizer: Optimizer):
teacher.eval()
student.train()
with torch.no_grad():
logits_t = teacher(inputs=inputs)
logits_s = student(inputs=inputs)
loss = KD_loss(input=F.log_softmax(logits_s / temperature, dim=-1),
                   target=F.softmax(logits_t / temperature, dim=-1))
loss.backward()
optimizer.step()
optimizer.zero_grad()
return loss
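# --- Usage sketch (not part of the original module) ---
# One distillation step with toy models; the layer sizes, temperature, and
# optimizer choice are assumptions. The toy models take an `inputs` keyword to
# match how kd_step calls them.
if __name__ == '__main__':
    class _ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.fc = nn.Linear(10, 5)

        def forward(self, inputs):
            return self.fc(inputs)

    teacher_net, student_net = _ToyModel(), _ToyModel()
    opt = torch.optim.SGD(student_net.parameters(), lr=0.1)
    batch = torch.randn(4, 10)
    print(kd_step(teacher_net, student_net, temperature=2.0,
                  inputs=batch, optimizer=opt).item())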
| python |
import pandas as pd
import os
input = __import__('sys').stdin.readline
raw_data = []
for _ in range(44):
tmp = ["", ""]
tmp[0] = float(input())
tmp[1] = input().strip()
raw_data.append(tmp)
for x in raw_data:
print(x)
print(len(raw_data))
try:
    dir_path = os.path.dirname(os.path.abspath(__file__)) + "/data/"  # __file__ is a file, so take its directory first
file_name = "task3.xlsx"
df = pd.DataFrame.from_records(raw_data)
if not os.path.isdir(dir_path):
os.mkdir(dir_path)
df.to_excel(dir_path + file_name, sheet_name="Sheet1", index=False)
print("fin")
except OSError as e:
    print(e)
| python |
import pathlib
import argparse
import shutil
import pytest
import numpy as np
from PIL import Image
from src import image_averager
@pytest.fixture
def test_image_dir(tmpdir):
test_images = pathlib.Path(__file__).parent / 'data' / 'test_images'
target = pathlib.Path(tmpdir) / 'images'
shutil.copytree(test_images, target)
return target
@pytest.fixture
def averaged_image():
return pathlib.Path(__file__).parent / 'data' / 'expected_result.png'
def test_existing_directory(tmpdir):
assert image_averager.existing_directory(str(tmpdir)) == pathlib.Path(tmpdir)
with pytest.raises(argparse.ArgumentTypeError):
image_averager.existing_directory('/not/a/path')
def test_build_average_image(test_image_dir, tmpdir, averaged_image):
result = image_averager.build_average_image(test_image_dir)
# outpath = pathlib.Path(tmpdir) / 'result.png'
# result.save(outpath)
expected = np.array(Image.open(averaged_image))
np.testing.assert_array_equal(expected, np.array(result))
def test_cli(test_image_dir, tmpdir, capsys):
outpath = pathlib.Path(tmpdir / "result.png")
args = f'-s {test_image_dir} -o {outpath} -l{"DEBUG"}'
image_averager.main(args.split())
out, err = capsys.readouterr()
assert not out
assert err == 'INFO averaging 3 images.\n'
assert outpath.exists()
| python |
from errors import *
from parse import *
from nodes import *
from func import *
from loop import *
from ifelse import *
class FlatNode(object):
pass
class Code(FlatNode):
def __init__(self, words):
self.words = words
class GoTo(FlatNode):
def __init__(self, index):
self.index = index
class Branch(FlatNode):
"""Branch-If-Zero object"""
def __init__(self, index):
self.index = index
class LabdaNode(FlatNode):
def __init__(self, index):
self.index = index
class Marker(object):
pass
class SingleInstruction(object):
def __init__(self, opcode, ref):
self.opcode = opcode
self.ref = ref
def __repr__(self):
return str(self.opcode) + ' ' + str(self.ref)
def flatten(tree, acc=None):
if acc is None:
acc = []
if isinstance(tree, list):
for branch in tree:
flatten(branch, acc)
return acc
for branch in tree.children:
if isinstance(branch, list):
for b in branch:
flatten(b, acc)
if isinstance(branch, Statement):
acc.append(SingleInstruction('LINE_NUMBER', branch.linenr))
if isinstance(branch, Word):
if acc and isinstance(acc[-1], Code):
acc[-1].words.append(branch)
else:
acc.append(Code([branch]))
elif isinstance(branch, WordList):
if isinstance(branch, Line):
acc.append(SingleInstruction('LINE_NUMBER', branch.linenr))
if acc and isinstance(acc[-1], Code):
acc[-1].words.extend(branch.children)
else:
acc.append(Code(list(branch.children)))
elif isinstance(branch, LabdaStatement):
m = Marker()
acc.append(LabdaNode(m))
for argument in branch.arguments:
acc.append(SingleInstruction('SET_LOCAL', argument))
flatten(branch.body, acc)
acc.append(SingleInstruction('RETURN', 0))
acc.append(m)
if isinstance(branch, LocalFuncStatement):
acc.append(SingleInstruction('SET_LOCAL', branch.name))
elif isinstance(branch, FuncStatement):
name = branch.name
if '!' in name:
if name.count('!') > 1 or name.endswith('!'):
raise DejaSyntaxError('methods need exactly one method name')
if name.startswith('!'):
name = 'eva' + name
base, method = name.split('!')
acc.append(SingleInstruction('PUSH_LITERAL', method))
acc.append(SingleInstruction('PUSH_WORD', base))
acc.append(SingleInstruction('SET_DICT', 0))
else:
acc.append(SingleInstruction('SET_GLOBAL', name))
elif isinstance(branch, WhileStatement):
m1 = Marker()
m2 = Marker()
acc.append(SingleInstruction('ENTER_SCOPE', 0))
acc.append(m1)
flatten(branch.conditionclause, acc)
acc.append(Branch(m2))
flatten(branch.body, acc)
acc.append(GoTo(m1))
acc.append(m2)
acc.append(SingleInstruction('LEAVE_SCOPE', 0))
elif isinstance(branch, ForStatement):
m1 = Marker()
m2 = Marker()
flatten(branch.forclause, acc)
acc.append(m1)
acc.append(SingleInstruction('DUP', 0))
acc.append(Branch(m2))
acc.append(SingleInstruction('ENTER_SCOPE', 0))
acc.append(SingleInstruction('SET_LOCAL', '#f'))
acc.append(SingleInstruction('SET_LOCAL', '#h'))
acc.append(SingleInstruction('SET_LOCAL', branch.countername))
flatten(branch.body, acc)
acc.append(SingleInstruction('PUSH_WORD', '#h'))
acc.append(SingleInstruction('PUSH_WORD', '#f'))
acc.append(SingleInstruction('LEAVE_SCOPE', 0))
acc.append(GoTo(m1))
acc.append(m2)
acc.append(SingleInstruction('DROP', 0))
elif isinstance(branch, RepeatStatement):
m1 = Marker()
m2 = Marker()
flatten(branch.forclause, acc)
acc.append(SingleInstruction('ENTER_SCOPE', 0))
acc.append(SingleInstruction('SET_LOCAL', '#r'))
acc.append(m1)
acc.append(SingleInstruction('PUSH_WORD', '#r'))
acc.append(Branch(m2))
flatten(branch.body, acc)
acc.append(SingleInstruction('PUSH_WORD', '#r'))
acc.append(SingleInstruction('PUSH_WORD', '--'))
acc.append(SingleInstruction('SET_LOCAL', '#r'))
acc.append(GoTo(m1))
acc.append(m2)
acc.append(SingleInstruction('LEAVE_SCOPE', 0))
elif isinstance(branch, IfStatement):
m_end = Marker()
m = Marker()
acc.append(SingleInstruction('ENTER_SCOPE', 0))
flatten(branch.ifclause.conditionclause, acc)
acc.append(Branch(m))
flatten(branch.ifclause, acc)
acc.append(GoTo(m_end))
acc.append(m)
for elseifclause in branch.elseifclauses:
m = Marker()
flatten(elseifclause.conditionclause, acc)
acc.append(Branch(m))
flatten(elseifclause, acc)
acc.append(GoTo(m_end))
acc.append(m)
if branch.elseclause:
flatten(branch.elseclause, acc)
acc.append(m_end)
acc.append(SingleInstruction('LEAVE_SCOPE', 0))
elif isinstance(branch, TryStatement):
m_body = Marker()
m_end = Marker()
acc.append(SingleInstruction('ENTER_ERRHAND', m_body))
for handler in branch.catchclauses:
h_start = Marker()
h_end = Marker()
for ex in handler.exceptions:
acc.extend([
SingleInstruction('DUP', 0),
SingleInstruction('PUSH_LITERAL', ex),
SingleInstruction('JMPEQ', h_start),
])
acc.pop()
acc.extend([
SingleInstruction('JMPNE', h_end),
h_start,
SingleInstruction('DROP', 0),
])
flatten(handler, acc)
acc.extend([GoTo(m_end), h_end])
acc.append(SingleInstruction('RERAISE', 0))
acc.append(m_body)
flatten(branch.tryclause, acc)
acc.append(SingleInstruction('LEAVE_ERRHAND', 0))
acc.append(m_end)
return acc
| python |
# Generated by Django 3.0.4 on 2020-03-17 17:59
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('logomaker', '0002_category_image'),
]
operations = [
migrations.CreateModel(
name='logo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('logoname', models.CharField(max_length=100)),
('logoimage', models.ImageField(default='mypic', upload_to='upload/')),
],
),
migrations.RemoveField(
model_name='category',
name='image',
),
]
| python |
# -*- coding: utf-8 -*-
"""
TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-节点管理(BlueKing-BK-NODEMAN) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at https://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
from django.utils.translation import ugettext_lazy as _
from apps.exceptions import AppBaseException
class BackendBaseException(AppBaseException):
MODULE_CODE = 2000
class UploadPackageNotExistError(BackendBaseException):
MESSAGE = _("文件包不存在")
ERROR_CODE = 1
class JobNotExistError(BackendBaseException):
MESSAGE = _("任务不存在")
ERROR_CODE = 2
class StopDebugError(BackendBaseException):
MESSAGE = _("停止调试失败")
ERROR_CODE = 3
class PluginNotExistError(BackendBaseException):
MESSAGE = _("插件包不存在")
MESSAGE_TPL = _("插件包[{plugin_name}-{os_type}-{cpu_arch}]不存在")
ERROR_CODE = 4
class PackageStatusOpError(BackendBaseException):
MESSAGE = _("插件包状态变更错误")
ERROR_CODE = 5
class PackageVersionValidationError(BackendBaseException):
MESSAGE = _("插件包版本校验错误")
ERROR_CODE = 6
class GenCommandsError(BackendBaseException):
MESSAGE = _("安装命令生成失败")
ERROR_CODE = 7
class GseEncryptedError(BackendBaseException):
MESSAGE = _("GSE敏感信息加密失败")
ERROR_CODE = 8
class PluginParseError(BackendBaseException):
MESSAGE = _("插件解析错误")
ERROR_CODE = 9
class CreatePackageRecordError(BackendBaseException):
MESSAGE = _("归档插件包信息错误")
ERROR_CODE = 10
| python |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, emperor development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE.md, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import division
from skbio import OrdinationResults
from skbio.io import FileFormatError, IOSourceError
from emperor.qiime_backports.parse import parse_coords as qiime_parse_coords
def parse_coords(lines):
"""Parse skbio's ordination results file into coords, labels, eigvals,
pct_explained.
Returns:
- list of sample labels in order
- array of coords (rows = samples, cols = axes in descending order)
- list of eigenvalues
- list of percent variance explained
For the file format check
skbio.stats.ordination.OrdinationResults.read
Strategy: read the file using skbio's parser and return the objects
we want
"""
try:
pcoa_results = OrdinationResults.read(lines)
return (pcoa_results.samples.index.tolist(),
pcoa_results.samples.values, pcoa_results.eigvals.values,
pcoa_results.proportion_explained.values)
except (FileFormatError, IOSourceError):
try:
lines.seek(0)
except AttributeError:
# looks like we have a list of lines, not a file-like object
pass
return qiime_parse_coords(lines)
| python |
rounds = ['chicken', 'ribs', 'pork', 'brisket']
class Table:
def __init__(self, id=1, limit=6):
self.id = id
self.limit = limit
self.boxes = {
'chicken': [],
'ribs': [],
'pork': [],
'brisket': [],
}
def add_box(self, round, box):
self.boxes[round].append(box)
def has_box(self, box):
return any(box in self.boxes[rnd] for rnd in rounds)
def can_take(self, round, box):
return not self.has_box(box) and self.limit > len(self.boxes[round])
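# --- Usage sketch (not part of the original module) ---
# A small, hypothetical example of seating boxes at a table.
if __name__ == '__main__':
    table = Table(id=1, limit=2)
    table.add_box('chicken', 'box-17')
    print(table.has_box('box-17'))               # True
    print(table.can_take('chicken', 'box-17'))   # False: box already at this table
    print(table.can_take('ribs', 'box-42'))      # True: new box and room left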
| python |
import json
import time
import urllib.parse
import argparse
from http.server import HTTPServer, BaseHTTPRequestHandler
class PaulusHandler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.end_headers()
self.wfile.write(form_html(args.questions).encode("utf-8"))
def do_POST(self):
content_length = int(self.headers.get("Content-Length"))
body = self.rfile.read(content_length).decode("utf-8")
form_data = parse_form_data(body)
with open(args.output, "a") as file:
file.write(json.dumps(form_data) + "\n")
self.send_response(200)
self.end_headers()
self.wfile.write("Merci!".encode("utf-8"))
def run():
server_address = ('', args.port)
httpd = HTTPServer(server_address, PaulusHandler)
httpd.serve_forever()
def parse_args():
parser = argparse.ArgumentParser(description="Paulus")
parser.add_argument(
'--port', type=int,
default=8000,
help="Port to start the server on"
)
parser.add_argument(
'--questions',
type=str,
default="questions.txt",
help="File that contains newline-separated questions"
)
parser.add_argument(
'--output',
type=str,
default="paulus.json",
help="File to append poll data to"
)
return parser.parse_args()
def parse_form_data(string):
form_data = {"time": int(time.time())}
for line in string.split("&"):
[key, val] = line.split("=")
parsed_key = urllib.parse.unquote_plus(key).strip()
parsed_val = urllib.parse.unquote_plus(val).strip()
form_data[parsed_key] = parsed_val
return form_data
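# --- Example (not part of the original module) ---
# A hypothetical URL-encoded body and the dict it parses to; the "time" value
# is illustrative, since it comes from the current clock.
#   parse_form_data("Was+it+useful%3F=on&comment=tr%C3%A8s+bien")
#   -> {"time": 1700000000, "Was it useful?": "on", "comment": "très bien"}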
def form_html(questions_file):
questions = []
with open(questions_file, "r") as file:
questions = file.readlines()
questions_html = ""
for question in questions:
questions_html += f"""
<div class="form-question">
<label for="{question}">{question}</label>
<div>
<input type="checkbox" name="{question}" id="{question}"/>
</div>
</div>
"""
style = """
.form-question {
display: flex;
width: 100%;
padding-bottom: 0.5em;
}
.form-question > * {
display: block;
width: 50%;
}
.form-question > label {
text-align: right;
margin-right: 10px;
}
input[type=submit] {
position: relative;
left: 50%;
}
"""
message = f"""
<!DOCTYPE html>
<html>
<head>
<title>Paulus</title>
</head>
<body>
<h1>Paulus</h1>
<form action="" method="post">
{questions_html}
<div class="form-question">
<label for="comment">comment</label>
<textarea name="comment" id="comment"></textarea>
</div>
<input type="submit" value="Submit" />
</form>
<style>
{style}
</style>
</body>
</html>
"""
return message
if __name__ == "__main__":
args = parse_args()
run()
| python |
import cPickle as pickle
import zlib
""" Compressed version of pickle """
def zdumps(obj, compression_level = 3):
return zlib.compress(pickle.dumps(obj,pickle.HIGHEST_PROTOCOL),compression_level)
def zloads(zstr):
return pickle.loads(zlib.decompress(zstr))
def dump(obj,path):
compr = zdumps(obj)
with open(path,"wb") as fp:
fp.write(compr)
def load(path):
with open(path,"rb") as fp:
compr = fp.read()
return zloads(compr)
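# --- Usage sketch (not part of the original module) ---
# Round-trip a small object through the compressed pickle helpers; the path is
# hypothetical.
if __name__ == "__main__":
    data = {"a": [1, 2, 3], "b": "hello"}
    dump(data, "/tmp/example.zpkl")
    print(load("/tmp/example.zpkl") == data)  # True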
| python |
from .pointnet2_head import PointNet2Head
__all__ = ['PointNet2Head']
| python |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# vim: ts=4 sts=4 sw=4 tw=79 sta et
"""%prog [options]
Python source code - @todo
This implements the code to store and save data about tweets
IMPORTANT NOTE: All times are in UTC. They must be either naive and represent
UTC or contain valid tzinfo.
"""
__author__ = 'Patrick Butler'
__email__ = '[email protected]'
import pycassa
import datetime
from ..utils import now, dt_to_ts, uuid_to_dt
from pycassa.util import convert_time_to_uuid
class InvalidDefinitionException(Exception):
pass
class ColumnFamily(object):
"""Record a set of numerical stats"""
name = None
columns = []
super = False
def __init__(self, pool):
"""@todo: to be defined
        :param pool: the connection pool with keyspace to use
"""
if self.__class__.name is None:
raise InvalidDefinitionException("Name undefined in class: " +
self.__class__.__name__)
self._pool = pool
self._cf = pycassa.ColumnFamily(self._pool, self.__class__.name)
@classmethod
def _get_class_keys(cls):
"""@todo: Docstring for __get_class_keys
:param arg1: @todo
:returns: @todo
"""
arg_keys = {i: i for i in [
"comparator_type", "subcomparator_type", "merge_shards_chance",
"column_validation_classes", "key_cache_size", "row_cache_size",
"gc_grace_seconds", "read_repair_chance", "comment"
"default_validation_class", "key_validation_class",
"min_compaction_threshold", "max_compaction_threshold",
"key_cache_save_period_in_seconds", "replicate_on_write",
"row_cache_save_period_in_seconds", "compaction_strategy_options",
"row_cache_provider", "key_alias", "compaction_strategy",
"row_cache_keys_to_save", "compression_options",
]}
arg_keys.update({'default_validation_class': 'default_type',
'key_validation_class': 'key_type',
'comparator_type': 'column_name_type',
'subcomparator_type': 'subcolumn_name_type',
})
kwargs = {}
for pc_arg, cls_arg in arg_keys.iteritems():
if hasattr(cls, cls_arg):
kwargs[pc_arg] = getattr(cls, cls_arg)
kwargs['column_validation_classes'] = cls.columns \
if cls.columns else None
kwargs['super'] = cls.super
return kwargs
@classmethod
def create(cls, sys, keyspace):
"""@todo: Docstring for create_cf
:param sys: @todo
        :param keyspace: @todo
:returns: @todo
"""
kwargs = cls._get_class_keys()
if cls.name not in sys.get_keyspace_column_families(keyspace).keys():
sys.create_column_family(keyspace, cls.name, **kwargs)
@classmethod
def alter(cls, sys, keyspace):
"""@todo: Docstring for create_cf
:param sys: @todo
        :param keyspace: @todo
:returns: @todo
"""
kwargs = cls._get_class_keys()
for k in ["super", "comparator_type", "subcomparator_type",
"key_validation_class"]:
if k in kwargs:
del kwargs[k]
sys.alter_column_family(keyspace, cls.name, **kwargs)
@classmethod
def create_or_alter(cls, sys, keyspace):
"""@todo: Docstring for create_cf
:param sys: @todo
        :param keyspace: @todo
:returns: @todo
"""
if cls.name not in sys.get_keyspace_column_families(keyspace).keys():
cls.create(sys, keyspace)
else:
cls.alter(sys, keyspace)
def batch(self):
return self._cf.batch()
def insert(self, *args, **kwargs):
batch = kwargs.get('batch')
if batch is None:
self._cf.insert(*args, **kwargs)
else:
del kwargs['batch']
batch.insert(*args, **kwargs)
def remove(self, *args, **kwargs):
batch = kwargs.get('batch')
if batch is None:
self._cf.remove(*args, **kwargs)
else:
del kwargs['batch']
batch.remove(*args, **kwargs)
def get(self, *args, **kwargs):
self._cf.get(*args, **kwargs)
def xget(self, *args, **kwargs):
self._cf.xget(*args, **kwargs)
class WideTimeColumnFamily(ColumnFamily):
"""A generic class for storingnumerical stats
start
interval
"""
column_name_type = pycassa.types.TimeUUIDType()
def __init__(self, pool):
"""@todo: to be defined
:param pool: the connection pool with keypace to use
"""
cls = self.__class__
super(WideTimeColumnFamily, self).__init__(pool)
if not hasattr(cls, 'start_ts'):
self.__class__.start_ts = dt_to_ts(self.__class__.start)
def row_key(self, _time):
cls = self.__class__
_time = int(dt_to_ts(_time) - cls.start_ts)
_time //= cls.interval
return str(int(_time))
def col_key(self, _time):
return convert_time_to_uuid(_time, randomize=True)
def insert(self, time, data, batch=None):
rkey = self.row_key(time)
ckey = convert_time_to_uuid(time, randomize=True)
if batch is None:
self._cf.insert(rkey, {ckey: data})
else:
batch.insert(rkey, {ckey: data})
return ckey
def remove(self, uuid):
t = uuid_to_dt(uuid)
k = self.row_key(t)
col_type = "columns"
if self.__class__.super:
col_type = "super_column"
self._cf.remove(k, **{col_type: uuid})
def xget(self, start=None, stop=None, bsize=1000):
cls = self.__class__
if start is None:
start = cls.start
if stop is None:
stop = now()
place = start
while True: # start <= stop:
kstart = self.row_key(place)
total = self._cf.get_count(kstart,
column_start=start,
column_finish=stop,)
s = start
seen = 0
while seen < total:
tmp = self._cf.get(kstart, column_start=s,
column_finish=stop,
column_count=bsize)
itr = tmp.iteritems()
if seen > 0: # column start/finish are inclusive so skip
itr.next()
for k, v in itr:
yield k, v
s = max(s, uuid_to_dt(k))
seen += 1
start = s
if place > stop:
break
place += datetime.timedelta(seconds=cls.interval)
return
def get(self, start=None, stop=None, bsize=1000):
return list(self.xget(start, stop, bsize))
class CounterColumnFamily(WideTimeColumnFamily):
super = False
default_type = pycassa.COUNTER_COLUMN_TYPE
column_name_type = pycassa.DATE_TYPE
#column_name_type = pycassa.types.TimeUUIDType()
sub_interval = "m"
def row_key(self, name, _time):
cls = self.__class__
_time = int(dt_to_ts(_time) - cls.start_ts)
_time //= cls.interval
return name + "|" + str(int(_time))
def col_key(self, name, _time):
si = self.__class__.sub_interval
if si == "m":
_time = _time.replace(second=0, microsecond=0)
elif si == "h":
_time = _time.replace(minute=0, second=0, microsecond=0)
elif si == "d":
_time = _time.replace(hour=0, minute=0, second=0, microsecond=0)
return _time
def add(self, name, value=1, _time=None):
if _time is None:
_time = now()
rkey = self.row_key(name, _time)
ckey = self.col_key(name, _time)
self._cf.add(rkey, ckey, value)
def get_value(self, name, _time):
rkey = self.row_key(name, _time)
ckey = self.col_key(name, _time)
try:
return self._cf.get(rkey, [ckey]).values()[0]
except pycassa.cassandra.c10.ttypes.NotFoundException:
return 0
class StatColumnFamily(WideTimeColumnFamily):
super = False
default_type = pycassa.INT_TYPE
column_name_type = pycassa.DATE_TYPE
#column_name_type = pycassa.types.TimeUUIDType()
sub_interval = "m"
def row_key(self, name, _time):
cls = self.__class__
_time = int(dt_to_ts(_time) - cls.start_ts)
_time //= cls.interval
return name + "|" + str(int(_time))
def col_key(self, name, _time):
si = self.__class__.sub_interval
if si == "m":
_time = _time.replace(second=0, microsecond=0)
elif si == "h":
_time = _time.replace(minute=0, second=0, microsecond=0)
elif si == "d":
_time = _time.replace(hour=0, minute=0, second=0, microsecond=0)
return _time
def insert(self, name, value, _time=None, batch=None):
if _time is None:
_time = now()
rkey = self.row_key(name, _time)
ckey = self.col_key(name, _time)
if batch is None:
self._cf.insert(rkey, {ckey: value})
else:
batch.insert(rkey, {ckey: value})
def get(self, *args, **kwargs):
return list(self.xget(*args, **kwargs))
def xget(self, name, start=None, stop=None, bsize=1000):
cls = self.__class__
if start is None:
start = cls.start
if stop is None:
stop = now()
place = start
while True: # start <= stop:
kstart = self.row_key(name, place)
total = self._cf.get_count(kstart,
column_start=start,
column_finish=stop,)
s = start
seen = 0
while seen < total:
tmp = self._cf.get(kstart, column_start=s,
column_finish=stop,
column_count=bsize)
itr = tmp.iteritems()
if seen > 0: # column start/finish are inclusive so skip
itr.next()
for k, v in itr:
yield k, v
s = max(s, k) # uuid_to_dt(k))
seen += 1
start = s
if place > stop:
break
place += datetime.timedelta(seconds=cls.interval)
return
#def get_value(self, name, _time):
# rkey = self.row_key(name, _time)
# ckey = self.col_key(name, _time)
# try:
# return self._cf.get(rkey, [ckey]).values()[0]
# except pycassa.cassandra.c10.ttypes.NotFoundException:
# return 0
| python |
"""Includes methods that plays the game. i.e. Self play, and AI v. AI.
Author(s): Jonah Chen, Muhammad Ahsan Kaleem
"""
from time import perf_counter
from copy import deepcopy
from concurrent.futures import ProcessPoolExecutor
from os import mkdir
import numpy as np
import tensorflow as tf
from mcts import optimized_search
from game import move_on_board
from nptrain import *
def self_play(model, games=128, game_iter=64, search_iter=512, gamma=1):
"""The model performs self play to generate training data.
Args:
model (tf.keras.models.Model): The model that will be predicting the policies and values for self players
games (int, optional): The number of games in this batch of self players. Defaults to 128.
game_iter (int, optional): The maximum length of the games. Defaults to 64.
search_iter (int, optional): The number of iterations of MCTS that is performed to make each moves. Defaults to 512.
gamma (float, optional): The discounting factor for the rewards. A value of 1 means no discounting. Defaults to 1.
Returns:
s (list of numpy arrays): A list of the boards that are a result of each state of the every game.
pie (list of numpy arrays): A list of arrays of the policies generated from the monte-carlo tree search.
z (list of int): A list of the value (result or diminished result) of each of the games.
"""
boards = np.zeros((games, 8, 8, 2,), dtype="float32")
players = [1]*games
inputs = None
s = []
pie = []
z = []
    # These are the training targets for the network, gathered from the MCTS process.
# The elements are accessed as game_boards[game#][turn#]
game_boards = [[] for _ in range(games)]
mcts_policies = [[] for _ in range(games)]
for turns in range(game_iter):
print(
f"------------------------------------------------------------\nTurn {turns+1} of {game_iter}. Ended: {games - len(game_boards)} of {games}. Cumulated: {int(perf_counter() - true_start)}s")
if len(game_boards) == 0:
return s, pie, z
results = optimized_search(
model, boards, players, roots=inputs, it=search_iter)
inputs = []
games_ended = 0
for j in range(len(results)):
i = j - games_ended
# Save the results of the MCTS to train NN
act, dist = results[i].play()
game_boards[i].append(
deepcopy(boards[i] if players[i] == 1 else np.flip(boards[i], axis=2)))
mcts_policies[i].append(dist)
# Make Move
move_on_board(boards[i], act, player=players[i])
# When game ends, save the data of the game.
state = is_win(boards[i])
if state:
s.append(game_boards.pop(i))
pie.append(mcts_policies.pop(i))
if state == 1:
z.append([(1 - 2 * (k % 2))*gamma**(turns-k) for k in range(turns+1)])
elif state == 2:
z.append([(2 * (k % 2) - 1)*gamma**(turns-k) for k in range(turns+1)])
elif state == 3:
z.append([0]*(turns+1))
boards = np.delete(boards, i, axis=0)
players.pop()
del results[i]
games_ended += 1
else:
                # When the game doesn't end, the player changes and the new state is queued for evaluation on the next turn.
inputs.append(results[i].children[act])
players[i] = players[i] % 2 + 1
return s, pie, z
def digest(list_of_list):
temp = []
for x1 in list_of_list:
for x2 in x1:
temp.append(x2)
return np.array(temp)
def ai_v_ai(black, white, games=64, game_iter=64, search_iter=512, tau=0):
"""Plays the AI black against white. Return the score of black (between 0 and 100, higher is better), the list of list of games played as moves (0-63) in the order they are played, and the record as a tuple (losses, draws, wins). Black will start with the black stones in every game"""
# Creates the boards.
boards = np.zeros((games, 8, 8, 2,), dtype="float32")
players = [1]*games
inputs = None
# Create the statistics.
wins, losses, draws = 0, 0, 0
# Creates the arrays of the moves being made.
temp_games = [[] for _ in range(games)]
save_games = []
for turns in range(game_iter):
print(
f"------------------------------------------------------------\nTurn {turns+1} of {game_iter}. w/d/l={wins}/{draws}/{losses}")
# Return when all games end
if len(temp_games) == 0:
return round((100*wins+50*draws)/games), save_games, [losses, draws, wins]
# Execute the MCTS
results = optimized_search(
white if turns % 2 else black, boards, players, roots=inputs, it=search_iter)
inputs = []
games_ended = 0
for j in range(len(results)):
i = j - games_ended
# Generate and make the move
act, _ = results[i].play(tau=tau)
move_on_board(boards[i], act, player=players[i])
temp_games[i].append(act)
# When game ends, save the data of the game.
state = is_win(boards[i])
if state:
save_games.append(np.array(temp_games.pop(i)))
if state == 1:
wins += 1
elif state == 2:
losses += 1
elif state == 3:
draws += 1
boards = np.delete(boards, i, axis=0)
players.pop()
del results[i]
games_ended += 1
else:
                # When the game doesn't end, the player changes and the new state is queued for evaluation on the next turn.
inputs.append(results[i].children[act])
players[i] = players[i] % 2 + 1
return round((100*wins+50*draws)/games), np.array(save_games), [losses, draws, wins]
def generate_data(num, model, games=128, gamma=1):
global true_start
true_start = perf_counter()
# Make a directory and write a dummy file to it.
mkdir(f'selfplay_data/{num}')
np.save(f'selfplay_data/{num}/_test', np.zeros(1,))
print("Directory created succesfully.")
s, pie, z = self_play(model, games=games, gamma=gamma)
start = perf_counter()
with ProcessPoolExecutor() as executor:
pie = executor.submit(digest, pie).result()
z = executor.submit(digest, z).result()
s = executor.submit(digest, s).result()
end = perf_counter()
print(end-start)
np.save(f'selfplay_data/{num}/pie', pie)
np.save(f'selfplay_data/{num}/z', z)
np.save(f'selfplay_data/{num}/s', s)
del s, pie, z
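# Illustrative sketch (an assumption, not part of the original project): the
# arrays saved by generate_data() could later be loaded and fed to a
# two-headed (policy + value) network roughly like this:
#   s = np.load('selfplay_data/1/s.npy')
#   pie = np.load('selfplay_data/1/pie.npy')
#   z = np.load('selfplay_data/1/z.npy')
#   model.fit(s, [pie, z], batch_size=64, epochs=1)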
def eval_model(new_model, old_model, games=128, verbose=True, search_iter=512):
"""Play games games with equal chance each model gets white and black and return
the score the new_model achieved(0-100),
the record [losses, draws, wins],
the games played with black,
the games played with white
as a tuple in order."""
_, games1, record1 = ai_v_ai(new_model, old_model, games=games//2, search_iter=search_iter)
_, games2, record2 = ai_v_ai(old_model, new_model, games=games//2, search_iter=search_iter)
if verbose:
print(f"Black (w/d/l): {record1[2]}/{record1[1]}/{record1[0]}")
print(f"White (w/d/l): {record2[0]}/{record2[1]}/{record2[2]}")
print(f"Total (w/d/l): {record1[2]+record2[0]}/{record1[1]+record2[1]}/{record1[0]+record2[2]}")
return round(((record1[2]+record2[0])*100 + (record1[1]+record2[1])*50)/games), [record1[0]+record2[2], record1[1]+record2[1], record1[2]+record2[0]], games1, games2
if __name__ == '__main__':
model2 = tf.keras.models.load_model('models/2')
model1 = tf.keras.models.load_model('models/1')
eval_model(model1, model2)
| python |
# Copyright (c) Johns Hopkins University and its affiliates.
# This source code is licensed under the Apache 2 license found in the
# LICENSE file in the root directory of this source tree.
__author__ = "Max Fleming, Darius Irani"
__copyright__ = "Copyright 2020, Johns Hopkins University"
__credits__ = ["Max Fleming"]
__license__ = "Apache 2.0"
__version__ = "0.1"
__maintainer__ = "JHU-COVID-QA"
__email__ = "[email protected]"
__status__ = "Development"
import jsonlines
import time
from bs4 import BeautifulSoup
from covid_scraping import utils, test_jsonlines
class Conversion():
def __init__(self, file_prefix, path):
"""
        This is the constructor for Conversion. The file_prefix should be the name
        of the file you want, e.g. if you're scraping the 'American Veterinary
        Medical Association', an appropriate file prefix would be 'AVMA'.
        The path should be the path from the directory you're working in to
        Covid-19-infobot/data/scraping.
"""
self._examples = []
self._file_prefix = file_prefix
self._path = path
def _check_example(self, example):
required_keys_to_type = {'sourceUrl': str,
'sourceName': str,
'needUpdate': bool,
'typeOfInfo': str,
'isAnnotated': bool,
'responseAuthority': str,
'question': str,
'answer': str,
'hasAnswer': bool,
'targetEducationLevel': str,
'topic': list,
'extraData': dict,
'targetLocation': str,
'language': str}
for key in required_keys_to_type.keys():
if key not in example:
raise KeyError("'" + key + "'" + "was not found in dictionary")
if not isinstance(example[key], required_keys_to_type[key]):
raise ValueError("'" +
key +
"'" +
"should be type " +
str(required_keys_to_type[key]))
for field in ['question', 'answer']:
if len(example[field].strip()) == 0: # indicates empty field
raise ValueError('{} field is empty'.format(field))
def addExample(self, dict):
"""
        Add a QA pair to the converter. The dictionary passed should have the
        following fields:
sourceUrl
sourceName
sourceDate
lastUpdateTime
needUpdate
typeOfInfo
isAnnotated
responseAuthority
question
answer
hasAnswer
targetEducationLevel
topic
extraData
targetLocation
language
"""
self._check_example(dict)
self._examples.append(dict)
def _writeV2(self):
v2_requirements_from_scraper = ['sourceUrl',
'sourceName',
'needUpdate',
'typeOfInfo',
'isAnnotated',
'responseAuthority',
'hasAnswer',
'targetEducationLevel',
'targetLocation',
'language',
'extraData',
'topic']
v2_requirements_from_conversion = ['sourceDate',
'lastUpdateTime',
'dateScraped',
'questionOriginal',
'questionText',
'answerOriginal',
'answerText',
'ID',
'answerContainsURLs',
'answerToks2URL']
path = self._path + '/schema_v0.2/' + self._file_prefix + '_v0.2.jsonl'
qas = []
for example in self._examples:
questionText, question_link_dict = utils.clean_text(
example['question'])
answerText, answer_link_dict = utils.clean_text(example['answer'])
pairs_from_scraper = dict(zip(v2_requirements_from_scraper, list(
map(example.get, v2_requirements_from_scraper))))
v2_conversion = [self._lastUpdateTime,
self._lastUpdateTime,
self._dateScraped,
example['question'],
questionText,
example['answer'],
answerText,
example['sourceName'] + '|||' + str(hash(str(example['question']))),
bool(answer_link_dict),
answer_link_dict]
pairs_from_conversion = dict(
zip(v2_requirements_from_conversion, v2_conversion))
qas.append({**pairs_from_scraper, **pairs_from_conversion})
gold_data = utils.merge(path, qas)
# Merging could add a exampleUUID for a new example.
for example in gold_data:
example.pop('exampleUUID', None)
with jsonlines.open(path, 'w') as writer:
writer.write_all(gold_data)
return test_jsonlines(path, 'v0.2')
def _writeV3(self):
v3_requirements_from_scraper = ['sourceUrl',
'sourceName',
'needUpdate',
'typeOfInfo',
'isAnnotated',
'responseAuthority',
'hasAnswer',
'targetEducationLevel',
'targetLocation',
'language',
'extraData',
'topic']
v3_requirements_from_conversion = ['questionOriginal',
'questionText',
'answerOriginal',
'answerText',
'ID',
'answerContainsURLs',
'answerToks2URL']
path = self._path + '/schema_v0.3/' + self._file_prefix + '_v0.3.jsonl'
qas = []
for example in self._examples:
questionText, question_link_dict = utils.clean_text(example['question'])
answerText, answer_link_dict = utils.clean_text(example['answer'])
pairs_from_scraper = dict(zip(v3_requirements_from_scraper, list(
map(example.get, v3_requirements_from_scraper))))
v3_conversion = [example['question'],
questionText,
example['answer'],
answerText,
example['sourceName'] + '|||' + str(hash(str(example['question']))),
bool(answer_link_dict),
answer_link_dict]
pairs_from_conversion = dict(
zip(v3_requirements_from_conversion, v3_conversion))
qas.append({**pairs_from_scraper, **pairs_from_conversion})
gold_data = utils.merge(path, qas)
# Merging could add a exampleUUID for a new example.
for example in gold_data:
example.pop('exampleUUID', None)
example.pop('sourceDate', None)
example.pop('lastUpdateTime', None)
example.pop('dateScraped', None)
with jsonlines.open(path, 'w') as writer:
writer.write_all(gold_data)
return test_jsonlines(path, 'v0.3')
def write(self):
"Write all the added examples to the paths specified in the constructor"
return self._writeV3()
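# Illustrative usage sketch (not part of the original module; the field values
# below are invented placeholders):
#   converter = Conversion('AVMA', '../../data/scraping')
#   converter.addExample({
#       'sourceUrl': 'https://example.org/faq',
#       'sourceName': 'Example FAQ',
#       'needUpdate': True, 'typeOfInfo': 'QA', 'isAnnotated': False,
#       'responseAuthority': '', 'question': 'What is COVID-19?',
#       'answer': 'COVID-19 is a disease caused by SARS-CoV-2.',
#       'hasAnswer': True, 'targetEducationLevel': 'NA', 'topic': [],
#       'extraData': {}, 'targetLocation': '', 'language': 'en',
#   })
#   converter.write()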
| python |
import json
from redisgears import getMyHashTag as hashtag
from rgsync.common import *
class CqlConnection:
def __init__(self, user, password, db, keyspace):
self._user = user
self._password = password
self._db = db
self._keyspace = keyspace
@property
def user(self):
return self._user() if callable(self._user) else self._user
@property
def password(self):
return self._password() if callable(self._password) else self._password
@property
def db(self):
return self._db() if callable(self._db) else self._db
@property
def keyspace(self):
return self._keyspace() if callable(self._keyspace) else self._keyspace
def _getConnectionStr(self):
return json.dumps(
{
"user": self.user,
"password": self.password,
"db": self.db,
"keyspace": self.keyspace,
}
)
def Connect(self):
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster
ConnectionStr = self._getConnectionStr()
WriteBehindLog(f"Connect: connecting db={self.db} keyspace={self.keyspace}")
auth_provider = PlainTextAuthProvider(
username=self.user, password=self.password
)
cluster = Cluster(self.db.split(), auth_provider=auth_provider)
if self.keyspace != "":
session = cluster.connect(self.keyspace)
else:
session = cluster.connect()
WriteBehindLog("Connect: Connected")
return session
class CqlConnector:
def __init__(self, connection, tableName, pk, exactlyOnceTableName=None):
self.connection = connection
self.tableName = tableName
self.pk = pk
self.exactlyOnceTableName = exactlyOnceTableName
self.exactlyOnceLastId = None
self.shouldCompareId = True if self.exactlyOnceTableName is not None else False
self.session = None
self.supportedOperations = [OPERATION_DEL_REPLICATE, OPERATION_UPDATE_REPLICATE]
def PrepereQueries(self, mappings):
def GetUpdateQuery(tableName, mappings, pk):
query = f"update {tableName} set "
fields = [
f"{val}=?" for kk, val in mappings.items() if not kk.startswith("_")
]
query += ",".join(fields)
query += f" where {self.pk}=?"
return query
self.addQuery = GetUpdateQuery(self.tableName, mappings, self.pk)
self.delQuery = f"delete from {self.tableName} where {self.pk}=?"
if self.exactlyOnceTableName is not None:
self.exactlyOnceQuery = GetUpdateQuery(
self.exactlyOnceTableName, {"val", "val"}, "id"
)
def TableName(self):
return self.tableName
def PrimaryKey(self):
return self.pk
def WriteData(self, data):
if len(data) == 0:
WriteBehindLog("Warning, got an empty batch")
return
query = None
try:
if not self.session:
self.session = self.connection.Connect()
if self.exactlyOnceTableName is not None:
shardId = f"shard-{hashtag()}"
result = self.session.execute(
f"select val from {self.exactlyOnceTableName} where id=?",
shardId,
)
res = result.first()
if res is not None:
self.exactlyOnceLastId = str(res["val"])
else:
self.shouldCompareId = False
except Exception as e:
self.session = None # next time we will reconnect to the database
self.exactlyOnceLastId = None
self.shouldCompareId = (
True if self.exactlyOnceTableName is not None else False
)
msg = f'Failed connecting to Cassandra database, error="{str(e)}"'
WriteBehindLog(msg)
raise Exception(msg) from None
idsToAck = []
try:
from cassandra.cluster import BatchStatement
batch = BatchStatement()
isAddBatch = (
True
if data[0]["value"][OP_KEY] == OPERATION_UPDATE_REPLICATE
else False
)
query = self.addQuery if isAddBatch else self.delQuery
stmt = self.session.prepare(query)
lastStreamId = None
for d in data:
x = d["value"]
lastStreamId = d.pop(
"id", None
) # pop the stream id out of the record, we do not need it
if (
self.shouldCompareId
and CompareIds(self.exactlyOnceLastId, lastStreamId) >= 0
):
WriteBehindLog(
f"Skip {lastStreamId} as it was already writen to the backend"
)
continue
op = x.pop(OP_KEY, None)
if op not in self.supportedOperations:
msg = "Got unknown operation"
WriteBehindLog(msg)
raise Exception(msg) from None
self.shouldCompareId = False
if op != OPERATION_UPDATE_REPLICATE:
if isAddBatch:
self.session.execute(batch)
batch = BatchStatement()
isAddBatch = False
query = self.delQuery
else:
if not isAddBatch:
self.session.execute(batch)
batch = BatchStatement()
isAddBatch = True
query = self.addQuery
stmt = self.session.prepare(query)
batch.add(stmt.bind(x))
if len(batch) > 0:
self.session.execute(batch)
if self.exactlyOnceTableName is not None:
stmt = self.session.prepare(self.exactlyOnceQuery)
self.session.execute(stmt, {"id": shardId, "val": lastStreamId})
except Exception as e:
self.session = None # next time we will reconnect to the database
self.exactlyOnceLastId = None
self.shouldCompareId = (
True if self.exactlyOnceTableName is not None else False
)
msg = 'Got exception when writing to DB, query="%s", error="%s".' % (
(query if query else "None"),
str(e),
)
WriteBehindLog(msg)
raise Exception(msg) from None
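# Illustrative wiring sketch (an assumption, not verified against this repo's
# examples): a connection/connector pair like the one below is typically handed
# to rgsync's write-behind registration; every name and credential here is a
# placeholder.
#   connection = CqlConnection('cassandra', 'cassandra', '127.0.0.1', 'test')
#   connector = CqlConnector(connection, 'persons', 'person_id')
#   mappings = {'first_name': 'first', 'last_name': 'last'}
#   RGWriteBehind(GB, keysPrefix='person', mappings=mappings,
#                 connector=connector, name='PersonsWriteBehind',
#                 version='99.99.99')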
| python |
from graphics import *
from menu import *
from levels import *
import common as var
import states
from game import *
import lives as l
from pathlib import *
from file import *
from highscores import *
def main():
win = GraphWin("Arkasquash by Alexandre Valente", 800, 800, autoflush=False) #, autoflush=True
startApplication(win)
win.close()
def startApplication(win):
hasExited = False
state = states.MAIN_MENU
while not hasExited:
if state == states.MAIN_MENU:
state = mainMenu(win, state)
elif state == states.GAME_STARTED:
state, gameVariables = playGame(win, state)
elif state == states.GAME_ENDED: #When the game ends, prompt the user to save score
state = saveScore(win, state, gameVariables)
elif state == states.HIGH_SCORES:
state = highScores(win, state)
elif state == states.INST_MENU:
state = instructionsMenu(win, state)
#elif state == states.LEVEL_EDITOR:
#state = levelEditor(win, state)
elif state == states.CARACTER_SEL:
state = caracterSelection(win, state)
elif state == states.GAME_EXIT or win.closed:
hasExited = True
update(states.FPS)
def playGame(win, state):
'''Draws the playing level, according to the current level number'''
gameVariables = [1, 0, 0, 0, 0, 3, [], [], Text(Point(690, 715), ""), Image(Point(0,0), ""), Text(Point(700, 130), "Score"), Image(Point(0,0), ""), Image(Point(0,0), "")]
drawLevel(win, gameVariables)
l.drawHearts(win, gameVariables)
l.drawScore(win, gameVariables)
hasStarted = True
isPlaying = False
speed = 50
ballSpeed = 5
ballDir = -1
goDown = 0
times_moved = 0
isPaused = False
rocketActive = False
'''Game Loop'''
while hasStarted:
key = win.checkKey()
'''Pause Menu'''
if isPaused:
mouse = win.checkMouse()
if not isPaused and key == 'Escape':
pause = showPause(win)
isPaused = True
elif isPaused and (key == 'Escape' or resumeButton(mouse)):
closePause(pause)
isPaused = False
elif isPaused and mainMenuButton(mouse):
return states.MAIN_MENU, gameVariables
if not isPaused:
if isPlaying and goDown > states.FPS * var.time_sec and times_moved < gameVariables[var.level] * 10:
goDown = 0
moveBlocksDown(win, gameVariables)
times_moved += 1
            '''Game hasn't started; waiting for the player to start'''
if key == 'space' and not isPlaying:
isPlaying = True
ballDir = startBall()
'''Move player Paddle'''
if key == 'Left' or key == 'Right':
movePlayer(win, key, gameVariables[var.player], speed)
            '''Launch a rocket that destroys a set amount of blocks'''
if isPlaying and key == 'z' and not rocketActive and gameVariables[var.rockets] > 0:
l.launchRocket(win, gameVariables)
gameVariables[var.rockets] -= 1
l.drawRockets(win, gameVariables)
rocketActive = True
if rocketActive:
rocketActive = l.moveRocket(win, gameVariables)
'''Move the paddle, while the game has not started'''
if not isPlaying:
if gameVariables[var.ball].getCenter().getX() != gameVariables[var.player].getAnchor().getX():
x = gameVariables[var.player].getAnchor().getX() - gameVariables[var.ball].getCenter().getX()
gameVariables[var.ball].move(x, 0)
if gameVariables[var.ball].getCenter().getY() >= 750:
gameVariables[var.ball].move(0, -10)
            '''Detect collisions and move the ball; on a miss, lose a life and update the hearts'''
if isPlaying and ballDir != -1:
ballDir = checkCollisions(win, ballDir, gameVariables, var.ball_rad)
moveBall(ballDir, ballSpeed, gameVariables)
elif isPlaying and ballDir == -1:
l.removeHeart(gameVariables)
l.drawHearts(win, gameVariables)
isPlaying = False
'''When the number of blocks reaches 0, start next level and add heart'''
if isPlaying and len(gameVariables[var.blocks]) <= 0:
nextLevel(gameVariables)
drawLevel(win, gameVariables)
l.drawHearts(win, gameVariables)
isPlaying = False
ballDir = -1
times_moved = 0
if gameVariables[var.level] > 3:
hasStarted = False
'''When all lives are lost, end the game'''
if gameVariables[var.lives] <= 0:
isPlaying = False
hasStarted = False
if isPlaying:
goDown += 1
update(states.FPS)
return states.GAME_ENDED, gameVariables
def saveScore(win, state, gameVars):
#Creates the file in case it does not exist
text, tab, scoreText = promptUsername(win, gameVars[var.score])
while win.checkKey() != 'Return':
name = text.getText()
name = name[:13]
path = Path("scores.txt")
if not path.is_file():
file = open("scores.txt", "w")
file.close()
inFile = open("scores.txt", "r+")
data = inFile.read()
inFile.close()
if "`" not in data:
outFile = open("scores.txt", "w+")
outFile.write(name + "´" + str(gameVars[var.score]) + "`")
outFile.close()
else:
newData = scoresToList(data, name, gameVars[var.score])
outFile = open("scores.txt", "w+")
outFile.write(newData)
outFile.close()
scoreText.undraw()
text.undraw()
tab.undraw()
return states.MAIN_MENU
def highScores(win, state):
players, tab = drawHighscores(win)
while win.getKey() != 'Escape':
pass
return states.MAIN_MENU
def instructionsMenu(win, state):
tab = drawInstructions(win)
while win.getKey() != 'Escape':
pass
tab.undraw()
return states.MAIN_MENU
def caracterSelection(win, state):
tab = drawDesign(win)
dN, player = drawPlayerDesign(win)
key = 'm'
while key != 'Escape':
key = win.checkKey()
if key == 'Left':
dN, player = minusDesign(dN, player, win) #dN = designNumber
elif key == 'Right':
dN, player = plusDesign(dN, player, win)
saveDesign(dN)
tab.undraw()
player.undraw()
return states.MAIN_MENU
main() | python |
# Released under the MIT License. See LICENSE for details.
#
"""Provide top level UI related functionality."""
from __future__ import annotations
import os
import weakref
from dataclasses import dataclass
from typing import TYPE_CHECKING, cast, Type
import _ba
from ba._generated.enums import TimeType
from ba._general import print_active_refs
if TYPE_CHECKING:
from typing import Optional, Any
import ba
# Set environment variable BA_DEBUG_UI_CLEANUP_CHECKS to 1
# to print detailed info about what is getting cleaned up when.
DEBUG_UI_CLEANUP_CHECKS = os.environ.get('BA_DEBUG_UI_CLEANUP_CHECKS') == '1'
class Window:
"""A basic window.
Category: User Interface Classes
"""
def __init__(self, root_widget: ba.Widget, cleanupcheck: bool = True):
self._root_widget = root_widget
# Complain if we outlive our root widget.
if cleanupcheck:
uicleanupcheck(self, root_widget)
def get_root_widget(self) -> ba.Widget:
"""Return the root widget."""
return self._root_widget
@dataclass
class UICleanupCheck:
"""Holds info about a uicleanupcheck target."""
obj: weakref.ref
widget: ba.Widget
widget_death_time: Optional[float]
class UILocation:
"""Defines a specific 'place' in the UI the user can navigate to.
Category: User Interface Classes
"""
def __init__(self) -> None:
pass
def save_state(self) -> None:
"""Serialize this instance's state to a dict."""
def restore_state(self) -> None:
"""Restore this instance's state from a dict."""
def push_location(self, location: str) -> None:
"""Push a new location to the stack and transition to it."""
class UILocationWindow(UILocation):
"""A UILocation consisting of a single root window widget.
Category: User Interface Classes
"""
def __init__(self) -> None:
super().__init__()
self._root_widget: Optional[ba.Widget] = None
def get_root_widget(self) -> ba.Widget:
"""Return the root widget for this window."""
assert self._root_widget is not None
return self._root_widget
class UIEntry:
"""State for a UILocation on the stack."""
def __init__(self, name: str, controller: UIController):
self._name = name
self._state = None
self._args = None
self._instance: Optional[UILocation] = None
self._controller = weakref.ref(controller)
def create(self) -> None:
"""Create an instance of our UI."""
cls = self._get_class()
self._instance = cls()
def destroy(self) -> None:
"""Transition out our UI if it exists."""
if self._instance is None:
return
print('WOULD TRANSITION OUT', self._name)
def _get_class(self) -> Type[UILocation]:
"""Returns the UI class our name points to."""
# pylint: disable=cyclic-import
# TEMP HARD CODED - WILL REPLACE THIS WITH BA_META LOOKUPS.
if self._name == 'mainmenu':
from bastd.ui import mainmenu
return cast(Type[UILocation], mainmenu.MainMenuWindow)
raise ValueError('unknown ui class ' + str(self._name))
class UIController:
"""Wrangles ba.UILocations.
Category: User Interface Classes
"""
def __init__(self) -> None:
# FIXME: document why we have separate stacks for game and menu...
self._main_stack_game: list[UIEntry] = []
self._main_stack_menu: list[UIEntry] = []
# This points at either the game or menu stack.
self._main_stack: Optional[list[UIEntry]] = None
# There's only one of these since we don't need to preserve its state
# between sessions.
self._dialog_stack: list[UIEntry] = []
def show_main_menu(self, in_game: bool = True) -> None:
"""Show the main menu, clearing other UIs from location stacks."""
self._main_stack = []
self._dialog_stack = []
self._main_stack = (self._main_stack_game
if in_game else self._main_stack_menu)
self._main_stack.append(UIEntry('mainmenu', self))
self._update_ui()
def _update_ui(self) -> None:
"""Instantiate the topmost ui in our stacks."""
# First tell any existing UIs to get outta here.
for stack in (self._dialog_stack, self._main_stack):
assert stack is not None
for entry in stack:
entry.destroy()
# Now create the topmost one if there is one.
entrynew = (self._dialog_stack[-1] if self._dialog_stack else
self._main_stack[-1] if self._main_stack else None)
if entrynew is not None:
entrynew.create()
def uicleanupcheck(obj: Any, widget: ba.Widget) -> None:
"""Add a check to ensure a widget-owning object gets cleaned up properly.
Category: User Interface Functions
This adds a check which will print an error message if the provided
object still exists ~5 seconds after the provided ba.Widget dies.
This is a good sanity check for any sort of object that wraps or
controls a ba.Widget. For instance, a 'Window' class instance has
no reason to still exist once its root container ba.Widget has fully
transitioned out and been destroyed. Circular references or careless
strong referencing can lead to such objects never getting destroyed,
however, and this helps detect such cases to avoid memory leaks.
"""
if DEBUG_UI_CLEANUP_CHECKS:
print(f'adding uicleanup to {obj}')
if not isinstance(widget, _ba.Widget):
raise TypeError('widget arg is not a ba.Widget')
if bool(False):
def foobar() -> None:
"""Just testing."""
if DEBUG_UI_CLEANUP_CHECKS:
print('uicleanupcheck widget dying...')
widget.add_delete_callback(foobar)
_ba.app.ui.cleanupchecks.append(
UICleanupCheck(obj=weakref.ref(obj),
widget=widget,
widget_death_time=None))
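# Illustrative sketch (an assumption; 'MyWindow' and the containerwidget call
# are placeholders): a widget-owning class would typically register itself the
# way ba.Window does in its constructor above.
#   class MyWindow:
#       def __init__(self) -> None:
#           self._root_widget = _ba.containerwidget(size=(400, 300))
#           uicleanupcheck(self, self._root_widget)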
def ui_upkeep() -> None:
"""Run UI cleanup checks, etc. should be called periodically."""
ui = _ba.app.ui
remainingchecks = []
now = _ba.time(TimeType.REAL)
for check in ui.cleanupchecks:
obj = check.obj()
# If the object has died, ignore and don't re-add.
if obj is None:
if DEBUG_UI_CLEANUP_CHECKS:
print('uicleanupcheck object is dead; hooray!')
continue
# If the widget hadn't died yet, note if it has.
if check.widget_death_time is None:
remainingchecks.append(check)
if not check.widget:
check.widget_death_time = now
else:
# Widget was already dead; complain if its been too long.
if now - check.widget_death_time > 5.0:
print(
'WARNING:', obj,
                    'is still alive 5 seconds after its widget died;'
' you might have a memory leak.')
print_active_refs(obj)
else:
remainingchecks.append(check)
ui.cleanupchecks = remainingchecks
| python |
from typing import Sequence, Union, Dict
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from kyle.util import safe_accuracy_score
class EvalStats:
TOP_CLASS_LABEL = "top_class"
"""
Class for computing evaluation statistics of classifiers, including calibration metrics
:param y_true: integer array of shape (n_samples,)
:param confidences: array of shape (n_samples, n_classes)
:param bins: on how many homogeneous bins to evaluate the statistics
"""
def __init__(self, y_true: np.ndarray, confidences: np.ndarray, bins=30):
assert (
len(y_true.shape) == 1
), f"y_true has to be 1-dimensional, instead got shape: {y_true.shape}"
assert (
len(confidences.shape) == 2
), f"predicted_probabilities have to be of shape (#samples, #classes), instead got {confidences.shape}"
assert confidences.shape[0] == len(
y_true
), f"Mismatch between number of data points in confidences and labels, {confidences.shape[0]} != {len(y_true)}"
self.num_samples = len(y_true)
self.num_classes = confidences.shape[1]
self.y_true = y_true
self.y_pred = confidences.argmax(axis=1)
self.confidences = confidences
self._top_class_confidences = confidences.max(axis=1)
self.bins: int = None
# due to discretization they don't sum to 1 anymore
self._discretized_confidences: np.ndarray = None
self._discretized_probab_values: np.ndarray = None
self.set_bins(bins)
def expected_confidence(self, class_label: Union[int, str] = TOP_CLASS_LABEL):
"""
Returns the expected confidence for the selected class or for the predictions (default)
:param class_label: either the class label as int or "top_class"
:return:
"""
if class_label == self.TOP_CLASS_LABEL:
confs = self._top_class_confidences
else:
confs = self.confidences[:, class_label]
return float(np.mean(confs))
def set_bins(self, bins: int):
self.bins = bins
self._discretized_probab_values = (np.arange(self.bins) + 0.5) / self.bins
bin_boundaries = np.linspace(0, 1, self.bins + 1)
bin_boundaries[
0
] = -1 # in order to associate predicted probabilities = 0 to the right bin
binned_confidences = (
np.digitize(x=self.confidences, bins=bin_boundaries, right=True) - 1
)
self._discretized_confidences = (binned_confidences + 0.5) / self.bins
def accuracy(self):
return safe_accuracy_score(self.y_true, self.y_pred)
def marginal_accuracy(self, class_label: int):
"""
Corresponds to acc_i in our calibration paper
:param class_label:
:return:
"""
class_label_mask = self.y_pred == class_label
predictions = self.y_pred[class_label_mask]
gt = self.y_true[class_label_mask]
return np.sum(gt == predictions) / len(self.y_true)
@staticmethod
def _expected_error(
probabilities: np.ndarray, members_per_bin: np.ndarray, confidences: np.ndarray
) -> float:
"""
Computes the expected error, being the sum of abs. differences of true probabilities and mean confidences
for each bin weighted by the factor N_bin / N_total
:param probabilities:
:param members_per_bin:
:return:
"""
total_members = np.sum(members_per_bin)
if total_members == 0:
return 0.0
result = float(np.sum(np.abs(probabilities - confidences) * members_per_bin))
result /= total_members
return result
def _non_degenerate_acc_conf_differences(self) -> np.ndarray:
"""
        Computes the absolute difference between accuracy and mean confidence for each non-degenerate bin,
        where a bin is considered degenerate if no confidence vector has its maximum in that bin.
        E.g. for an N-class classifier, all bins with a right-hand boundary below 1/N are degenerate, since the
        maximum of a probability vector is always larger than 1/N.
:return: array of shape (N_bins, )
"""
accuracies, members_per_bin, confidences = self.top_class_reliabilities()
acc_conf_difference = (accuracies - confidences)[members_per_bin > 0]
return np.abs(acc_conf_difference)
def expected_calibration_error(self):
accuracies, members_per_bin, confidences = self.top_class_reliabilities()
return self._expected_error(accuracies, members_per_bin, confidences)
def average_calibration_error(self):
return np.mean(self._non_degenerate_acc_conf_differences())
def max_calibration_error(self):
return np.max(self._non_degenerate_acc_conf_differences())
def expected_marginal_calibration_error(self, class_label):
"""
I sort of made this up, although this very probably exists somewhere in the wild
:param class_label:
"""
(
class_probabilities,
members_per_bin,
class_confidences,
) = self.marginal_reliabilities(class_label)
return self._expected_error(
class_probabilities, members_per_bin, class_confidences
)
def average_marginal_calibration_error(self):
"""
I made this up, don't know if this metric was described anywhere yet.
It is also not completely clear what this means in terms of probabilistic quantities.
"""
errors = np.zeros(self.num_classes)
weights = np.zeros(self.num_classes)
for class_label in range(self.num_classes):
accuracies, n_members, class_confidences = self.marginal_reliabilities(
class_label
)
total_members = np.sum(n_members)
errors[class_label] = self._expected_error(
accuracies, n_members, class_confidences
)
weights[class_label] = total_members
return np.sum(errors * weights) / np.sum(weights)
def class_wise_expected_calibration_error(self):
result = sum(
self.expected_marginal_calibration_error(k) for k in range(self.num_classes)
)
result /= self.num_classes
return result
def marginal_reliabilities(self, class_label: int):
"""
Compute the true class probabilities and numbers of members (weights) for each of the N bins for the
confidence for the given class.
:return: tuple of two 1-dim arrays of length N, corresponding to (accuracy_per_bin, num_members_per_bin)
"""
discretized_class_confidences = self._discretized_confidences[:, class_label]
class_confidences = self.confidences[:, class_label]
members_per_bin = np.zeros(self.bins)
accuracies_per_bin = np.zeros(self.bins)
mean_class_confidences_per_bin = np.zeros(self.bins)
for i, probability_bin in enumerate(self._discretized_probab_values):
probability_bin_mask = discretized_class_confidences == probability_bin
cur_gt_labels = self.y_true[probability_bin_mask]
cur_class_confidences = class_confidences[probability_bin_mask]
cur_members = np.sum(probability_bin_mask)
cur_accuracy = safe_accuracy_score(
cur_gt_labels, class_label * np.ones(len(cur_gt_labels))
)
if len(cur_class_confidences) > 0:
cur_mean_class_confidence = cur_class_confidences.mean()
else:
cur_mean_class_confidence = probability_bin
members_per_bin[i] = cur_members
accuracies_per_bin[i] = cur_accuracy
mean_class_confidences_per_bin[i] = cur_mean_class_confidence
return accuracies_per_bin, members_per_bin, mean_class_confidences_per_bin
def top_class_reliabilities(self):
"""
Compute the accuracies and numbers of members (weights) for each of the N bins for top-class confidence.
:return: tuple of two 1-dim arrays of length N, corresponding to (accuracy_per_bin, num_members_per_bin)
"""
members_per_bin = np.zeros(self.bins)
accuracies_per_bin = np.zeros(self.bins)
mean_confidences_per_bin = np.zeros(self.bins)
discretized_top_class_confidences = self._discretized_confidences.max(axis=1)
for i, probability in enumerate(self._discretized_probab_values):
probability_bin_mask = discretized_top_class_confidences == probability
cur_members = np.sum(probability_bin_mask)
if cur_members == 0:
members_per_bin[i] = 0
accuracies_per_bin[i] = 0
mean_confidences_per_bin[i] = 0
continue
cur_gt_labels = self.y_true[probability_bin_mask]
cur_pred_labels = self.y_pred[probability_bin_mask]
cur_top_class_confidences = self._top_class_confidences[
probability_bin_mask
]
cur_accuracy = safe_accuracy_score(cur_gt_labels, cur_pred_labels)
cur_mean_confidence = cur_top_class_confidences.mean()
members_per_bin[i] = cur_members
accuracies_per_bin[i] = cur_accuracy
mean_confidences_per_bin[i] = cur_mean_confidence
return accuracies_per_bin, members_per_bin, mean_confidences_per_bin
    # TODO: the reliabilities are plotted above the centers of the bins, not above the mean confidences.
    #   The latter would make plotting multiple curves at once impossible, but the plot would be more precise.
def plot_reliability_curves(
self, class_labels: Sequence[Union[int, str]], display_weights=False
):
"""
:param class_labels:
:param display_weights: If True, for each reliability curve the weights of each bin will be
plotted as histogram. The weights have been scaled for the sake of display, only relative differences
between them have an interpretable meaning.
The errors containing "expected" in the name take these weights into account.
:return:
"""
colors = ListedColormap(["y", "g", "r", "c", "m"])
plt.figure()
plt.title(f"Reliability curves ({self.bins} bins)")
plt.xlabel("confidence")
plt.ylabel("ground truth probability")
plt.axis("equal")
x_values = self._discretized_probab_values
plt.plot(
np.linspace(0, 1), np.linspace(0, 1), label="perfect calibration", color="b"
)
for i, class_label in enumerate(class_labels):
color = colors(i)
if isinstance(class_label, int):
label = f"class {class_label}"
y_values, weights, _ = self.marginal_reliabilities(class_label)
elif class_label == self.TOP_CLASS_LABEL:
label = "prediction"
y_values, weights, _ = self.top_class_reliabilities()
else:
raise ValueError(f"Unknown class label: {class_label}")
plt.plot(x_values, y_values, marker=".", label=label, color=color)
if display_weights:
# rescale the weights such that the maximum is at 1/2 for improved visibility
weights = 1 / 2 * weights / weights.max()
plt.bar(
x_values,
weights,
alpha=0.2,
width=1 / self.bins,
color=color,
label=f"bin_weights for {label}",
)
axes = plt.gca()
axes.set_xlim([0, 1])
axes.set_ylim([0, 1])
plt.legend(loc="best")
# TODO: delete, I don't think we need this. Maybe add flag to only plot bin weights to the plot above
def plot_confidence_distributions(
self, class_labels: Sequence[Union[int, str]], new_fig=True
):
"""
:param new_fig:
:param class_labels:
:return:
"""
colors = ListedColormap(["y", "g", "r", "c", "m"])
if new_fig:
plt.figure()
plt.title(f" Confidence Distribution ({self.bins} bins)")
plt.xlabel("confidence")
plt.ylabel("Frequency")
x_values = self._discretized_probab_values
for i, class_label in enumerate(class_labels):
color = colors(i)
if isinstance(class_label, int):
label = f"class {class_label}"
_, weights, _ = self.marginal_reliabilities(class_label)
elif class_label == self.TOP_CLASS_LABEL:
label = "prediction"
_, weights, _ = self.top_class_reliabilities()
else:
raise ValueError(f"Unknown class label: {class_label}")
plt.bar(
x_values,
weights,
alpha=0.3,
width=1 / self.bins,
label=label,
color=color,
)
axes = plt.gca()
axes.set_xlim([0, 1])
plt.legend(loc="best")
if new_fig:
plt.show()
def plot_gt_distribution(self, label_names: Dict[int, str] = None):
class_labels, counts = np.unique(self.y_true, return_counts=True)
if label_names is not None:
class_labels = [label_names.get(l, l) for l in class_labels]
fig, ax = plt.subplots()
ax.pie(counts, labels=class_labels, autopct="%1.1f%%", startangle=90)
ax.axis("equal") # Equal aspect ratio ensures that pie is drawn as a circle.
ax.set_title("Ground Truth Distribution")
fig.show()
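# Illustrative usage sketch (the labels and confidences below are random and
# only demonstrate the call pattern):
#   import numpy as np
#   y_true = np.random.randint(0, 3, size=1000)
#   confidences = np.random.dirichlet(np.ones(3), size=1000)
#   stats = EvalStats(y_true, confidences, bins=20)
#   print(stats.accuracy(), stats.expected_calibration_error())
#   stats.plot_reliability_curves([EvalStats.TOP_CLASS_LABEL], display_weights=True)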
| python |
import subprocess
import os
from pathlib import Path
import glob
if os.path.exists('text.txt'):
    os.remove('text.txt')
os.system('E:/ChromeCacheView.exe /scomma D:/Studies/nsfw_test/text.txt')
fo = open("text.txt", "r")
while True:
line = fo.readline()
if not line: break
data=line.split(",",2)
if "data_2 [12288]" in line:
print(data[0])
print(data[1])
break
cmd2 = subprocess.Popen('cmd.exe /C E:\ChromeCacheView.exe /copycache "'+data[1]+'" "image/jpeg" /CopyFilesFolder "D:\check" /UseWebSiteDirStructure 0')
names = glob.glob("D:/Studies/nsfw_test/*.jpg")
for i in names:
    if os.path.basename(i).startswith(('faces', 'server', 'output')):
continue
img = i
break
cmd3 = subprocess.Popen('cmd.exe /C python nsfw.py -m data/open_nsfw-weights.npy -u "'+data[1]+'" "'+img+'"')
| python |
"""
Global Template Variables
"""
# Standard Library
import os
# Local Library
from app.modules.entity.option_entity import OptionEntity
def globals(request):
option_entity = OptionEntity()
return {
"google_account": option_entity.get_value_by_key("google_analytics_account", ""),
"app_timezone": os.getenv("APP_TIMEZONE", "UTC"),
"activate_notifications": os.getenv("ACTIVATE_NOTIFICATIONS", "false") == "true",
}
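# Illustrative note (an assumption - the dotted path depends on where this
# module lives): a Django context processor such as globals() is enabled by
# adding its dotted path to TEMPLATES[0]['OPTIONS']['context_processors'] in
# settings.py, e.g. '<your_app>.context_processors.globals'.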
| python |
# get rid of this for python2.6+
import imp, sys, os
def imp_path():
cwd = os.path.realpath('.')
return [path for path in sys.path if os.path.realpath(path) != cwd]
try:
json = imp.load_module('json', *imp.find_module('json', imp_path()))
loads, dumps = json.loads, json.dumps
except ImportError:
try:
from simplejson import loads, dumps
except ImportError:
from cjson import decode as loads
from cjson import encode
def dumps(x):
# do the same thing as simplejson: assume that all strings are utf-8
if isinstance(x, str):
x = x.decode('utf-8')
return encode(x)
| python |
from enum import Enum
class Trend(Enum):
NONE = 0
DoubleUp = 1
SingleUp = 2
FortyFiveUp = 3
Flat = 4
FortyFiveDown = 5
SingleDown = 6
DoubleDown = 7
NotComputable = 8
RateOutOfRange = 9
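# Illustrative usage sketch: members can be looked up by value or by name.
#   Trend(4) is Trend.Flat        -> True
#   Trend['SingleUp'].value == 2  -> True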
| python |
"""
this is a practice file
"""
import math
math.floor(3.4)
| python |
import tensorflow as tf
def loss_fn(y_true, y_pred):
return tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true, y_pred))
def accuracy_fn(y_true, y_pred):
y_true = tf.cast(tf.argmax(y_true, axis=-1), tf.float32)
y_pred = tf.cast(tf.argmax(y_pred, axis=-1), tf.float32)
compare = tf.cast(tf.equal(y_true, y_pred), tf.float32)
accuracy = tf.reduce_mean(compare) * 100
return accuracy
def mse_loss(y_true, y_pred):
return tf.reduce_mean(tf.math.squared_difference(y_true, y_pred))
def binary_cross_entropy_loss(y_true, y_pred):
return tf.reduce_mean(- y_true * tf.math.log(y_pred) - (1 - y_true) * tf.math.log(1 - y_pred))
def categorical_cross_entropy_loss(y_true, y_pred):
return tf.reduce_mean(tf.keras.losses.categorical_crossentropy(y_true, y_pred))
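if __name__ == "__main__":
    # Illustrative sketch (shapes and values are arbitrary): the helpers above
    # expect one-hot labels and probability-like predictions.
    y_true = tf.one_hot([0, 2, 1], depth=3)
    y_pred = tf.constant([[0.8, 0.1, 0.1], [0.2, 0.2, 0.6], [0.3, 0.5, 0.2]])
    print(float(loss_fn(y_true, y_pred)), float(accuracy_fn(y_true, y_pred)))
    print(float(mse_loss(y_true, y_pred)), float(binary_cross_entropy_loss(y_true, y_pred)))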
| python |
import django
from django.conf import settings
def pytest_configure():
settings.configure(
INSTALLED_APPS=[
'django.contrib.contenttypes',
'django.contrib.auth',
],
ROOT_URLCONF='djangoapp.urls',
STATIC_URL='/static/',
LANGUAGE_CODE='en',
SITE_ID=1,
MIDDLEWARE=(
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
),
)
django.setup()
| python |
#!/usr/bin/env python
from matplotlib import pyplot as pp
import numpy as np
import sys
def levenstein(source, target):
if len(source) < len(target):
return levenstein(target, source)
if len(target) == 0:
return len(source)
source = np.array(tuple(source))
target = np.array(tuple(target))
prev_row = np.arange(target.size + 1)
for s in source:
curr_row = prev_row + 1
curr_row[1:] = np.minimum(
curr_row[1:], np.add(prev_row[:-1], target != s))
curr_row[1:] = np.minimum(
curr_row[1:], curr_row[0:-1] + 1)
prev_row = curr_row
return prev_row[-1]
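# Illustrative sanity check (a standard reference value for this edit distance):
assert levenstein("kitten", "sitting") == 3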
with open(sys.argv[1]) as file:
lines = list(map(lambda l: l[:-1], file.readlines()))
ds, l0 = [], lines[0]
for line in lines[1:]:
d = levenstein(line, l0)
if d > 0: ds.append(d)
pp.title('Levenstein Differences')
pp.hist(ds, bins=13)
pp.grid()
pp.show()
| python |
__all__ = [
'q1_collections_counter',
'q2_defaultdict_tutorial',
'q3_py_collections_namedtuple',
'q4_py_collections_ordereddict',
'q5_word_order',
'q6_py_collections_deque',
'q7_most_commons',
'q8_piling_up',
]
| python |
"""
Utility functions for plotting figures consistently across different parts of the project
"""
import matplotlib.pyplot as plt
def set_font_size():
"""
Function which sets a standardized font size for all figures. Call this prior to plotting
to apply the standard
"""
SMALLER_SIZE = 10
MED_SIZE = 12
BIG_SIZE = 18
# plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MED_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MED_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALLER_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALLER_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MED_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIG_SIZE) # fontsize of the figure title | python |
"""
Status of the API.
"""
from flask import request
from flask_restful import Resource
class Working(Resource):
"""
    Working reveals whether or not the connection to the API is working.
"""
def get(self):
"""
        GET /working/
        Returns:
            A dict indicating that the API connection is working.
"""
# set up object to export
data = { "working": "YES" }
return data | python |
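# Illustrative registration sketch (an assumption - `api` would be a
# flask_restful Api instance created elsewhere in the application):
#   api.add_resource(Working, '/working/')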
# SPDX-License-Identifier: Apache-2.0
#
# Copyright (C) 2016, ARM Limited and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import os
import itertools
from statistics import mean
from lisa.tests.base import (
TestMetric, Result, ResultBundle, AggregatedResultBundle, TestBundle,
RTATestBundle, CannotCreateError
)
from lisa.target import Target
from lisa.utils import ArtifactPath, groupby, ExekallTaggable
from lisa.datautils import series_mean, df_window, df_filter_task_ids, series_tunnel_mean
from lisa.wlgen.rta import RTA, Periodic, RTATask
from lisa.trace import FtraceCollector, requires_events
from lisa.analysis.load_tracking import LoadTrackingAnalysis
from lisa.analysis.tasks import TasksAnalysis
from lisa.pelt import PELT_SCALE, simulate_pelt, pelt_settling_time
UTIL_SCALE = PELT_SCALE
UTIL_CONVERGENCE_TIME_S = pelt_settling_time(1, init=0, final=1024)
"""
Time in seconds for util_avg to converge (i.e. ignored time)
"""
class LoadTrackingHelpers:
"""
Common bunch of helpers for load tracking tests.
"""
MAX_RTAPP_CALIB_DEVIATION = 3 / 100
"""
Blacklist CPUs that have a RTapp calibration value that deviates too much
from the average calib value in their capacity class.
"""
@classmethod
def _get_blacklisted_cpus(cls, plat_info):
"""
        Consider some CPUs as blacklisted when the load would not be
        proportional to utilization on them.
        That happens for CPUs that are busy executing code other than the test
        workload, like handling interrupts. This is detected by looking at the
        RTapp calibration value, and outliers are blacklisted.
"""
rtapp_calib = plat_info['rtapp']['calib']
blacklisted = set()
# For each class of CPUs, get the average rtapp calibration value
# and blacklist the ones that are deviating too much from that
for cpu_class in plat_info['capacity-classes']:
calib_mean = mean(rtapp_calib[cpu] for cpu in cpu_class)
calib_max = (1 + cls.MAX_RTAPP_CALIB_DEVIATION) * calib_mean
blacklisted.update(
cpu
for cpu in cpu_class
# exclude outliers that are too slow (i.e. calib value too small)
if rtapp_calib[cpu] > calib_max
)
return sorted(blacklisted)
@classmethod
def filter_capacity_classes(cls, plat_info):
"""
Filter out capacity-classes key of ``plat_info`` to remove blacklisted CPUs.
.. seealso:: :meth:`_get_blacklisted_cpus`
"""
blacklisted_cpus = set(cls._get_blacklisted_cpus(plat_info))
return [
sorted(set(cpu_class) - blacklisted_cpus)
for cpu_class in plat_info['capacity-classes']
]
@classmethod
def correct_expected_pelt(cls, plat_info, cpu, signal_value):
"""
Correct an expected PELT signal from ``rt-app`` based on the calibration
values.
Since the instruction mix of ``rt-app`` might not be the same as the
benchmark that was used to establish CPU capacities, the duty cycle of
``rt-app`` will only be accurate on big CPUs. When we know on which CPU
the task actually executed, we can correct the expected value based on
the ratio of calibration values and CPU capacities.
"""
calib = plat_info['rtapp']['calib']
cpu_capacities = plat_info['cpu-capacities']
# Correct the signal mean to what it should have been if rt-app
# workload was exactly the same as the one used to establish CPU
# capacities
true_capacities = RTA.get_cpu_capacities_from_calibrations(calib)
return signal_value * cpu_capacities[cpu] / true_capacities[cpu]
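# Worked illustration of the correction above (numbers are invented): if the
# expected mean util derived from the rt-app duty cycle is 300 on a CPU whose
# published capacity is 512 but whose calibration-derived capacity is 460, the
# corrected expectation is 300 * 512 / 460 ~= 334.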
class LoadTrackingBase(RTATestBundle, LoadTrackingHelpers):
"""
Base class for shared functionality of load tracking tests
"""
cpufreq_conf = {
"governor": "performance"
}
"""
The cpufreq configuration used while the synthetic workload is being run.
Items are arguments to :meth:`devlib.cpufreq.use_governor`.
"""
@classmethod
def _from_target(cls, target: Target, *, res_dir: ArtifactPath = None, ftrace_coll: FtraceCollector = None) -> 'LoadTrackingBase':
plat_info = target.plat_info
rtapp_profile = cls.get_rtapp_profile(plat_info)
# After a bit of experimenting, it turns out that on some platforms
# misprediction of the idle time (which leads to a shallow idle state,
# a wakeup and another idle nap) can mess up the duty cycle of the
# rt-app task we're running. In our case, a 50% duty cycle, 16ms period
# task would always be active for 8ms, but it would sometimes sleep for
# only 5 or 6 ms.
# This is fine to do this here, as we only care about the proper
# behaviour of the signal on running/not-running tasks.
with target.disable_idle_states():
with target.cpufreq.use_governor(**cls.cpufreq_conf):
cls.run_rtapp(target, res_dir, rtapp_profile, ftrace_coll)
return cls(res_dir, plat_info)
@staticmethod
def is_almost_equal(target, value, allowed_delta_pct):
"""
        Verify that ``value`` is reasonably close to ``target``
"""
delta = target * allowed_delta_pct / 100
return target - delta <= value <= target + delta
class InvarianceItem(LoadTrackingBase, ExekallTaggable):
"""
Basic check for CPU and frequency invariant load and utilization tracking
**Expected Behaviour:**
Load tracking signals are scaled so that the workload results in
roughly the same util & load values regardless of compute power of the
CPU used and its frequency.
"""
task_prefix = 'invar'
cpufreq_conf = {
"governor": "userspace"
}
def __init__(self, res_dir, plat_info, cpu, freq, freq_list):
super().__init__(res_dir, plat_info)
self.freq = freq
self.freq_list = freq_list
self.cpu = cpu
@property
def rtapp_profile(self):
return self.get_rtapp_profile(self.plat_info, cpu=self.cpu, freq=self.freq)
@property
def task_name(self):
"""
The name of the only task this test uses
"""
tasks = self.rtapp_tasks
assert len(tasks) == 1
return tasks[0]
@property
def wlgen_task(self):
"""
The :class:`lisa.wlgen.rta.RTATask` description of the only rt-app
task, as specified in the profile.
"""
tasks = list(self.rtapp_profile.values())
assert len(tasks) == 1
return tasks[0]
def get_tags(self):
return {'cpu': '{}@{}'.format(self.cpu, self.freq)}
@classmethod
def get_rtapp_profile(cls, plat_info, cpu, freq):
"""
        Get a specification for an rt-app workload with the specified duty
cycle, pinned to the given CPU.
"""
freq_capa = cls._get_freq_capa(cpu, freq, plat_info)
duty_cycle_pct = freq_capa / UTIL_SCALE * 100
# Use half of the capacity at that OPP, so we are sure that the
# task will fit even at the lowest OPP
duty_cycle_pct //= 2
rtapp_profile = {}
rtapp_profile["{}{}".format(cls.task_prefix, cpu)] = Periodic(
duty_cycle_pct=duty_cycle_pct,
duration_s=2,
period_ms=cls.TASK_PERIOD_MS,
cpus=[cpu],
)
return rtapp_profile
@classmethod
def _from_target(cls, target: Target, *, cpu: int, freq: int, freq_list=None, res_dir: ArtifactPath = None, ftrace_coll: FtraceCollector = None) -> 'InvarianceItem':
"""
:param cpu: CPU to use, or ``None`` to automatically choose an
appropriate set of CPUs.
:type cpu: int or None
:param freq: Frequency to run at in kHz. It is only relevant in
combination with ``cpu``.
:type freq: int or None
"""
plat_info = target.plat_info
rtapp_profile = cls.get_rtapp_profile(plat_info, cpu, freq)
logger = cls.get_logger()
with target.cpufreq.use_governor(**cls.cpufreq_conf):
target.cpufreq.set_frequency(cpu, freq)
logger.debug('CPU{} frequency: {}'.format(cpu, target.cpufreq.get_frequency(cpu)))
cls.run_rtapp(target, res_dir, rtapp_profile, ftrace_coll)
freq_list = freq_list or [freq]
return cls(res_dir, plat_info, cpu, freq, freq_list)
@staticmethod
def _get_freq_capa(cpu, freq, plat_info):
capacity = plat_info['cpu-capacities'][cpu]
# Scale the capacity linearly according to the frequency
max_freq = max(plat_info['freqs'][cpu])
capacity *= freq / max_freq
return capacity
@LoadTrackingAnalysis.df_tasks_signal.used_events
@TasksAnalysis.df_task_activation.used_events
def get_simulated_pelt(self, task, signal_name):
"""
Simulate a PELT signal for a given task.
:param task: task to look for in the trace.
:type task: int or str or tuple(int, str)
:param signal_name: Name of the PELT signal to simulate.
:type signal_name: str
:return: A :class:`pandas.DataFrame` with a ``simulated`` column
containing the simulated signal, along with the column of the
signal as found in the trace.
"""
logger = self.get_logger()
trace = self.trace
task = trace.get_task_id(task)
cpus = trace.analysis.tasks.cpus_of_tasks([task])
df_activation = trace.analysis.tasks.df_task_activation(task)
df = trace.analysis.load_tracking.df_tasks_signal(signal_name)
df = df_filter_task_ids(df, [task])
# Ignore the first activation, as its signals are incorrect
df_activation = df_activation.iloc[2:]
# Make sure the activation df does not start before the dataframe of
# signal values, otherwise we cannot provide a sensible init value
df_activation = df_activation[df.index[0]:]
# Get the initial signal value matching the first activation we will care about
init_iloc = df.index.get_loc(df_activation.index[0], method='ffill')
init = df[signal_name].iloc[init_iloc]
try:
# PELT clock in nanoseconds
clock = df['update_time'] * 1e-9
except KeyError:
if any(
self.plat_info['cpu-capacities'][cpu] != UTIL_SCALE
for phase in self.wlgen_task.phases
for cpu in phase.cpus
):
raise CannotCreateError('PELT time scaling can only be simulated when the PELT clock is available from the trace')
logger.warning('PELT clock is not available, ftrace timestamp will be used at the expense of accuracy')
clock = None
df['simulated'] = simulate_pelt(df_activation['active'], index=df.index, init=init, clock=clock)
# Since load is now CPU invariant in recent kernel versions, we don't
# rescale it back. To match the old behavior, that line is
# needed:
# df['simulated'] /= self.plat_info['cpu-capacities'][cpu] / UTIL_SCALE
kernel_version = self.plat_info['kernel']['version']
if (
signal_name == 'load'
and kernel_version.parts[:2] < (5, 1)
):
            logger.warning('Load signal is assumed to be CPU invariant, which is true for recent mainline kernels, but may be wrong for {}'.format(
kernel_version,
))
df['error'] = df[signal_name] - df['simulated']
df = df.dropna()
return df
def _plot_pelt(self, task, signal_name, simulated, test_name):
trace = self.trace
kwargs = dict(interactive=False)
axis = trace.analysis.load_tracking.plot_task_signals(task, signals=[signal_name], **kwargs)
simulated.plot(ax=axis, drawstyle='steps-post', label='simulated {}'.format(signal_name))
activation_axis = axis.twinx()
trace.analysis.tasks.plot_task_activation(task, alpha=0.2, axis=activation_axis, duration=True, **kwargs)
axis.legend()
path = ArtifactPath.join(self.res_dir, '{}_{}.png'.format(test_name, signal_name))
trace.analysis.load_tracking.save_plot(axis.get_figure(), filepath=path)
def _add_cpu_metric(self, res_bundle):
freq_str = '@{}'.format(self.freq) if self.freq is not None else ''
res_bundle.add_metric("cpu", '{}{}'.format(self.cpu, freq_str))
return res_bundle
@get_simulated_pelt.used_events
def _test_behaviour(self, signal_name, error_margin_pct):
task = self.task_name
phase = self.wlgen_task.phases[0]
df = self.get_simulated_pelt(task, signal_name)
cpus = phase.cpus
assert len(cpus) == 1
cpu = cpus[0]
expected_duty_cycle_pct = phase.duty_cycle_pct
expected_final_util = expected_duty_cycle_pct / 100 * UTIL_SCALE
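        # e.g. a 25% duty cycle maps to an expected util of 0.25 * UTIL_SCALE
        # (256 if UTIL_SCALE is 1024; illustrative numbers only).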
settling_time = pelt_settling_time(10, init=0, final=expected_final_util)
settling_time += df.index[0]
df = df[settling_time:]
# Instead of taking the mean, take the average between the min and max
# values of the settled signal. This avoids the bias introduced by the
# fact that the util signal stays high while the task sleeps
settled_signal_mean = series_tunnel_mean(df[signal_name])
expected_signal_mean = expected_final_util
signal_mean_error_pct = abs(expected_signal_mean - settled_signal_mean) / UTIL_SCALE * 100
res = ResultBundle.from_bool(signal_mean_error_pct < error_margin_pct)
res.add_metric('expected mean', expected_signal_mean)
res.add_metric('settled mean', settled_signal_mean)
res.add_metric('settled mean error', signal_mean_error_pct, '%')
self._plot_pelt(task, signal_name, df['simulated'], 'behaviour')
res = self._add_cpu_metric(res)
return res
@get_simulated_pelt.used_events
def _test_correctness(self, signal_name, mean_error_margin_pct, max_error_margin_pct):
task = self.task_name
df = self.get_simulated_pelt(task, signal_name)
abs_error = df['error'].abs()
mean_error_pct = series_mean(abs_error) / UTIL_SCALE * 100
max_error_pct = abs_error.max() / UTIL_SCALE * 100
mean_ok = mean_error_pct <= mean_error_margin_pct
max_ok = max_error_pct <= max_error_margin_pct
res = ResultBundle.from_bool(mean_ok and max_ok)
res.add_metric('actual mean', series_mean(df[signal_name]))
res.add_metric('simulated mean', series_mean(df['simulated']))
res.add_metric('mean error', mean_error_pct, '%')
res.add_metric('actual max', df[signal_name].max())
res.add_metric('simulated max', df['simulated'].max())
res.add_metric('max error', max_error_pct, '%')
self._plot_pelt(task, signal_name, df['simulated'], 'correctness')
res = self._add_cpu_metric(res)
return res
@_test_correctness.used_events
def test_util_correctness(self, mean_error_margin_pct=2, max_error_margin_pct=5) -> ResultBundle:
"""
Check that the utilization signal is as expected.
:param mean_error_margin_pct: Maximum allowed difference in the mean of
the actual signal and the simulated one, as a percentage of utilization
scale.
:type mean_error_margin_pct: float
:param max_error_margin_pct: Maximum allowed difference between samples
of the actual signal and the simulated one, as a percentage of
utilization scale.
:type max_error_margin_pct: float
"""
return self._test_correctness(
signal_name='util',
mean_error_margin_pct=mean_error_margin_pct,
max_error_margin_pct=max_error_margin_pct,
)
@_test_correctness.used_events
def test_load_correctness(self, mean_error_margin_pct=2, max_error_margin_pct=5) -> ResultBundle:
"""
Same as :meth:`test_util_correctness` but checking the load.
"""
return self._test_correctness(
signal_name='load',
mean_error_margin_pct=mean_error_margin_pct,
max_error_margin_pct=max_error_margin_pct,
)
@_test_behaviour.used_events
@RTATestBundle.check_noisy_tasks(noise_threshold_pct=1)
def test_util_behaviour(self, error_margin_pct=5) -> ResultBundle:
"""
Check the utilization mean is linked to the task duty cycle.
.. note:: That is not really the case, as the util of a task is not
updated when the task is sleeping, but is fairly close to reality
as long as the task period is small enough.
:param error_margin_pct: Allowed difference in percentage of
utilization scale.
:type error_margin_pct: float
"""
return self._test_behaviour('util', error_margin_pct)
@_test_behaviour.used_events
@RTATestBundle.check_noisy_tasks(noise_threshold_pct=1)
def test_load_behaviour(self, error_margin_pct=5) -> ResultBundle:
"""
Same as :meth:`test_util_behaviour` but checking the load.
"""
return self._test_behaviour('load', error_margin_pct)
class Invariance(TestBundle, LoadTrackingHelpers):
"""
Basic check for frequency invariant load and utilization tracking
This test runs the same workload on one CPU of each capacity available in
the system at a cross section of available frequencies.
This class is mostly a wrapper around :class:`InvarianceItem`,
providing a way to build a list of those for a few frequencies, and
    providing aggregated versions of the tests. Calling the test methods on
    the items directly is recommended to avoid the loss of
    information that comes with aggregating the
:class:`~lisa.tests.base.Result` of each item.
`invariance_items` instance attribute is a list of instances of
:class:`InvarianceItem`.
"""
# Make sure ftrace_conf is available so exekall can find the right settings
# when building the FtraceCollector
ftrace_conf = InvarianceItem.ftrace_conf
NR_FREQUENCIES = 8
"""
Maximum number of tested frequencies.
"""
def __init__(self, res_dir, plat_info, invariance_items):
super().__init__(res_dir, plat_info)
self.invariance_items = invariance_items
@classmethod
def _build_invariance_items(cls, target, res_dir, ftrace_coll):
"""
Yield a :class:`InvarianceItem` for a subset of target's
frequencies, for one CPU of each capacity class.
This is a generator function.
:rtype: Iterator[:class:`InvarianceItem`]
"""
plat_info = target.plat_info
def pick_cpu(filtered_class, cpu_class):
try:
return filtered_class[0]
except IndexError:
raise RuntimeError('All CPUs of one capacity class have been blacklisted: {}'.format(cpu_class))
# pick one CPU per class of capacity
cpus = [
pick_cpu(filtered_class, cpu_class)
for cpu_class, filtered_class
in zip(
plat_info['capacity-classes'],
cls.filter_capacity_classes(plat_info)
)
]
def select_freqs(cpu):
all_freqs = plat_info['freqs'][cpu]
def interpolate(start, stop, nr):
step = (stop - start) / (nr - 1)
return [start + i * step for i in range(nr)]
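            # e.g. interpolate(200000, 1400000, 4) == [200000.0, 600000.0, 1000000.0, 1400000.0]
            # (illustrative frequencies in kHz)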
# Select the higher freq no matter what
selected_freqs = {max(all_freqs)}
available_freqs = set(all_freqs) - selected_freqs
nr_freqs = cls.NR_FREQUENCIES - len(selected_freqs)
for ideal_freq in interpolate(min(all_freqs), max(all_freqs), nr_freqs):
if not available_freqs:
break
# Select the freq closest to ideal
selected_freq = min(available_freqs, key=lambda freq: abs(freq - ideal_freq))
available_freqs.discard(selected_freq)
selected_freqs.add(selected_freq)
return all_freqs, sorted(selected_freqs)
cpu_freqs = {
cpu: select_freqs(cpu)
for cpu in cpus
}
logger = cls.get_logger()
logger.info('Will run on: {}'.format(
', '.join(
'CPU{}@{}'.format(cpu, freq)
for cpu, (all_freqs, freq_list) in sorted(cpu_freqs.items())
for freq in freq_list
)
))
for cpu, (all_freqs, freq_list) in sorted(cpu_freqs.items()):
for freq in freq_list:
item_dir = ArtifactPath.join(res_dir, "{prefix}_{cpu}@{freq}".format(
prefix=InvarianceItem.task_prefix,
cpu=cpu,
freq=freq,
))
os.makedirs(item_dir)
logger.info('Running experiment for CPU {}@{}'.format(cpu, freq))
yield InvarianceItem.from_target(
target, cpu=cpu, freq=freq, freq_list=all_freqs, res_dir=item_dir,
ftrace_coll=ftrace_coll,
)
def iter_invariance_items(self) -> InvarianceItem:
yield from self.invariance_items
@classmethod
def _from_target(cls, target: Target, *, res_dir: ArtifactPath = None, ftrace_coll: FtraceCollector = None) -> 'Invariance':
return cls(res_dir, target.plat_info,
list(cls._build_invariance_items(target, res_dir, ftrace_coll))
)
def get_trace(self, cpu, freq):
"""
        :returns: The :class:`InvarianceItem` that ran at the given CPU and frequency
"""
for item in self.invariance_items:
if item.cpu == cpu and item.freq == freq:
return item
        raise ValueError('No invariance item matching {cpu}@{freq}'.format(cpu=cpu, freq=freq))
# Combined version of some other tests, applied on all available
# InvarianceItem with the result merged.
@InvarianceItem.test_util_correctness.used_events
def test_util_correctness(self, mean_error_margin_pct=2, max_error_margin_pct=5) -> AggregatedResultBundle:
"""
Aggregated version of :meth:`InvarianceItem.test_util_correctness`
"""
def item_test(test_item):
return test_item.test_util_correctness(
mean_error_margin_pct=mean_error_margin_pct,
max_error_margin_pct=max_error_margin_pct,
)
return self._test_all_freq(item_test)
@InvarianceItem.test_load_correctness.used_events
def test_load_correctness(self, mean_error_margin_pct=2, max_error_margin_pct=5) -> AggregatedResultBundle:
"""
Aggregated version of :meth:`InvarianceItem.test_load_correctness`
"""
def item_test(test_item):
return test_item.test_load_correctness(
mean_error_margin_pct=mean_error_margin_pct,
max_error_margin_pct=max_error_margin_pct,
)
return self._test_all_freq(item_test)
@InvarianceItem.test_util_behaviour.used_events
def test_util_behaviour(self, error_margin_pct=5) -> AggregatedResultBundle:
"""
Aggregated version of :meth:`InvarianceItem.test_util_behaviour`
"""
def item_test(test_item):
return test_item.test_util_behaviour(
error_margin_pct=error_margin_pct,
)
return self._test_all_freq(item_test)
@InvarianceItem.test_load_behaviour.used_events
def test_load_behaviour(self, error_margin_pct=5) -> AggregatedResultBundle:
"""
Aggregated version of :meth:`InvarianceItem.test_load_behaviour`
"""
def item_test(test_item):
return test_item.test_load_behaviour(
error_margin_pct=error_margin_pct,
)
return self._test_all_freq(item_test)
def _test_all_freq(self, item_test):
"""
Apply the `item_test` function on all instances of
:class:`InvarianceItem` and aggregate the returned
:class:`~lisa.tests.base.ResultBundle` into one.
:attr:`~lisa.tests.base.Result.UNDECIDED` is ignored.
"""
item_res_bundles = [
item_test(item)
for item in self.invariance_items
]
return AggregatedResultBundle(item_res_bundles, 'cpu')
@InvarianceItem.test_util_behaviour.used_events
def test_cpu_invariance(self) -> AggregatedResultBundle:
"""
        Check that the items using the max freq on each CPU pass the util avg test.
There could be false positives, but they are expected to be relatively
rare.
.. seealso:: :class:`InvarianceItem.test_util_behaviour`
"""
res_list = []
for cpu, item_group in groupby(self.invariance_items, key=lambda x: x.cpu):
item_group = list(item_group)
# combine all frequencies of that CPU class, although they should
# all be the same
max_freq = max(itertools.chain.from_iterable(
x.freq_list for x in item_group
))
max_freq_items = [
item
for item in item_group
if item.freq == max_freq
]
for item in max_freq_items:
# Only test util, as it should be more robust
res = item.test_util_behaviour()
res_list.append(res)
return AggregatedResultBundle(res_list, 'cpu')
@InvarianceItem.test_util_behaviour.used_events
def test_freq_invariance(self) -> ResultBundle:
"""
Check that at least one CPU has items passing for all tested frequencies.
.. seealso:: :class:`InvarianceItem.test_util_behaviour`
"""
logger = self.get_logger()
def make_group_bundle(cpu, item_group):
bundle = AggregatedResultBundle(
[
# Only test util, as it should be more robust
item.test_util_behaviour()
for item in item_group
],
# each item's "cpu" metric also contains the frequency
name_metric='cpu',
)
# At that level, we only report the CPU, since nested bundles cover
# different frequencies
bundle.add_metric('cpu', cpu)
logger.info('Util avg invariance {res} for CPU {cpu}'.format(
res=bundle.result.lower_name,
cpu=cpu,
))
return bundle
group_result_bundles = [
make_group_bundle(cpu, item_group)
for cpu, item_group in groupby(self.invariance_items, key=lambda x: x.cpu)
]
# The combination differs from the AggregatedResultBundle default one:
# we consider as passed as long as at least one of the group has
# passed, instead of forcing all of them to pass.
if any(result_bundle.result is Result.PASSED for result_bundle in group_result_bundles):
overall_result = Result.PASSED
elif all(result_bundle.result is Result.UNDECIDED for result_bundle in group_result_bundles):
overall_result = Result.UNDECIDED
else:
overall_result = Result.FAILED
return AggregatedResultBundle(
group_result_bundles,
name_metric='cpu',
result=overall_result
)
class CPUMigrationBase(LoadTrackingBase):
"""
Base class for migration-related load tracking tests
The idea here is to run several rt-app tasks and to have them pinned to
a single CPU for a single phase. They can change CPUs in a new phase,
and we can then inspect the CPU utilization - it should match the
sum of the utilization of all the tasks running on it.
**Design notes:**
Since we sum up the utilization of each task, make sure not to overload the
CPU - IOW, there should always be some idle cycles.
The code assumes all tasks have the same number of phases, and that those
phases are all aligned.
"""
PHASE_DURATION_S = 3 * UTIL_CONVERGENCE_TIME_S
"""
The duration of a single phase
"""
TASK_PERIOD_MS = 16
"""
The average value of the runqueue PELT signals is very dependent on the task
    period, so it's important to set it to a known valid value in this class.
"""
@abc.abstractmethod
def get_nr_required_cpu(cls, plat_info):
"""
The number of CPUs of same capacity involved in the test
"""
pass
@classmethod
def run_rtapp(cls, target, res_dir, profile, ftrace_coll, cgroup=None):
# Just do some validation on the profile
for name, task in profile.items():
for phase in task.phases:
if len(phase.cpus) != 1:
raise RuntimeError("Each phase must be tied to a single CPU. "
"Task \"{}\" violates this".format(name))
super().run_rtapp(target, res_dir, profile, ftrace_coll, cgroup)
@property
def cpus(self):
"""
All CPUs used by RTapp workload.
"""
return set(itertools.chain.from_iterable(
phase.cpus
for task in self.rtapp_profile.values()
for phase in task.phases
))
@classmethod
def check_from_target(cls, target):
super().check_from_target(target)
try:
target.plat_info["cpu-capacities"]
except KeyError as e:
raise CannotCreateError(str(e))
# Check that there are enough CPUs of the same capacity
cls.get_migration_cpus(target.plat_info)
@classmethod
def get_migration_cpus(cls, plat_info):
"""
:returns: N CPUs of same capacity, with N set by :meth:`get_nr_required_cpu`.
"""
# Iterate over descending CPU capacity groups
nr_required_cpu = cls.get_nr_required_cpu(plat_info)
for cpus in reversed(plat_info["capacity-classes"]):
if len(cpus) >= nr_required_cpu:
return cpus[:nr_required_cpu]
raise CannotCreateError(
"This workload requires {} CPUs of identical capacity".format(
nr_required_cpu))
def get_expected_cpu_util(self):
"""
Get the per-phase average CPU utilization expected from the rtapp profile
:returns: A dict of the shape {cpu : {phase_id : expected_util}}
"""
cpu_util = {}
for task in self.rtapp_profile.values():
for phase_id, phase in enumerate(task.phases):
cpu = phase.cpus[0]
cpu_util.setdefault(cpu, {}).setdefault(phase_id, 0)
cpu_util[cpu][phase_id] += UTIL_SCALE * (phase.duty_cycle_pct / 100)
return cpu_util
@property
def reference_task(self):
return list(self.rtapp_profile.values())[0]
@LoadTrackingAnalysis.df_cpus_signal.used_events
def get_trace_cpu_util(self):
"""
Get the per-phase average CPU utilization read from the trace
:returns: A dict of the shape {cpu : {phase_id : trace_util}}
"""
df = self.trace.analysis.load_tracking.df_cpus_signal('util')
phase_start = self.trace.start
cpu_util = {}
for i, phase in enumerate(self.reference_task.phases):
# Start looking at signals once they should've converged
start = phase_start + UTIL_CONVERGENCE_TIME_S
# Trim the end a bit, otherwise we could have one or two events
# from the next phase
end = phase_start + phase.duration_s * .9
phase_df = df[start:end]
for cpu in self.cpus:
util = phase_df[phase_df.cpu == cpu].util
cpu_util.setdefault(cpu, {})[i] = series_tunnel_mean(util)
phase_start += phase.duration_s
return cpu_util
@LoadTrackingAnalysis.plot_task_signals.used_events
def _plot_util(self):
analysis = self.trace.analysis.load_tracking
fig, axes = analysis.setup_plot(nrows=len(self.rtapp_tasks))
for task, axis in zip(self.rtapp_tasks, axes):
analysis.plot_task_signals(task, signals=['util'], axis=axis)
self.trace.analysis.rta.plot_phases(task, axis=axis)
filepath = ArtifactPath.join(self.res_dir, 'tasks_util.png')
analysis.save_plot(fig, filepath=filepath)
filepath = ArtifactPath.join(self.res_dir, 'cpus_util.png')
cpus = sorted(self.cpus)
analysis.plot_cpus_signals(cpus, signals=['util'], filepath=filepath)
@get_trace_cpu_util.used_events
@_plot_util.used_events
@RTATestBundle.check_noisy_tasks(noise_threshold_pct=1)
def test_util_task_migration(self, allowed_error_pct=5) -> ResultBundle:
"""
Test that a migrated task properly propagates its utilization at the CPU level
:param allowed_error_pct: How much the trace averages can stray from the
expected values
:type allowed_error_pct: float
"""
expected_cpu_util = self.get_expected_cpu_util()
trace_cpu_util = self.get_trace_cpu_util()
passed = True
expected_metrics = {}
trace_metrics = {}
deltas = {}
for cpu in self.cpus:
cpu_str = "cpu{}".format(cpu)
expected_metrics[cpu_str] = TestMetric({})
trace_metrics[cpu_str] = TestMetric({})
deltas[cpu_str] = TestMetric({})
for i, phase in enumerate(self.reference_task.phases):
expected_util = expected_cpu_util[cpu][i]
trace_util = trace_cpu_util[cpu][i]
if not self.is_almost_equal(
expected_util,
trace_util,
allowed_error_pct):
passed = False
# Just some verbose metric collection...
phase_str = "phase{}".format(i)
delta = 100 * (trace_util - expected_util) / expected_util
expected_metrics[cpu_str].data[phase_str] = TestMetric(expected_util)
trace_metrics[cpu_str].data[phase_str] = TestMetric(trace_util)
deltas[cpu_str].data[phase_str] = TestMetric(delta, "%")
res = ResultBundle.from_bool(passed)
res.add_metric("Expected utilization", expected_metrics)
res.add_metric("Trace utilization", trace_metrics)
res.add_metric("Utilization deltas", deltas)
self._plot_util()
return res
class OneTaskCPUMigration(CPUMigrationBase):
"""
Some tasks on two big CPUs, one of them migrates in its second phase.
"""
@classmethod
def get_nr_required_cpu(cls, plat_info):
return 2
@classmethod
def get_rtapp_profile(cls, plat_info):
profile = {}
cpus = cls.get_migration_cpus(plat_info)
for task in ["migr", "static0", "static1"]:
# An empty RTATask just to sum phases up
profile[task] = RTATask()
common_phase_settings = dict(
duration_s=cls.PHASE_DURATION_S,
period_ms=cls.TASK_PERIOD_MS,
)
for cpu in cpus:
# A task that will migrate to another CPU
profile["migr"] += Periodic(
duty_cycle_pct=cls.unscaled_utilization(plat_info, cpu, 20),
cpus=[cpu], **common_phase_settings)
# Just some tasks that won't move to get some background utilization
profile["static0"] += Periodic(
duty_cycle_pct=cls.unscaled_utilization(plat_info, cpus[0], 30),
cpus=[cpus[0]], **common_phase_settings)
profile["static1"] += Periodic(
duty_cycle_pct=cls.unscaled_utilization(plat_info, cpus[1], 20),
cpus=[cpus[1]], **common_phase_settings)
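        # Resulting structure (summary): "migr" gets one phase per CPU, so it hops
        # between them, while "static0"/"static1" accumulate the same number of
        # phases pinned to a single CPU each.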
return profile
class NTasksCPUMigrationBase(CPUMigrationBase):
"""
N tasks on N CPUs, with all the migration permutations.
"""
@classmethod
def get_rtapp_profile(cls, plat_info):
cpus = cls.get_migration_cpus(plat_info)
def make_name(i): return 'migr{}'.format(i)
nr_tasks = len(cpus)
profile = {
make_name(i): RTATask()
for i in range(nr_tasks)
}
# Define one task per CPU, and create all the possible migrations by
# shuffling around these tasks
for cpus_combi in itertools.permutations(cpus, r=nr_tasks):
for i, cpu in enumerate(cpus_combi):
profile[make_name(i)] += Periodic(
duty_cycle_pct=cls.unscaled_utilization(plat_info, cpu, 50),
duration_s=cls.PHASE_DURATION_S,
period_ms=cls.TASK_PERIOD_MS,
cpus=[cpu],
)
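        # With N CPUs this generates factorial(N) permutations, hence factorial(N)
        # phases per task (2 phases for 2 CPUs, 6 for 3 CPUs, ...).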
return profile
class TwoTasksCPUMigration(NTasksCPUMigrationBase):
"""
Two tasks on two big CPUs, swap their CPU in the second phase
"""
@classmethod
def get_nr_required_cpu(cls, plat_info):
return 2
class NTasksCPUMigration(NTasksCPUMigrationBase):
"""
N tasks on N CPUs, and try all permutations of tasks and CPUs.
"""
@classmethod
def get_nr_required_cpu(cls, plat_info):
"""
Select the maximum number of CPUs the tests can handle.
"""
return max(len(cpus) for cpus in plat_info["capacity-classes"])
def test_util_task_migration(self, allowed_error_pct=8) -> ResultBundle:
"""
Relax the margins compared to the super-class version.
"""
return super().test_util_task_migration(
allowed_error_pct=allowed_error_pct,
)
# vim :set tabstop=4 shiftwidth=4 textwidth=80 expandtab
| python |
from __future__ import absolute_import, division, print_function, unicode_literals
from typing import List
import tempfile
from io import StringIO
import pandas as pd
from Bio import AlignIO, SeqIO, Phylo
from Bio.Align import MultipleSeqAlignment
from Bio.Align.Applications import ClustalOmegaCommandline, ClustalwCommandline
from .AlignCommandline import MafftCommandline
from .SeqLike import SeqLikeType, SeqLike
def pad_seq_records_for_alignment(seqs: List[SeqLikeType]):
"""Pad sequences so that lengths match for multiple sequence alignment.
:param seqs: a list of SeqLikeType
:returns: a MultipleSeqAlignment object
"""
df = pd.DataFrame({"seqs": [SeqLike(seq, seq_type="aa") for seq in seqs]})
return df.seqs.seq.as_alignment()
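# Illustrative usage (a sketch; the sequence values are made up and assume
# amino-acid input, as implied by seq_type="aa" above):
#   aln = pad_seq_records_for_alignment(["MKTAYIAK", "MKTAYI"])
#   Shorter entries are padded so all rows have equal length before MSA.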
def _generic_aligner_commandline_stdout(cline, **kwargs):
"""Execute aligner commandline that writes to stdout and return an alignment. Helper function.
:param cline: a subprocess object from Bio.Align.Applications.AbstractCommandline
:param **kwargs: additional arguments for alignment command
:returns: a MultipleSeqAlignment object
"""
stdout, _ = cline()
try:
stdout = StringIO(stdout)
except TypeError:
        stdout = StringIO(stdout.decode("utf-8"))  # aligner returned bytes
return AlignIO.read(stdout, "fasta", **kwargs)
def _generic_aligner_commandline_file(cline, seqrecs, **kwargs):
"""Execute aligner commandline that requires file i/o and return an alignment. Helper function.
:param cline: a subprocess object from Bio.Align.Applications.AbstractCommandline
:param seqrecs: a list of SeqRecord that will be aligned
:param **kwargs: additional arguments for alignment command
:returns: a MultipleSeqAlignment object
"""
assert len(seqrecs) > 1, "Need more than 1 sequence for alignment."
# build alignment object 'unaligned'; pad seqrecs to be equal length
unaligned = pad_seq_records_for_alignment(seqrecs)
# execute alignment
with tempfile.NamedTemporaryFile(delete=False, mode="w") as tempf:
AlignIO.write(unaligned, tempf, "fasta")
tempf.flush()
return cline(tempf, **kwargs)
def _generic_alignment(cline, seqrecs, preserve_order=True, **kwargs):
"""Align sequences using command line stored as cline. Helper function.
:param cline: a subprocess object from Bio.Align.Applications.AbstractCommandline
:param seqrecs: an iterator of SeqRecord that will be aligned
:param preserve_order: if True, reorder aligned seqrecs to match input order.
:param **kwargs: additional arguments for alignment command
:returns: a MultipleSeqAlignment object with aligned sequences
"""
# convert iterator to list, so that we can extract keys and still run the alignment
unaligned = list(seqrecs)
    # if the sequences come from an NCBI BLAST alignment, the id may include spaces
keys = [seqrec.id.split()[0] for seqrec in unaligned]
# execute alignment
aligned = _generic_aligner_commandline_file(cline, unaligned, **kwargs)
if preserve_order:
aligned = SeqIO.to_dict(aligned)
aligned = MultipleSeqAlignment(aligned[key] for key in keys)
# make all alignment uppercase
return MultipleSeqAlignment([seqrec.upper() for seqrec in aligned])
def mafft_alignment(seqrecs, preserve_order=True, **kwargs):
"""Align sequences using MAFFT.
:param seqrecs: a list or dict of SeqRecord that will be aligned to ref
:param preserve_order: if True, reorder aligned seqrecs to match input order.
:param **kwargs: additional arguments for alignment command
:returns: a MultipleSeqAlignment object with aligned sequences
:sa: https://mafft.cbrc.jp/alignment/software/
"""
def commandline(file_obj, **kwargs):
cline = MafftCommandline(input=file_obj.name, **kwargs)
return _generic_aligner_commandline_stdout(cline)
# MAFFT does not reorder alignment by default (reorder=False), but don't overwrite 'reorder' if set
if "reorder" not in kwargs:
kwargs["reorder"] = not preserve_order
return _generic_alignment(commandline, seqrecs, preserve_order=preserve_order, **kwargs)
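# Illustrative usage (assumes the MAFFT binary is installed and on PATH; the
# records below are placeholders, not real data):
#   from Bio.Seq import Seq
#   from Bio.SeqRecord import SeqRecord
#   recs = [SeqRecord(Seq("MKTAYIAK"), id="a"), SeqRecord(Seq("MKTAYI"), id="b")]
#   msa = mafft_alignment(recs, preserve_order=True)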
def clustal_omega_alignment(seqrecs, preserve_order=True, **kwargs):
"""Align sequences using Clustal Omega
:param seqrecs: a list or dict of SeqRecord that will be aligned to ref
:param preserve_order: if True, reorder aligned seqrecs to match input order.
:param **kwargs: additional arguments for alignment command
:returns: a MultipleSeqAlignment object with aligned sequences
"""
if preserve_order:
outputorder = "input-order"
else:
outputorder = "tree-order"
def commandline(file_obj, **kwargs):
cline = ClustalOmegaCommandline("clustalo", infile=file_obj.name, outputorder=outputorder, **kwargs)
return _generic_aligner_commandline_stdout(cline)
return _generic_alignment(commandline, seqrecs, **kwargs)
def clustal_omega_distance_matrix(seqrecs, **kwargs):
"""Generate a distance matrix using Clustal Omega
:param seqrecs: a list or dict of SeqRecord that will be aligned to ref
:param **kwargs: additional arguments for command line alignment
:returns: the pairwise distance matrix
"""
def commandline(ft, **kwargs):
with tempfile.NamedTemporaryFile(delete=False, mode="w") as ft_out:
cline = ClustalOmegaCommandline(
"clustalo",
infile=ft.name,
force=True,
distmat_out=ft_out.name,
distmat_full=True,
distmat_full_iter=True,
)
stdout, stderr = cline()
df = pd.read_csv(ft_out.name, delim_whitespace=True, skiprows=1, header=None, index_col=0)
df.index.name = "seqid"
return df
return _generic_aligner_commandline_file(commandline, seqrecs, **kwargs)
def clustal_omega_alignment_tree(seqrecs, **kwargs):
"""Generate phylogenetic tree using Clustal Omega and scikit-bio Neighbor Joining
This function computes a distance matrix using Clustal Omega, which skbio.tree.nj
uses to generate a newick file. Bio.Phylo can read this newick file.
Note: this function requires scikit-bio.
:param seqrecs: a list or dict of SeqRecord that will be aligned to ref
:param **kwargs: additional arguments for alignment command
:returns: a Bio.Phylo phylogenetic tree object
:sa: https://biopython.org/wiki/Phylo
:sa: http://scikit-bio.org/docs/0.2.1/generated/skbio.tree.nj.html
"""
import skbio
def skbio2phylo(treenode, format="newick"):
"""Convert skbio.tree.TreeNode object to Bio.Phylo.Newick.Tree object
:param treenode: an skbio.tree.TreeNode object
:param format: kind of tree, AKA New Hampshire Format
:returns: an equivalent Bio.Phylo.Newick.Tree object
:sa: https://biopython.org/docs/1.74/api/Bio.Phylo.Newick.html
"""
with tempfile.NamedTemporaryFile(delete=True, mode="w") as tempf:
treenode.write(tempf.name, format)
tempf.flush()
return Phylo.read(tempf.name, format)
distance_matrix = clustal_omega_distance_matrix(seqrecs, **kwargs)
ids = [s.id for s in seqrecs]
skbio_tree = skbio.tree.nj(skbio.DistanceMatrix(distance_matrix, ids))
return skbio2phylo(skbio_tree)
def clustalw_alignment_tree(seqrecs, **kwargs):
"""Generate phylogenetic tree using ClustalW. Note that ClustalW is an older
generation of Clustal aligner compared to Clustal Omega. It is considered to
be slower and less robust for aligning large sequence sets, but it is included here
because it does not require scikit-bio.
:param seqrecs: a list or dict of SeqRecord that will be aligned to ref
:param **kwargs: additional arguments for alignment command
:returns: the phylogenetic tree instead of the alignment object
"""
def commandline(ft, **kwargs):
with tempfile.NamedTemporaryFile(delete=False, mode="w") as ft_out:
cline = ClustalwCommandline(infile=ft.name, output="fasta", newtree=ft_out.name)
stdout, stderr = cline()
return Phylo.read(ft_out.name, "newick")
return _generic_alignment(commandline, seqrecs, preserve_order=False, **kwargs)
| python |
import sys
def input():
    return sys.stdin.readline().rstrip('\n')
N, M = map(int, input().split())
tree = [[] for i in range(N)]
for _ in range(M):
a, b = map(int, input().split())
a -= 1
b -= 1
tree[a].append(b)
tree[b].append(a)
ans = [[] for i in range(N)]
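# For each person, collect friend-of-friend candidates: skip the person themselves,
# anyone who is already a direct friend, and duplicates. The plain-list membership
# checks keep this simple but roughly O(N * d^2) with d the degree, which is assumed
# to be acceptable for this problem's input sizes.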
for target_no in range(N):
for t_friend_no in tree[target_no]:
for t_friend_friend_no in tree[t_friend_no]:
if target_no == t_friend_friend_no:
continue
if t_friend_friend_no in tree[target_no]:
continue
if t_friend_friend_no in ans[target_no]:
continue
ans[target_no].append(t_friend_friend_no)
for a in ans:
print(len(a))
| python |
import asyncio
import re
from functools import partial
import pytest
from dagster import (
AssetKey,
AssetMaterialization,
AssetObservation,
DynamicOut,
DynamicOutput,
DynamicOutputDefinition,
ExpectationResult,
Failure,
Field,
In,
InputDefinition,
Materialization,
Noneable,
Nothing,
Out,
Output,
OutputDefinition,
RetryRequested,
Selector,
build_op_context,
build_solid_context,
composite_solid,
execute_solid,
op,
pipeline,
resource,
solid,
)
from dagster.core.errors import (
DagsterInvalidConfigError,
DagsterInvalidDefinitionError,
DagsterInvalidInvocationError,
DagsterInvalidPropertyError,
DagsterInvariantViolationError,
DagsterResourceFunctionError,
DagsterStepOutputNotFoundError,
DagsterTypeCheckDidNotPass,
)
def test_solid_invocation_no_arg():
@solid
def basic_solid():
return 5
result = basic_solid()
assert result == 5
with pytest.raises(
DagsterInvalidInvocationError,
match="Compute function of solid 'basic_solid' has no context "
"argument, but context was provided when invoking.",
):
basic_solid(build_solid_context())
# Ensure alias is accounted for in error message
with pytest.raises(
DagsterInvalidInvocationError,
match="Compute function of solid 'aliased_basic_solid' has no context "
"argument, but context was provided when invoking.",
):
basic_solid.alias("aliased_basic_solid")(build_solid_context())
with pytest.raises(
DagsterInvalidInvocationError,
match="Too many input arguments were provided for solid 'basic_solid'. This may be "
"because an argument was provided for the context parameter, but no context parameter was "
"defined for the solid.",
):
basic_solid(None)
# Ensure alias is accounted for in error message
with pytest.raises(
DagsterInvalidInvocationError,
match="Too many input arguments were provided for solid 'aliased_basic_solid'. This may be "
"because an argument was provided for the context parameter, but no context parameter was "
"defined for the solid.",
):
basic_solid.alias("aliased_basic_solid")(None)
def test_solid_invocation_none_arg():
@solid
def basic_solid(_):
return 5
result = basic_solid(None)
assert result == 5
def test_solid_invocation_context_arg():
@solid
def basic_solid(context):
context.log.info("yay")
basic_solid(None)
basic_solid(build_solid_context())
basic_solid(context=None)
basic_solid(context=build_solid_context())
def test_solid_invocation_empty_run_config():
@solid
def basic_solid(context):
assert context.run_config is not None
assert context.run_config == {"resources": {}}
basic_solid(context=build_solid_context())
def test_solid_invocation_run_config_with_config():
@solid(config_schema={"foo": str})
def basic_solid(context):
assert context.run_config
assert context.run_config["solids"] == {"basic_solid": {"config": {"foo": "bar"}}}
basic_solid(build_solid_context(solid_config={"foo": "bar"}))
def test_solid_invocation_out_of_order_input_defs():
@solid(input_defs=[InputDefinition("x"), InputDefinition("y")])
def check_correct_order(y, x):
assert y == 6
assert x == 5
check_correct_order(6, 5)
check_correct_order(x=5, y=6)
check_correct_order(6, x=5)
def test_solid_invocation_with_resources():
@solid(required_resource_keys={"foo"})
def solid_requires_resources(context):
assert context.resources.foo == "bar"
return context.resources.foo
# Ensure that a check invariant is raise when we attempt to invoke without context
with pytest.raises(
DagsterInvalidInvocationError,
match="Compute function of solid 'solid_requires_resources' has context argument, but no "
"context was provided when invoking.",
):
solid_requires_resources()
# Ensure that alias is accounted for in error message
with pytest.raises(
DagsterInvalidInvocationError,
match="Compute function of solid 'aliased_solid_requires_resources' has context argument, but no "
"context was provided when invoking.",
):
solid_requires_resources.alias("aliased_solid_requires_resources")()
# Ensure that error is raised when we attempt to invoke with a None context
with pytest.raises(
DagsterInvalidInvocationError,
match='solid "solid_requires_resources" has required resources, but no context was '
"provided.",
):
solid_requires_resources(None)
# Ensure that error is raised when we attempt to invoke with a context without the required
# resource.
context = build_solid_context()
with pytest.raises(
DagsterInvalidInvocationError,
match='solid "solid_requires_resources" requires resource "foo", but no resource '
"with that key was found on the context.",
):
solid_requires_resources(context)
context = build_solid_context(resources={"foo": "bar"})
assert solid_requires_resources(context) == "bar"
def test_solid_invocation_with_cm_resource():
teardown_log = []
@resource
def cm_resource(_):
try:
yield "foo"
finally:
teardown_log.append("collected")
@solid(required_resource_keys={"cm_resource"})
def solid_requires_cm_resource(context):
return context.resources.cm_resource
# Attempt to use solid context as fxn with cm resource should fail
context = build_solid_context(resources={"cm_resource": cm_resource})
with pytest.raises(DagsterInvariantViolationError):
solid_requires_cm_resource(context)
del context
assert teardown_log == ["collected"]
# Attempt to use solid context as cm with cm resource should succeed
with build_solid_context(resources={"cm_resource": cm_resource}) as context:
assert solid_requires_cm_resource(context) == "foo"
assert teardown_log == ["collected", "collected"]
def test_solid_invocation_with_config():
@solid(config_schema={"foo": str})
def solid_requires_config(context):
assert context.solid_config["foo"] == "bar"
return 5
# Ensure that error is raised when attempting to execute and no context is provided
with pytest.raises(
DagsterInvalidInvocationError,
match="Compute function of solid 'solid_requires_config' has context argument, but no "
"context was provided when invoking.",
):
solid_requires_config()
# Ensure that alias is accounted for in error message
with pytest.raises(
DagsterInvalidInvocationError,
match="Compute function of solid 'aliased_solid_requires_config' has context argument, but no "
"context was provided when invoking.",
):
solid_requires_config.alias("aliased_solid_requires_config")()
# Ensure that error is raised when we attempt to invoke with a None context
with pytest.raises(
DagsterInvalidInvocationError,
match='solid "solid_requires_config" has required config schema, but no context was '
"provided.",
):
solid_requires_config(None)
# Ensure that error is raised when context does not have the required config.
context = build_solid_context()
with pytest.raises(
DagsterInvalidConfigError,
match="Error in config for solid",
):
solid_requires_config(context)
# Ensure that error is raised when attempting to execute and no context is provided, even when
# configured
with pytest.raises(
DagsterInvalidInvocationError,
match="Compute function of solid 'configured_solid' has context argument, but no "
"context was provided when invoking.",
):
solid_requires_config.configured({"foo": "bar"}, name="configured_solid")()
# Ensure that if you configure the solid, you can provide a none-context.
result = solid_requires_config.configured({"foo": "bar"}, name="configured_solid")(None)
assert result == 5
result = solid_requires_config(build_solid_context(solid_config={"foo": "bar"}))
assert result == 5
def test_solid_invocation_default_config():
@solid(config_schema={"foo": Field(str, is_required=False, default_value="bar")})
def solid_requires_config(context):
assert context.solid_config["foo"] == "bar"
return context.solid_config["foo"]
assert solid_requires_config(None) == "bar"
@solid(config_schema=Field(str, is_required=False, default_value="bar"))
def solid_requires_config_val(context):
assert context.solid_config == "bar"
return context.solid_config
assert solid_requires_config_val(None) == "bar"
@solid(
config_schema={
"foo": Field(str, is_required=False, default_value="bar"),
"baz": str,
}
)
def solid_requires_config_partial(context):
assert context.solid_config["foo"] == "bar"
assert context.solid_config["baz"] == "bar"
return context.solid_config["foo"] + context.solid_config["baz"]
assert (
solid_requires_config_partial(build_solid_context(solid_config={"baz": "bar"})) == "barbar"
)
def test_solid_invocation_dict_config():
@solid(config_schema=dict)
def solid_requires_dict(context):
assert context.solid_config == {"foo": "bar"}
return context.solid_config
assert solid_requires_dict(build_solid_context(solid_config={"foo": "bar"})) == {"foo": "bar"}
@solid(config_schema=Noneable(dict))
def solid_noneable_dict(context):
return context.solid_config
assert solid_noneable_dict(build_solid_context()) is None
assert solid_noneable_dict(None) is None
def test_solid_invocation_kitchen_sink_config():
@solid(
config_schema={
"str_field": str,
"int_field": int,
"list_int": [int],
"list_list_int": [[int]],
"dict_field": {"a_string": str},
"list_dict_field": [{"an_int": int}],
"selector_of_things": Selector(
{"select_list_dict_field": [{"an_int": int}], "select_int": int}
),
"optional_list_of_optional_string": Noneable([Noneable(str)]),
}
)
def kitchen_sink(context):
return context.solid_config
solid_config_one = {
"str_field": "kjf",
"int_field": 2,
"list_int": [3],
"list_list_int": [[1], [2, 3]],
"dict_field": {"a_string": "kdjfkd"},
"list_dict_field": [{"an_int": 2}, {"an_int": 4}],
"selector_of_things": {"select_int": 3},
"optional_list_of_optional_string": ["foo", None],
}
assert kitchen_sink(build_solid_context(solid_config=solid_config_one)) == solid_config_one
def test_solid_with_inputs():
@solid
def solid_with_inputs(x, y):
assert x == 5
assert y == 6
return x + y
assert solid_with_inputs(5, 6) == 11
assert solid_with_inputs(x=5, y=6) == 11
assert solid_with_inputs(5, y=6) == 11
assert solid_with_inputs(y=6, x=5) == 11
# Check for proper error when incorrect number of inputs is provided.
with pytest.raises(
DagsterInvalidInvocationError, match='No value provided for required input "y".'
):
solid_with_inputs(5)
with pytest.raises(
DagsterInvalidInvocationError,
match="Too many input arguments were provided for solid 'solid_with_inputs'",
):
solid_with_inputs(5, 6, 7)
with pytest.raises(
DagsterInvalidInvocationError,
match="Too many input arguments were provided for solid 'solid_with_inputs'",
):
solid_with_inputs(5, 6, z=7)
# Check for proper error when incorrect number of inputs is provided.
with pytest.raises(
DagsterInvalidInvocationError, match='No value provided for required input "y".'
):
solid_with_inputs(5, x=5)
def test_failing_solid():
@solid
def solid_fails():
raise Exception("Oh no!")
with pytest.raises(
Exception,
match="Oh no!",
):
solid_fails()
def test_attempted_invocation_in_composition():
@solid
def basic_solid(_x):
pass
msg = (
"Must pass the output from previous node invocations or inputs to the composition "
"function as inputs when invoking nodes during composition."
)
with pytest.raises(
DagsterInvalidDefinitionError,
match=msg,
):
@pipeline
def _pipeline_will_fail():
basic_solid(5)
with pytest.raises(
DagsterInvalidDefinitionError,
match=msg,
):
@pipeline
def _pipeline_will_fail_again():
basic_solid(_x=5)
def test_async_solid():
@solid
async def aio_solid():
await asyncio.sleep(0.01)
return "done"
loop = asyncio.get_event_loop()
assert loop.run_until_complete(aio_solid()) == "done"
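# Note: invoking an async solid (or async generator solid) returns a coroutine /
# async generator object, so the async tests below drive it explicitly with an
# event loop instead of relying on execute_solid.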
def test_async_gen_invocation():
@solid
async def aio_gen(_):
await asyncio.sleep(0.01)
yield Output("done")
context = build_solid_context()
async def get_results():
res = []
async for output in aio_gen(context):
res.append(output)
return res
loop = asyncio.get_event_loop()
output = loop.run_until_complete(get_results())[0]
assert output.value == "done"
def test_multiple_outputs_iterator():
@solid(output_defs=[OutputDefinition(int, name="1"), OutputDefinition(int, name="2")])
def solid_multiple_outputs():
yield Output(2, output_name="2")
yield Output(1, output_name="1")
# Ensure that solid works both with execute_solid and invocation
result = execute_solid(solid_multiple_outputs)
assert result.success
outputs = list(solid_multiple_outputs())
assert outputs[0].value == 2
assert outputs[1].value == 1
def test_wrong_output():
@solid
def solid_wrong_output():
return Output(5, output_name="wrong_name")
with pytest.raises(
DagsterInvariantViolationError,
match=re.escape(
'Core compute for solid "solid_wrong_output" returned an output "wrong_name" that does '
"not exist. The available outputs are ['result']"
),
):
execute_solid(solid_wrong_output)
with pytest.raises(
DagsterInvariantViolationError,
match=re.escape(
'Invocation of solid "solid_wrong_output" returned an output "wrong_name" that does '
"not exist. The available outputs are ['result']"
),
):
solid_wrong_output()
def test_optional_output_return():
@solid(
output_defs=[
OutputDefinition(int, name="1", is_required=False),
OutputDefinition(int, name="2"),
]
)
def solid_multiple_outputs_not_sent():
return Output(2, output_name="2")
assert solid_multiple_outputs_not_sent().value == 2
def test_optional_output_yielded():
@solid(
output_defs=[
OutputDefinition(int, name="1", is_required=False),
OutputDefinition(int, name="2"),
]
)
def solid_multiple_outputs_not_sent():
yield Output(2, output_name="2")
assert list(solid_multiple_outputs_not_sent())[0].value == 2
def test_optional_output_yielded_async():
@solid(
output_defs=[
OutputDefinition(int, name="1", is_required=False),
OutputDefinition(int, name="2"),
]
)
async def solid_multiple_outputs_not_sent():
yield Output(2, output_name="2")
async def get_results():
res = []
async for output in solid_multiple_outputs_not_sent():
res.append(output)
return res
loop = asyncio.get_event_loop()
output = loop.run_until_complete(get_results())[0]
assert output.value == 2
def test_missing_required_output_generator():
# Test missing required output from a generator solid
@solid(output_defs=[OutputDefinition(int, name="1"), OutputDefinition(int, name="2")])
def solid_multiple_outputs_not_sent():
yield Output(2, output_name="2")
with pytest.raises(
DagsterStepOutputNotFoundError,
match='Core compute for solid "solid_multiple_outputs_not_sent" did not return an output '
'for non-optional output "1"',
):
execute_solid(solid_multiple_outputs_not_sent)
with pytest.raises(
DagsterInvariantViolationError,
match="Invocation of solid 'solid_multiple_outputs_not_sent' did not return an output "
"for non-optional output '1'",
):
list(solid_multiple_outputs_not_sent())
def test_missing_required_output_generator_async():
# Test missing required output from an async generator solid
@solid(output_defs=[OutputDefinition(int, name="1"), OutputDefinition(int, name="2")])
async def solid_multiple_outputs_not_sent():
yield Output(2, output_name="2")
with pytest.raises(
DagsterStepOutputNotFoundError,
match='Core compute for solid "solid_multiple_outputs_not_sent" did not return an output '
'for non-optional output "1"',
):
execute_solid(solid_multiple_outputs_not_sent)
async def get_results():
res = []
async for output in solid_multiple_outputs_not_sent():
res.append(output)
return res
loop = asyncio.get_event_loop()
with pytest.raises(
DagsterInvariantViolationError,
match="Invocation of solid 'solid_multiple_outputs_not_sent' did not return an output "
"for non-optional output '1'",
):
loop.run_until_complete(get_results())
def test_missing_required_output_return():
@solid(output_defs=[OutputDefinition(int, name="1"), OutputDefinition(int, name="2")])
def solid_multiple_outputs_not_sent():
return Output(2, output_name="2")
with pytest.raises(
DagsterStepOutputNotFoundError,
match='Core compute for solid "solid_multiple_outputs_not_sent" did not return an output '
'for non-optional output "1"',
):
execute_solid(solid_multiple_outputs_not_sent)
with pytest.raises(
DagsterInvariantViolationError,
match="Invocation of solid 'solid_multiple_outputs_not_sent' did not return an output "
"for non-optional output '1'",
):
solid_multiple_outputs_not_sent()
def test_output_sent_multiple_times():
@solid(output_defs=[OutputDefinition(int, name="1")])
def solid_yields_twice():
yield Output(1, "1")
yield Output(2, "1")
with pytest.raises(
DagsterInvariantViolationError,
match='Compute for solid "solid_yields_twice" returned an output "1" multiple times',
):
execute_solid(solid_yields_twice)
with pytest.raises(
DagsterInvariantViolationError,
match="Invocation of solid 'solid_yields_twice' yielded an output '1' multiple times",
):
list(solid_yields_twice())
@pytest.mark.parametrize(
"property_or_method_name,val_to_pass",
[
("pipeline_run", None),
("step_launcher", None),
("pipeline_def", None),
("pipeline_name", None),
("mode_def", None),
("solid_handle", None),
("solid", None),
("get_step_execution_context", None),
],
)
def test_invalid_properties_on_context(property_or_method_name, val_to_pass):
@solid
def solid_fails_getting_property(context):
result = getattr(context, property_or_method_name)
# for the case where property_or_method_name is a method, getting an attribute won't cause
# an error, but invoking the method should.
result(val_to_pass) if val_to_pass else result() # pylint: disable=expression-not-assigned
with pytest.raises(DagsterInvalidPropertyError):
solid_fails_getting_property(None)
def test_solid_retry_requested():
@solid
def solid_retries():
raise RetryRequested()
with pytest.raises(RetryRequested):
solid_retries()
def test_solid_failure():
@solid
def solid_fails():
raise Failure("oops")
with pytest.raises(Failure, match="oops"):
solid_fails()
def test_yielded_asset_materialization():
@solid
def solid_yields_materialization(_):
yield AssetMaterialization(asset_key=AssetKey(["fake"]))
yield Output(5)
yield AssetMaterialization(asset_key=AssetKey(["fake2"]))
events = list(solid_yields_materialization(None))
outputs = [event for event in events if isinstance(event, Output)]
assert outputs[0].value == 5
materializations = [
materialization
for materialization in events
if isinstance(materialization, AssetMaterialization)
]
assert len(materializations) == 2
def test_input_type_check():
@solid(input_defs=[InputDefinition("x", dagster_type=int)])
def solid_takes_input(x):
return x + 1
assert solid_takes_input(5) == 6
with pytest.raises(
DagsterTypeCheckDidNotPass,
match='Description: Value "foo" of python type "str" must be a int.',
):
solid_takes_input("foo")
def test_output_type_check():
@solid(output_defs=[OutputDefinition(dagster_type=int)])
def wrong_type():
return "foo"
with pytest.raises(
DagsterTypeCheckDidNotPass,
match='Description: Value "foo" of python type "str" must be a int.',
):
wrong_type()
def test_pending_node_invocation():
@solid
def basic_solid_to_hook():
return 5
assert basic_solid_to_hook.with_hooks(set())() == 5
@solid
def basic_solid_with_tag(context):
assert context.has_tag("foo")
return context.get_tag("foo")
assert basic_solid_with_tag.tag({"foo": "bar"})(None) == "bar"
def test_composite_solid_invocation_out_of_composition():
@solid
def basic_solid():
return 5
@composite_solid
def composite():
basic_solid()
with pytest.raises(
DagsterInvariantViolationError,
match="Attempted to call composite solid "
"'composite' outside of a composition function. Invoking composite solids is only valid in a "
"function decorated with @pipeline or @composite_solid.",
):
composite()
def test_pipeline_invocation():
@pipeline
def basic_pipeline():
pass
with pytest.raises(
DagsterInvariantViolationError,
match="Attempted to call pipeline "
"'basic_pipeline' directly. Pipelines should be invoked by using an execution API function "
r"\(e.g. `execute_pipeline`\).",
):
basic_pipeline()
@solid
async def foo_async() -> str:
return "bar"
def test_coroutine_asyncio_invocation():
async def my_coroutine_test():
result = await foo_async()
assert result == "bar"
loop = asyncio.get_event_loop()
loop.run_until_complete(my_coroutine_test())
def test_solid_invocation_nothing_deps():
@solid(input_defs=[InputDefinition("start", Nothing)])
def nothing_dep():
return 5
# Ensure that providing the Nothing-dependency input throws an error
with pytest.raises(
DagsterInvalidInvocationError,
match="Attempted to provide value for nothing input 'start'. Nothing dependencies are ignored "
"when directly invoking solids.",
):
nothing_dep(start="blah")
with pytest.raises(
DagsterInvalidInvocationError,
match="Too many input arguments were provided for solid 'nothing_dep'. This may be because "
"you attempted to provide a value for a nothing dependency. Nothing dependencies are "
"ignored when directly invoking solids.",
):
nothing_dep("blah")
# Ensure that not providing nothing dependency also works.
assert nothing_dep() == 5
@solid(input_defs=[InputDefinition("x"), InputDefinition("y", Nothing), InputDefinition("z")])
def sandwiched_nothing_dep(x, z):
return x + z
assert sandwiched_nothing_dep(5, 6) == 11
with pytest.raises(
DagsterInvalidInvocationError,
match="Too many input arguments were provided for solid 'sandwiched_nothing_dep'. This may "
"be because you attempted to provide a value for a nothing dependency. Nothing "
"dependencies are ignored when directly invoking solids.",
):
sandwiched_nothing_dep(5, 6, 7)
def test_dynamic_output_gen():
@solid(
output_defs=[
DynamicOutputDefinition(name="a", is_required=False),
OutputDefinition(name="b", is_required=False),
]
)
def my_dynamic():
yield DynamicOutput(value=1, mapping_key="1", output_name="a")
yield DynamicOutput(value=2, mapping_key="2", output_name="a")
yield Output(value="foo", output_name="b")
a1, a2, b = my_dynamic()
assert a1.value == 1
assert a1.mapping_key == "1"
assert a2.value == 2
assert a2.mapping_key == "2"
assert b.value == "foo"
def test_dynamic_output_async_gen():
@solid(
output_defs=[
DynamicOutputDefinition(name="a", is_required=False),
OutputDefinition(name="b", is_required=False),
]
)
async def aio_gen():
yield DynamicOutput(value=1, mapping_key="1", output_name="a")
yield DynamicOutput(value=2, mapping_key="2", output_name="a")
await asyncio.sleep(0.01)
yield Output(value="foo", output_name="b")
async def get_results():
res = []
async for output in aio_gen():
res.append(output)
return res
loop = asyncio.get_event_loop()
a1, a2, b = loop.run_until_complete(get_results())
assert a1.value == 1
assert a1.mapping_key == "1"
assert a2.value == 2
assert a2.mapping_key == "2"
assert b.value == "foo"
def test_dynamic_output_non_gen():
@solid(output_defs=[DynamicOutputDefinition(name="a", is_required=False)])
def should_not_work():
return DynamicOutput(value=1, mapping_key="1", output_name="a")
with pytest.raises(
DagsterInvariantViolationError,
match="Attempted to return a DynamicOutput from solid. DynamicOuts are only supported "
"using yield syntax.",
):
should_not_work()
def test_dynamic_output_async_non_gen():
@solid(output_defs=[DynamicOutputDefinition(name="a", is_required=False)])
    async def should_not_work():
        await asyncio.sleep(0.01)
return DynamicOutput(value=1, mapping_key="1", output_name="a")
loop = asyncio.get_event_loop()
with pytest.raises(
DagsterInvariantViolationError,
match="Attempted to return a DynamicOutput from solid. DynamicOuts are only supported "
"using yield syntax.",
):
loop.run_until_complete(should_not_work())
def test_solid_invocation_with_bad_resources(capsys):
@resource
def bad_resource(_):
if 1 == 1:
raise Exception("oopsy daisy")
yield "foo"
@solid(required_resource_keys={"my_resource"})
def solid_requires_resource(context):
return context.resources.my_resource
with pytest.raises(
DagsterResourceFunctionError,
match="Error executing resource_fn on ResourceDefinition my_resource",
):
with build_solid_context(resources={"my_resource": bad_resource}) as context:
assert solid_requires_resource(context) == "foo"
captured = capsys.readouterr()
# make sure there are no exceptions in the context destructor (__del__)
assert "Exception ignored in" not in captured.err
@pytest.mark.parametrize("context_builder", [build_solid_context, build_op_context])
def test_build_context_with_resources_config(context_builder):
@resource(config_schema=str)
def my_resource(context):
assert context.resource_config == "foo"
@solid(required_resource_keys={"my_resource"})
def my_solid(context):
assert context.run_config["resources"]["my_resource"] == {"config": "foo"}
context = context_builder(
resources={"my_resource": my_resource},
resources_config={"my_resource": {"config": "foo"}},
)
my_solid(context)
# bad resource config case
with pytest.raises(
DagsterInvalidConfigError,
match='Received unexpected config entry "bad_resource" at the root.',
):
context_builder(
resources={"my_resource": my_resource},
resources_config={"bad_resource": {"config": "foo"}},
)
def test_logged_user_events():
@op
def logs_events(context):
context.log_event(AssetMaterialization("first"))
context.log_event(Materialization("second"))
context.log_event(ExpectationResult(success=True))
context.log_event(AssetObservation("fourth"))
yield AssetMaterialization("fifth")
yield Output("blah")
context = build_op_context()
list(logs_events(context))
assert [type(event) for event in context.get_events()] == [
AssetMaterialization,
Materialization,
ExpectationResult,
AssetObservation,
]
def test_add_output_metadata():
@op(out={"out1": Out(), "out2": Out()})
def the_op(context):
context.add_output_metadata({"foo": "bar"}, output_name="out1")
yield Output(value=1, output_name="out1")
context.add_output_metadata({"bar": "baz"}, output_name="out2")
yield Output(value=2, output_name="out2")
context = build_op_context()
events = list(the_op(context))
assert len(events) == 2
assert context.get_output_metadata("out1") == {"foo": "bar"}
assert context.get_output_metadata("out2") == {"bar": "baz"}
def test_add_output_metadata_after_output():
@op
def the_op(context):
yield Output(value=1)
context.add_output_metadata({"foo": "bar"})
with pytest.raises(
DagsterInvariantViolationError,
match="In op 'the_op', attempted to log output metadata for output 'result' which has already been yielded. Metadata must be logged before the output is yielded.",
):
list(the_op(build_op_context()))
def test_log_metadata_multiple_dynamic_outputs():
@op(out={"out1": DynamicOut(), "out2": DynamicOut()})
def the_op(context):
context.add_output_metadata({"one": "one"}, output_name="out1", mapping_key="one")
yield DynamicOutput(value=1, output_name="out1", mapping_key="one")
context.add_output_metadata({"two": "two"}, output_name="out1", mapping_key="two")
context.add_output_metadata({"three": "three"}, output_name="out2", mapping_key="three")
yield DynamicOutput(value=2, output_name="out1", mapping_key="two")
yield DynamicOutput(value=3, output_name="out2", mapping_key="three")
context.add_output_metadata({"four": "four"}, output_name="out2", mapping_key="four")
yield DynamicOutput(value=4, output_name="out2", mapping_key="four")
context = build_op_context()
events = list(the_op(context))
assert len(events) == 4
assert context.get_output_metadata("out1", mapping_key="one") == {"one": "one"}
assert context.get_output_metadata("out1", mapping_key="two") == {"two": "two"}
assert context.get_output_metadata("out2", mapping_key="three") == {"three": "three"}
assert context.get_output_metadata("out2", mapping_key="four") == {"four": "four"}
def test_log_metadata_after_dynamic_output():
@op(out=DynamicOut())
def the_op(context):
yield DynamicOutput(1, mapping_key="one")
context.add_output_metadata({"foo": "bar"}, mapping_key="one")
with pytest.raises(
DagsterInvariantViolationError,
match="In op 'the_op', attempted to log output metadata for output 'result' with mapping_key 'one' which has already been yielded. Metadata must be logged before the output is yielded.",
):
list(the_op(build_op_context()))
def test_kwarg_inputs():
@op(ins={"the_in": In(str)})
def the_op(**kwargs) -> str:
return kwargs["the_in"] + "foo"
with pytest.raises(
DagsterInvalidInvocationError,
match="op 'the_op' has 0 positional inputs, but 1 positional inputs were provided.",
):
the_op("bar")
assert the_op(the_in="bar") == "barfoo"
with pytest.raises(KeyError):
the_op(bad_val="bar")
@op(ins={"the_in": In(), "kwarg_in": In(), "kwarg_in_two": In()})
def the_op(the_in, **kwargs):
return the_in + kwargs["kwarg_in"] + kwargs["kwarg_in_two"]
assert the_op("foo", kwarg_in="bar", kwarg_in_two="baz") == "foobarbaz"
def test_default_kwarg_inputs():
@op
def the_op(x=1, y=2):
return x + y
assert the_op() == 3
def test_kwargs_via_partial_functools():
def fake_func(foo, bar):
return foo + bar
new_func = partial(fake_func, foo=1, bar=2)
new_op = op(name="new_func")(new_func)
assert new_op() == 3
| python |
from pytest import approx
from CompAero.FannoFlowRelations import FannoFlowRelations as ffr
from CompAero.internal import FlowState as FS
class TestFannoClassFuncs:
gamma = 1.4
# Test the Functions for Subsonic Case
#######################################################################################
def test_subsonic_t_tstar(self):
assert ffr.calc_T_Tstar(0.5, self.gamma) == approx(1.1429, rel=1e-4)
def test_subsonic_mach_from_t_tstar(self):
assert ffr.calc_mach_from_T_TStar(1.14285714, self.gamma) == approx(0.5, rel=1e-2)
def test_subsonic_p_pstar(self):
assert ffr.calc_P_Pstar(0.5, self.gamma) == approx(2.1381, rel=1e-4)
def test_subsonic_mach_from_p_pstar(self):
assert ffr.calc_mach_from_P_PStar(2.13808993, self.gamma) == approx(0.5, rel=1e-2)
def test_subsonic_rho_rhoStar(self):
assert ffr.calc_Rho_RhoStar(0.5, self.gamma) == approx(1.871, rel=1e-4)
def test_subsonic_mach_from_rho_rhoStar(self):
assert ffr.calc_mach_from_Rho_RhoStar(1.871, 1.4) == approx(0.5, 1e-3)
def test_subsonic_p0_p0Star(self):
assert ffr.calc_Po_PoStar(0.5, self.gamma) == approx(1.3398, rel=1e-4)
def test_subsonic_mach_from_p0_p0Star(self):
assert ffr.calc_mach_from_Po_PoStar(1.33984375, self.gamma, flowType=FS.SUB_SONIC) == approx(
0.5, 1e-3
)
def test_subsonic_4FLstarD(self):
assert ffr.calc_4FLSt_D(0.5, self.gamma) == approx(1.0691, rel=1e-4)
def test_subsonic_mach_from_4FLstarD(self):
assert ffr.calc_mach_from_4FLSt_D(1.06906031, self.gamma, flowType=FS.SUB_SONIC) == approx(
0.5, rel=1e-3
)
def test_subsonic_u_uStar(self):
assert ffr.calc_U_UStar(0.5, self.gamma) == approx(0.5345, rel=1e-4)
def test_subsonic_mach_from_u_uStar(self):
assert ffr.calc_mach_from_U_USt(0.53452248, self.gamma) == approx(0.5, rel=1e-3)
# Test the Functions for Supersonic Case
#######################################################################################
def test_supersonic_t_tstar(self):
assert ffr.calc_T_Tstar(1.5, self.gamma) == approx(0.82759, rel=1e-4)
def test_supersonic_mach_from_t_tstar(self):
assert ffr.calc_mach_from_T_TStar(0.82758620, self.gamma) == approx(1.5, rel=1e-2)
def test_supersonic_p_pstar(self):
assert ffr.calc_P_Pstar(1.5, self.gamma) == approx(0.6065, rel=1e-4)
def test_supersonic_mach_from_p_pstar(self):
assert ffr.calc_mach_from_P_PStar(0.60647843, self.gamma) == approx(1.5, rel=1e-2)
def test_supersonic_rho_rhoStar(self):
assert ffr.calc_Rho_RhoStar(1.5, self.gamma) == approx(0.7328, rel=1e-4)
def test_supersonic_mach_from_rho_rhoStar(self):
assert ffr.calc_mach_from_Rho_RhoStar(0.7328, 1.4) == approx(1.5, 1e-3)
def test_supersonic_p0_p0Star(self):
assert ffr.calc_Po_PoStar(1.5, self.gamma) == approx(1.1762, rel=1e-4)
def test_supersonic_mach_from_p0_p0Star(self):
assert ffr.calc_mach_from_Po_PoStar(1.17616705, self.gamma, flowType=FS.SUPER_SONIC) == approx(
1.5, 1e-3
)
def test_supersonic_4FLstarD(self):
assert ffr.calc_4FLSt_D(1.5, self.gamma) == approx(0.13605, rel=1e-4)
def test_supersonic_mach_from_4FLstarD(self):
assert ffr.calc_mach_from_4FLSt_D(0.13605021, self.gamma, flowType=FS.SUPER_SONIC) == approx(
1.5, rel=1e-3
)
def test_supersonic_u_uStar(self):
assert ffr.calc_U_UStar(1.5, self.gamma) == approx(1.3646, rel=1e-4)
def test_supersonic_mach_from_u_uStar(self):
assert ffr.calc_mach_from_U_USt(1.36457647, self.gamma) == approx(1.5, rel=1e-3)
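# A hedged cross-check of where the subsonic reference values above come from,
# using the standard Fanno-flow relations (not part of the original suite):
#   T/T*     = (gamma + 1) / (2 + (gamma - 1) * M**2) = 2.4 / 2.1    ~= 1.1429  at M = 0.5
#   P/P*     = (1 / M) * sqrt(T/T*)                   = 2 * 1.0690   ~= 2.1381
#   rho/rho* = (1 / M) * sqrt(1 / (T/T*))             = 2 * 0.9354   ~= 1.8708
#   U/U*     = M * sqrt(T/T*)                         = 0.5 * 1.0690 ~= 0.5345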
class TestFannoClassSubsonic:
gamma = 1.4
def test_fanno_from_mach(self):
inst = ffr(self.gamma, mach=0.5)
inst.apply_pipe_parameters(0.4, 11, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(0.5, rel=1e-3)
assert inst.t_tSt == approx(1.1429, rel=1e-4)
assert inst.p_pSt == approx(2.1381, rel=1e-4)
assert inst.rho_rhoSt == approx(1.871, rel=1e-4)
assert inst.po_poSt == approx(1.3398, rel=1e-4)
assert inst.f4LSt_D == approx(1.0691, rel=1e-4)
assert inst.u_uSt == approx(0.5345, rel=1e-4)
assert inst.flowType == FS.SUB_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(0.593, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.1211, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.7855, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.1966, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.5926, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.5191, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(0.6279, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_t_tStar(self):
inst = ffr(self.gamma, t_tSt=1.1428571428571428)
inst.apply_pipe_parameters(0.4, 11, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(0.5, rel=1e-3)
assert inst.t_tSt == approx(1.1429, rel=1e-4)
assert inst.p_pSt == approx(2.1381, rel=1e-4)
assert inst.rho_rhoSt == approx(1.871, rel=1e-4)
assert inst.po_poSt == approx(1.3398, rel=1e-4)
assert inst.f4LSt_D == approx(1.0691, rel=1e-4)
assert inst.u_uSt == approx(0.5345, rel=1e-4)
assert inst.flowType == FS.SUB_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(0.593, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.1211, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.7855, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.1966, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.5926, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.5191, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(0.6279, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_p_pStar(self):
inst = ffr(self.gamma, p_pSt=2.1381)
inst.apply_pipe_parameters(0.4, 11, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(0.5, rel=1e-3)
assert inst.t_tSt == approx(1.1429, rel=1e-4)
assert inst.p_pSt == approx(2.1381, rel=1e-4)
assert inst.rho_rhoSt == approx(1.871, rel=1e-4)
assert inst.po_poSt == approx(1.3398, rel=1e-4)
assert inst.f4LSt_D == approx(1.0691, rel=1e-4)
assert inst.u_uSt == approx(0.5345, rel=1e-4)
assert inst.flowType == FS.SUB_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(0.593, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.1211, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.7855, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.1966, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.5926, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.5191, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(0.6279, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_rho_rhoStar(self):
inst = ffr(self.gamma, rho_rhoSt=1.8708286933869707)
inst.apply_pipe_parameters(0.4, 11, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(0.5, rel=1e-3)
assert inst.t_tSt == approx(1.1429, rel=1e-4)
assert inst.p_pSt == approx(2.1381, rel=1e-4)
assert inst.rho_rhoSt == approx(1.871, rel=1e-4)
assert inst.po_poSt == approx(1.3398, rel=1e-4)
assert inst.f4LSt_D == approx(1.0691, rel=1e-4)
assert inst.u_uSt == approx(0.5345, rel=1e-4)
assert inst.flowType == FS.SUB_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(0.593, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.1211, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.7855, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.1966, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.5926, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.5191, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(0.6279, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_po_poStar(self):
inst = ffr(self.gamma, po_poSt=1.33984375, flowType=FS.SUB_SONIC)
inst.apply_pipe_parameters(0.4, 11, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(0.5, rel=1e-3)
assert inst.t_tSt == approx(1.1429, rel=1e-4)
assert inst.p_pSt == approx(2.1381, rel=1e-4)
assert inst.rho_rhoSt == approx(1.871, rel=1e-4)
assert inst.po_poSt == approx(1.3398, rel=1e-4)
assert inst.f4LSt_D == approx(1.0691, rel=1e-4)
assert inst.u_uSt == approx(0.5345, rel=1e-4)
assert inst.flowType == FS.SUB_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(0.593, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.1211, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.7855, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.1966, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.5926, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.5191, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(0.6279, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_f4LStar_D(self):
inst = ffr(self.gamma, f4LSt_D=1.0690603127182559, flowType=FS.SUB_SONIC)
inst.apply_pipe_parameters(0.4, 11, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(0.5, rel=1e-3)
assert inst.t_tSt == approx(1.1429, rel=1e-4)
assert inst.p_pSt == approx(2.1381, rel=1e-4)
assert inst.rho_rhoSt == approx(1.871, rel=1e-4)
assert inst.po_poSt == approx(1.3398, rel=1e-4)
assert inst.f4LSt_D == approx(1.0691, rel=1e-4)
assert inst.u_uSt == approx(0.5345, rel=1e-4)
assert inst.flowType == FS.SUB_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(0.593, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.1211, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.7855, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.1966, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.5926, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.5191, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(0.6279, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_u_uStar(self):
inst = ffr(self.gamma, u_uSt=0.5345224838248488)
inst.apply_pipe_parameters(0.4, 11, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(0.5, rel=1e-3)
assert inst.t_tSt == approx(1.1429, rel=1e-4)
assert inst.p_pSt == approx(2.1381, rel=1e-4)
assert inst.rho_rhoSt == approx(1.871, rel=1e-4)
assert inst.po_poSt == approx(1.3398, rel=1e-4)
assert inst.f4LSt_D == approx(1.0691, rel=1e-4)
assert inst.u_uSt == approx(0.5345, rel=1e-4)
assert inst.flowType == FS.SUB_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(0.593, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.1211, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.7855, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.1966, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.5926, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.5191, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(0.6279, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_choked_flow(self):
inst = ffr(self.gamma, mach=0.5)
inst.apply_pipe_parameters(0.4, 22, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(0.5, rel=1e-3)
assert inst.t_tSt == approx(1.1429, rel=1e-4)
assert inst.p_pSt == approx(2.1381, rel=1e-4)
assert inst.rho_rhoSt == approx(1.871, rel=1e-4)
assert inst.po_poSt == approx(1.3398, rel=1e-4)
assert inst.f4LSt_D == approx(1.0691, rel=1e-4)
assert inst.u_uSt == approx(0.5345, rel=1e-4)
assert inst.flowType == FS.SUB_SONIC
assert inst.chockedFlow
assert inst.dwnStrmMach == approx(1.0, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.0, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.0, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.0, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.0, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.0, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
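    # A hedged note on the choked case above (assuming apply_pipe_parameters
    # takes (diameter, length, friction_factor)): the applied friction length is
    # 4*f*L/D = 4 * 0.005 * 22 / 0.4 = 1.1, which exceeds the 4fL*/D of 1.0691
    # available at M = 0.5, so the flow chokes and the exit goes sonic (all the
    # downstream starred ratios collapse to 1 and the remaining 4fL*/D to 0).
    # With the shorter pipe used elsewhere (L = 11), 4*f*L/D = 0.55 and the
    # leftover 1.0691 - 0.55 = 0.5191 matches dwnStrm_f4LSt_D in those tests.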
class TestFannoClassSupersonic:
gamma = 1.4
def test_fanno_from_mach(self):
inst = ffr(self.gamma, mach=1.5)
inst.apply_pipe_parameters(0.4, 1.5, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(1.5, rel=1e-3)
assert inst.t_tSt == approx(0.8276, rel=1e-4)
assert inst.p_pSt == approx(0.6065, rel=1e-4)
assert inst.rho_rhoSt == approx(0.7328, rel=1e-4)
assert inst.po_poSt == approx(1.1762, rel=1e-4)
assert inst.f4LSt_D == approx(0.13605, rel=1e-4)
assert inst.u_uSt == approx(1.3646, rel=1e-4)
assert inst.flowType == FS.SUPER_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(1.2887, 1e-3)
assert inst.dwnStrm_t_tSt == approx(0.9008, 1e-3)
assert inst.dwnStrm_p_pSt == approx(0.7365, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0616, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(0.8176, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.06105, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.2231, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_t_tStar(self):
inst = ffr(self.gamma, t_tSt=0.8275862068965517)
inst.apply_pipe_parameters(0.4, 1.5, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(1.5, rel=1e-3)
assert inst.t_tSt == approx(0.8276, rel=1e-4)
assert inst.p_pSt == approx(0.6065, rel=1e-4)
assert inst.rho_rhoSt == approx(0.7328, rel=1e-4)
assert inst.po_poSt == approx(1.1762, rel=1e-4)
assert inst.f4LSt_D == approx(0.13605, rel=1e-4)
assert inst.u_uSt == approx(1.3646, rel=1e-4)
assert inst.flowType == FS.SUPER_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(1.2887, 1e-3)
assert inst.dwnStrm_t_tSt == approx(0.9008, 1e-3)
assert inst.dwnStrm_p_pSt == approx(0.7365, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0616, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(0.8176, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.06105, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.2231, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_p_pStar(self):
inst = ffr(self.gamma, p_pSt=0.6064784348631227)
inst.apply_pipe_parameters(0.4, 1.5, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(1.5, rel=1e-3)
assert inst.t_tSt == approx(0.8276, rel=1e-4)
assert inst.p_pSt == approx(0.6065, rel=1e-4)
assert inst.rho_rhoSt == approx(0.7328, rel=1e-4)
assert inst.po_poSt == approx(1.1762, rel=1e-4)
assert inst.f4LSt_D == approx(0.13605, rel=1e-4)
assert inst.u_uSt == approx(1.3646, rel=1e-4)
assert inst.flowType == FS.SUPER_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(1.2887, 1e-3)
assert inst.dwnStrm_t_tSt == approx(0.9008, 1e-3)
assert inst.dwnStrm_p_pSt == approx(0.7365, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0616, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(0.8176, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.06105, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.2231, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_rho_rhoStar(self):
inst = ffr(self.gamma, rho_rhoSt=0.7328281087929399)
inst.apply_pipe_parameters(0.4, 1.5, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(1.5, rel=1e-3)
assert inst.t_tSt == approx(0.8276, rel=1e-4)
assert inst.p_pSt == approx(0.6065, rel=1e-4)
assert inst.rho_rhoSt == approx(0.7328, rel=1e-4)
assert inst.po_poSt == approx(1.1762, rel=1e-4)
assert inst.f4LSt_D == approx(0.13605, rel=1e-4)
assert inst.u_uSt == approx(1.3646, rel=1e-4)
assert inst.flowType == FS.SUPER_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(1.2887, 1e-3)
assert inst.dwnStrm_t_tSt == approx(0.9008, 1e-3)
assert inst.dwnStrm_p_pSt == approx(0.7365, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0616, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(0.8176, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.06105, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.2231, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_po_poStar(self):
inst = ffr(self.gamma, po_poSt=1.1761670524691357, flowType=FS.SUPER_SONIC)
inst.apply_pipe_parameters(0.4, 1.5, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(1.5, rel=1e-3)
assert inst.t_tSt == approx(0.8276, rel=1e-4)
assert inst.p_pSt == approx(0.6065, rel=1e-4)
assert inst.rho_rhoSt == approx(0.7328, rel=1e-4)
assert inst.po_poSt == approx(1.1762, rel=1e-4)
assert inst.f4LSt_D == approx(0.13605, rel=1e-4)
assert inst.u_uSt == approx(1.3646, rel=1e-4)
assert inst.flowType == FS.SUPER_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(1.2887, 1e-3)
assert inst.dwnStrm_t_tSt == approx(0.9008, 1e-3)
assert inst.dwnStrm_p_pSt == approx(0.7365, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0616, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(0.8176, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.06105, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.2231, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_f4LStar_D(self):
inst = ffr(self.gamma, f4LSt_D=0.13605021738414635, flowType=FS.SUPER_SONIC)
inst.apply_pipe_parameters(0.4, 1.5, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(1.5, rel=1e-3)
assert inst.t_tSt == approx(0.8276, rel=1e-4)
assert inst.p_pSt == approx(0.6065, rel=1e-4)
assert inst.rho_rhoSt == approx(0.7328, rel=1e-4)
assert inst.po_poSt == approx(1.1762, rel=1e-4)
assert inst.f4LSt_D == approx(0.13605, rel=1e-4)
assert inst.u_uSt == approx(1.3646, rel=1e-4)
assert inst.flowType == FS.SUPER_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(1.2887, 1e-3)
assert inst.dwnStrm_t_tSt == approx(0.9008, 1e-3)
assert inst.dwnStrm_p_pSt == approx(0.7365, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0616, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(0.8176, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.06105, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.2231, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_from_u_uStar(self):
inst = ffr(self.gamma, u_uSt=1.364576478442026)
inst.apply_pipe_parameters(0.4, 1.5, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(1.5, rel=1e-3)
assert inst.t_tSt == approx(0.8276, rel=1e-4)
assert inst.p_pSt == approx(0.6065, rel=1e-4)
assert inst.rho_rhoSt == approx(0.7328, rel=1e-4)
assert inst.po_poSt == approx(1.1762, rel=1e-4)
assert inst.f4LSt_D == approx(0.13605, rel=1e-4)
assert inst.u_uSt == approx(1.3646, rel=1e-4)
assert inst.flowType == FS.SUPER_SONIC
assert not inst.chockedFlow
assert inst.dwnStrmMach == approx(1.2887, 1e-3)
assert inst.dwnStrm_t_tSt == approx(0.9008, 1e-3)
assert inst.dwnStrm_p_pSt == approx(0.7365, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0616, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(0.8176, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.06105, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.2231, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
def test_fanno_choked_flow(self):
inst = ffr(self.gamma, mach=1.5)
inst.apply_pipe_parameters(0.4, 22, 0.005)
assert inst.gamma == approx(self.gamma, rel=1e-3)
assert inst.mach == approx(1.5, rel=1e-3)
assert inst.t_tSt == approx(0.8276, rel=1e-4)
assert inst.p_pSt == approx(0.6065, rel=1e-4)
assert inst.rho_rhoSt == approx(0.7328, rel=1e-4)
assert inst.po_poSt == approx(1.1762, rel=1e-4)
assert inst.f4LSt_D == approx(0.13605, rel=1e-4)
assert inst.u_uSt == approx(1.3646, rel=1e-4)
assert inst.flowType == FS.SUPER_SONIC
assert inst.chockedFlow
assert inst.dwnStrmMach == approx(1.0, 1e-3)
assert inst.dwnStrm_t_tSt == approx(1.0, 1e-3)
assert inst.dwnStrm_p_pSt == approx(1.0, rel=1e-4)
assert inst.dwnStrm_po_poSt == approx(1.0, rel=1e-4)
assert inst.dwnStrm_rho_rhoSt == approx(1.0, rel=1e-4)
assert inst.dwnStrm_f4LSt_D == approx(0.0, rel=1e-4)
assert inst.dwnStrm_u_uSt == approx(1.0, rel=1e-4)
assert inst.p2_p1 == approx(inst.dwnStrm_p_pSt / inst.p_pSt, rel=1e-5)
assert inst.rho2_rho1 == approx(inst.dwnStrm_rho_rhoSt / inst.rho_rhoSt, rel=1e-5)
assert inst.t2_t1 == approx(inst.dwnStrm_t_tSt / inst.t_tSt, rel=1e-5)
assert inst.po2_po1 == approx(inst.dwnStrm_po_poSt / inst.po_poSt, rel=1e-5)
assert inst.f4LD2_f4LD1 == approx(inst.dwnStrm_f4LSt_D / inst.f4LSt_D, rel=1e-5)
assert inst.u2_u1 == approx(inst.dwnStrm_u_uSt / inst.u_uSt, rel=1e-5)
| python |
import json, os, copy
def tag_check(resourceTags, include=True):
checkTags = json.loads(os.environ['checkTags'])
tagResults = copy.copy(checkTags)
    if include:
        for cTag in checkTags:
            for rTag in resourceTags:
                # Check each check tag that has both a key and a value; if it matches, remove it from the list
                if rTag == cTag:
                    tagResults.remove(cTag)
                # Check tags where only a key is provided; if the key matches, remove it from the list
                elif "Value" not in cTag and cTag['Key'] == rTag['Key']:
                    tagResults.remove(cTag)
        # Success when every check tag has been matched, i.e. the remaining list is empty
        return tagResults == []
    else:
        # Exclude mode (apparent intent of include=False): the check fails as soon
        # as any of the checkTags is found on the resource, either as an exact
        # key/value match or, for key-only check tags, as a key match.
        tagResults = True
        for cTag in checkTags:
            for rTag in resourceTags:
                if rTag == cTag:
                    tagResults = False
                elif "Value" not in cTag and cTag['Key'] == rTag['Key']:
                    tagResults = False
        # True only when none of the check tags were found on the resource
return (tagResults) | python |
import itertools
def get_output_filename(input_filename):
return input_filename.replace('input', 'output')
class Checksum:
day = 2
test = 2
def get_input_filename(self):
return "day" + str(self.day).zfill(2) + ".input"
def process(self, raw_input):
input_spreadsheet = self.parseInput(raw_input)
        row_checksums = self.calculate_row_checksums(input_spreadsheet)
result = sum(row_checksums)
return result
    def calculate_row_checksums(self, input_spreadsheet):
if self.test == 1:
return [self.calculate_checksum(row) for row in input_spreadsheet]
if self.test == 2:
return [self.get_divisors(row) for row in input_spreadsheet]
def calculate_checksum(self, row):
return max(row) - min(row)
def get_divisors(self, row):
for a, b in itertools.permutations(row, 2):
if a % b == 0:
return int(a / b)
def parseInput(self, raw_input):
result = []
for row in raw_input:
result.append([int(number) for number in row.split()])
return result
def executeTestOnFile(self, input_filename):
with open(input_filename) as input_file:
raw_input = input_file.readlines()
result = self.process(raw_input)
print(result)
with open(get_output_filename(input_filename), 'w') as output_file:
output_file.write(str(result))
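# Worked example (hedged, using the familiar sample rows for this puzzle):
#   test 1: for the row [5, 1, 9, 5] the per-row term is max - min = 9 - 1 = 8,
#           i.e. Checksum().calculate_checksum([5, 1, 9, 5]) == 8
#   test 2: for the row [5, 9, 2, 8] the only evenly dividing pair is 8 / 2 = 4,
#           i.e. Checksum().get_divisors([5, 9, 2, 8]) == 4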
if __name__ == "__main__":
checksum = Checksum()
checksum.test = 2
checksum.executeTestOnFile(checksum.get_input_filename())
| python |
from typing import Iterable, List
from .syntax import Field, NodeType, Syntax
from .node import Node
class CField(Field):
PATH = "path"
NAME = "name"
VALUE = "value"
PARAMETERS = "parameters"
directive = "directive"
argument = "argument"
OPERATOR = "operator"
FUNCTION = "function"
ARGUMENT = "argument"
ARGUMENTS = "arguments"
LEFT = "left"
RIGHT = "right"
DECLARATOR = "declarator"
BODY = "body"
PREFIX = "prefix"
SIZE = "size"
TYPE = "type"
LABEL = "label"
CONDITION = "condition"
CONSEQUENCE = "consequence"
ALTERNATIVE = "alternative"
INITIALIZER = "initializer"
INDEX = "index"
DESIGNATOR = "designator"
UPDATE = "update"
class CNodeType(NodeType):
# Binary expression operators
PLAIN_ASSIGNMENT = "="
ARITHMETIC_ADDITION = "+"
ARITHMETIC_SUBTRACTION = "-"
ARITHMETIC_ADDITION_ADDITION = "++"
ARITHMETIC_SUBTRACTION_SUBTRACTION = "--"
ARITHMETIC_MULTIPLICATION = "*"
ARITHMETIC_DIVISION = "/"
ARITHMETIC_MODULO = "%"
BITWISE_OR = "|"
BITWISE_AND = "&"
BITWISE_XOR = "^"
SHIFT_LEFT = "<<"
SHIFT_RIGHT = ">>"
LOGICAL_AND = "&&"
LOGICAL_OR = "||"
RELATIONAL_LESS_THAN = "<"
RELATIONAL_GREATER_THAN = ">"
RELATIONAL_LESS_THAN_OR_EQUAL = "<="
RELATIONAL_GREATER_THAN_OR_EQUAL = ">="
RELATIONAL_EQUAL = "=="
RELATIONAL_NOT_EQUAL = "!="
Logical_NOT = "!"
ARITHMETIC_COMPOUND_ADDITION = "+="
ARITHMETIC_COMPOUND_SUBTRACTION = "-="
ARITHMETIC_COMPOUND_MULTIPLICATION = "*="
ARITHMETIC_COMPOUND_DIVISION = "/="
ARITHMETIC_COMPOUND_MODULO = "%="
BITWISE_COMPOUND_OR = "|="
BITWISE_COMPOUND_AND = "&="
BITWISE_COMPOUND_XOR = "^="
SHIFT_COMPOUND_LEFT = "<<="
SHIFT_COMPOUND_RIGHT = ">>="
# Literals
IDENTIFIER = "identifier"
# Types
PRIMITIVE_TYPE = "primitive_type"
SIZED_TYPE_SPECIFIER = "sized_type_specifier"
STRUCT_SPECIFIER = "struct_specifier"
UNION_SPECIFIER = "union_specifier"
# Constructs
EXPRESSION_STATEMENT = "expression_statement"
ASSIGNMENT_EXPRESSION = "assignment_expression"
DECLARATION = "declaration"
IF_STATEMENT = "if_statement"
WHILE_STATEMENT = "while_statement"
TRANSLATION_UNIT = "translation_unit"
COMPOUND_STATEMENT = "compound_statement"
DO_STATEMENT = "do_statement"
FOR_STATEMENT = "for_statement"
SWITCH_STATEMENT = "switch_statement"
BREAK_STATEMENT = "break_statement"
CONTINUE_STATEMENT = "continue_statement"
RETURN_STATEMENT = "return_statement"
LABELED_STATEMENT = "labeled_statement"
GOTO_STATEMENT = "goto_statement"
FUNCTION_DEFINITION = "function_definition"
TYPE_DEFINITION = "type_definition"
ENUM_SPECIFIER = "enum_specifier"
STORAGE_CLASS_SPECIFIERS = "storage_class_specifier"
TYPE_QUALIFIER = "type_qualifier"
INIT_DECLARATOR = "init_declarator"
ARRAY_DECLARATOR = "array_declarator"
POINTER_DECLARATOR = "pointer_declarator"
PARAMETER_DECLARATION = "parameter_declaration"
PREPROC_IFDEF = "preproc_ifdef"
PREPROC_DEF = "preproc_def"
CASE_STATEMENT = "case_statement"
PARENTHESIZED_EXPRESSION = "parenthesized_expression"
UNARY_EXPRESSION = "unary_expression"
BINARY_EXPRESSION = "binary_expression"
NUMBER_LITERAL = "number_literal"
UPDATE_EXPRESSION = "update_expression"
class CSyntax(Syntax):
@property
def plain_assignment(self) -> Iterable[CNodeType]:
return [
CNodeType.PLAIN_ASSIGNMENT,
]
@property
def logical_unary_operators(self) -> Iterable[CNodeType]:
return [
CNodeType.Logical_NOT
]
@property
def arithmetic_operators(self) -> Iterable[CNodeType]:
return [
CNodeType.ARITHMETIC_ADDITION,
CNodeType.ARITHMETIC_SUBTRACTION,
CNodeType.ARITHMETIC_MULTIPLICATION,
CNodeType.ARITHMETIC_DIVISION,
CNodeType.ARITHMETIC_MODULO,
]
@property
def arithmetic_unary_operators(self) -> Iterable[CNodeType]:
return [
CNodeType.ARITHMETIC_ADDITION,
CNodeType.ARITHMETIC_SUBTRACTION,
]
@property
def update_expression_operators(self) -> Iterable[CNodeType]:
return [
CNodeType.ARITHMETIC_ADDITION_ADDITION,
CNodeType.ARITHMETIC_SUBTRACTION_SUBTRACTION,
]
@property
def bitwise_operators(self) -> Iterable[CNodeType]:
return [
CNodeType.BITWISE_OR,
CNodeType.BITWISE_AND,
CNodeType.BITWISE_XOR,
]
@property
def shift_operators(self) -> Iterable[CNodeType]:
return [
CNodeType.SHIFT_LEFT,
CNodeType.SHIFT_RIGHT,
]
@property
def logical_operators(self) -> Iterable[CNodeType]:
return [
CNodeType.LOGICAL_AND,
CNodeType.LOGICAL_OR,
]
@property
def relational_operators(self) -> Iterable[CNodeType]:
return [
CNodeType.RELATIONAL_GREATER_THAN,
CNodeType.RELATIONAL_GREATER_THAN_OR_EQUAL,
CNodeType.RELATIONAL_LESS_THAN,
CNodeType.RELATIONAL_LESS_THAN_OR_EQUAL,
CNodeType.RELATIONAL_EQUAL,
CNodeType.RELATIONAL_NOT_EQUAL,
]
@property
def arithmetic_compound_assignment(self) -> Iterable[CNodeType]:
return [
CNodeType.ARITHMETIC_COMPOUND_ADDITION,
CNodeType.ARITHMETIC_COMPOUND_SUBTRACTION,
CNodeType.ARITHMETIC_COMPOUND_MULTIPLICATION,
CNodeType.ARITHMETIC_COMPOUND_DIVISION,
CNodeType.ARITHMETIC_COMPOUND_MODULO,
]
@property
def bitwise_compound_assignment(self) -> Iterable[CNodeType]:
return [
CNodeType.BITWISE_COMPOUND_OR,
CNodeType.BITWISE_COMPOUND_AND,
CNodeType.BITWISE_COMPOUND_XOR,
]
@property
def shift_compound_assignment(self) -> Iterable[CNodeType]:
return [
CNodeType.SHIFT_COMPOUND_LEFT,
CNodeType.SHIFT_COMPOUND_RIGHT,
]
@property
def structures(self) -> Iterable[CNodeType]:
return [
CNodeType.IF_STATEMENT,
CNodeType.WHILE_STATEMENT,
CNodeType.DO_STATEMENT,
CNodeType.FOR_STATEMENT,
CNodeType.SWITCH_STATEMENT,
CNodeType.FUNCTION_DEFINITION
]
@property
def assignment_query(self) -> str:
return "((assignment_expression) @exp)"
@property
def compound_assignment_query(self) -> str:
return "((assignment_expression) @exp)"
@property
def binary_expression_query(self) -> str:
return '((binary_expression) @exp)' + self.assignment_query
@property
def unary_expression_query(self) -> str:
return '((unary_expression) @exp)'
@property
def update_expression_query(self) -> str:
return '((update_expression) @exp)'
@property
def number_literal_query(self) -> str:
return '((number_literal) @num)'
@property
def function_declaration_query(self) -> str:
return "((function_definition) @def)"
@property
def struct_declaration_query(self) -> str:
return '((struct_specifier) @spec)'
@property
def if_statement_query(self) -> str:
return '(if_statement) @if'
@property
def declaration_query(self) -> str:
return '(declaration) @declaration'
def get_binary_expression_operator(self, node: Node) -> Node:
return node.children[1]
def get_function_definitions(self, node: Node) -> Node:
return node
def get_struct_declaration(self, node: Node) -> Node:
return node
def get_if_declaration(self, node: Node) -> Node:
return node
def get_for_loop_body(self, node: Node) -> Node:
return node.named_children[-1]
def get_function_identifier(self, definition: Node) -> Node:
current = definition \
.child_by_field(CField.DECLARATOR) \
.child_by_field(CField.DECLARATOR)
if current is None: return None
while True:
next = current.child_by_field(CField.DECLARATOR)
if next is None: break
current = next
return current
def get_immediate_structure_descendent(self, node: Node) -> Node:
if node is None: return None
types: List[str] = [ nodeType.value for nodeType in self.structures ]
return node.get_immediate_descendent_of_types(types)
def get_structure_descendent(self, node: Node) -> Node:
if node is None: return None
types: List[str] = [ nodeType.value for nodeType in self.structures ]
return node.get_descendent_of_types(types)
def is_immediate_structure_descendent(self, node: Node, type: CNodeType) -> bool:
if node is None: return False
immediate_structure: Node = self.get_immediate_structure_descendent(node)
if immediate_structure is None: return False
immediate_type: CNodeType = self.node_field(immediate_structure.type)
return type is immediate_type
def is_structure_descendent(self, node: Node, type: CNodeType) -> bool:
if node is None: return False
immediate_structure: Node = self.get_structure_descendent(node)
if immediate_structure is None: return False
immediate_type: CNodeType = self.node_field(immediate_structure.type)
return type is immediate_type
def is_default_switch_case(self, case: Node) -> bool:
if case is None: return False
return case.child_by_field(CField.VALUE) is None
def is_empty_switch_case(self, case: Node) -> bool:
if case is None: return False
if self.is_default_switch_case(case):
return case.named_child_count < 1
return case.named_child_count == 1
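    # (For a non-default case the first named child is the case value itself,
    #  which is why "empty" means exactly one named child there but zero named
    #  children for the default case -- a hedged reading of the tree-sitter C
    #  grammar, consistent with is_switch_case below.)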
def is_switch_case(self, case: Node) -> bool:
if not case.parent.is_type(CNodeType.CASE_STATEMENT):
return False
return case.parent.child_by_field(CField.VALUE) == case
def is_field_of_type(self, node: Node, structure: CNodeType, field: CField) -> bool:
if node is None: return False
structure_node: Node = self.get_structure_descendent(node)
if structure_node is None or not structure_node.is_type(structure):
return False
field_node: Node = structure_node.child_by_field(field)
return field is not None and node == field_node
def is_condition_of_if(self, node: Node) -> bool:
return self.is_field_of_type(
node, CNodeType.IF_STATEMENT, CField.CONDITION
)
def is_condition_of_while(self, node: Node) -> bool:
return self.is_field_of_type(
node, CNodeType.WHILE_STATEMENT, CField.CONDITION
)
def is_condition_of_for(self, node: Node) -> bool:
return self.is_field_of_type(
node, CNodeType.FOR_STATEMENT, CField.CONDITION
)
def is_initialisation_of_for(self, node: Node) -> bool:
return self.is_field_of_type(
node, CNodeType.FOR_STATEMENT, CField.INITIALIZER
)
def is_update_of_for(self, node: Node) -> bool:
return self.is_field_of_type(
node, CNodeType.FOR_STATEMENT, CField.UPDATE
)
def is_condition_of_do_while(self, node: Node) -> bool:
return self.is_field_of_type(
node, CNodeType.DO_STATEMENT, CField.CONDITION
)
def is_body_of_for_loop(self, node: Node) -> bool:
        # A for-loop does not expose its body as a named field, so instead we
        # find the enclosing for_statement and check whether this node is the
        # loop body itself (its last named child) or nested directly inside it.
for_statement: Node = node.get_immediate_descendent_of_types(
[ CNodeType.FOR_STATEMENT.value ]
)
if for_statement is None: return False
# The last named child of the for_statement node is the body of the loop
for_body = self.get_for_loop_body(for_statement)
return for_body == node or \
node.is_immediate_descendent_of_node(for_body)
def is_condition_of_switch(self, node: Node) -> bool:
return self.is_field_of_type(
node, CNodeType.SWITCH_STATEMENT, CField.CONDITION
)
def has_else_if(self, node: Node) -> bool:
alternative: Node = node.child_by_field(CField.ALTERNATIVE)
return alternative is not None and alternative.is_type(CNodeType.IF_STATEMENT)
def is_labeled_statement(self, node: Node) -> bool:
return node is not None and node.is_type(CNodeType.LABELED_STATEMENT)
def is_expression_statement(self, node: Node) -> bool:
return node is not None and node.is_type(CNodeType.EXPRESSION_STATEMENT)
def is_return_statement(self, node: Node) -> bool:
return node is not None and node.is_type(CNodeType.RETURN_STATEMENT)
def is_declaration(self, node: Node) -> bool:
return node is not None and node.is_type(CNodeType.DECLARATION)
def is_immediate_of_function_definition(self, node: Node) -> bool:
return node is not None and node.get_immediate_descendent_of_types_field(
[ CNodeType.FUNCTION_DEFINITION.value ], CField.BODY
) is not None
def is_immediate_of_translation_unit(self, node: Node) -> bool:
return node is not None and node.get_immediate_descendent_of_types(
[ CNodeType.TRANSLATION_UNIT ]
) is not None
def is_goto_statement(self, node: Node) -> bool:
return node is not None and node.is_type(CNodeType.GOTO_STATEMENT)
def node_field(self, node_type: str) -> CNodeType:
return CNodeType(node_type) | python |
#from __future__ import unicode_literals
from twisted.internet import reactor, endpoints
from twisted.internet.defer import inlineCallbacks
from ..transit_server import Transit
class ServerBase:
log_requests = False
@inlineCallbacks
def setUp(self):
self._lp = None
if self.log_requests:
blur_usage = None
else:
blur_usage = 60.0
yield self._setup_relay(blur_usage=blur_usage)
self._transit_server._debug_log = self.log_requests
@inlineCallbacks
def _setup_relay(self, blur_usage=None, log_file=None, usage_db=None):
ep = endpoints.TCP4ServerEndpoint(reactor, 0, interface="127.0.0.1")
self._transit_server = Transit(blur_usage=blur_usage,
log_file=log_file, usage_db=usage_db)
self._lp = yield ep.listen(self._transit_server)
addr = self._lp.getHost()
# ws://127.0.0.1:%d/wormhole-relay/ws
self.transit = u"tcp:127.0.0.1:%d" % addr.port
def tearDown(self):
if self._lp:
return self._lp.stopListening()
| python |
# execute a string of Python code with exec()
# the object passed to exec() is the Python code (a string or compiled code object)
a = "print('hiii')"
exec(a)  # prints hiii; exec() itself returns None, so wrapping it in print() would just add "None"
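# A further (hedged) illustration: exec() can also run multi-line code and write
# its bindings into a namespace dict that you pass in.
ns = {}
exec("x = 2\ny = x * 3", ns)
print(ns["y"])  # 6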
| python |
# -*- coding: utf-8 -*-
"""Walker that performs simple name-binding analysis as it traverses the AST"""
import ast
from .util import merge_dicts
from .walkers import Walker
from . import compat
__all__ = ['Scoped']
@Walker
def find_names(tree, collect, stop, **kw):
if isinstance(tree, (ast.Attribute, ast.Subscript)):
stop()
if isinstance(tree, ast.Name):
collect((tree.id, tree))
@Walker
def find_assignments(tree, collect, stop, **kw):
if isinstance(tree, compat.scope_nodes):
collect((tree.name, tree))
stop()
if isinstance(tree, ast.Assign):
for x in find_names.collect(tree.targets):
collect(x)
def extract_arg_names(args):
return dict(
([(args.vararg.arg, args.vararg)] if args.vararg else []) +
([(args.kwarg.arg, args.kwarg)] if args.kwarg else []) +
[(arg.arg, arg) for arg in args.args] +
[(arg.arg, arg) for arg in args.kwonlyargs]
)
class Scoped(Walker):
"""Used in conjunction with `@Walker`, via
@Scoped
@Walker
def my_func(tree, scope, **kw):
...
This decorator wraps the `Walker` and injects in a `scope` argument into
the function. This argument is a dictionary of names which are in-scope
in the present `tree`s environment, starting from the `tree` on which the
recursion was start.
This can be used to track the usage of a name binding through the AST
snippet, and detecting when the name gets shadowed by a more tightly scoped
name binding.
"""
def __init__(self, walker):
self.walker = walker
def recurse_collect(self, tree, sub_kw=[], **kw):
kw['scope'] = kw.get('scope', dict(find_assignments.collect(tree)))
return Walker.recurse_collect(self, tree, sub_kw, **kw)
def func(self, tree, set_ctx_for, scope, **kw):
def extend_scope(tree, *dicts, **kw):
new_scope = merge_dicts(*([scope] + list(dicts)))
if "remove" in kw:
for rem in kw['remove']:
del new_scope[rem]
set_ctx_for(tree, scope=new_scope)
if isinstance(tree, ast.Lambda):
extend_scope(tree.body, extract_arg_names(tree.args))
if isinstance(tree, (ast.GeneratorExp, ast.ListComp, ast.SetComp,
ast.DictComp)):
iterator_vars = {}
for gen in tree.generators:
extend_scope(gen.target, iterator_vars)
extend_scope(gen.iter, iterator_vars)
iterator_vars.update(dict(find_names.collect(gen.target)))
extend_scope(gen.ifs, iterator_vars)
if isinstance(tree, ast.DictComp):
extend_scope(tree.key, iterator_vars)
extend_scope(tree.value, iterator_vars)
else:
extend_scope(tree.elt, iterator_vars)
if isinstance(tree, compat.function_nodes):
extend_scope(tree.args, {tree.name: tree})
extend_scope(
tree.body,
{tree.name: tree},
extract_arg_names(tree.args),
dict(find_assignments.collect(tree.body)),
)
if isinstance(tree, ast.ClassDef):
extend_scope(tree.bases, remove=[tree.name])
extend_scope(tree.body, dict(find_assignments.collect(tree.body)),
remove=[tree.name])
if isinstance(tree, ast.ExceptHandler):
extend_scope(tree.body, {tree.name: tree.name})
if isinstance(tree, ast.For):
extend_scope(tree.body, dict(find_names.collect(tree.target)))
if isinstance(tree, ast.With):
extend_scope(tree.body, dict(find_names.collect(tree.items)))
return self.walker.func(
tree,
set_ctx_for=set_ctx_for,
scope=scope,
**kw
)
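# A minimal usage sketch (comments only; call signatures are assumed from the
# find_names/find_assignments walkers defined above):
#
#   @Scoped
#   @Walker
#   def report_shadowed_reads(tree, scope, collect, **kw):
#       if isinstance(tree, ast.Name) and tree.id in scope:
#           collect((tree.id, scope[tree.id]))
#
# Inside the wrapped walker, `scope` maps every in-scope name to the node that
# binds it, and func() above extends or narrows that mapping as the traversal
# enters lambdas, comprehensions, function/class definitions, except handlers,
# for loops and with blocks.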
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: train.py
import argparse
import itertools
import numpy as np
import os
import cv2
import six
import shutil
assert six.PY3, "FasterRCNN requires Python 3!"
import tensorflow as tf
import tqdm
import tensorpack.utils.viz as tpviz
from tensorpack import *
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.common import get_tf_version_tuple, get_tensors_by_names
from tensorpack.tfutils.summary import add_moving_summary
from tensorpack.tfutils.varreplace import freeze_variables
import model_frcnn
import model_mrcnn
from basemodel import image_preprocess, resnet_c4_backbone, resnet_conv5, resnet_fpn_backbone, backbone_scope
from dataset import DetectionDataset
from config import finalize_configs, config as cfg
from data import get_all_anchors, get_all_anchors_fpn, get_train_dataflow
from eval_utils import EvalCallback
from model_box import RPNAnchors, clip_boxes, crop_and_resize, roi_align
from model_cascade import CascadeRCNNHead, CascadeRCNNHeadWithHardExamples
from model_fpn import fpn_model, generate_fpn_proposals, multilevel_roi_align, multilevel_rpn_losses
from model_frcnn import BoxProposals, FastRCNNHead, fastrcnn_outputs, fastrcnn_predictions, sample_fast_rcnn_targets
from model_mrcnn import maskrcnn_loss, maskrcnn_upXconv_head
from model_rpn import generate_rpn_proposals, rpn_head, rpn_losses
try:
import horovod.tensorflow as hvd
except ImportError:
pass
class DetectionModel(ModelDesc):
def preprocess(self, image):
image = tf.expand_dims(image, 0)
image = image_preprocess(image, bgr=True)
return tf.transpose(image, [0, 3, 1, 2])
@property
def training(self):
return get_current_tower_context().is_training
def optimizer(self):
lr = tf.get_variable('learning_rate', initializer=0.003, trainable=False)
tf.summary.scalar('learning_rate-summary', lr)
# The learning rate in the config is set for 8 GPUs, and we use trainers with average=False.
lr = lr / 8.
opt = tf.train.MomentumOptimizer(lr, 0.9)
if cfg.TRAIN.NUM_GPUS < 8:
opt = optimizer.AccumGradOptimizer(opt, 8 // cfg.TRAIN.NUM_GPUS)
return opt
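    # A hedged numeric illustration of the scaling above: with
    # cfg.TRAIN.NUM_GPUS == 2, AccumGradOptimizer accumulates 8 // 2 == 4 steps
    # per update, so each update still corresponds to the 8-GPU effective batch
    # the configured learning rate was tuned for, while lr / 8 offsets the fact
    # that per-tower gradients are summed (average=False) rather than averaged.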
def get_inference_tensor_names(self):
"""
Returns two lists of tensor names to be used to create an inference callable.
Returns:
[str]: input names
[str]: output names
"""
if cfg.MODE_THIRD_STAGE:
out = ['output/boxes', 'output/scores', 'third_stage_features_out', 'ff_gt_tracklet_scores',
'sparse_tracklet_scores', 'tracklet_score_indices']
else:
out = ['output/boxes', 'output/scores', 'output/labels']
if cfg.MODE_MASK:
out.append('output/masks')
if cfg.EXTRACT_GT_FEATURES:
return ['image', 'roi_boxes'], ['boxes_for_extraction', 'features_for_extraction']
else:
return ['image'], out
def build_graph(self, *inputs):
inputs = dict(zip(self.input_names, inputs))
image = self.preprocess(inputs['image']) # 1CHW
features = self.backbone(image)
anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
if cfg.EXTRACT_GT_FEATURES:
anchor_inputs["roi_boxes"] = inputs["roi_boxes"]
proposals, rpn_losses = self.rpn(image, features, anchor_inputs) # inputs?
targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
head_losses = self.roi_heads(image, features, proposals, targets)
if self.training:
wd_cost = regularize_cost(
'.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
total_cost = tf.add_n(
rpn_losses + head_losses + [wd_cost], 'total_cost')
add_moving_summary(total_cost, wd_cost)
return total_cost
class ResNetC4Model(DetectionModel):
def inputs(self):
ret = [
tf.placeholder(tf.float32, (None, None, 3), 'image'),
tf.placeholder(tf.int32, (None, None, cfg.RPN.NUM_ANCHOR), 'anchor_labels'),
tf.placeholder(tf.float32, (None, None, cfg.RPN.NUM_ANCHOR, 4), 'anchor_boxes'),
tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
tf.placeholder(tf.int64, (None,), 'gt_labels')] # all > 0
if cfg.MODE_MASK:
ret.append(
tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
) # NR_GT x height x width
return ret
def backbone(self, image):
return [resnet_c4_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS[:3])]
def rpn(self, image, features, inputs):
featuremap = features[0]
rpn_label_logits, rpn_box_logits = rpn_head('rpn', featuremap, cfg.RPN.HEAD_DIM, cfg.RPN.NUM_ANCHOR)
anchors = RPNAnchors(get_all_anchors(), inputs['anchor_labels'], inputs['anchor_boxes'])
anchors = anchors.narrow_to(featuremap)
image_shape2d = tf.shape(image)[2:] # h,w
pred_boxes_decoded = anchors.decode_logits(rpn_box_logits) # fHxfWxNAx4, floatbox
proposal_boxes, proposal_scores = generate_rpn_proposals(
tf.reshape(pred_boxes_decoded, [-1, 4]),
tf.reshape(rpn_label_logits, [-1]),
image_shape2d,
cfg.RPN.TRAIN_PRE_NMS_TOPK if self.training else cfg.RPN.TEST_PRE_NMS_TOPK,
cfg.RPN.TRAIN_POST_NMS_TOPK if self.training else cfg.RPN.TEST_POST_NMS_TOPK)
if self.training:
losses = rpn_losses(
anchors.gt_labels, anchors.encoded_gt_boxes(), rpn_label_logits, rpn_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
featuremap = features[0]
gt_boxes, gt_labels, *_ = targets
if self.training:
# sample proposal boxes in training
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
# The boxes to be used to crop RoIs.
# Use all proposal boxes in inference
boxes_on_featuremap = proposals.boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE)
roi_resized = roi_align(featuremap, boxes_on_featuremap, 14)
feature_fastrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1]) # nxcx7x7
# Keep C5 feature to be shared with mask branch
feature_gap = GlobalAvgPooling('gap', feature_fastrcnn, data_format='channels_first')
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs('fastrcnn', feature_gap, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits, gt_boxes,
tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
# In training, mask branch shares the same C5 feature.
fg_feature = tf.gather(feature_fastrcnn, proposals.fg_inds())
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', fg_feature, cfg.DATA.NUM_CATEGORY, num_convs=0) # #fg x #cat x 14x14
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 14,
pad_border=False) # nfg x 1x14x14
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
roi_resized = roi_align(featuremap, final_boxes * (1.0 / cfg.RPN.ANCHOR_STRIDE), 14)
feature_maskrcnn = resnet_conv5(roi_resized, cfg.BACKBONE.RESNET_NUM_BLOCKS[-1])
mask_logits = maskrcnn_upXconv_head(
'maskrcnn', feature_maskrcnn, cfg.DATA.NUM_CATEGORY, 0) # #result x #cat x 14x14
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx14x14
tf.sigmoid(final_mask_logits, name='output/masks')
return []
class ResNetFPNModel(DetectionModel):
def inputs(self):
ret = [
tf.placeholder(tf.float32, (None, None, 3), 'image')]
num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
ret.extend([
tf.placeholder(tf.int32, (None, None, num_anchors),
'anchor_labels_lvl{}'.format(k + 2)),
tf.placeholder(tf.float32, (None, None, num_anchors, 4),
'anchor_boxes_lvl{}'.format(k + 2))])
ret.extend([
tf.placeholder(tf.float32, (None, 4), 'gt_boxes'),
tf.placeholder(tf.int64, (None,), 'gt_labels')]) # all > 0
if cfg.MODE_MASK:
ret.append(
tf.placeholder(tf.uint8, (None, None, None), 'gt_masks')
) # NR_GT x height x width
if cfg.EXTRACT_GT_FEATURES:
ret.append(tf.placeholder(tf.float32, (None, 4,), 'roi_boxes'))
return ret
def slice_feature_and_anchors(self, p23456, anchors):
for i, stride in enumerate(cfg.FPN.ANCHOR_STRIDES):
with tf.name_scope('FPN_slice_lvl{}'.format(i)):
anchors[i] = anchors[i].narrow_to(p23456[i])
def backbone(self, image):
c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
p23456 = fpn_model('fpn', c2345)
return p23456
def rpn(self, image, features, inputs):
if cfg.EXTRACT_GT_FEATURES:
boxes = inputs['roi_boxes']
return BoxProposals(boxes), tf.constant(0, dtype=tf.float32)
assert len(cfg.RPN.ANCHOR_SIZES) == len(cfg.FPN.ANCHOR_STRIDES)
image_shape2d = tf.shape(image)[2:] # h,w
all_anchors_fpn = get_all_anchors_fpn()
multilevel_anchors = [RPNAnchors(
all_anchors_fpn[i],
inputs['anchor_labels_lvl{}'.format(i + 2)],
inputs['anchor_boxes_lvl{}'.format(i + 2)]) for i in range(len(all_anchors_fpn))]
self.slice_feature_and_anchors(features, multilevel_anchors)
# Multi-Level RPN Proposals
rpn_outputs = [rpn_head('rpn', pi, cfg.FPN.NUM_CHANNEL, len(cfg.RPN.ANCHOR_RATIOS))
for pi in features]
multilevel_label_logits = [k[0] for k in rpn_outputs]
multilevel_box_logits = [k[1] for k in rpn_outputs]
multilevel_pred_boxes = [anchor.decode_logits(logits)
for anchor, logits in zip(multilevel_anchors, multilevel_box_logits)]
proposal_boxes, proposal_scores = generate_fpn_proposals(
multilevel_pred_boxes, multilevel_label_logits, image_shape2d)
if self.training:
losses = multilevel_rpn_losses(
multilevel_anchors, multilevel_label_logits, multilevel_box_logits)
else:
losses = []
return BoxProposals(proposal_boxes), losses
def roi_heads(self, image, features, proposals, targets):
image_shape2d = tf.shape(image)[2:] # h,w
assert len(features) == 5, "Features have to be P23456!"
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if not cfg.FPN.CASCADE:
roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
head_feature = fastrcnn_head_func('fastrcnn', roi_feature_fastrcnn)
fastrcnn_label_logits, fastrcnn_box_logits = fastrcnn_outputs(
'fastrcnn/outputs', head_feature, cfg.DATA.NUM_CLASS)
fastrcnn_head = FastRCNNHead(proposals, fastrcnn_box_logits, fastrcnn_label_logits,
gt_boxes, tf.constant(cfg.FRCNN.BBOX_REG_WEIGHTS, dtype=tf.float32))
else:
def roi_func(boxes):
return multilevel_roi_align(features[:4], boxes, 7)
fastrcnn_head = CascadeRCNNHead(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)
if cfg.EXTRACT_GT_FEATURES:
roi_feature_fastrcnn = multilevel_roi_align(features[:4], proposals.boxes, 7)
tf.identity(roi_feature_fastrcnn, "rpn/feature")
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
roi_feature_maskrcnn = multilevel_roi_align(
features[:4], proposals.fg_boxes(), 14,
name_scope='multilevel_roi_align_mask')
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 28,
pad_border=False) # fg x 1x28x28
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
# Cascade inference needs roi transform with refined boxes.
roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28
tf.sigmoid(final_mask_logits, name='output/masks')
return []
class ResNetFPNTrackModel(ResNetFPNModel):
def inputs(self):
ret = super().inputs()
if cfg.USE_PRECOMPUTED_REF_FEATURES:
ret.append(tf.placeholder(tf.float32, (256, 7, 7), 'ref_features'))
else:
ret.append(tf.placeholder(tf.float32, (None, None, 3), 'ref_image'))
ret.append(tf.placeholder(tf.float32, (4,), 'ref_box'))
if cfg.MODE_THIRD_STAGE:
ret.append(tf.placeholder(tf.float32, (256, 7, 7), 'ff_gt_tracklet_feat'))
ret.append(tf.placeholder(tf.float32, (None, 256, 7, 7), 'active_tracklets_feats'))
ret.append(tf.placeholder(tf.float32, (None, 4), 'active_tracklets_boxes'))
ret.append(tf.placeholder(tf.float32, (), 'tracklet_distance_threshold'))
if cfg.MODE_HARD_MINING:
ret.append(tf.placeholder(tf.float32, (None, 3, 256, 7, 7), 'hard_negative_features'))
if cfg.MODE_IF_HARD_MINING_THEN_ALSO_POSITIVES:
ret.append(tf.placeholder(tf.float32, (None, 3, 256, 7, 7), 'hard_positive_features'))
ret.append(tf.placeholder(tf.float32, (None, 3), 'hard_positive_ious'))
ret.append(tf.placeholder(tf.float32, (None, 4), 'hard_positive_gt_boxes'))
ret.append(tf.placeholder(tf.float32, (None, 3, 4), 'hard_positive_jitter_boxes'))
if cfg.EXTRACT_GT_FEATURES:
ret.append(tf.placeholder(tf.float32, (None, 4,), 'roi_boxes'))
return ret
def backbone(self, image):
c2345 = resnet_fpn_backbone(image, cfg.BACKBONE.RESNET_NUM_BLOCKS)
with backbone_scope(freeze=cfg.BACKBONE.FREEZE_AT > 3):
p23456 = fpn_model('fpn', c2345)
return p23456, c2345
def rpn(self, image, features, inputs):
if cfg.EXTRACT_GT_FEATURES:
boxes = inputs['roi_boxes']
return BoxProposals(boxes), tf.constant(0, dtype=tf.float32)
if cfg.BACKBONE.FREEZE_AT > 3:
with freeze_variables(stop_gradient=False, skip_collection=True):
return super().rpn(image, features, inputs)
else:
return super().rpn(image, features, inputs)
def roi_heads(self, image, ref_features, ref_box, features, proposals, targets, hard_negative_features=None,
hard_positive_features=None, hard_positive_ious=None, hard_positive_gt_boxes=None,
hard_positive_jitter_boxes=None, precomputed_ref_features=None):
image_shape2d = tf.shape(image)[2:] # h,w
assert len(features) == 5, "Features have to be P23456!"
gt_boxes, gt_labels, *_ = targets
if self.training:
proposals = sample_fast_rcnn_targets(proposals.boxes, gt_boxes, gt_labels)
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if precomputed_ref_features is None:
roi_aligned_ref_features = multilevel_roi_align(ref_features[:4], ref_box[tf.newaxis], 7)
else:
roi_aligned_ref_features = precomputed_ref_features[tf.newaxis]
if cfg.MODE_SHARED_CONV_REDUCE:
scope = tf.get_variable_scope()
else:
scope = ""
assert cfg.FPN.CASCADE
def roi_func(boxes, already_aligned_features=None):
if already_aligned_features is None:
aligned_features = multilevel_roi_align(features[:4], boxes, 7)
else:
# for hard example mining
aligned_features = already_aligned_features
tiled = tf.tile(roi_aligned_ref_features, [tf.shape(aligned_features)[0], 1, 1, 1])
concat_features = tf.concat((tiled, aligned_features), axis=1)
with argscope(Conv2D, data_format='channels_first',
kernel_initializer=tf.variance_scaling_initializer(
scale=2.0, mode='fan_out',
distribution='untruncated_normal' if get_tf_version_tuple() >= (1, 12) else 'normal')):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
reduced_features = Conv2D('conv_reduce', concat_features, 256, 1, activation=None)
return reduced_features
if cfg.MODE_HARD_MINING and self.training:
fastrcnn_head = CascadeRCNNHeadWithHardExamples(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS, hard_negative_features,
hard_positive_features, cfg.HARD_NEGATIVE_LOSS_SCALING_FACTOR,
cfg.HARD_POSITIVE_LOSS_SCALING_FACTOR, hard_positive_ious, hard_positive_gt_boxes,
hard_positive_jitter_boxes)
else:
fastrcnn_head = CascadeRCNNHead(
proposals, roi_func, fastrcnn_head_func,
(gt_boxes, gt_labels), image_shape2d, cfg.DATA.NUM_CLASS)
if cfg.EXTRACT_GT_FEATURES:
# get boxes and features for each of the three cascade stages!
b0 = proposals.boxes
b1, b2, _ = fastrcnn_head._cascade_boxes
f0 = multilevel_roi_align(features[:4], b0, 7)
f1 = multilevel_roi_align(features[:4], b1, 7)
f2 = multilevel_roi_align(features[:4], b2, 7)
tf.concat([b0, b1, b2], axis=0, name="boxes_for_extraction")
tf.concat([f0, f1, f2], axis=0, name="features_for_extraction")
if self.training:
all_losses = fastrcnn_head.losses()
if cfg.MODE_MASK:
gt_masks = targets[2]
# maskrcnn loss
roi_feature_maskrcnn = multilevel_roi_align(
features[:4], proposals.fg_boxes(), 14,
name_scope='multilevel_roi_align_mask')
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
target_masks_for_fg = crop_and_resize(
tf.expand_dims(gt_masks, 1),
proposals.fg_boxes(),
proposals.fg_inds_wrt_gt, 28,
pad_border=False) # fg x 1x28x28
target_masks_for_fg = tf.squeeze(target_masks_for_fg, 1, 'sampled_fg_mask_targets')
all_losses.append(maskrcnn_loss(mask_logits, proposals.fg_labels(), target_masks_for_fg))
if cfg.MEASURE_IOU_DURING_TRAINING:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output_train')
                # if predictions are empty this could break,
                # so stack a dummy box to prevent that
boxes_for_iou = tf.concat([final_boxes[:1], tf.constant([[0.0, 0.0, 1.0, 1.0]],
dtype=tf.float32)], axis=0)
from examples.FasterRCNN.utils.box_ops import pairwise_iou
iou_at_1 = tf.identity(pairwise_iou(gt_boxes[:1], boxes_for_iou)[0, 0], name="train_iou_at_1")
add_moving_summary(iou_at_1)
return all_losses
else:
decoded_boxes = fastrcnn_head.decoded_output_boxes()
decoded_boxes = clip_boxes(decoded_boxes, image_shape2d, name='fastrcnn_all_boxes')
label_scores = fastrcnn_head.output_scores(name='fastrcnn_all_scores')
final_boxes, final_scores, final_labels = fastrcnn_predictions(
decoded_boxes, label_scores, name_scope='output')
if cfg.MODE_MASK:
# Cascade inference needs roi transform with refined boxes.
roi_feature_maskrcnn = multilevel_roi_align(features[:4], final_boxes, 14)
maskrcnn_head_func = getattr(model_mrcnn, cfg.FPN.MRCNN_HEAD_FUNC)
mask_logits = maskrcnn_head_func(
'maskrcnn', roi_feature_maskrcnn, cfg.DATA.NUM_CATEGORY) # #fg x #cat x 28 x 28
indices = tf.stack([tf.range(tf.size(final_labels)), tf.cast(final_labels, tf.int32) - 1], axis=1)
final_mask_logits = tf.gather_nd(mask_logits, indices) # #resultx28x28
tf.sigmoid(final_mask_logits, name='output/masks')
return []
def build_graph(self, *inputs):
inputs = dict(zip(self.input_names, inputs))
image = self.preprocess(inputs['image']) # 1CHW
fpn_features, backbone_features = self.backbone(image)
if cfg.USE_PRECOMPUTED_REF_FEATURES:
ref_features = None
ref_box = None
else:
ref_image = self.preprocess(inputs['ref_image']) # 1CHW
ref_box = inputs['ref_box']
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
ref_features, _ = self.backbone(ref_image)
anchor_inputs = {k: v for k, v in inputs.items() if k.startswith('anchor_')}
if cfg.EXTRACT_GT_FEATURES:
anchor_inputs["roi_boxes"] = inputs["roi_boxes"]
proposals, rpn_losses = self.rpn(image, fpn_features, anchor_inputs) # inputs?
second_stage_features = fpn_features
targets = [inputs[k] for k in ['gt_boxes', 'gt_labels', 'gt_masks'] if k in inputs]
hard_negative_features = None
hard_positive_features = None
hard_positive_ious = None
hard_positive_gt_boxes = None
hard_positive_jitter_boxes = None
if cfg.MODE_HARD_MINING:
hard_negative_features = inputs['hard_negative_features']
if cfg.MODE_IF_HARD_MINING_THEN_ALSO_POSITIVES:
hard_positive_features = inputs['hard_positive_features']
hard_positive_ious = inputs['hard_positive_ious']
hard_positive_gt_boxes = inputs['hard_positive_gt_boxes']
hard_positive_jitter_boxes = inputs['hard_positive_jitter_boxes']
precomputed_ref_features = None
if cfg.USE_PRECOMPUTED_REF_FEATURES:
precomputed_ref_features = inputs['ref_features']
# Extend proposals by previous frame detections
if not self.training and cfg.MODE_THIRD_STAGE and cfg.EXTEND_PROPOSALS_BY_ACTIVE_TRACKLETS:
proposal_boxes = proposals.boxes
tracklet_boxes = inputs['active_tracklets_boxes']
concat_boxes = tf.concat([proposal_boxes, tracklet_boxes], axis=0)
proposals = BoxProposals(concat_boxes)
head_losses = self.roi_heads(image, ref_features, ref_box, second_stage_features, proposals, targets,
hard_negative_features, hard_positive_features, hard_positive_ious,
hard_positive_gt_boxes, hard_positive_jitter_boxes,
precomputed_ref_features=precomputed_ref_features)
if cfg.MODE_THIRD_STAGE:
self._run_third_stage(inputs, second_stage_features, tf.shape(image)[2:4])
if self.training:
wd_cost = regularize_cost(
'.*/W', l2_regularizer(cfg.TRAIN.WEIGHT_DECAY), name='wd_cost')
total_cost = tf.add_n(
rpn_losses + head_losses + [wd_cost], 'total_cost')
add_moving_summary(total_cost, wd_cost)
return total_cost
def _run_third_stage(self, inputs, second_stage_features, image_hw):
boxes, scores = get_tensors_by_names(['output/boxes', 'output/scores'])
# let's fix (as in finalize) the boxes, so we can roi align only one time
aligned_features_curr = multilevel_roi_align(second_stage_features[:4], boxes, 7)
# these also need to be extracted!
aligned_features_curr = tf.identity(aligned_features_curr, name='third_stage_features_out')
ff_gt_tracklet_scores, _ = self._score_for_third_stage(ref_feats=inputs['ff_gt_tracklet_feat'][tf.newaxis],
det_feats=aligned_features_curr)
tf.identity(ff_gt_tracklet_scores, name='ff_gt_tracklet_scores')
sparse_tracklet_scores, tracklet_score_indices = self._score_for_third_stage(
ref_feats=inputs['active_tracklets_feats'], det_feats=aligned_features_curr,
dense=False, ref_boxes=inputs['active_tracklets_boxes'], det_boxes=boxes, image_hw=image_hw,
tracklet_distance_threshold=inputs['tracklet_distance_threshold'])
tf.identity(sparse_tracklet_scores, name='sparse_tracklet_scores')
tf.identity(tracklet_score_indices, name='tracklet_score_indices')
def _score_for_third_stage(self, ref_feats, det_feats, dense=True, ref_boxes=None, det_boxes=None, image_hw=None,
tracklet_distance_threshold=0.08):
# build all pairs
n_refs = tf.shape(ref_feats)[0]
n_dets = tf.shape(det_feats)[0]
active_tracklets_tiled = tf.tile(ref_feats[:, tf.newaxis], multiples=[1, n_dets, 1, 1, 1])
dets_tiled = tf.tile(det_feats[tf.newaxis], multiples=[n_refs, 1, 1, 1, 1])
concated = tf.concat([active_tracklets_tiled, dets_tiled], axis=2)
if not dense:
# use boxes to prune the connectivity
assert ref_boxes is not None
assert det_boxes is not None
assert image_hw is not None
def xyxy_to_cxcywh(boxes_xyxy):
wh = boxes_xyxy[:, 2:] - boxes_xyxy[:, :2]
c = boxes_xyxy[:, :2] + wh / 2
boxes_cwh = tf.concat((c, wh), axis=1)
return boxes_cwh
active_tracklets_boxes_cxcywh = xyxy_to_cxcywh(ref_boxes)
boxes_cxcywh = xyxy_to_cxcywh(det_boxes)
# normalize by image size
h = image_hw[0]
w = image_hw[1]
norm = tf.cast(tf.stack([w, h, w, h], axis=0), tf.float32)
diffs = tf.abs(active_tracklets_boxes_cxcywh[:, tf.newaxis] - boxes_cxcywh[tf.newaxis]) / norm[
tf.newaxis, tf.newaxis]
# use distances of boxes, first frame scores ("scores") to prune
thresholds = tf.stack([tracklet_distance_threshold] * 4, axis=0)
keep_mask = tf.reduce_all(diffs < thresholds, axis=2)
indices = tf.where(keep_mask)
flattened = tf.boolean_mask(concated, keep_mask)
else:
indices = None
flattened = tf.reshape(
concated, [tf.shape(concated)[0] * tf.shape(concated)[1]] + [int(x) for x in concated.shape[2:]])
fastrcnn_head_func = getattr(model_frcnn, cfg.FPN.FRCNN_HEAD_FUNC)
if cfg.MODE_SHARED_CONV_REDUCE:
scope = tf.get_variable_scope()
else:
scope = ""
all_posteriors = []
# do this for every cascade stage
for idx in range(3):
with tf.variable_scope('cascade_rcnn_stage{}'.format(idx + 1), reuse=True):
with argscope(Conv2D, data_format='channels_first'):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):
reduced_features = Conv2D('conv_reduce', flattened, 256, 1, activation=None)
head_feats = fastrcnn_head_func('head', reduced_features)
with tf.variable_scope('outputs_new', reuse=True):
classification = FullyConnected('class', head_feats, 2)
posteriors = tf.nn.softmax(classification)
all_posteriors.append(posteriors)
posteriors = (all_posteriors[0] + all_posteriors[1] + all_posteriors[2]) / tf.constant(3.0, dtype=tf.float32)
scores = posteriors[:, 1]
return scores, indices
def get_inference_tensor_names(self):
inp, out = super().get_inference_tensor_names()
if cfg.USE_PRECOMPUTED_REF_FEATURES:
inp.append('ref_features')
else:
inp.append('ref_image')
inp.append('ref_box')
if cfg.MODE_THIRD_STAGE:
inp.append('ff_gt_tracklet_feat')
inp.append('active_tracklets_feats')
inp.append('active_tracklets_boxes')
inp.append('tracklet_distance_threshold')
return inp, out
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--load', help='load a model for evaluation or training. Can overwrite BACKBONE.WEIGHTS')
parser.add_argument('--logdir', help='log directory', default='train_log/siamrcnn')
parser.add_argument('--config', help="A list of KEY=VALUE to overwrite those defined in config.py",
nargs='+')
if get_tf_version_tuple() < (1, 6):
# https://github.com/tensorflow/tensorflow/issues/14657
logger.warn("TF<1.6 has a bug which may lead to crash in FasterRCNN if you're unlucky.")
args = parser.parse_args()
if args.config:
cfg.update_args(args.config)
MODEL = ResNetFPNTrackModel()
DetectionDataset() # initialize the config with information from our dataset
is_horovod = cfg.TRAINER == 'horovod'
if is_horovod:
hvd.init()
logger.info("Horovod Rank={}, Size={}".format(hvd.rank(), hvd.size()))
if not is_horovod or hvd.rank() == 0:
# keep the old log folder if already existing! (before it would just delete it)
logger.set_logger_dir(args.logdir, 'k')
# logger.set_logger_dir(args.logdir, 'd')
finalize_configs(is_training=True)
stepnum = cfg.TRAIN.STEPS_PER_EPOCH
# warmup is step based, lr is epoch based
init_lr = cfg.TRAIN.WARMUP_INIT_LR * min(8. / cfg.TRAIN.NUM_GPUS, 1.)
warmup_schedule = [(0, init_lr), (cfg.TRAIN.WARMUP, cfg.TRAIN.BASE_LR)]
warmup_end_epoch = cfg.TRAIN.WARMUP * 1. / stepnum
lr_schedule = [(int(warmup_end_epoch + 0.5), cfg.TRAIN.BASE_LR)]
factor = 8. / cfg.TRAIN.NUM_GPUS
for idx, steps in enumerate(cfg.TRAIN.LR_SCHEDULE[:-1]):
mult = 0.1 ** (idx + 1)
lr_schedule.append(
(steps * factor // stepnum, cfg.TRAIN.BASE_LR * mult))
logger.info("Warm Up Schedule (steps, value): " + str(warmup_schedule))
logger.info("LR Schedule (epochs, value): " + str(lr_schedule))
train_dataflow = get_train_dataflow()
# This is what's commonly referred to as "epochs"
total_passes = cfg.TRAIN.LR_SCHEDULE[-1] * 8 / train_dataflow.size()
logger.info("Total passes of the training set is: {:.5g}".format(total_passes))
callbacks = [
PeriodicCallback(
ModelSaver(max_to_keep=10, keep_checkpoint_every_n_hours=1),
# every_k_epochs=1),
every_k_epochs=20),
# linear warmup
ScheduledHyperParamSetter(
'learning_rate', warmup_schedule, interp='linear', step_based=True),
ScheduledHyperParamSetter('learning_rate', lr_schedule),
PeakMemoryTracker(),
EstimatedTimeLeft(median=True),
SessionRunTimeout(60000).set_chief_only(True), # 1 minute timeout
] + [
EvalCallback(dataset, *MODEL.get_inference_tensor_names(), args.logdir)
for dataset in cfg.DATA.VAL
]
if not is_horovod:
callbacks.append(GPUUtilizationTracker())
start_epoch = cfg.TRAIN.STARTING_EPOCH
if is_horovod and hvd.rank() > 0:
session_init = None
else:
# first try to find existing model
checkpoint_path = os.path.join(args.logdir, "checkpoint")
if os.path.exists(checkpoint_path):
session_init = get_model_loader(checkpoint_path)
start_step = int(session_init.path.split("-")[-1])
start_epoch = start_step // stepnum
logger.info(
"initializing from existing model, " + session_init.path + ", starting from epoch " + str(start_epoch))
else:
if args.load:
session_init = get_model_loader(args.load)
else:
session_init = get_model_loader(cfg.BACKBONE.WEIGHTS) if cfg.BACKBONE.WEIGHTS else None
max_epoch = min(cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum, cfg.TRAIN.MAX_NUM_EPOCHS)
traincfg = TrainConfig(
model=MODEL,
data=QueueInput(train_dataflow),
callbacks=callbacks,
steps_per_epoch=stepnum,
# max_epoch=cfg.TRAIN.LR_SCHEDULE[-1] * factor // stepnum,
max_epoch=max_epoch,
session_init=session_init,
starting_epoch=start_epoch
)
if is_horovod:
trainer = HorovodTrainer(average=False)
else:
# nccl mode appears faster than cpu mode
trainer = SyncMultiGPUTrainerReplicated(cfg.TRAIN.NUM_GPUS, average=False, mode='nccl')
launch_train_with_config(traincfg, trainer)
| python |
import csv
import matplotlib.pyplot as plt
import numpy as np
def MC_size_dependence():
f = open("./size_dep.csv", 'r')
x = csv.reader(f)
size_MC = []
times = []
for i in x:
size_MC.append(int(i[0]))
times.append(int(i[1]))
fig = plt.figure()
plt.plot(size_MC, times)
plt.xlabel("Number of pairs stored")
plt.ylabel("Average query time in ms")
fig.savefig("../figures/MC_size_dependence.pdf")
plt.show()
def pre_process_time():
fp1 = open("./TC_pre_pro.txt", 'r')
fp2 = open("./MC_pre_pro.txt", 'r')
nodes = [i for i in range(10, 21, 2)]
f1 = csv.reader(fp1)
f2 = csv.reader(fp2)
p2 = [[]] * 5
p1 = [int(i[0]) for i in f1]
p2_temp = [int(i[0]) for i in f2]
print(p2_temp)
for i in range(5):
p2[i] = [p2_temp[j] / 20 for j in range(i, len(p2_temp), 5)]
f = plt.figure()
plt.plot(nodes, p1, label="Complete transitive closure")
for i in range(5):
plt.plot(nodes, p2[i], label=str((i + 1) * 10) + "% pairs computed")
    plt.xlabel("|V| (in thousands)")
    plt.ylabel("Time taken to build table (in ms)")
    plt.title("Comparison of pre-processing times for Algorithms 1 & 3")
plt.legend()
plt.show()
f.savefig("../figures/pre_pro_10iter_MC.pdf", bbox_inches='tight')
def hits_vs_miss():
fp = csv.reader(open("./hits_and_miss.csv", 'r'))
g_hits = []
g_miss = []
nodes = []
hits = []
misses = []
for i, line in enumerate(fp):
hits.append(int(line[2]))
misses.append(int(line[3]))
if (i + 1) % 5 == 0:
nodes.append(int(line[0]))
g_hits.append(np.mean(hits)/1000)
g_miss.append(np.mean(misses)/1000)
hits = []
misses = []
fig = plt.figure()
plt.plot(nodes, g_hits)
plt.xlabel("Number of pairs stored")
plt.ylabel("Number of hits(in thousands)")
plt.show()
fig.savefig("../figures/hits_vs_misses.pdf")
def f(r):
timings = []
x = []
count = 0
for line in r:
x.append(int(line[1]))
count += 1
if count == 20:
timings.append(sum(x) / 20)
x = []
count = 0
return timings
def edge_size():
f1 = open("./data_algo1.csv", 'r')
f2 = open("./data_algo2.csv", 'r')
f3 = open("./data_algo3.csv", 'r')
r1 = csv.reader(f1)
r2 = csv.reader(f2)
r3 = csv.reader(f3)
x1 = f(r1)
x2 = f(r2)
x3 = f(r3)
x1[3] = x1[3] / 100
print(x1)
print(x2)
print(x3)
x = [2, 3, 4, 5, 6]
fig = plt.figure()
plt.plot(x, x1, label="Full Transitive Closure")
plt.plot(x, x2, label="Partial Transitive Closure")
plt.plot(x[:3], x3[:3], label="BFS")
plt.legend()
plt.xlabel("Edge-Node Ratio")
plt.ylabel("Average Time taken for query in milliseconds")
plt.show()
fig.savefig("../figures/edge_variation.pdf")
# pre_process_time()
hits_vs_miss()
# MC_size_dependence()
# edge_size()
| python |
# Copyright 2021 Katteli Inc.
# TestFlows.com Open-Source Software Testing Framework (http://testflows.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import inspect
from textwrap import indent, dedent
from tempfile import NamedTemporaryFile
from linecache import cache as code_cache
from contextlib import ExitStack
from testflows._core.contrib.arpeggio import RegExMatch as _
from testflows._core.contrib.arpeggio import OneOrMore, ZeroOrMore, EOF, Optional, Not
from testflows._core.contrib.arpeggio import ParserPython as PEGParser
from testflows._core.contrib.arpeggio import PTNodeVisitor, visit_parse_tree
from testflows._core.exceptions import exception as get_exception
from testflows.texts import *
DummySection = NullStep
class TestStack(ExitStack):
def push_context(self, cm):
return super(TestStack, self).enter_context(cm)
def pop_context(self):
"""Pop and close last context manager from stack.
"""
is_sync, cb = self._exit_callbacks.pop()
assert is_sync
cb(None, None, None)
class Visitor(PTNodeVisitor):
def __init__(self, stack, source_data, *args, **kwargs):
self.stack = stack
self.source_data = source_data
self.globals = globals()
self.locals = {}
self.current_level = 0
super(Visitor, self).__init__(*args, **kwargs)
def visit_header(self, node, children):
self.process(node)
def execute(visitor, node):
visitor.locals["self"] = current()
position = node.position
lines = node.flat_str()
if node.rule_name == "exec_code":
exec_lines = "\n".join(lines.strip().splitlines()[1:-1])
else:
if lines.endswith('"'):
end = lines.rsplit('"')[-1]
exec_lines = fr'''text(fr"""{lines}""", dedent=False, end='{end}')'''
else:
exec_lines = f'text(fr"""{lines}""", dedent=False, end="")'
with NamedTemporaryFile("w+", suffix=".py") as code_file:
code_file.write(exec_lines)
code_file.seek(0)
code_file.flush()
visitor.locals["__file__"] = code_file.name
source_code = code_file.read()
source_name = code_file.name
code_cache[source_name] = (
len(source_code), None,
[line+'\n' for line in source_code.splitlines()], source_name
)
try:
exec(compile(source_code, source_name, 'exec'),
visitor.globals, visitor.locals)
except Exception as e:
exc_tb = e.__traceback__
syntax_error = isinstance(e, SyntaxError)
if syntax_error:
tb_lineno = e.lineno
else:
exc_tb = exc_tb.tb_next
tb_lineno = exc_tb.tb_lineno
split_lines = lines.splitlines()
code_offset = 0
if node.rule_name == "exec_code":
code_offset = 1
line_offset = visitor.source_data[:position].count("\n")
line_fmt = " %" + str(len(str(len(split_lines) + line_offset))) + "d| %s"
line_at_fmt = " %" + str(len(str(len(split_lines) + line_offset))) + "d|> %s"
numbered_lines = "\n".join(
[line_fmt % (n + line_offset,l) if n != tb_lineno + code_offset else line_at_fmt % (n + line_offset,l) for n, l in enumerate(
split_lines, 1)])
            code_exc = type(e)(str(e) + f"\n\n{'Syntax Error' if syntax_error else 'Error'} occurred in the following text:\n\n"
+ numbered_lines)
code_exc.with_traceback(exc_tb)
err(f"{e.__class__.__name__}\n" + get_exception(type(e), code_exc, code_exc.__traceback__))
def process(self, node):
for child in node:
self.execute(child)
def visit_intro(self, node, children):
self.process(node)
def visit_section(self, node, children):
section_level = node[0].value.count("#")
assert self.current_level >= 0, "current level is invalid"
section = Section(node.heading.heading_name.value.strip(), context=SharedContext(current().context))
if section_level > self.current_level:
for i in range(section_level - self.current_level - 1):
self.stack.push_context(DummySection())
else:
for i in range(self.current_level - section_level + 1):
self.stack.pop_context()
self.stack.push_context(section)
self.current_level = section_level
self.process(node)
def Parser():
"""TestFlows executable document parser.
"""
def line():
return _(r"[^\n]*\n")
def non_empty_line():
return _(r"[^\n]+\n")
def final_line():
return _(r"[^\n]+"), EOF
def paragraph():
return OneOrMore(Not(exec_code_start), [non_empty_line, final_line])
def header_sep():
return _(r"---[ \t]*\n")
def header():
return header_sep, ZeroOrMore(Not(header_sep), line), header_sep
def exec_code_start():
return _(r"[ \t]?[ \t]?[ \t]?[`~][`~][`~]python:testflows[ \t]*\n")
def exec_code_end():
return (_(r"[ \t]?[ \t]?[ \t]?[`~][`~][`~][ \t]*"), [_(r"\n"), EOF])
def exec_code():
return exec_code_start, ZeroOrMore(Not(exec_code_end), line), exec_code_end
def intro():
return ZeroOrMore(Not(heading), [exec_code, paragraph, line, final_line])
def section():
return heading, ZeroOrMore(Not(heading), [exec_code, paragraph, line, final_line])
def heading():
return [
(_(r"\s*#+\s+"), heading_name, _(r"\n?")),
(heading_name, _(r"\n?[-=]+\n?"))
]
def heading_name():
return _(r"[^\n]+")
def document():
return Optional(Optional(header), intro, ZeroOrMore(section))
return PEGParser(document, skipws=False)
def execute(source):
"""Execute TestFlows Document (*.tfd).
:param source: source file-like object
"""
parser = Parser()
source_data = source.read()
if not source_data:
fail(f"source file '{os.path.abspath(source.name)}' is empty")
tree = parser.parse(source_data)
if tree is None:
err(f"parsing {os.path.abspath(source.name)} failed")
with TestStack() as stack:
visit_parse_tree(tree, Visitor(stack, source_data))
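# A minimal usage sketch (hypothetical document path): run an executable
# TestFlows document from an open file object.
#
#   with open('example.tfd') as source:
#       execute(source)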
| python |
#!/bin/python
##
subnetMask = input('Enter your subnet mask in dotted-decimal notation: ')
o1, o2, o3, o4 = [int(octet) for octet in subnetMask.split('.')]
print('Your subnet mask in binary is: {0:08b}.{1:08b}.{2:08b}.{3:08b}'.format(o1, o2, o3, o4))
print('Your subnet mask in hexadecimal is: {0:X}.{1:X}.{2:X}.{3:X}'.format(o1, o2, o3, o4))
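##
## Example run (assuming the input 255.255.255.0):
##   Your subnet mask in binary is: 11111111.11111111.11111111.00000000
##   Your subnet mask in hexadecimal is: FF.FF.FF.0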
##
## End of file...
| python |
import time
import numpy as np
import cv2
import open3d as o3d
from numba import njit, prange
import pycuda.autoinit
import pycuda.driver as cuda
from pycuda import gpuarray
from matplotlib import pyplot as plt
from pf_pose_estimation.tsdf_lib import TSDFVolume
from pf_pose_estimation.cuda_kernels import source_module
class ParticleFilter:
def __init__(self, obj_tsdf_volume: TSDFVolume, num_particles: int = 2048):
# object model
self.obj_tsdf_volume = obj_tsdf_volume
self.obj_surface = obj_tsdf_volume.get_surface_cloud_marching_cubes(voxel_size=0.005)
self.obj_offset = np.asarray(self.obj_surface.points).mean(0)
# initialize particle filter
self.num_particles = num_particles
self.particles = np.tile(np.eye(4), (self.num_particles, 1, 1)).astype(np.float32) # (N, 4, 4)
self.particles = self.jitter(self.particles, 180, 180, 180, 0.05, 0.05, 0.05, init_offset=self.obj_offset)
self.particle_weights_gpu = gpuarray.zeros(self.num_particles, dtype=np.float32)
# load cuda kernels
self._cuda_batch_inlier_metric = source_module.get_function('batchInlierMetric')
@staticmethod
@njit(parallel=True, fastmath=True)
def random_sample_transformations(N, ai, aj, ak, i, j, k):
T = np.empty((N, 4, 4), np.float32)
for idx in prange(N):
ai_rand = np.random.uniform(-ak, ak) # exchange ai, ak
aj_rand = np.random.uniform(-aj, aj)
ak_rand = np.random.uniform(-ai, ai) # exchange ai, ak
x_rand = np.random.uniform(-i, i)
y_rand = np.random.uniform(-j, j)
z_rand = np.random.uniform(-k, k)
si, sj, sk = np.sin(ai_rand), np.sin(aj_rand), np.sin(ak_rand)
ci, cj, ck = np.cos(ai_rand), np.cos(aj_rand), np.cos(ak_rand)
cc, cs = ci*ck, ci*sk
sc, ss = si*ck, si*sk
T[idx, 0, 0] = cj*ck
T[idx, 0, 1] = sj*sc-cs
T[idx, 0, 2] = sj*cc+ss
T[idx, 1, 0] = cj*sk
T[idx, 1, 1] = sj*ss+cc
T[idx, 1, 2] = sj*cs-sc
T[idx, 2, 0] = -sj
T[idx, 2, 1] = cj*si
T[idx, 2, 2] = cj*ci
T[idx, 3, :3] = 0
T[idx, 3, 3] = 1
T[idx, 0, 3] = x_rand
T[idx, 1, 3] = y_rand
T[idx, 2, 3] = z_rand
return T
@staticmethod
def jitter(particles, ai, aj, ak, i, j, k, init_offset=None):
"""
Randomly sample N transformation matrices, by randomly rotating 'rzyx' plus translation
reference: https://github.com/davheld/tf/blob/master/src/tf/transformations.py
ai, aj, ak (degrees) along x-axis, y-axis, z-axis
i, j, k (m)
"""
particles = particles.copy()
if init_offset is not None:
particles[:, :3, 3] -= init_offset
ai = ai * np.pi / 180
aj = aj * np.pi / 180
ak = ak * np.pi / 180
T = ParticleFilter.random_sample_transformations(particles.shape[0], ai, aj, ak, i, j, k)
particles = T @ particles
if init_offset is not None:
particles[:, :3, 3] += init_offset
return particles
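    # A minimal usage sketch of `jitter` (hypothetical particle count): perturb a
    # batch of identity poses by up to 5 degrees per rotation axis and 1 cm per
    # translation axis.
    #
    #   particles = np.tile(np.eye(4), (128, 1, 1)).astype(np.float32)
    #   jittered = ParticleFilter.jitter(particles, 5, 5, 5, 0.01, 0.01, 0.01)
    #   assert jittered.shape == (128, 4, 4)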
@staticmethod
@njit(parallel=True)
def get_roi_from_mask(mask: np.ndarray):
H, W = mask.shape
start_row = H - 1
start_col = W - 1
end_row = 0
end_col = 0
for i in prange(H):
for j in prange(W):
if mask[i, j]:
start_row = min(start_row, i)
start_col = min(start_col, j)
end_row = max(end_row, i)
end_col = max(end_col, j)
return np.array([start_row, start_col, end_row, end_col], dtype=np.int32)
@staticmethod
def create_pcd(depth_im: np.ndarray, cam_intr: np.ndarray, color_im: np.ndarray = None,
cam_extr: np.ndarray = np.eye(4)):
intrinsic_o3d = o3d.camera.PinholeCameraIntrinsic()
intrinsic_o3d.intrinsic_matrix = cam_intr
depth_im_o3d = o3d.geometry.Image(depth_im)
if color_im is not None:
color_im_o3d = o3d.geometry.Image(color_im)
rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(color_im_o3d, depth_im_o3d,
depth_scale=1, convert_rgb_to_intensity=False)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic_o3d, extrinsic=cam_extr)
else:
pcd = o3d.geometry.PointCloud.create_from_depth_image(depth_im_o3d, intrinsic_o3d, extrinsic=cam_extr,
depth_scale=1)
return pcd
def estimate(self, color_im: np.ndarray, depth_im: np.ndarray, cam_intr: np.ndarray, mask: np.ndarray = None,
num_iters: int = 50, visualize: bool = True):
H, W = depth_im.shape
if mask is None:
color_im_bgr = cv2.cvtColor(color_im, cv2.COLOR_RGB2BGR)
roi = cv2.selectROI("select_roi", color_im_bgr, fromCenter=False, showCrosshair=True)
cv2.destroyWindow("select_roi")
start_col, start_row, roi_w, roi_h = roi
mask = np.zeros((H, W), dtype=bool)
mask[start_row:start_row+roi_h+1, start_col:start_col+roi_w+1] = True
# find the center of current observation as the initial position
masked_depth_im = depth_im.copy()
masked_depth_im[~mask] = 0
obs_pcd = ParticleFilter.create_pcd(masked_depth_im, cam_intr)
obs_offset = np.asarray(obs_pcd.points).mean(0)
# get region of interest
start_row, start_col, end_row, end_col = ParticleFilter.get_roi_from_mask(mask)
roi_h = end_row - start_row + 1
roi_w = end_col - start_col + 1
# cropped depth image
cropped_depth_im = masked_depth_im[start_row:end_row+1, start_col:end_col+1]
tic = time.time()
for idx in range(num_iters):
# Particle diffusion
top_thresh = int(0.1 * self.num_particles) # top 10% of the particles will be kept without diffusion
top_particles = self.particles[:top_thresh].copy()
            # anneal the diffusion magnitude from coarse to fine as iterations progress
            if idx < 0.2 * num_iters:
                self.particles = self.jitter(self.particles, 10, 10, 10, 0.04, 0.04, 0.04, init_offset=self.obj_offset)
            elif idx < 0.3 * num_iters:
                self.particles = self.jitter(self.particles, 2, 2, 2, 0.02, 0.02, 0.02, init_offset=self.obj_offset)
            elif idx < 0.5 * num_iters:
                self.particles = self.jitter(self.particles, 2, 2, 2, 0.01, 0.01, 0.01, init_offset=self.obj_offset)
            else:
                self.particles = self.jitter(self.particles, 1, 1, 1, 0.01, 0.01, 0.01, init_offset=self.obj_offset)
self.particles[:top_thresh] = top_particles
# rendering
shifted_particles = self.particles.copy()
shifted_particles[:, :3, 3] += obs_offset
batch_depth_gpu, _ = self.obj_tsdf_volume.batch_ray_casting(roi_w, roi_h, cam_intr,
np.linalg.inv(shifted_particles), shifted_particles, start_row, start_col,
self.num_particles, to_host=False)
# compute weights
self.compute_weights_inlier_metric(batch_depth_gpu, cropped_depth_im, self.particle_weights_gpu,
inlier_thresh=0.01)
weights = self.particle_weights_gpu.get()
sorted_indices = np.argsort(weights)[::-1] # descending order
# get maximum likely estimate
best_weight = weights[sorted_indices[0]]
best_particle = shifted_particles[sorted_indices[0]].copy()
# resample particles
weights_sum = np.sum(weights)
if np.allclose(weights_sum, 0):
p = np.ones_like(weights) / len(weights)
else:
p = weights / weights_sum
resampled_indices = np.random.choice(self.num_particles, size=self.num_particles, replace=True, p=p)
resampled_indices[:top_thresh] = sorted_indices[:top_thresh]
self.particles = self.particles[resampled_indices]
if visualize:
self.visualize_particles(color_im, batch_depth_gpu, sorted_indices, start_row, start_col, top_k=5,
pause=False, text="iteration:" + str(idx).zfill(4), text_color=(0, 0, 255),
window_name="visualization")
toc = time.time()
print(f"Perform {num_iters} iterations in {toc - tic:.03f}s")
if visualize:
self.visualize_particles(color_im, batch_depth_gpu, sorted_indices, start_row, start_col, top_k=10,
pause=True, text="iteration:" + str(idx).zfill(4), text_color=(0, 0, 255),
window_name='visualization')
cv2.destroyWindow("visualization")
return best_particle, best_weight
def visualize_particles(self, color_im, batch_depth_gpu, sorted_indices, start_row, start_col,
top_k=1, pause=False, text=None, text_color=(0, 0, 0), window_name="visualization"):
color_im = cv2.cvtColor(color_im, cv2.COLOR_RGB2BGR)
color_im = cv2.putText(color_im, text, (50, 50), cv2.FONT_HERSHEY_SIMPLEX,
1, text_color, 1, cv2.LINE_AA)
rendered_im = color_im.copy()
batch_depth_cpu = batch_depth_gpu.get()[sorted_indices[:top_k]].astype(bool)
for i in range(1, top_k):
batch_depth_cpu[0] |= batch_depth_cpu[i]
h, w = batch_depth_cpu[0].shape
rendered_depth = np.zeros((h, w, 3), dtype=np.uint8)
rendered_depth[batch_depth_cpu[0].astype(bool)] = [0, 0, 255]
rendered_im[start_row:start_row+h, start_col:start_col+w, :] = rendered_depth
alpha = 0.5
blended_im = cv2.addWeighted(color_im, alpha, rendered_im, (1 - alpha), 0.0)
cv2.imshow(window_name, blended_im)
if pause:
cv2.waitKey(0)
else:
cv2.waitKey(1)
def compute_weights_inlier_metric(self, batch_depth_gpu, depth_im, particle_weights_gpu, inlier_thresh=0.005):
H, W = depth_im.shape
self._cuda_batch_inlier_metric(
np.int32(H),
np.int32(W),
np.int32(self.num_particles),
batch_depth_gpu,
cuda.In(depth_im.astype(np.float32)),
particle_weights_gpu,
np.float32(inlier_thresh),
block=(1024, 1, 1),
grid=(int(np.ceil(self.num_particles / 1024)), 1, 1)
)
| python |
import re
import os
import requests
from xml.dom.minidom import (
parseString,
parse as parseFile,
Text as TextNode,
)
from ...conventions import LOCAL_PATH
from .. import readers
class DNFRepositoryMetalink:
def __init__(self, baseurl, repo, architecture):
self.baseurl = baseurl
self.repo = repo
self.architecture = architecture
self.url = '{base}?repo={repo}&arch={arch}'.format(
base=baseurl, repo=repo, arch=architecture)
self.identifier = re.sub(r'[^a-zA-Z0-9_]', '_',
'{}/{}/{}'
.format(baseurl, repo, architecture)
)
def get_repositories(self):
req = requests.get(self.url)
tree = parseString(req.text)
urls = tree.getElementsByTagName('url')
https_urls = [url for url in urls if url.getAttribute('protocol') == 'https']
for https_url in sorted(https_urls, key=lambda x: int(x.getAttribute("preference"))):
url = https_url.childNodes[0].wholeText
print('Downloading data from {url}'.format(url=url))
yield DNFRepository(url)
def download_to(self, dir_name):
cache_file_name = self.identifier
if os.path.exists(os.path.join(dir_name, 'data.xml.gz')):
return
for repository in self.get_repositories():
try:
return repository.download_to(dir_name)
except Exception as e:
print(e)
class DNFRepository:
def __init__(self, url):
self.url = url
def download_to(self, dir_name):
os.makedirs(dir_name, exist_ok=True)
data = parseString(requests.get(self.url).text)
primary = [node
for node
in data.getElementsByTagName('data')
if node.getAttribute('type') == 'primary'][0]
tree_base = re.sub(r'/tree/.*', '/tree/', self.url)
extension = primary.getElementsByTagName('location')[0].getAttribute('href')
url = tree_base + extension
with open(os.path.join(dir_name, 'data.xml.gz'), 'wb') as f:
r = requests.get(url)
f.write(readers.gz(r.content))
def getText(node):
return node.childNodes[0].wholeText
def parse_package(package):
nodes = { node.tagName: node
for node
in package.childNodes
if not isinstance(node, TextNode)
}
data = {
'Package': getText(nodes['name']),
'Description': getText(nodes['summary']),
'Section': (getText(nodes['format']
.getElementsByTagName('rpm:group')[0])),
}
if len(nodes['format'].getElementsByTagName('rpm:requires')) != 0:
        data['Dependencies'] = [
entry.getAttribute('name')
for entry in (nodes['format']
.getElementsByTagName('rpm:requires')[0]
.getElementsByTagName('rpm:entry'))
]
yield data
class DNFCacheReader:
def __init__(self, repositories, identifier):
cache_dir = get_cache_path(identifier)
self.paths = []
for repo in repositories:
self.paths.append(os.path.join(cache_dir, repo.identifier, 'data.xml.gz'))
assert(os.path.exists(self.paths[-1]))
def get_packages(self):
for fname in self.paths:
with open(fname, 'rt') as f:
tree = parseFile(f)
for package in tree.getElementsByTagName('package'):
yield from parse_package(package)
def get_cache_path(identifier):
return os.path.join(LOCAL_PATH, 'cache', identifier + '.cache')
def build_cache(repositories, identifier):
cache_dir = get_cache_path(identifier)
os.makedirs(cache_dir, exist_ok=True)
for repo in repositories:
repo.download_to(os.path.join(cache_dir, repo.identifier))
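# A minimal usage sketch (hypothetical Fedora metalink parameters): download the
# primary metadata for one repository and read its packages back from the cache.
#
#   repos = [DNFRepositoryMetalink(
#       'https://mirrors.fedoraproject.org/metalink', 'fedora-35', 'x86_64')]
#   build_cache(repos, 'fedora_35_x86_64')
#   reader = DNFCacheReader(repos, 'fedora_35_x86_64')
#   for package in reader.get_packages():
#       print(package['Package'])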
| python |
# Generated by Django 3.1.5 on 2021-07-11 17:27
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('profiles', '0001_initial'),
('classroom', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='membership',
name='student',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='member', to='profiles.student'),
),
]
| python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
"""Import ONNX model to gluon interface"""
# pylint: disable=no-member
from .import_onnx import GraphProto
def import_to_gluon(model_file, ctx):
"""
Imports the ONNX model files, passed as a parameter, into Gluon SymbolBlock object.
Parameters
----------
model_file : str
ONNX model file name
ctx : Context or list of Context
Loads the model into one or many context(s).
Returns
-------
sym_block : :class:`~mxnet.gluon.SymbolBlock`
A SymbolBlock object representing the given model file.
Notes
-----
This method is available when you ``import mxnet.contrib.onnx``
"""
graph = GraphProto()
try:
import onnx
except ImportError:
raise ImportError("Onnx and protobuf need to be installed. Instructions to"
+ " install - https://github.com/onnx/onnx#installation")
model_proto = onnx.load_model(model_file)
net = graph.graph_to_gluon(model_proto.graph, ctx)
return net
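# A minimal usage sketch (hypothetical model path); requires onnx and protobuf
# to be installed:
#
#   import mxnet as mx
#   import mxnet.contrib.onnx as onnx_mxnet
#   net = onnx_mxnet.import_to_gluon('model.onnx', ctx=mx.cpu())
#   out = net(mx.nd.zeros((1, 3, 224, 224)))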
| python |
from django.http import JsonResponse
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import permissions
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from django_cookie_app import models
from django_cookie_app.rest import serializers
from django_cookie_app import filtersets
class MyViewSetMixin:
def get_permissions(self):
if self.action == 'list':
self.permission_classes.append(
permissions.IsAuthenticated
)
else:
self.permission_classes = [permissions.IsAdminUser]
return [permission() for permission in self.permission_classes]
class OrderViewSet(MyViewSetMixin, ModelViewSet):
"""
OrderViewSet
"""
serializer_class = serializers.OrderSerializer
queryset = models.Order.objects.select_related(
'choco_oran',
'mint_choco',
'syrup',
'vanilla',
'raspberry').all()
permission_classes = []
filterset_class = filtersets.OrderFilter
filter_backends = (DjangoFilterBackend,)
@action(
detail=False,
methods=['post', 'get'],
permission_classes=[permissions.IsAuthenticatedOrReadOnly])
def inspect(self, request, pk=None):
"""
inspect
"""
if request.method == 'POST':
order_id = models.Order.objects.inspect(dict(request.data))
return JsonResponse(order_id)
return Response({"user": request.user})
class ChocoOrangeViewSet(MyViewSetMixin, ModelViewSet):
"""
ChocoOrangeViewSet
"""
serializer_class = serializers.ChocoOrangeSerializer
queryset = models.ChocoOrange.objects.all()
@action(
detail=False,
methods=['get'],
permission_classes=[permissions.IsAuthenticatedOrReadOnly])
def update_bucket(self, request, pk=None):
"""
update_bucket
"""
if request.method == 'GET':
order_id = models.ChocoOrange.objects.update_last()
return JsonResponse(order_id)
return Response({"user": request.user})
class MintChocoViewSet(MyViewSetMixin, ModelViewSet):
"""
MintChocoViewSet
"""
serializer_class = serializers.MintChocoSerializer
queryset = models.MintChoco.objects.all()
@action(
detail=False,
methods=['get'],
permission_classes=[permissions.IsAuthenticatedOrReadOnly])
def update_bucket(self, request, pk=None):
"""
update_bucket
"""
if request.method == 'GET':
order_id = models.MintChoco.objects.update_last()
return JsonResponse(order_id)
return Response({"user": request.user})
class SyrupViewSet(MyViewSetMixin, ModelViewSet):
"""
SyrupViewSet
"""
serializer_class = serializers.SyrupSerializer
queryset = models.Syrup.objects.all()
@action(
detail=False,
methods=['get'],
permission_classes=[permissions.IsAuthenticatedOrReadOnly])
def update_bucket(self, request, pk=None):
"""
update_bucket
"""
if request.method == 'GET':
order_id = models.Syrup.objects.update_last()
return JsonResponse(order_id)
return Response({"user": request.user})
class VanillaStrawberryChocolateViewSet(MyViewSetMixin, ModelViewSet):
"""
VanillaStrawberryChocolateViewSet
"""
serializer_class = serializers.VanillaStrawberryChocolateSerializer
queryset = models.VanillaStrawberryChocolate.objects.all()
@action(
detail=False,
methods=['get'],
permission_classes=[permissions.IsAuthenticatedOrReadOnly])
def update_bucket(self, request, pk=None):
"""
update_bucket
"""
if request.method == 'GET':
order_id = models.VanillaStrawberryChocolate.objects.update_last()
return JsonResponse(order_id)
return Response({"user": request.user})
class RaspberryWhiteChocolateViewSet(MyViewSetMixin, ModelViewSet):
"""
RaspberryWhiteChocolateViewSet
"""
serializer_class = serializers.RaspberryWhiteChocolateSerializer
queryset = models.RaspberryWhiteChocolate.objects.all()
@action(
detail=False,
methods=['get'],
permission_classes=[permissions.IsAuthenticatedOrReadOnly])
def update_bucket(self, request, pk=None):
"""
update_bucket"
"""
if request.method == 'GET':
order_id = models.RaspberryWhiteChocolate.objects.update_last()
return JsonResponse(order_id)
return Response({"user": request.user})
| python |
import os
import re
import numpy as np
import pandas as pd
FILE_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
source = os.path.join(FILE_ROOT_PATH, '9889.xlsx')
output = os.path.join(FILE_ROOT_PATH, 'result_9889.xlsx')
def merge_sheets(path: str) -> pd.DataFrame:
    '''Collect the trade date, futures trade summary, and futures position summary from every sheet of the workbook.
'''
df = pd.read_excel(path, sheet_name=None)
print(len(list(df.keys())))
all_indexs = dict()
all_sheet_df_res = pd.DataFrame()
for sheet_index, sheet in enumerate(list(df.keys())):
        if sheet_index < len(list(df.keys())) + 1:  # limit to the first few sheets when testing
print(sheet_index)
df_sheet = pd.read_excel(path, sheet_name=sheet)
row, col = df_sheet.shape
child_indexs = []
child_table1_flag = 0
child_table2_flag = 0
for r in range(row):
each_row_list = list(df_sheet.loc[r])
if "交易日期" in each_row_list:
key_date = str(each_row_list[7])
date_row = r
# print(key_date)
elif "期货持仓汇总" in each_row_list:
# print(each_row_list)
# print("从此行开始截取数据", r)
first_start = r
child_indexs.append(first_start)
child_table1_flag = 1
elif "合计" in each_row_list and child_table1_flag == 1:
# print(each_row_list)
# print("第一段结束", r)
first_end = r
child_indexs.append(first_end)
                    # record the indexes gathered so far before moving on
if key_date:
all_indexs[key_date] = child_indexs
elif "期权持仓汇总" in each_row_list:
# print(each_row_list)
# print("从此行开始截取数据", r)
second_start = r
child_indexs.append(second_start)
child_table2_flag = 1
elif "合计" in each_row_list and child_table2_flag == 1:
# print(each_row_list)
# print("第二段结束", r)
second_end = r
if second_end not in child_indexs:
child_indexs.append(second_end)
                    # record the indexes gathered so far before moving on
if key_date:
all_indexs[key_date] = child_indexs
            each_sheet_res = []  # results for this sheet
# print(child_indexs)
if len(child_indexs) == 2:
df_sheet.loc[date_row][0] = df_sheet.loc[date_row][7]
df_sheet.loc[date_row][1:] = np.nan
                each_sheet_res.append(df_sheet.loc[date_row])  # date row
# print(df_sheet.loc[date_row][0])
for i in range(child_indexs[0], child_indexs[1] + 1):
# print(i)
# print(df_sheet.loc[i])
each_sheet_res.append(df_sheet.loc[i])
elif len(child_indexs) == 4:
df_sheet.loc[date_row][0] = df_sheet.loc[date_row][7]
df_sheet.loc[date_row][1:] = np.nan
                each_sheet_res.append(df_sheet.loc[date_row])  # date row
# print(df_sheet.loc[date_row])
for i in range(child_indexs[0], child_indexs[1] + 1):
# print(i)
# print(df_sheet.loc[i])
each_sheet_res.append(df_sheet.loc[i])
for j in range(child_indexs[2], child_indexs[3] + 1):
# print(j)
# print(df_sheet.loc[j])
each_sheet_res.append(df_sheet.loc[j])
# print(each_sheet_res)
each_sheet_res_df = pd.DataFrame(each_sheet_res).reset_index(drop=True)
all_sheet_df_res = pd.concat([all_sheet_df_res, each_sheet_res_df], axis=0)
# break
return all_sheet_df_res
if __name__ == "__main__":
res = merge_sheets(source)
res.to_excel(output, header=None, index=False)
| python |
from ligeor import TwoGaussianModel as TwoG
import numpy as np
def test_initialize_filename(filename, data):
model = TwoG(filename=filename, n_downsample=1, delimiter=',')
assert(((model.phases == data[:,0]) & (model.fluxes == data[:,1]) & (model.sigmas == data[:,2])).all())
def test_initialize_data(data):
model = TwoG(phases=data[:,0], fluxes=data[:,1], sigmas=data[:,2])
assert(((model.phases == data[:,0]) & (model.fluxes == data[:,1]) & (model.sigmas == data[:,2])).all())
return model
def test_estimate_ecl_pos_widths(model, result):
est_positions = model.estimate_eclipse_positions_widths(model.phases, model.fluxes)
estimates = {}
estimates['pos1'] = est_positions['ecl_positions'][0]
estimates['pos2'] = est_positions['ecl_positions'][1]
estimates['width1'] = est_positions['ecl_widths'][0]
estimates['width2'] = est_positions['ecl_widths'][1]
for key in result.keys():
assert(np.abs(estimates[key] - result[key]) < 2e-1)
def test_fit(model, result):
model.fit()
assert(model.best_fit['func'] == result['func'])
assert((np.abs(
model.best_fit['param_vals'][0] -
np.array(result['param_vals'])) < 1e-2*np.ones(len(model.best_fit['param_vals'][0]))).all())
return model
def test_compute_ecl_params(model, result):
eb_dict = model.compute_eclipse_params()
for key in eb_dict.keys():
if key in result.keys():
assert(np.abs(eb_dict[key] - result[key]) < 5e-2)
elif key != 'eclipse_edges':
assert(np.isnan(eb_dict[key]))
else:
pass
# if __name__=='main':
# true values of all models
C = 1.
mu1 = 0.
sigma1 = 0.015
d1 = 0.5
mu2 = 0.42
sigma2 = 0.01
d2 = 0.35
Aell = 0.05
# load data on each synthetic model
data_c = np.loadtxt('../data/const.csv', delimiter=',')
data_cg = np.loadtxt('../data/cg.csv', delimiter=',')
data_ce = np.loadtxt('../data/ce.csv', delimiter=',')
data_cge = np.loadtxt('../data/cge.csv', delimiter=',')
data_cg12 = np.loadtxt('../data/cg12.csv', delimiter=',')
data_cg12e1 = np.loadtxt('../data/cg12e1.csv', delimiter=',')
data_cg12e2 = np.loadtxt('../data/cg12e2.csv', delimiter=',')
# check if file initialization works
test_initialize_filename('../data/cg12.csv', data_cg12)
#create a twoG model for each
# model_c = test_initialize_data(data_c)
model_cg = test_initialize_data(data_cg)
# model_ce = test_initialize_data(data_ce)
model_cge = test_initialize_data(data_cge)
model_cg12 = test_initialize_data(data_cg12)
model_cg12e1 = test_initialize_data(data_cg12e1)
model_cg12e2 = test_initialize_data(data_cg12e2)
# test estimated eclipse positions
test_estimate_ecl_pos_widths(model_cg, {'pos1': 0., 'width1': 0.015})
test_estimate_ecl_pos_widths(model_cge, {'pos1': 0., 'width1': 0.015})
test_estimate_ecl_pos_widths(model_cg12, {'pos1': 0., 'width1': 0.015, 'pos2': 0.42, 'width2': 0.01})
# test fits for all models
# test_fit(model_c, {'func': 'C', 'param_vals': [C]})
test_fit(model_cg, {'func': 'CG', 'param_vals': [C,mu1,d1,sigma1]})
# test_fit(model_ce, {'func': 'CE', 'param_vals': [C, Aell, mu1]})
test_fit(model_cge, {'func': 'CGE', 'param_vals': [C, mu1, d1, sigma1, Aell]})
test_fit(model_cg12, {'func': 'CG12', 'param_vals': [C, mu1, d1, sigma1, mu2, d2, sigma2]})
test_fit(model_cg12e1, {'func': 'CG12E1', 'param_vals': [C, mu1, d1, sigma1, mu2, d2, sigma2, Aell]})
test_fit(model_cg12e2, {'func': 'CG12E2', 'param_vals': [C, mu1, d1, sigma1, mu2, d2, sigma2, Aell]})
# test eclipse parameters for all models
test_compute_ecl_params(model_cg, {'primary_width': 5.6*sigma1,
'primary_position': mu1,
'primary_depth': d1})
test_compute_ecl_params(model_cge, {'primary_width': 5.6*sigma1,
'primary_position': mu1,
'primary_depth': d1})
test_compute_ecl_params(model_cg12, {'primary_width': 5.6*sigma1,
'secondary_width': 5.6*sigma2,
'primary_position': mu1,
'secondary_position': mu2,
'primary_depth': d1,
'secondary_depth': d2})
test_compute_ecl_params(model_cg12e1, {'primary_width': 5.6*sigma1,
'secondary_width': 5.6*sigma2,
'primary_position': mu1,
'secondary_position': mu2,
'primary_depth': d1,
'secondary_depth': d2})
test_compute_ecl_params(model_cg12e2, {'primary_width': 5.6*sigma1,
'secondary_width': 5.6*sigma2,
'primary_position': mu1,
'secondary_position': mu2,
'primary_depth': d1,
'secondary_depth': d2})
| python |
"""
Summary - instance members vs. class members
Creation
    Instance variable, inside the constructor: object.variable_name = data
    Instance method:
        def method_name(self):
            pass
    Class variable, in the class body outside any method: variable_name = data
    Class method:
        @classmethod
        def method_name(cls):
            pass
Usage
    Instance variable: object.variable_name
    Instance method: object.method_name()
    Class variable: Class.variable_name
    Class method: Class.method_name()
Special cases:
"""
class MyClass:
    # create a class variable
    data02 = 20
    # create a class method
    @classmethod
    def func02(cls):
        print(cls.data02)
    def __init__(self):
        # create an instance variable
        self.data01 = 10
    # create an instance method
    def func01(self):
        print(self.data01)
m01 = MyClass()
# access the instance variable
print(m01.data01)
# access the class variable
print(MyClass.data02)
# call the instance method through the object
m01.func01()
# calling it through the class name is not recommended
# MyClass.func01(m01)
# call the class method through the class
MyClass.func02()
# calling the class method through an instance is not recommended
# m01.func02() | python |
import argparse
import json
import os.path as pth
import sys
from glob import iglob
from typing import Any, Callable, Dict
from typing import MutableMapping as Map
from typing import Optional, Type, Union, cast
from ..core.fp import OneOf
from ..core.io import CiTool, env, error_block
from ..core.issue import Issue
from .validators import validate_non_empty_str
class CmdModule:
"""Interface for command modules."""
meta: Dict[str, str]
@staticmethod
def add_arguments(_parser: argparse.ArgumentParser) -> None:
"""Should be defined if we want to manipulate the argument parser
object.
This will allow us to define options that may apply to the
subparsers.
"""
...
@staticmethod
def add_parser(
_subparser: argparse._SubParsersAction, # noqa pylint: disable=protected-access
_raw: Type[argparse.RawTextHelpFormatter],
) -> None:
"""This function is required for commands so that we may be able to
define arguments."""
...
@staticmethod
def run(_arg: argparse.Namespace) -> int:
"""This function needs to call a library function and return 0 if
successful or non-zero if there is a failure."""
...
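# A hedged sketch of a module satisfying the CmdModule interface (the command
# name and behaviour below are illustrative, not part of the library):
#
#   meta = {'help': 'say hello', 'description': 'Print a greeting.'}
#
#   def add_parser(subparser, raw):
#       parser = subparser.add_parser('hello', help=meta['help'], formatter_class=raw)
#       parser.add_argument('name')
#
#   def run(arg):
#       print(f'hello {arg.name}')
#       return 0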
def import_mod(name: str) -> CmdModule:
"""Import a module by string."""
module = __import__(name)
for part in name.split('.')[1:]:
module = getattr(module, part)
return cast(CmdModule, module)
def get_command_modules(
root: str,
commands_module: str,
) -> Map[str, CmdModule]:
"""Return a dictionary mapping command names to modules that define an
`add_parser` method.
root: The absolute path of the directory containing the __main__.py that
activates the cli.
commands_module: The full module resolution. For instance `m.cli.commands`.
"""
dir_name = '/'.join(commands_module.split('.')[1:])
mod_names = list(iglob(f'{root}/{dir_name}/*.py'))
mod = {}
for name in mod_names:
tname = pth.split(name)[1][:-3]
tmod = import_mod(f'{commands_module}.{tname}')
if hasattr(tmod, 'add_parser'):
mod[tname] = tmod
return mod
def get_cli_command_modules(
file_path: str,
) -> Map[str, Union[CmdModule, Map[str, CmdModule]]]:
"""Return a dictionary containing the commands and subcommands for the cli.
Note that file_path is expected to be the absolute path to the
__main__.py file. Another restriction is that the __main__.py file
must have the `cli.commands` module as its sibling.
"""
root = pth.split(pth.abspath(file_path))[0]
main_mod = pth.split(root)[1]
cli_root = f'{main_mod}.cli'
root_cmd = get_command_modules(root, f'{cli_root}.commands')
mod: Map[str, Union[CmdModule, Map[str, CmdModule]]] = {}
for key, val in root_cmd.items():
mod[key] = val
mod['.meta'] = import_mod(f'{cli_root}.commands')
subcommands = list(iglob(f'{root}/cli/commands/*'))
for name in subcommands:
if name.endswith('.py') or name.endswith('__'):
continue
tname = pth.split(name)[1]
mod[tname] = get_command_modules(root, f'{cli_root}.commands.{tname}')
mod[f'{tname}.meta'] = import_mod(f'{cli_root}.commands.{tname}')
return mod
def main_parser(
mod: Map[str, Union[CmdModule, Map[str, CmdModule]]],
add_args=None,
):
"""Creates an argp parser and returns the result calling its parse_arg
method.
The `add_args` param may be provided as a function that takes in an
`argparse.ArgumentParser` instance to be able to take additional
actions.
"""
meta_mod = cast(CmdModule, mod['.meta'])
main_meta = meta_mod.meta # type: ignore
raw = argparse.RawTextHelpFormatter
# NOTE: In the future we will need to extend from this class to be able to
# override the error method to be able to print CI environment messages.
argp = argparse.ArgumentParser(
formatter_class=raw,
description=main_meta['description'],
)
if add_args:
add_args(argp)
subp = argp.add_subparsers(
title='commands',
dest='command_name',
required=True,
help='additional help',
metavar='<command>',
)
names = sorted(mod.keys())
for name in names:
if name.endswith('.meta'):
continue
if isinstance(mod[name], dict):
meta_mod = cast(CmdModule, mod[f'{name}.meta'])
meta = meta_mod.meta # type: ignore
parser = subp.add_parser(
name,
help=meta['help'],
formatter_class=raw,
description=meta['description'],
)
if hasattr(meta_mod, 'add_arguments'):
meta_mod.add_arguments(parser)
subsubp = parser.add_subparsers(
title='commands',
dest='subcommand_name',
required=True,
help='additional help',
metavar='<command>',
)
sub_mod = cast(Dict[str, CmdModule], mod[name])
for subname in sorted(sub_mod.keys()):
sub_mod[subname].add_parser(subsubp, raw)
else:
cast(CmdModule, mod[name]).add_parser(subp, raw)
return argp.parse_args()
def run_cli(
file_path: str,
main_args=None,
) -> None:
"""Helper function to create a cli application.
def main_args(argp):
argp.add_argument(...)
def main():
run_cli(__file__, main_args)
We only need `main_args` if we need to gain access to the
`argparse.ArgumentParser` instance.
"""
mod = get_cli_command_modules(file_path)
arg = main_parser(mod, main_args)
if arg == 1:
sys.exit(1)
if hasattr(arg, 'subcommand_name'):
sub_mod = cast(Dict[str, CmdModule], mod[arg.command_name])
sys.exit(sub_mod[arg.subcommand_name].run(arg))
sys.exit(cast(CmdModule, mod[arg.command_name]).run(arg))
def display_issue(issue: Issue) -> None:
"""print an error message."""
CiTool.error(issue.message)
error_block(str(issue))
def display_result(val: Any) -> None:
"""print the JSON stringification of the param `val` provided that val is
not `None`."""
if val is not None:
try:
print(json.dumps(val, separators=(',', ':')))
except Exception:
print(val)
def run_main(
callback: Callable[[], OneOf[Issue, Any]],
handle_result: Callable[[Any], None] = display_result,
handle_issue: Callable[[Issue], None] = display_issue,
):
"""Run the callback and print the returned value as a JSON string. Set the
print_raw param to True to bypass the JSON stringnification. To change how
the result or an issue should be display then provide the optional
arguments handle_result and handle_issue. For instance, to display the raw
value simply provide the `print` function.
Return 0 if the callback is a `Good` result otherwise return 1.
"""
try:
res = callback()
val = res.value
if res.is_bad:
if isinstance(val, Issue):
handle_issue(val)
else:
issue = Issue('non-issue exception', cause=cast(Issue, val))
handle_issue(issue)
return 1
handle_result(val)
except Exception as ex:
issue = Issue('unknown caught exception', cause=ex)
handle_issue(issue)
return 1
return 0
def call_main(fun, args, print_raw=False) -> int:
"""
@deprecated: Use run_main
The `fun` param will be called by providing the list of values in
`args`. By default, the result of calling `fun` will be JSON stringified
but we can avoid this by providing `print_raw` set to True. """
try:
res = fun(*args)
val = res.value
if res.is_bad:
if isinstance(val, Issue):
return error(val.message, val)
issue = Issue('non-issue exception', cause=val)
return error(issue.message, issue)
if val is not None or isinstance(val, list):
if print_raw:
print(val)
else:
try:
print(json.dumps(val, separators=(',', ':')))
except Exception:
print(val, file=sys.stderr)
except Exception as ex:
CiTool.error('unknown caught exception')
error_block(repr(ex))
return 1
return 0
def error(msg: str, issue: Optional[Issue] = None) -> int:
"""print an error message."""
CiTool.error(msg)
if issue:
error_block(str(issue))
return 1
def cli_integration_token(integration: str, env_var: str):
"""Return a function that takes in a parser.
This generated function registers a token argument in the parser
which looks for its value in the environment variables.
"""
return lambda parser: parser.add_argument(
'-t',
'--token',
type=validate_non_empty_str,
default=env(env_var),
help=f'{integration} access token (default: env.{env_var})',
)
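# Illustrative usage sketch (module and environment-variable names below are
# hypothetical, not taken from this file): the function returned by
# cli_integration_token has the same shape as the `add_arguments(parser)` hook
# that command meta modules may expose, so a meta module could simply write
#
#     meta = {
#         'help': 'interact with github',
#         'description': 'Commands that talk to the GitHub API.',
#     }
#     add_arguments = cli_integration_token('github', 'GITHUB_TOKEN')
#
# which registers a `-t/--token` option defaulting to the GITHUB_TOKEN
# environment variable.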
| python |
import hashlib
import os
import pickle
from urllib.request import urlretrieve
import numpy as np
from PIL import Image
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import resample
from tqdm import tqdm
from zipfile import ZipFile
import math
import tensorflow as tf
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import matplotlib.gridspec as gridspec
import cv2
"""
Helper-function for flattening a layer
A convolutional layer produces an output tensor with 4 dimensions. We will add fully-connected layers after the
convolution layers, so we need to reduce the 4-dim tensor to 2-dim which can be used as input to the fully-connected layer.
"""
def flatten_layer(layer):
# Get the shape of the input layer.
layer_shape = layer.get_shape()
# The shape of the input layer is assumed to be:
# layer_shape == [num_images, img_height, img_width, num_channels]
# The number of features is: img_height * img_width * num_channels
# We can use a function from TensorFlow to calculate this.
num_features = layer_shape[1:4].num_elements()
# Reshape the layer to [num_images, num_features].
# Note that we just set the size of the second dimension
# to num_features and the size of the first dimension to -1
# which means the size in that dimension is calculated
# so the total size of the tensor is unchanged from the reshaping.
layer_flat = tf.reshape(layer, [-1, num_features])
# The shape of the flattened layer is now:
# [num_images, img_height * img_width * num_channels]
# Return both the flattened layer and the number of features.
return layer_flat, num_features
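# Illustrative sketch (tensor names and layer size are hypothetical): the
# flattened output is meant to feed a fully-connected layer, e.g.
#
#     layer_flat, num_features = flatten_layer(last_conv_layer)
#     fc_weights = tf.Variable(tf.truncated_normal([num_features, 128]))
#     fc_biases = tf.Variable(tf.zeros([128]))
#     fc_layer = tf.nn.relu(tf.matmul(layer_flat, fc_weights) + fc_biases)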
def jitter_images(images):
# apply a random affine jitter to each image
jittered_images = []
for i in range(len(images)):
image = images[i]
jittered_image = transform_image(image)
jittered_images.append(jittered_image)
return np.array(jittered_images)
def rbg_to_gray(images):
# convert each RGB image to a single greyscale channel
gray_images = []
for i in range(len(images)):
image = images[i]
# gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray_image = color2gray(image)
gray_images.append(gray_image)
return np.array(gray_images)
def color2gray(image):
gray = 0.2989 * image[:,:,0] + 0.5870 * image[:,:,1] + 0.1140 * image[:,:,2]
return gray
def flatten_images(images):
flattened_images = []
for i in range(0, images.shape[0]):
image = images[i]
f = np.array(image, dtype=np.float32).flatten()
flattened_images.append(f)
return np.array(flattened_images)
# Problem 1 - Implement Min-Max scaling for greyscale image data
def normalize_greyscale(image_data):
"""
Normalize the image data with Min-Max scaling to a range of [0.1, 0.9]
:param image_data: The image data to be normalized
:return: Normalized image data
"""
# ToDo: Implement Min-Max scaling for greyscale image data
a = 0.1
b = 0.9
x_min = np.min(image_data)
x_max = np.max(image_data)
x_prime = [a + (((x - x_min) * (b - a)) / (x_max - x_min)) for x in image_data]
# print(image_data, ' normalized to ---> ', x_prime)
return np.array(x_prime)
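# Worked example (illustrative): with a = 0.1 and b = 0.9,
# normalize_greyscale(np.array([0., 127.5, 255.])) maps 0 -> 0.1,
# 127.5 -> 0.5 and 255 -> 0.9, since each value x becomes
# a + (x - x_min) * (b - a) / (x_max - x_min).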
# Save the data for easy access
def save_data(train_features, train_labels, valid_features, valid_labels, test_features, test_labels):
pickle_file = 'trafficsigns_trained.pickle'
if not os.path.isfile(pickle_file):
print('Saving data to pickle file...')
try:
with open(pickle_file, 'wb') as pfile:
pickle.dump(
{
'train_dataset': train_features,
'train_labels': train_labels,
'valid_dataset': valid_features,
'valid_labels': valid_labels,
'test_dataset': test_features,
'test_labels': test_labels
},
pfile, pickle.HIGHEST_PROTOCOL)
except Exception as e:
print('Unable to save data to', pickle_file, ':', e)
raise
print('Data cached in pickle file.')
def reload_trained_data():
# Reload the data
pickle_file = 'trafficsigns_trained.pickle'
with open(pickle_file, 'rb') as f:
pickle_data = pickle.load(f)
train_features = pickle_data['train_dataset']
train_labels = pickle_data['train_labels']
valid_features = pickle_data['valid_dataset']
valid_labels = pickle_data['valid_labels']
test_features = pickle_data['test_dataset']
test_labels = pickle_data['test_labels']
del pickle_data # Free up memory
print('Data and modules loaded.')
def transform_image(img, ang_range=20, shear_range=10, trans_range=5):
"""
This function transforms images to generate new images.
The function takes in following arguments,
1- Image
2- ang_range: Range of angles for rotation
3- shear_range: Range of values to apply affine transform to
4- trans_range: Range of values to apply translations over.
A Random uniform distribution is used to generate different parameters for transformation
"""
# Rotation
ang_rot = np.random.uniform(ang_range) - ang_range / 2
rows, cols, ch = img.shape
Rot_M = cv2.getRotationMatrix2D((cols / 2, rows / 2), ang_rot, 1)
# Translation
tr_x = trans_range * np.random.uniform() - trans_range / 2
tr_y = trans_range * np.random.uniform() - trans_range / 2
Trans_M = np.float32([[1, 0, tr_x], [0, 1, tr_y]])
# Shear
pts1 = np.float32([[5, 5], [20, 5], [5, 20]])
pt1 = 5 + shear_range * np.random.uniform() - shear_range / 2
pt2 = 20 + shear_range * np.random.uniform() - shear_range / 2
pts2 = np.float32([[pt1, 5], [pt2, pt1], [5, pt2]])
shear_M = cv2.getAffineTransform(pts1, pts2)
img = cv2.warpAffine(img, Rot_M, (cols, rows))
img = cv2.warpAffine(img, Trans_M, (cols, rows))
img = cv2.warpAffine(img, shear_M, (cols, rows))
return img
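# Illustrative usage (sketch only): jitter_images above applies transform_image
# to every image, so an augmented copy of the raw RGB training set could be
# produced with
#
#     X_train_jittered = jitter_images(X_train)
#
# Note that transform_image unpacks rows, cols, ch from img.shape, so it must
# run on the 3-channel images, i.e. before the greyscale conversion and
# flattening steps.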
def plot_image(image):
# image = mpimg.imread(X_train[0][0])
# image = X_train[0][0]
plt.imshow(image, interpolation='nearest')
plt.axis('off')
plt.show()
def plot_images(images, jitter=False):
gs1 = gridspec.GridSpec(10, 10)
gs1.update(wspace=0.01, hspace=0.02) # set the spacing between axes.
plt.figure(figsize=(12,12))
for i in range(len(images)):
ax1 = plt.subplot(gs1[i])
ax1.set_xticklabels([])
ax1.set_yticklabels([])
ax1.set_aspect('equal')
img = images[i]
if jitter == True:
img = transform_image(img)
plt.subplot(10,10,i+1)
plt.imshow(img, interpolation='nearest')
plt.axis('off')
plt.show()
def compute_dimensions(train_features, test_features):
n_train = len(train_features)
n_test = len(test_features)
image_shape = train_features.shape[1:3]
labels_count = len(np.unique(train_labels))
image_size = image_shape[0]
# Images are stored in one-dimensional arrays of this length.
img_size_flat = image_size * image_size
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of labels =", labels_count)
print("Image size =", image_size)
print("img_size_flat =", img_size_flat)
print("")
print("")
return n_train, n_test, num_channels, image_shape, labels_count, image_size, img_size_flat
def next_batch(batch_size, fake_data=False):
"""Return the next `batch_size` examples from this data set."""
global _index_in_epoch, _num_examples, _epochs_completed, X_train, train_labels
start = _index_in_epoch
_index_in_epoch += batch_size
if _index_in_epoch > _num_examples:
# Finished epoch
_epochs_completed += 1
# Shuffle the data
perm = np.arange(_num_examples)
np.random.shuffle(perm)
X_train = X_train[perm]
train_labels = train_labels[perm]
# Start next epoch
start = 0
_index_in_epoch = batch_size
assert batch_size <= _num_examples
end = _index_in_epoch
return X_train[start:end], train_labels[start:end]
# 1. Load in train and test pickle files
training_file = '../traffic-sign-data/train.p'
testing_file = '../traffic-sign-data/test.p'
with open(training_file, mode='rb') as f:
train = pickle.load(f)
with open(testing_file, mode='rb') as f:
test = pickle.load(f)
X_train, train_labels, train_size, train_coords = train['features'], train['labels'], train['sizes'], train['coords']
X_test, test_labels, test_size, test_coords = test['features'], test['labels'], test['sizes'], test['coords']
assert len(X_train) == len(train_labels), 'features must be same size as labels'
# Detect if any images' sizes differ from their coords ROI
# for i in range(len(train_coords)):
# if not np.array_equal(train_size[i], train_coords[i]):
# print("size: {} coords: {}".format(train_size[i], train_coords[i]))
# 2. Get randomized datasets for training and validation
print('train_features before split: ', len(X_train))
print('train_labels before split: ', len(train_labels))
print('test_features before split: ', len(X_test))
print('test_labels before split: ', len(test_labels))
print('')
split_test_size = 0.15
X_train, valid_features, train_labels, valid_labels = train_test_split(
X_train,
train_labels,
test_size=split_test_size,
random_state=832289)
print('Training features and labels randomized and split with train_test_split (test_size: {})'.format(split_test_size))
print('')
print('train_features after split: ', len(X_train))
print('train_labels after split: ', len(train_labels))
print('test_features after split: ', len(X_test))
print('test_labels after split: ', len(test_labels))
# Globals
_epochs_completed = 0
_index_in_epoch = 0
_num_examples = len(X_train)
# Convert the RGB images to greyscale (one channel) before flattening
print(X_train.shape)
X_train = rbg_to_gray(X_train)
X_test = rbg_to_gray(X_test)
print(X_train.shape)
# Flatten train and test features
# X_train = np.arange(len(X_train) * 1024).reshape((len(X_train), 1024))
# X_test = np.arange(len(X_test) * 1024).reshape((len(X_test), 1024))
# assert len(X_train) == len(train_labels), 'features must be same size as labels'
X_train = flatten_images(X_train)
X_test = flatten_images(X_test)
print(X_train.shape)
X_train = normalize_greyscale(X_train)
X_test = normalize_greyscale(X_test)
print(X_train.shape)
num_channels = 1
# let's compute the dimensions of our data
n_train, n_test, num_channels, image_shape, labels_count, image_size, img_size_flat = compute_dimensions(X_train,
X_test)
# [Adapted from Lesson 7 - MiniFlow]
# Turn labels into numbers and apply One-Hot Encoding
encoder = LabelBinarizer()
encoder.fit(train_labels)
train_labels = encoder.transform(train_labels)
test_labels = encoder.transform(test_labels)
# Change to float32, so it can be multiplied against the features in TensorFlow, which are float32
train_labels = train_labels.astype(np.float32)
test_labels = test_labels.astype(np.float32)
is_labels_encod = True
print('Labels One-Hot Encoded')
features_count = image_size * num_channels # TRAFFIC SIGNS data input (img shape: 32*32)
a_mode = 1
b_mode = 1
if a_mode == 1:
# Parameters
# learning_rate = tf.constant(0.2)
# Passing global_step to minimize() will increment it at each step.
global_step = tf.Variable(0, trainable=False)
initial_learning_rate = 0.25
learning_rate = tf.train.exponential_decay(initial_learning_rate, global_step, 50000, 0.96, staircase=True)
training_epochs = 100
batch_size = 32
display_step = 1
n_hidden_layer = 256 # layer number of features
n2_hidden_layer = 512 # layer number of features
# Store layers weight & bias
weights = [
{
'hidden_layer': tf.Variable(tf.random_normal([features_count, n_hidden_layer])),
'out': tf.Variable(tf.random_normal([n_hidden_layer, labels_count]))
},
{
'hidden_layer': tf.Variable(tf.random_normal([features_count, n2_hidden_layer])),
'out': tf.Variable(tf.random_normal([n2_hidden_layer, labels_count]))
}
]
biases = [
{
'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
'out': tf.Variable(tf.random_normal([labels_count]))
},
{
'hidden_layer': tf.Variable(tf.random_normal([n2_hidden_layer])),
'out': tf.Variable(tf.random_normal([labels_count]))
}
]
# tf Graph input
x = tf.placeholder("float", [None, image_size])
y = tf.placeholder("float", [None, labels_count])
x_flat = tf.reshape(x, [-1, features_count])
keep_prob = tf.placeholder(tf.float32) # probability to keep units
# Hidden layer with RELU activation
layer_1 = tf.add(tf.matmul(x_flat, weights[0]['hidden_layer']), biases[0]['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# layer_1 = tf.nn.dropout(layer_1, keep_prob)
# Output layer with linear activation
logits = tf.matmul(layer_1, weights[0]['out']) + biases[0]['out']
if b_mode == 1:
# Define loss and optimizer
# cost also called cross_entropy
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost, global_step=global_step)
init = tf.initialize_all_variables()
# Launch the graph
with tf.Session() as sess:
sess.run(init)
# Training cycle
for epoch in range(training_epochs):
total_batch = int(math.ceil(n_train / batch_size)) # int(n_train / batch_size)
# Loop over all batches
for i in range(total_batch):
batch_x, batch_y = next_batch(batch_size)
# Run optimization op (backprop) and cost op (to get loss value)
sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
# Display logs per epoch step
if epoch % display_step == 0:
c = sess.run(cost, feed_dict={x: batch_x, y: batch_y, keep_prob: 0.5})
print("Epoch:", '%04d' % (epoch + 1), '/', '%04d'%(training_epochs), "cost=", "{:.9f}".format(c))
# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print(" learning rate: ", sess.run(learning_rate))
print(" batch size: ", batch_size)
print(" train accuracy: ", accuracy.eval({x: batch_x, y: batch_y, keep_prob: 1.0}))
print(" test accuracy: ", accuracy.eval({x: X_test, y: test_labels, keep_prob: 1.0}))
print('')
print("Optimization Finished!")
# Test model
# train_step = tf.train.AdamOptimizer(1e-4).minimize(cost)
# Calculate accuracy
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))
print("test accuracy:", accuracy.eval({x: X_test, y: test_labels, keep_prob: 1.0}))
elif b_mode == 2:
# Launch the graph
with tf.Session() as sess:
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits, y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
sess.run(tf.initialize_all_variables())
for i in range(20000):
batch = next_batch(batch_size)
if i % 100 == 0:
train_accuracy = accuracy.eval(feed_dict={x: batch[0], y: batch[1], keep_prob: 1.0})
print("step %d, training accuracy %g" % (i, train_accuracy))
train_step.run(feed_dict={x: batch[0], y: batch[1], keep_prob: 0.5})
print("test accuracy %g" % accuracy.eval(feed_dict={x: X_test, y: test_labels, keep_prob: 1.0}))
elif a_mode == 2:
print("DO lesson_7_miniflow lab process")
# ToDo: Set the features and labels tensors
features = tf.placeholder(tf.float32)
labels = tf.placeholder(tf.float32)
# ToDo: Set the weights and biases tensors
weights = tf.Variable(tf.truncated_normal([features_count, labels_count]))
biases = tf.Variable(tf.zeros([labels_count], dtype=tf.float32))
from tensorflow.python.ops.variables import Variable
assert features._op.name.startswith('Placeholder'), 'features must be a placeholder'
assert labels._op.name.startswith('Placeholder'), 'labels must be a placeholder'
assert isinstance(weights, Variable), 'weights must be a TensorFlow variable'
assert isinstance(biases, Variable), 'biases must be a TensorFlow variable'
assert features._shape == None or ( \
features._shape.dims[0].value is None and \
features._shape.dims[1].value in [None, 1024]), 'The shape of features is incorrect'
assert labels._shape in [None, 43], 'The shape of labels is incorrect'
assert weights._variable._shape == (1024, 43), 'The shape of weights is incorrect'
assert biases._variable._shape == (43), 'The shape of biases is incorrect'
assert features._dtype == tf.float32, 'features must be type float32'
assert labels._dtype == tf.float32, 'labels must be type float32'
# Feed dicts for training, validation, and test session
train_feed_dict = {features: X_train, labels: train_labels}
valid_feed_dict = {features: valid_features, labels: valid_labels}
test_feed_dict = {features: X_test, labels: test_labels}
# self.x_flat = tf.reshape(features, [-1, image_size])
# Linear Function WX + b
logits = tf.matmul(features, weights) + biases
# From Sridhar Sampath in forums at https://carnd-udacity.atlassian.net/wiki/questions/12617346/answers/12620228
logits = -np.amax(logits)
prediction = tf.nn.softmax(logits)
# Cross entropy
# cross_entropy = -tf.reduce_sum(labels * tf.log(prediction), reduction_indices=1)
# From Vivek forum tip at:
#
# https://carnd-udacity.atlassian.net/wiki/cq/viewquestion.action?id=12617346&questionTitle=what-could-be-causing-very-low-accuracy
cross_entropy = tf.reduce_mean(
-tf.reduce_sum(labels * tf.log(tf.clip_by_value(prediction, 1e-10, 1.0)), reduction_indices=[1]))
# Training loss
loss = tf.reduce_mean(cross_entropy)
# Create an operation that initializes all variables
init = tf.initialize_all_variables()
# Test Cases
with tf.Session() as session:
session.run(init)
session.run(loss, feed_dict=train_feed_dict)
session.run(loss, feed_dict=valid_feed_dict)
session.run(loss, feed_dict=test_feed_dict)
biases_data = session.run(biases)
assert not np.count_nonzero(biases_data), 'biases must be zeros'
print('Tests Passed!')
# Determine if the predictions are correct
is_correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(labels, 1))
# Calculate the accuracy of the predictions
accuracy = tf.reduce_mean(tf.cast(is_correct_prediction, tf.float32))
print('Accuracy function created.')
# ToDo: Find the best parameters for each configuration
# Validation accuracy at 0.8085333108901978
learning_rate = 0.0001
epochs = 15
batch_size = 25
### DON'T MODIFY ANYTHING BELOW ###
# Gradient Descent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# The accuracy measured against the validation set
validation_accuracy = 0.0
# Measurements use for graphing loss and accuracy
log_batch_step = 50
batches = []
loss_batch = []
train_acc_batch = []
valid_acc_batch = []
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(X_train) / batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i + 1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i * batch_size
batch_features = X_train[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer and get loss
_, l = session.run(
[optimizer, loss],
feed_dict={features: batch_features, labels: batch_labels})
# Log every 50 batches
if not batch_i % log_batch_step:
# Calculate Training and Validation accuracy
training_accuracy = session.run(accuracy, feed_dict=train_feed_dict)
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
# Log batches
previous_batch = batches[-1] if batches else 0
batches.append(log_batch_step + previous_batch)
loss_batch.append(l)
train_acc_batch.append(training_accuracy)
valid_acc_batch.append(validation_accuracy)
# Check accuracy against Validation data
validation_accuracy = session.run(accuracy, feed_dict=valid_feed_dict)
print('Epoch {}, validation accuracy {}'.format(epoch_i, validation_accuracy))
loss_plot = plt.subplot(211)
loss_plot.set_title('Loss')
loss_plot.plot(batches, loss_batch, 'g')
loss_plot.set_xlim([batches[0], batches[-1]])
acc_plot = plt.subplot(212)
acc_plot.set_title('Accuracy')
acc_plot.plot(batches, train_acc_batch, 'r', label='Training Accuracy')
acc_plot.plot(batches, valid_acc_batch, 'b', label='Validation Accuracy')
acc_plot.set_ylim([0, 1.0])
acc_plot.set_xlim([batches[0], batches[-1]])
acc_plot.legend(loc=4)
plt.tight_layout()
plt.show()
print('Validation accuracy for [{}, {}, {}] at {}'.format(epochs, batch_size, learning_rate, validation_accuracy))
"""
Test
Set the epochs, batch_size, and learning_rate with the best learning parameters you discovered in problem 3. You're
going to test your model against your held-out dataset/testing data. This will give you a good indicator of how well
the model will do in the real world. You should have a test accuracy of at least 80%.
"""
# ToDo: Set the epochs, batch_size, and learning_rate with the best parameters from problem 3
epochs = 100
batch_size = 20
learning_rate = 0.5
### DON'T MODIFY ANYTHING BELOW ###
# The accuracy measured against the test set
test_accuracy = 0.0
with tf.Session() as session:
session.run(init)
batch_count = int(math.ceil(len(X_train) / batch_size))
for epoch_i in range(epochs):
# Progress bar
batches_pbar = tqdm(range(batch_count), desc='Epoch {:>2}/{}'.format(epoch_i + 1, epochs), unit='batches')
# The training cycle
for batch_i in batches_pbar:
# Get a batch of training features and labels
batch_start = batch_i * batch_size
batch_features = X_train[batch_start:batch_start + batch_size]
batch_labels = train_labels[batch_start:batch_start + batch_size]
# Run optimizer
_ = session.run(optimizer, feed_dict={features: batch_features, labels: batch_labels})
# Check accuracy against Test data
test_accuracy = session.run(accuracy, feed_dict=test_feed_dict)
assert test_accuracy >= 0.80, 'Test accuracy at {}, should be equal to or greater than 0.80'.format(test_accuracy)
print('Nice Job! Test Accuracy is {}'.format(test_accuracy))
| python |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 7 13:10:58 2018
@author: Joseph Bell Geoscience Australia
"""
'''
Reads a csv file and adds DGGS cells to it.
This is for placenames; the placenames data is from 2018.
A query was used to gather basic placename data into a csv, and code fields
were converted into their full-text equivalent while building the query.
This code reflects the number of decimal places in the lat and long by
adjusting the size of the DGGS box. In practice the DGGS boxes were often too
big and need adjusting, e.g. a level-5 DGGS box is too big for anything.
'''
f = r'\\xxxxxxxx\ACT_grid\ACT_Points.csv'
output = list()
import csv
from auspixdggs.auspixengine.dggs import RHEALPixDGGS
import math
from auspixdggs.auspixengine.utils import my_round
# make an instance
rdggs = RHEALPixDGGS()
# function to write a list to a csv file
# requires the list and the filename to save it to
def write_list_to_file(myList, filename):
"""Write the list to csv file."""
with open(filename, "w") as outfile:
for entries in myList:
outfile.write(entries)
# add a return after each line
outfile.write("\n")
def cleanPosition(pos):
pos = pos.replace('(', '')
pos = pos.replace(')', '')
pos = pos.replace(',', ' ')
return pos
failed = 0
myPoints = list()
myCells = list()
myHeader = "ID, Name, DGGSrHealpix, Longi, Lati"
myPoints.append(myHeader)
# open the data file
with open(f) as csvDataFile:
csvReader = csv.reader(csvDataFile)
next(csvReader) # skip the header
for row in csvReader:
# read in the latlong
longi = float(row[2])
lati = float(row[3])
# feed lat long into convertor
# Pick a (longitude-latitude) point on the ellipsoid and find the resolution cell that contains it ::
t = (longi, lati)
# set the resolution
resolution = 10
# calculate the dggs cell from long and lat ie t
thisCell = rdggs.cell_from_point(resolution, t, plane=False) # false = on the curve
# now have a dggs cell for that point
if thisCell not in myCells: # filter out cells already in there - only do the new ones
myCells.append(thisCell)
dggsCell = str(thisCell)
# find the boundary
dggsLoc = list()
for item in dggsCell: # build a dggs location cell as a list like dggsLoc = ['R', 7, 2, 4, 5, 6, 3, 2, 3, 4, 3, 8, 3]
if item.isalpha():
dggsLoc.append(item)
else:
item = int(item)
dggsLoc.append(item)
#print()
# print(dggsLoc)
c = rdggs.cell(dggsLoc)
# print (c)
bound = list() # a list for the cell boundary
#try:
for p in c.boundary(n=2, plane=False):
bound.append(p)
# prepare for shapefile output
#print(str(row[0]))
myPoints.append(
str(row[0]) + ', ' + str(row[1]) + ',' + str(thisCell) + ',' + str(p[0]) + ', ' + str(p[1]))
print (str(row[0]) + ', ' + str(row[1]) + ',' + str(thisCell) + ',' + str(p[0]) + ', ' + str(p[1]))
# NW = cleanPosition(str(bound[0]))
# #print ('NW = ', NW)
# NE = cleanPosition(str(bound[1]))
# SE = cleanPosition(str(bound[2]))
# SW = cleanPosition(str(bound[3]))
# # print(bound)
# # print()
#
# # build output
# pushout = (str(row[0]) + ',' + str(row[1]) + ',' + str(row[2]) + ',' + str(row[3]) +
# ',' + str(resolution) + ',' + str(NW) + ',' + str(NE) + ',' + str(SE) + ',' + str(SW))
# #print (pushout)
# output.append(pushout)
#except:
# print('failed', str(row[0]), p, c)
# failed += 1
# pass
# overwrites previous file unless you rename or move it
# write_list_to_file(output, r"\\xxxxxx\ACT_grid\ACT_Grid_bounds.csv")
#
write_list_to_file(myPoints, r"\\xxxxxxx\temp\PN_boundings.csv")
print('number failed = ', failed)
#
# for row in output:
#
# print(row)
#
print("finished")
| python |
# -*- coding: utf-8 -*-
# Copyright 2018 IBM.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
This module contains the definition of a base class for
feature maps, together with several commonly used approaches.
"""
from collections import OrderedDict
import copy
import itertools
import logging
import numpy as np
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.quantum_info import Pauli
from qiskit.qasm import pi
from sympy.core.numbers import NaN, Float
from qiskit.aqua import Operator
from qiskit.aqua.components.feature_maps import FeatureMap, self_product
logger = logging.getLogger(__name__)
class PauliExpansion(FeatureMap):
"""
Mapping data with a Pauli expansion (of arbitrary order) followed by entangling gates.
Refer to https://arxiv.org/pdf/1804.11326.pdf for details.
"""
CONFIGURATION = {
'name': 'PauliExpansion',
'description': 'Pauli expansion for feature map (any order)',
'input_schema': {
'$schema': 'http://json-schema.org/schema#',
'id': 'Pauli_Expansion_schema',
'type': 'object',
'properties': {
'depth': {
'type': 'integer',
'default': 2,
'minimum': 1
},
'entangler_map': {
'type': ['array', 'null'],
'default': None
},
'entanglement': {
'type': 'string',
'default': 'full',
'oneOf': [
{'enum': ['full', 'linear']}
]
},
'paulis': {
'type': ['array'],
"items": {
"type": "string"
},
'default': ['Z', 'ZZ']
}
},
'additionalProperties': False
}
}
def __init__(self, num_qubits, depth=2, entangler_map=None,
entanglement='full', paulis=['Z', 'ZZ'], data_map_func=self_product):
"""Constructor.
Args:
num_qubits (int): number of qubits
depth (int): the number of repeated circuits
entangler_map (list[list]): describes the connectivity of qubits; each entry is a
[source, target] pair, or None for full entanglement.
Note that the order in the list is the order in which
the two-qubit gates are applied.
entanglement (str): ['full', 'linear'], generate the qubit connectivity from a predefined
topology
paulis (list[str]): the Pauli strings to be used, e.g. ['Z', 'ZZ']
data_map_func (Callable): a mapping function for data x
"""
self.validate(locals())
super().__init__()
self._num_qubits = num_qubits
self._depth = depth
if entangler_map is None:
self._entangler_map = self.get_entangler_map(entanglement, num_qubits)
else:
self._entangler_map = self.validate_entangler_map(entangler_map, num_qubits)
self._pauli_strings = self._build_subset_paulis_string(paulis)
self._data_map_func = data_map_func
self._magic_num = np.nan
self._param_pos = OrderedDict()
self._circuit_template = self._build_circuit_template()
def _build_subset_paulis_string(self, paulis):
# fill out the paulis to the number of qubits
temp_paulis = []
for pauli in paulis:
len_pauli = len(pauli)
for possible_pauli_idx in itertools.combinations(range(self._num_qubits), len_pauli):
string_temp = ['I'] * self._num_qubits
for idx in range(len(possible_pauli_idx)):
string_temp[-possible_pauli_idx[idx] - 1] = pauli[-idx - 1]
temp_paulis.append(''.join(string_temp))
# drop Pauli strings that cannot be entangled under the given entangler_map.
final_paulis = []
for pauli in temp_paulis:
where_z = np.where(np.asarray(list(pauli[::-1])) != 'I')[0]
if len(where_z) == 1:
final_paulis.append(pauli)
else:
is_valid = True
for src, targ in itertools.combinations(where_z, 2):
if [src, targ] not in self._entangler_map:
is_valid = False
break
if is_valid:
final_paulis.append(pauli)
else:
logger.warning("Due to the limited entangler_map,"
" {} is skipped.".format(pauli))
logger.info("Pauli terms include: {}".format(final_paulis))
return final_paulis
def _build_circuit_template(self):
x = np.asarray([self._magic_num] * self._num_qubits)
qr = QuantumRegister(self._num_qubits, name='q')
qc = self.construct_circuit(x, qr)
for index in range(len(qc.data)):
gate_param = qc.data[index][0].params
param_sub_pos = []
for x in range(len(gate_param)):
if isinstance(gate_param[x], NaN):
param_sub_pos.append(x)
if param_sub_pos != []:
self._param_pos[index] = param_sub_pos
return qc
def _extract_data_for_rotation(self, pauli, x):
where_non_i = np.where(np.asarray(list(pauli[::-1])) != 'I')[0]
return x[where_non_i]
def _construct_circuit_with_template(self, x):
coeffs = [self._data_map_func(self._extract_data_for_rotation(pauli, x))
for pauli in self._pauli_strings] * self._depth
qc = copy.deepcopy(self._circuit_template)
data_idx = 0
for key, value in self._param_pos.items():
new_param = coeffs[data_idx]
for pos in value:
qc.data[key].params[pos] = Float(2. * new_param) # rotation angle is 2x
data_idx += 1
return qc
def construct_circuit(self, x, qr=None, inverse=False):
"""
Construct the second order expansion based on given data.
Args:
x (numpy.ndarray): 1-D to-be-transformed data.
qr (QuantumRegister): the QuantumRegister object for the circuit; if None,
generate new registers with name q.
inverse (bool): whether or not to invert the circuit
Returns:
QuantumCircuit: a quantum circuit that transforms data x.
"""
if not isinstance(x, np.ndarray):
raise TypeError("x must be numpy array.")
if x.ndim != 1:
raise ValueError("x must be 1-D array.")
if x.shape[0] != self._num_qubits:
raise ValueError("number of qubits and data dimension must be the same.")
if qr is None:
qc = self._construct_circuit_with_template(x)
else:
qc = QuantumCircuit(qr)
for _ in range(self._depth):
for i in range(self._num_qubits):
qc.u2(0, pi, qr[i])
for pauli in self._pauli_strings:
coeff = self._data_map_func(self._extract_data_for_rotation(pauli, x))
p = Pauli.from_label(pauli)
qc += Operator.construct_evolution_circuit([[coeff, p]], 1, 1, qr)
if inverse:
qc = qc.inverse()
return qc
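# Illustrative usage sketch (the data values are hypothetical):
#
#     import numpy as np
#     feature_map = PauliExpansion(num_qubits=2, depth=2, paulis=['Z', 'ZZ'])
#     circuit = feature_map.construct_circuit(np.array([0.1, 0.4]))
#
# construct_circuit returns a QuantumCircuit that encodes the 2-dimensional
# data point through the Pauli rotation terms selected above.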
| python |
from scapy.all import *
from flpr import FLPR, FLPR_PORT
from ifaces import ifaces
from util import ban_ip
# verify that the last IP in history is the receiver's IP
def ips_flpr_2(pkt):
ip = pkt[IP]
flpr = pkt[FLPR]
print("flipper message received, ID = %s, CTR = %s, LIM = %s" % (flpr.id, flpr.ctr, flpr.lim))
if not flpr.hist:
print("ATTACK DETECTED: history is empty")
ban_ip(ip.src)
elif flpr.ctr == flpr.lim:
print("scores communication")
print("message forwarded")
elif ip.dst != flpr.hist[-1]:
print("ATTACK DETECTED: last IP in history and receiver's IP not matching")
ban_ip(ip.src)
else:
print("regular message")
print("message forwarded")
print()
if __name__ == "__main__":
bind_layers(TCP, FLPR, sport=FLPR_PORT)
bind_layers(TCP, FLPR, dport=FLPR_PORT)
print("listening for FLPR on TCP port %s" % FLPR_PORT)
# intercept only incoming FLPR messages
sniff(prn=ips_flpr_2, iface=ifaces, lfilter=lambda pkt: FLPR in pkt and pkt[Ether].src != Ether().src)
| python |
#!/usr/bin/env python3
from boldui import Oplist, Expr, var, stringify_op, ProtocolServer
from boldui.framework import Clear, Column, Padding, Center, SizedBox, Rectangle, Text, Flexible
def main():
root = Clear(
color=0xff202030,
child=Column([
Padding(
Text(
'Hello, World!',
font_size=18,
color=0xffa0a0a0,
),
all=10,
),
Padding(
Center(
SizedBox(
Rectangle(0xffa0a0a0),
width=abs((var('time') % 1) - 0.5) * 50 + 100,
height=abs(((var('time') + 0.5) % 1) - 0.5) * 50 + 100,
),
),
all=10,
),
Padding(
Rectangle(
color=Expr.if_(var('height') > 600, 0xffa0a0a0, 0xff9090d0)
),
all=10
),
Flexible(
Padding(
Rectangle(
color=Expr.if_(var('width') > 800, 0xffa0a0a0, 0xffd09090)
),
all=10
),
flex_x=3,
),
]),
)
built_root = root.build()
oplist = Oplist()
size = built_root.layout(Expr(0), Expr(0), var('width'), var('height'))
scene = built_root.render(oplist, Expr(0), Expr(0), size[0], size[1])
for op in scene:
print(stringify_op(op))
server = ProtocolServer("/tmp/boldui.hello_world.sock")
server.scene = {'oplist': oplist.to_list(), 'scene': scene, 'vars': {}}
server.serve()
if __name__ == '__main__':
main()
| python |
from Services import ApiService
from Services import DBService
import logging
logger = logging.getLogger('Roby')
# This class can be viewed as an abstract factory for the adapters,
# because the data source can be different.
# Registry / service container of the classes that can adapt to different
# data sources or services (database, API, FTP, S3 bucket, etc.).
SERVICE_ADAPTERS = dict()
def register():
"""Register a Service Class that can adapt to different data sources"""
SERVICE_ADAPTERS["API"] = ApiService.ApiService
SERVICE_ADAPTERS["DB"] = DBService.DbService
class ServiceAdapter:
def __init__(self):
register()
def get_service(self, service_type):
return SERVICE_ADAPTERS[service_type]
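# Illustrative usage sketch (the "API"/"DB" keys come from the registry above):
#
#     adapter = ServiceAdapter()
#     api_service_cls = adapter.get_service("API")   # ApiService.ApiService
#     db_service_cls = adapter.get_service("DB")     # DBService.DbService
#
# get_service returns the registered class itself; instantiating it (with
# whatever constructor arguments it needs) is left to the caller.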
| python |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
import sys, signal
import tensorflow as tf
import utils as ut
from agent import Agent
from envs import create_env
logger = ut.logging.get_logger()
def train(args, server, cluster, env, queue_shapes,
trajectory_queue_size, replay_queue_size):
agent = Agent(args, server, cluster, env, queue_shapes,
trajectory_queue_size, replay_queue_size)
# Variable names that start with "local" are not saved in checkpoints.
variables_to_save = [
v for v in tf.global_variables() if not v.name.startswith("local")]
init_op = tf.variables_initializer(variables_to_save)
init_all_op = tf.global_variables_initializer()
saver = ut.tf.FastSaver(variables_to_save)
var_list = tf.get_collection(
tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)
logger.info('Trainable vars:')
for v in var_list:
logger.info(' %s %s', v.name, v.get_shape())
def init_fn(ses):
logger.info("Initializing all parameters.")
ses.run(init_all_op)
devices = ["/job:ps"]
if args.task == 0:
devices += ["/job:worker/task:{}/gpu:0".format(args.task),
"/job:worker/task:{}/cpu:0".format(args.task)]
elif args.task == 1:
devices += ["/job:worker/task:{}/gpu:{}".format(args.task, 1 if args.num_gpu > 1 else 0),
"/job:worker/task:{}/cpu:0".format(args.task)]
else:
devices += ["/job:worker/task:{}/cpu:0".format(args.task)]
config = tf.ConfigProto(device_filters=devices, allow_soft_placement=True)
logger.info("Events directory: %s_%s", args.load_path, args.task)
summary_writer = tf.summary.FileWriter(
"{}_{}".format(args.load_path, args.task))
agent.summary_writer = summary_writer
uninitialized_variables = tf.report_uninitialized_variables(variables_to_save)
if args.task == 1 and args.loss == 'gan':
local_init_op = tf.variables_initializer(agent.local_disc.var_list)
else:
local_init_op = None
sv = tf.train.Supervisor(
is_chief=args.task == 0,
logdir=str(args.load_path),
saver=saver,
summary_op=None,
init_op=init_op,
init_fn=init_fn,
local_init_op=local_init_op,
summary_writer=summary_writer,
# very useful when sv.managed_session hangs
#ready_op=tf.constant([], dtype=tf.string),
ready_op=uninitialized_variables,
global_step=agent.policy_step,
save_model_secs=30,
save_summaries_secs=30)
num_policy_steps = 100000000
logger.info(
"Starting session. If this hangs, we're mostly likely waiting"
" to connect to the parameter server. One common cause is that"
" the parameter server DNS name isn't resolving yet, or is misspecified.")
with sv.managed_session(server.target, config=config) as sess, \
sess.as_default():
###############################
# Run thread
###############################
if args.task == 1 and args.loss == 'gan':
# master_disc ->local_disc
sess.run(agent.disc_initializer)
agent.start_replay_thread(sess, summary_writer)
elif args.task >= 1:
sess.run(agent.policy_sync)
agent.start_worker_thread(sess, summary_writer)
policy_step = sess.run(agent.policy_step)
logger.info("Starting training at step=%d", policy_step)
while not sv.should_stop() and ( \
not num_policy_steps or policy_step < num_policy_steps):
if args.task == 0:
agent.train_policy(sess)
elif args.task == 1 and args.loss == 'gan':
# local_disc -> master_disc
sess.run(agent.disc_sync)
agent.train_gan(sess)
else:
sess.run(agent.policy_sync)
policy_step = sess.run(agent.policy_step)
# Ask for all the services to stop.
sv.stop()
logger.info('reached %s steps. worker stopped.', policy_step)
| python |
def sortedSquaredArray(array):
# Two-pointer approach: the input is sorted, so the value with the largest
# absolute value (and hence the largest square) is always at one of the two
# ends. Fill the output from the back and move the matching pointer inwards.
sortedSquared = [0 for _ in array]
smallIndex = 0
largeIndex = len(array) - 1
for idx in reversed(range(len(array))):
    if abs(array[smallIndex]) > abs(array[largeIndex]):
        sortedSquared[idx] = array[smallIndex] * array[smallIndex]
        smallIndex += 1
    else:
        sortedSquared[idx] = array[largeIndex] * array[largeIndex]
        largeIndex -= 1
return sortedSquared
print(sortedSquaredArray([-5,-4,-3,-2,0,2,4,5]))
| python |
import cv2
import os
import scipy as scp
import scipy.misc
import numpy as np
def triangStats(img, noHoles = False, minPercArea = 0.1):
imggray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, imbw = cv2.threshold(imggray, 10, 255, 0)
_, contours, _ = cv2.findContours(imbw, 1, 2)
maxArea = 0;
Ax = Ay = Bx = By = Cx = Cy = 0
areaCnt = 0
maxCnt = None
idx = -1
for cnt in contours:
idx += 1
retval, triangle = cv2.minEnclosingTriangle(cnt)
if (triangle is None):
continue
areaCnt = cv2.contourArea(cnt)
if (areaCnt <= maxArea):
continue
maxArea = areaCnt
maxCnt = idx
Ax = triangle[0][0][0]
Ay = triangle[0][0][1]
Bx = triangle[1][0][0]
By = triangle[1][0][1]
Cx = triangle[2][0][0]
Cy = triangle[2][0][1]
if (maxArea <= minPercArea * imggray.shape[0] * imggray.shape[1]):
return False, None, None, None, None
v1x = 0
v1y = 0
v2x = 0
v2y = 0
v3x = 0
v3y = 0
imgCnt = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
mask = np.zeros((img.shape[0], img.shape[1], 3), np.uint8)
cv2.drawContours(mask, contours, maxCnt, color=(255, 255, 255), thickness=cv2.FILLED)
color = [0, 0, 0]
contActivePixels = 0
valret = True
for i in range(mask.shape[0]):
for j in range(mask.shape[1]):
if (mask[i, j, 0] == 255 and mask[i, j, 1] == 255 and mask[i, j, 2] == 255):
if(img[i, j, 0] != 0 or img[i, j, 1] != 0 or img[i, j, 2] != 0):
contActivePixels+=1
if (color[0] == 0 and color[1] == 0 and color[2] == 0):
color[0] = int(img[i][j][0])
color[1] = int(img[i][j][1])
color[2] = int(img[i][j][2])
else:
if (img[i][j][0] != color[0] or img[i][j][1] != color[1] or img[i][j][2] != color[2]):
if (noHoles or (img[i][j][0] != 0 or img[i][j][1] != 0 or img[i][j][2] != 0)):
valret = False
if(valret == False):
return False, None, None, None, None
cv2.drawContours(imgCnt, contours, maxCnt, color=color, thickness=cv2.FILLED)
if (Cy < By and Cy < Ay):
v1y = Cy
v1x = Cx
if (Ax < Bx):
v2x = Ax
v2y = Ay
v3x = Bx
v3y = By
else:
v2x = Bx
v2y = By
v3x = Ax
v3y = Ay
elif (By < Cy and By < Ay):
v1y = By
v1x = Bx
if (Ax < Cx):
v2x = Ax
v2y = Ay
v3x = Cx
v3y = Cy
else:
v2x = Cx
v2y = Cy
v3x = Ax
v3y = Ay
else:
v1y = Ay
v1x = Ax
if (Bx < Cx):
v2x = Bx
v2y = By
v3x = Cx
v3y = Cy
else:
v2x = Cx
v2y = Cy
v3x = Bx
v3y = By
# (x,y),radius = cv2.minEnclosingCircle(cnt)
triangleArea = abs((v2x * (v1y - v3y) + v1x * (v3y - v2y) + v3x * (v2y - v1y)) / 2)
# print(f"({v1x},{v1y}) ({v2x},{v2y}) ({v3x},{v3y}) {maxArea} {triangleArea}")
# a=input('pare')
# center = (int(x),int(y))
# radius = int(radius)
# cv2.circle(img,center,radius,(255,255,0),2)
#desc = [maxArea / triangleArea, 0 if v3y - v1y == 0 else (v2y - v1y) / (v3y - v1y),
#1 if v1x - v2x > 0 and v3x - v1x > 0 else 0, np.rad2deg(np.arctan( abs(v3y-v2y) / (v3x - v2x)))]
if triangleArea == 0 or (v3x - v2x) == 0:
return False, None, None, None, None
desc = [contActivePixels/triangleArea, np.rad2deg(np.arctan(abs(v3y - v2y) / (v3x - v2x))), 1 if v1x - v2x > 0 and v3x - v1x > 0 else 0 ]
return True, np.array([desc]),contActivePixels/(imggray.shape[0] * imggray.shape[1]), imgCnt, color
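# Illustrative usage sketch (the image path is hypothetical):
#
#     img = cv2.imread('segment.png')
#     ok, descriptor, area_fraction, contour_img, color = triangStats(img)
#     if ok:
#         # descriptor is [active pixels / enclosing-triangle area,
#         #                inclination of the base edge in degrees,
#         #                flag: apex x lies between the base vertices]
#         print(descriptor, area_fraction, color)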
| python |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The errors in the conformance process."""
class BlockConformanceFailure(Exception):
def __init__(
self, start_line_number, validated_lines, block_number, file_number,
node, rows):
super(BlockConformanceFailure, self).__init__()
self.start_line_number = start_line_number
self.validated_lines = validated_lines
self.block_number = block_number
self.file_number = file_number
self.node = node
self.rows = rows
def __str__(self):
return (
'Block %d starting on row %d in file number %d is non-conformant.\n\n'
'First invalid row: %d (row %d in input file).\n\n'
'Expected structure:\n%s\n'
'Actual structure:\n%s\n' % (
self.block_number, self.start_line_number, self.file_number,
self.validated_lines + 1,
self.start_line_number + self.validated_lines + 1,
self.node, [str(row.type) for row in self.rows]))
class CardinalityFailure(Exception):
def __init__(self, block_number, file_number, error):
super(CardinalityFailure, self).__init__()
self.block_number = block_number
self.file_number = file_number
self.error = error
def __str__(self):
return ('Block number %s in the file number %s is not conformant (error= %s'
'.).' % (self.block_number, self.file_number, self.error))
| python |
# Generated by Django 2.1.7 on 2019-09-20 15:49
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Fav',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='Thing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200, validators=[django.core.validators.MinLengthValidator(2, 'Title must be greater than 2 characters')])),
('text', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('favorites', models.ManyToManyField(related_name='favorite_things', through='favs.Fav', to=settings.AUTH_USER_MODEL)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='fav_thing_owner', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='fav',
name='thing',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='favs.Thing'),
),
migrations.AddField(
model_name='fav',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='favs_users', to=settings.AUTH_USER_MODEL),
),
migrations.AlterUniqueTogether(
name='fav',
unique_together={('thing', 'user')},
),
]
| python |
import os
import sys
from collections import defaultdict, OrderedDict
from geodata.encoding import safe_decode, safe_encode
from geodata.i18n.unicode_paths import DATA_DIR
from geodata.text.normalize import normalized_tokens, normalize_string
from geodata.text.tokenize import tokenize, token_types
from geodata.text.phrases import PhraseFilter
from marisa_trie import BytesTrie
DICTIONARIES_DIR = os.path.join(DATA_DIR, 'dictionaries')
PREFIX_KEY = u'\x02'
SUFFIX_KEY = u'\x03'
POSSIBLE_ROMAN_NUMERALS = set(['i', 'ii', 'iii', 'iv', 'v', 'vi', 'vii', 'viii', 'ix',
'x', 'xi', 'xii', 'xiii', 'xiv', 'xv', 'xvi', 'xvii', 'xviii', 'xix',
'xx', 'xxx', 'xl', 'l', 'lx', 'lxx', 'lxxx', 'xc',
'c', 'cc', 'ccc', 'cd', 'd', 'dc', 'dcc', 'dccc', 'cm',
'm', 'mm', 'mmm', 'mmmm'])
PHRASE = 'PHRASE'
class DictionaryPhraseFilter(PhraseFilter):
def __init__(self, *dictionaries):
self.dictionaries = dictionaries
self.canonicals = {}
def serialize(self, s):
return s
def deserialize(self, s):
return s
def configure(self, base_dir=DICTIONARIES_DIR):
kvs = defaultdict(OrderedDict)
for lang in os.listdir(DICTIONARIES_DIR):
for filename in self.dictionaries:
is_suffix_dictionary = 'suffixes' in filename
is_prefix_dictionary = 'prefixes' in filename
dictionary_name = filename.split('.', 1)[0]
path = os.path.join(DICTIONARIES_DIR, lang, filename)
if not os.path.exists(path):
continue
for line in open(path):
line = line.strip()
if not line:
continue
phrases = safe_decode(line).split(u'|')
if not phrases:
continue
canonical = phrases[0]
canonical_normalized = normalize_string(canonical)
self.canonicals[(canonical, lang, dictionary_name)] = phrases[1:]
for i, phrase in enumerate(phrases):
if phrase in POSSIBLE_ROMAN_NUMERALS:
continue
is_canonical = normalize_string(phrase) == canonical_normalized
if is_suffix_dictionary:
phrase = SUFFIX_KEY + phrase[::-1]
elif is_prefix_dictionary:
phrase = PREFIX_KEY + phrase
kvs[phrase][(lang, dictionary_name, canonical)] = is_canonical
kvs = [(k, '|'.join([l, d, str(int(i)), safe_encode(c)])) for k, vals in kvs.iteritems() for (l, d, c), i in vals.iteritems()]
self.trie = BytesTrie(kvs)
self.configured = True
def search_substring(self, s):
if len(s) == 0:
return None, 0
for i in xrange(len(s) + 1):
if not self.trie.has_keys_with_prefix(s[:i]):
i -= 1
break
if i > 0:
return (self.trie.get(s[:i]), i)
else:
return None, 0
def search_suffix(self, token):
suffix_search, suffix_len = self.search_substring(SUFFIX_KEY + token[::-1])
if suffix_len > 0:
suffix_len -= len(SUFFIX_KEY)
return suffix_search, suffix_len
def search_prefix(self, token):
prefix_search, prefix_len = self.search_substring(PREFIX_KEY + token)
if prefix_len > 0:
prefix_len -= len(PREFIX_KEY)
return prefix_search, prefix_len
def basic_filter(self, tokens):
return super(DictionaryPhraseFilter, self).filter(tokens)
def filter(self, tokens):
for p, t, data in self.basic_filter(tokens):
if not p:
t, c = t
token = t
token_len = len(token)
suffix_search, suffix_len = self.search_suffix(token)
if suffix_search and self.trie.get(token[(token_len - suffix_len):].rstrip('.')):
yield ([(t, c)], PHRASE, suffix_len, map(safe_decode, suffix_search))
continue
prefix_search, prefix_len = self.search_prefix(token)
if prefix_search and self.trie.get(token[:prefix_len]):
yield ([(t, c)], PHRASE, prefix_len, map(safe_decode, prefix_search))
continue
else:
c = PHRASE
yield t, c, len(t), map(safe_decode, data)
STREET_TYPES_DICTIONARIES = ('street_types.txt',
'directionals.txt',
'concatenated_suffixes_separable.txt',
'concatenated_suffixes_inseparable.txt',
'concatenated_prefixes_separable.txt',
'organizations.txt',
'people.txt',
'personal_suffixes.txt',
'personal_titles.txt',
'qualifiers.txt',
'stopwords.txt',)
GIVEN_NAME_DICTIONARY = 'given_names.txt'
SURNAME_DICTIONARY = 'surnames.txt'
NAME_DICTIONARIES = (GIVEN_NAME_DICTIONARY,
SURNAME_DICTIONARY,)
NAME_ABBREVIATION_DICTIONARIES = STREET_TYPES_DICTIONARIES + ('academic_degrees.txt',
'building_types.txt',
'company_types.txt',
'place_names.txt',
'qualifiers.txt',
'synonyms.txt',
'toponyms.txt',
)
UNIT_ABBREVIATION_DICTIONARIES = ('level_types.txt',
'post_office.txt',
'unit_types.txt',
)
ALL_ABBREVIATION_DICTIONARIES = STREET_TYPES_DICTIONARIES + \
NAME_ABBREVIATION_DICTIONARIES + \
UNIT_ABBREVIATION_DICTIONARIES + \
('no_number.txt', 'nulls.txt',)
_gazetteers = []
def create_gazetteer(*dictionaries):
g = DictionaryPhraseFilter(*dictionaries)
_gazetteers.append(g)
return g
street_types_gazetteer = create_gazetteer(*STREET_TYPES_DICTIONARIES)
names_gazetteer = create_gazetteer(*NAME_ABBREVIATION_DICTIONARIES)
unit_types_gazetteer = create_gazetteer(*UNIT_ABBREVIATION_DICTIONARIES)
street_and_unit_types_gazetteer = create_gazetteer(*(STREET_TYPES_DICTIONARIES + UNIT_ABBREVIATION_DICTIONARIES))
abbreviations_gazetteer = create_gazetteer(*ALL_ABBREVIATION_DICTIONARIES)
given_name_gazetteer = create_gazetteer(GIVEN_NAME_DICTIONARY)
def init_gazetteers():
for g in _gazetteers:
g.configure()
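# Illustrative usage sketch (assumes normalized_tokens yields the
# (token, token_class) pairs consumed by PhraseFilter):
#
#     init_gazetteers()
#     for tokens, token_class, length, data in street_types_gazetteer.filter(
#             normalized_tokens(u'123 Main St')):
#         if token_class == PHRASE:
#             print(tokens, data)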
| python |
#!/usr/bin/python3
"""cleaning gentx data
"""
import json
import logging
# ## Preface: python data tools
import pandas as pd
log = logging.getLogger(__name__)
BORING = ['Verified', 'Task Type', 'Event', 'Submission Link']
def main(argv, stdout, cwd):
log.info('versions: %s', dict(pandas=pd.__version__))
[portal_export, dest] = argv[1:3]
tasks = load(cwd / portal_export)
tasks = extract(tasks)
save(tasks, cwd / dest, stdout)
def load(path):
# ## Sumitted Tasks
#
# exported from the portal
task_export = pd.read_csv(path,
parse_dates=['Last Date Updated'])
task_export = task_export[
task_export.Task == 'Create and submit gentx - Metering ']
# log.info('portal update: %s', task_export.dtypes)
# one completed task per participant
tasks = mark_dups(task_export.set_index('TaskBoardID'))
log.info('tasks:\n%s',
tasks.drop(['Status', 'Verified', 'Task Type', 'Event',
'Submission Link'], axis=1).sort_values(
'Last Date Updated').tail())
# ## Clean up markup
#
# The portal exports with newline as `<br>`.
# log.info(
# tasks[tasks['Submission Link'].str.contains('<br />')
# .fillna(False)][['Submission Link']].head(8))
return tasks
# +
def mark_dups(df,
key='Discord ID'):
"""one per participant"""
df = df.sort_values([key, 'Last Date Updated'])
dupd = df.duplicated([key], keep='last')
df.loc[dupd, 'Status'] = 'Obsolete'
dups = df[df.Status == 'Obsolete'].reset_index().drop(
BORING, axis=1)
log.warning('dropping dups by %s:\n%s', key,
dups[['TaskBoardID', 'Discord ID', 'Moniker']])
log.info('tasks: %s',
(dict(submissions_all=len(df), deduped=len(df) - len(dups),
dups=len(dups))))
return df
# +
def nobr(data):
return data.str.replace('<br />', '')
def tryjson(txt):
try:
return json.loads(txt)
except Exception as ex:
return ex
def extract(tasks):
tasks['gentx'] = nobr(tasks['Submission Link']).apply(
lambda txt: tryjson(txt))
tasks['jsonErr'] = tasks.gentx.apply(lambda v: isinstance(v, Exception))
# print(json.dumps(tasks.gentx.iloc[0], indent=2))
tasks['moniker'] = tasks.gentx.apply(
lambda v: None if isinstance(v, Exception)
else v['body']['messages'][0]['description']['moniker'])
tasks['delegator_address'] = tasks.gentx.apply(
lambda v: None if isinstance(v, Exception)
else v['body']['messages'][0]['delegator_address'])
log.warning('JSON errors:\n%s',
tasks[['Discord ID', 'Moniker', 'jsonErr']][tasks.jsonErr])
dup_moniker = tasks[tasks.Status == 'Completed'].sort_values('Moniker')
dup_moniker = dup_moniker[dup_moniker.duplicated('Moniker')]
dup_moniker = dup_moniker.drop(columns=BORING)
if len(dup_moniker):
log.warning('Duplicate Monikers?\n%s', dup_moniker)
# ## No gentx with >50 BLD
tasks['amount'] = tasks[~tasks.jsonErr].gentx.apply(
lambda g: g['body']['messages'][0]['value']['amount']
).astype('float') / 1000000.0
over50 = tasks[tasks.amount > 50]
if len(over50):
log.warning('No gentx with >50 BLD\n%s', over50)
return tasks
def save(tasks, dest, stdout):
# alljson = json.dumps([tx for tx in tasks.gentx.values], indent=2)
# (_home() / 'Desktop' / 'genesis.json').open('w').write(alljson)
dest.mkdir(parents=True, exist_ok=True)
# ## separate files
ok = tasks[(tasks.Status == 'Completed') &
~tasks.jsonErr]
for ix, info in ok[['gentx']].reset_index().iterrows():
path = dest / f'gentx{ix}.json'
json.dump(info.gentx, path.open('w'))
tasks = tasks.sort_values(['Discord ID', 'Last Date Updated'])
tasks[['Discord ID', 'Moniker', 'Status', 'jsonErr',
'Last Date Updated', 'moniker', 'delegator_address']].reset_index().to_csv(stdout)
def _more_checks():
# ## duplicate pubkeys
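# NOTE: exploratory scratch checks, not invoked by main(); this block
# references a _home() helper (among other names) that is not defined in
# this module, so it will not run as-is.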
filestuff = [json.load(p.open()) for p in (_home() / 'Desktop' / 'gentx3').iterdir()]
len(gentxs)
df = pd.DataFrame(pd.Series(filestuff), columns=['gentx'])
df['pubkey'] = df.gentx.apply(lambda g: g['body']['messages'][0]['pubkey']['key'])
df['moniker'] = df.gentx.apply(lambda g: g['body']['messages'][0]['description']['moniker'])
df = df.set_index('pubkey')
df.head()
df.loc[df.index.duplicated()]
df['rate'] = df.gentx.apply(lambda g: g['body']['messages'][0]['commission']['rate'])
df['max_rate'] = df.gentx.apply(lambda g: g['body']['messages'][0]['commission']['max_rate'])
df[['moniker', 'rate', 'max_rate']]
df[['moniker', 'rate', 'max_rate']][df.max_rate <= df.rate]
if __name__ == '__main__':
def _script():
from sys import argv, stdout, stderr
from pathlib import Path
logging.basicConfig(
level=logging.INFO, stream=stderr,
format='%(asctime)s %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
main(argv[:], stdout, cwd=Path('.'))
_script()
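# Illustrative invocation (file and directory names are hypothetical):
#
#     python clean_gentx.py portal_export.csv gentx_out/ > gentx_summary.csv
#
# argv[1] is the portal CSV export, argv[2] is the directory that receives one
# gentx<N>.json per accepted submission, and the per-participant summary table
# is written to stdout.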
| python |
from . import startup
from . import models
from . import services
from fastapi.responses import JSONResponse
import json
import random
app = startup.app
redisDb = startup.redisDb
card_collection = models.CardCollection(startup.source)
@app.get("/decks")
def get_decks():
"""
Returns all deck types
"""
decks = models.DeckType.get_me()
return JSONResponse(content=decks)
@app.get("/all-cards/from-deck/{from_deck}")
def get_all_cards_from_deck(from_deck: models.DeckType):
"""
Return all cards in a given deck
"""
cards = [
item for item in card_collection.values() if item["from_deck"] == from_deck
]
response = json.loads(models.CardResponse(cards=cards).json())
return JSONResponse(content=response)
@app.get("/all-cards")
def get_all_cards():
"""
Return all cards
"""
cards = [item for item in card_collection.values()]
response = json.loads(models.CardResponse(cards=cards).json())
return JSONResponse(content=response)
@app.get("/draw", response_model=models.Card)
def draw():
"""
Randomly Draw a Card
"""
card = random.choice(list(card_collection.values()))
return card
@app.get("/draw/from-deck/{deck_type}", response_model=models.Card)
def draw_from_deck(deck_type: models.DeckType):
"""
Randomly Draw a Card from a specified deck
"""
deck = list(
filter(lambda x: x["from_deck"] == deck_type, list(card_collection.values()))
)
card = random.choice(deck)
return card
@app.get("/get-current-session")
def get_current_session():
current_session = redisDb.get("Current")
if current_session is not None:
    current_session = current_session.decode("utf-8")
return JSONResponse(
content={
"data": current_session,
}
)
@app.post("/start")
def start(request: models.StartRequest):
if redisDb.get("Current"):
return JSONResponse(
content={
"success": False,
"data": "There's currently an existing session.",
}
)
sid = "SESH-" + services.generate_id(4)
redisDb.set("Current", sid)
session = {"PL-" + services.generate_id(3): i for i in request.players}
redisDb.hmset(sid, session)
return JSONResponse(
content={
"success": True,
"sid": sid,
"pids": session,
}
)
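# Example request with hypothetical values: POST /start with body {"players": ["ann", "bob"]}
# should return {"success": true, "sid": "SESH-XXXX", "pids": {"PL-AAA": "ann", "PL-BBB": "bob"}}.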
@app.post("/end")
def end(request: models.EndRequest):
currentSession = redisDb.get("Current")
if not currentSession:
return JSONResponse(
content={
"success": False,
"data": "There are no sessions.",
}
)
redisDb.delete("Current")
redisDb.delete(request.sid)
return JSONResponse(
content={
"success": True,
"data": "Succesfully ended existing session.",
}
)
| python |
import sys
input = lambda: sys.stdin.readline().rstrip()
n, m = map(int, input().split())
s = {input() for _ in range(n)}
l = []
for _ in range(m):
t = input()
if t in s:
s.remove(t)
l.append(t)
l.sort()
print(len(l), *l, sep="\n")
 | python |
"""
Backward compatible behaviour with a primary key 'Id' and upper-case field names
"""
from django.db import models
class User(models.Model):
username = models.CharField(max_length=80)
last_name = models.CharField(max_length=80)
first_name = models.CharField(max_length=40, null=True, blank=True)
email = models.EmailField()
is_active = models.BooleanField(default=False)
class Lead(models.Model):
company = models.CharField(max_length=255)
last_name = models.CharField(max_length=80)
owner = models.ForeignKey(User, on_delete=models.DO_NOTHING)
| python |
from flask import render_template, jsonify, request, session, redirect
from dataclasses import Test
import forms
from werkzeug.exceptions import HTTPException
def home():
return render_template("home.html")
def error(e):
return render_template("error.html", error_num=e.code if isinstance(e, HTTPException) else 500, error_txt=str(e).split(": ", 1)[1])
| python |
# Create your models here.
from django.db import models
from Graphic_reporter.models import Image
class Published_Article(models.Model):
slug = models.CharField(max_length=140)
title = models.CharField(max_length=140)
description = models.CharField(max_length=140)
body = models.TextField()
publishedtime = models.DateTimeField(auto_now_add=True)
images = models.ManyToManyField('Graphic_reporter.Image')
def __str__(self):
        return 'Published Article: ' + self.slug
 | python |
# -*- coding: utf-8 -*-
"""Setup file for easy installation"""
from os.path import join, dirname
from setuptools import setup
version = __import__('social_auth').__version__
LONG_DESCRIPTION = """
Django Social Auth is an easy to setup social authentication/registration
mechanism for Django projects.
Crafted using base code from django-twitter-oauth_ and django-openid-auth_,
implements a common interface to define new authentication providers from
third parties.
"""
def long_description():
"""Return long description from README.rst if it's present
because it doesn't get installed."""
try:
return open(join(dirname(__file__), 'README.rst')).read()
except IOError:
return LONG_DESCRIPTION
setup(name='django-social-auth',
version=version,
author='Matías Aguirre',
author_email='[email protected]',
description='Django social authentication made simple.',
license='BSD',
keywords='django, openid, oauth, social auth, application',
url='https://github.com/omab/django-social-auth',
packages=['social_auth',
'social_auth.backends',
'social_auth.backends.contrib',
'social_auth.backends.pipeline',
'social_auth.db'],
package_data={'social_auth': ['locale/*/LC_MESSAGES/*']},
long_description=long_description(),
install_requires=['django>=1.2.5',
'oauth2>=1.5.167',
'python_openid>=2.2'],
classifiers=['Framework :: Django',
'Development Status :: 4 - Beta',
'Topic :: Internet',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Environment :: Web Environment',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7'])
| python |
from django.urls import path, include
from . import views
urlpatterns = [
# Home Page URLs
path('', views.home, name="home"),
    path('logout/', views.logoutUser, name="logout"),
path('about/', views.about, name="about"),
# Registrations
path('customer-registration/', views.cusRegister,
name="customer-registration"),
path('restaurant-registration/', views.resRegister,
name="restaurant-registration"),
# login Pages
path('res-login/', views.reslogin, name="reslogin"),
path('cus-login/', views.cuslogin, name="cuslogin"),
path('restaurant/', include('restaurants.urls')),
path('customer/', include('customers.urls')),
]
| python |
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class VarSet:
@abc.abstractmethod
def var_names(self, model):
pass
class VarSetFromSubH(VarSet):
"""Creates a VarSet from SubH instances specified in a model.
Args:
label (:class:`VarSet`): VarSet instance
model (:class:`Model`): Model instance
"""
def __init__(self, label):
super(VarSetFromSubH, self).__init__()
self.label = label
def var_names(self, model): # take model here
return model.namespaces[0][self.label]
class VarSetFromVarLabels(VarSet):
"""Creates a VarSet from a provided list of variables.
Args:
var_list: a list of variables
model (:class:`Model`): Model instance
"""
def __init__(self, var_list):
super(VarSetFromVarLabels, self).__init__()
self.var_list = var_list
def var_names(self, model):
return set(
var.label for var in self.var_list
)
class VarSetFromLabels(VarSet):
"""Creates a VarSet from a provided list of variables.
Args:
var_list: a list of variables
model (:class:`Model`): Model instance
"""
def __init__(self, labels):
super(VarSetFromLabels, self).__init__()
self.labels = labels
def var_names(self, model):
return set(
label for label in self.labels
)
class AndVars(VarSet):
"""Creates a VarSet that contains the intersection of two VarSet instances.
Args:
set_a (:class:`VarSet`): VarSet instance
set_b (:class:`VarSet`): VarSet instance
model (:class:`Model`): Model instance
Examples:
In this example, two VarSet instances are created from the SubH class. AndVars
provides the common variable namespaces between these two sets.
>>> from pyqubo import SubH, VarSetFromSubH, Binary, AndVars
>>> a, b, c = Binary("a"), Binary("b"), Binary("c")
>>> exp = (SubH(a + b, 'n1') + SubH(b + c, 'n2'))**2
>>> model = exp.compile()
>>> set_x = VarSetFromSubH('n1')
>>> set_y = VarSetFromSubH('n2')
>>> set_z = AndVars(set_x, set_y)
>>> set_z.var_names(model)
{'b'}
"""
def __init__(self, set_a, set_b):
super(AndVars, self).__init__()
self.set_a = set_a
self.set_b = set_b
def var_names(self, model):
return self.set_a.var_names(model) & self.set_b.var_names(model)
class OrVars(VarSet):
"""Creates a VarSet that contains the union of two VarSet instances.
Args:
set_a (:class:`VarSet`): VarSet instance
set_b (:class:`VarSet`): VarSet instance
model (:class:`Model`): Model instance
Examples:
In this example, two VarSet instances are created from the SubH class. OrVars
provides all namespaces contained in these two sets.
>>> from pyqubo import SubH, VarSetFromSubH, Binary, OrVars
>>> a, b, c = Binary("a"), Binary("b"), Binary("c")
>>> exp = (SubH(a + b, 'n1') + SubH(b + c, 'n2'))**2
>>> model = exp.compile()
>>> set_x = VarSetFromSubH('n1')
>>> set_y = VarSetFromSubH('n2')
>>> set_z = OrVars(set_x, set_y)
>>> set_z.var_names(model) #doctest: +SKIP
{'a', 'b', 'c'}
"""
def __init__(self, set_a, set_b):
super(OrVars, self).__init__()
self.set_a = set_a
self.set_b = set_b
def var_names(self, model):
return self.set_a.var_names(model) | self.set_b.var_names(model)
| python |
from mmdet.models.necks.fpn import FPN
from .second_fpn import SECONDFPN
from .second_fpn_ran import SECONDFPN_RAN
from .second_fpn_mask import SECONDFPNMASK
__all__ = ['FPN', 'SECONDFPN', 'SECONDFPN_RAN', 'SECONDFPNMASK']
| python |
import time
import datetime as dt
date = {}
date["Future"] = dt.datetime.now() + dt.timedelta(seconds = 10)
if date["Future"] <= dt.datetime.now():
print("Succ\n") | python |
import re
examples1 = {
"2x3x4": 58,
"1x1x10": 43
}
examples2 = {
"2x3x4": 34,
"1x1x10": 14
}
def day2a(test=False):
if test:
        inputs = examples1
else:
inputs = open("d2.txt", "r").read().strip().split("\n")
real_total = 0
for item in inputs:
wayall = 0
bl, bw, bh = re.match("^([0-9]+)x([0-9]+)x([0-9]+)$", item).groups()
bl = int(bl)
bw = int(bw)
bh = int(bh)
ar1 = 2 * bl * bw
ar2 = 2 * bw * bh
ar3 = 2 * bh * bl
total = ar1 + ar2 + ar3
litems = sorted([bl, bw, bh], reverse=True)
sm1 = litems.pop()
sm2 = litems.pop()
total += (sm1 * sm2)
wayall += total
if test:
print(wayall)
print(wayall == examples1[item])
else:
real_total += wayall
if not test:
print(real_total)
def day2b(test=False):
if test:
inputs = examples2
else:
inputs = open("d2.txt", "r").read().strip().split("\n")
real_total = 0
for item in inputs:
wayall = 0
bl, bw, bh = re.match("^([0-9]+)x([0-9]+)x([0-9]+)$", item).groups()
bl = int(bl)
bw = int(bw)
bh = int(bh)
bow = bl * bw * bh
litems = sorted([bl, bw, bh], reverse=True)
sm1 = litems.pop() * 2
sm2 = litems.pop() * 2
wayall += bow + sm1 + sm2
if test:
print(wayall)
print(wayall == examples2[item])
else:
real_total += wayall
if not test:
print(real_total)
day2a()
day2b()
| python |
# Copyright (c) OpenMMLab. All rights reserved.
import os
import time
from mmdet.datasets import DATASETS
from .base_sot_dataset import BaseSOTDataset
@DATASETS.register_module()
class UAV123Dataset(BaseSOTDataset):
"""UAV123 dataset of single object tracking.
The dataset is only used to test.
"""
def __init__(self, *args, **kwargs):
"""Initialization of SOT dataset class."""
super().__init__(*args, **kwargs)
def load_data_infos(self, split='test'):
"""Load dataset information.
Args:
split (str, optional): Dataset split. Defaults to 'test'.
Returns:
list[dict]: The length of the list is the number of videos. The
inner dict is in the following format:
{
'video_path': the video path
'ann_path': the annotation path
'start_frame_id': the starting frame number contained
in the image name
'end_frame_id': the ending frame number contained in
the image name
'framename_template': the template of image name
}
"""
print('Loading UAV123 dataset...')
start_time = time.time()
data_infos = []
data_infos_str = self.loadtxt(
self.ann_file, return_array=False).split('\n')
# the first line of annotation file is a dataset comment.
for line in data_infos_str[1:]:
# compatible with different OS.
line = line.strip().replace('/', os.sep).split(',')
data_info = dict(
video_path=line[0],
ann_path=line[1],
start_frame_id=int(line[2]),
end_frame_id=int(line[3]),
framename_template='%06d.jpg')
data_infos.append(data_info)
print(f'UAV123 dataset loaded! ({time.time()-start_time:.2f} s)')
return data_infos
| python |
# -*- coding: utf-8 -*-
informe_temp_atual = float(input("informe a temperatura atual: "))
if (informe_temp_atual > 0) and (informe_temp_atual <= 15):
print ("Muito frio")
elif (informe_temp_atual >= 16) and (informe_temp_atual <= 23):
print ("Frio")
elif (informe_temp_atual >= 24) and (informe_temp_atual <= 26):
print ("Agradavel")
elif (informe_temp_atual >= 27) and (informe_temp_atual <= 30):
print ("Calor")
elif (informe_temp_atual >= 31):
print ("Muito Quente")
| python |
def merge_sort(arr):
if len(arr) < 2:
return arr
# divide into 2 half
divider = len(arr) // 2
arr1 = merge_sort(arr[0:divider])
arr2 = merge_sort(arr[divider:])
return merge(arr1, arr2)
def merge(arra, arrb):
i = j = 0
merge_list = []
while i < len(arra) and j < len(arrb):
if arra[i] < arrb[j]:
merge_list.append(arra[i])
i += 1
else:
merge_list.append(arrb[j])
j += 1
while i < len(arra):
merge_list.append(arra[i])
i += 1
while j < len(arrb):
merge_list.append(arrb[j])
j += 1
return merge_list
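# Example: merge_sort([5, 2, 4, 1]) returns [1, 2, 4, 5];
# merge([1, 4], [2, 3]) returns [1, 2, 3, 4].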
def max_advertise_revenue():
# let total_revenue be total advertisement revenue
total_revenue = 0
# let ad_price_list be a list for ad amount price
ad_price_list = []
# let clicks_list be a list for click count
clicks_list = []
# read file input
with open("./3_3_dot_product20180216.in") as f:
line_num = 0
for line in f:
line_num += 1
# read the first item for the n of items and weight
if line_num == 1:
num_item = int(line)
else:
# read ad revenue
if line_num == 2:
items = line.split()
for item in items:
ad_price_list.append(int(item))
# read clicks
if line_num == 3:
items = line.split()
for item in items:
clicks_list.append(int(item))
# merge sort
clicks_list = merge_sort(clicks_list)
# merge sort
ad_price_list = merge_sort(ad_price_list)
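    # Pairing the i-th smallest click count with the i-th smallest price maximizes
    # the dot product (rearrangement inequality); e.g. prices [2, 3, 9] with
    # clicks [4, 5, 7] give 2*4 + 3*5 + 9*7 = 86.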
# loop through n items to add up revenue
for index in range(0, len(clicks_list)):
# add up advertisement revenue
total_revenue += clicks_list[index] * ad_price_list[index]
# return program output
return total_revenue
if __name__ == '__main__':
"""
Algorithmic Design and Techniques
Solution to
Programming Challenge 3-3:
Maximum Advertisement Revenue
"""
print("total advertise revenue is %s" % max_advertise_revenue())
| python |
def anagrams(word, words):
return [x for x in words if sorted(list(x)) == sorted(list(word))]
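# Example: anagrams('listen', ['enlist', 'google', 'inlets']) returns ['enlist', 'inlets'].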
| python |
from random import randint
from compara_texto import ComparaTexto
class GeradorStrings():
def nova(self, comprimento):
comp = ComparaTexto()
caracteres = comp.CARACTERES_POSSIVEIS()
resultado = []
for _ in range(comprimento):
aleatorio = randint(0, len(caracteres) - 1)
resultado.append(caracteres[aleatorio])
        return ''.join(resultado)
 | python |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from x2paddle.decoder.onnx_decoder import ONNXGraph, ONNXGraphNode, ONNXGraphDataNode
from x2paddle.core.graph import GraphNode
from x2paddle.core.util import *
from functools import reduce
import numpy as np
import onnx
import onnx.numpy_helper as numpy_helper
from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE
import logging as _logging
from collections import OrderedDict
import math
import os
import copy
import sys
import shutil
_logger = _logging.getLogger(__name__)
def _const_weight_or_none(node, necessary=False):
if 'Constant' in node.layer_type:
return node.value
if isinstance(node, ONNXGraphDataNode):
return node.weight
if necessary:
        raise Exception('{} should be an initializer or Constant operator.'.format(
            node.name))
return None
def _rename_or_remove_weight(weights, origin_name, target_name=None, is_remove=True):
'''
Rename parameters by Paddle's naming rule of parameters.
Args:
weights(dict[String:np.ndarray]): Dict stored paramters, the key in weights is name of parameter.
origin_name(String): Name of parameter to rename or remove.
target_name(String, optional): if target_name is not None, add new key-value pair
{target_name:weights[origin_name]} to weights, and target_name must follow paddle's
naming rule of parameters. Default: None.
is_remove: if is_remove is True, remove origin key-value pair. Default: True.
Returns:
None
'''
if origin_name not in weights:
raise KeyError('{} not a key in {}'.format(origin_name, weights))
if is_remove:
# remove weight
data = weights.pop(origin_name)
else:
data = weights[origin_name]
if target_name is not None:
# rename weight
weights[target_name] = data
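# Example with hypothetical names: _rename_or_remove_weight(weights, 'conv_W', 'conv0.weight')
# pops the array stored under 'conv_W' and re-registers it under 'conv0.weight'.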
def _is_static_shape(shape):
negtive_dims = 0
error_dims = 0
for dim in shape:
if dim < 0:
negtive_dims += 1
if dim < -1:
error_dims += 1
if negtive_dims > 1:
return False
if error_dims > 0:
return False
return True
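# Example: _is_static_shape([1, 3, 224, 224]) -> True;
# _is_static_shape([-1, 3, -1, 224]) -> False (more than one unknown dimension).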
def _get_same_padding(in_size, kernel_size, stride):
new_size = int(math.ceil(in_size * 1.0 / stride))
pad_size = (new_size - 1) * stride + kernel_size - in_size
pad0 = int(pad_size / 2)
pad1 = pad_size - pad0
return [pad0, pad1]
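# Example: _get_same_padding(224, 3, 2) -> [0, 1]
# (output size ceil(224/2) = 112 needs (112-1)*2 + 3 - 224 = 1 extra padded pixel).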
def print_mapping_info(func):
def run_mapping(*args, **kwargs):
node = args[1]
try:
res = func(*args, **kwargs)
except:
print("convert failed node:{}, op_type is {}".format(
node.name[9:], node.layer_type))
raise
else:
return res
return run_mapping
class OpSet9():
elementwise_ops = {
'Add': 'paddle.add',
'Div': 'paddle.divide',
'Sub': 'paddle.subtract',
'Mul': 'paddle.multiply',
'Pow': 'paddle.pow',
}
directly_map_ops = {
'Ceil': ['paddle.ceil'],
# reduce function
'ReduceMean': ['paddle.mean',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdims=1)],
'ReduceSum': ['paddle.sum',
dict(axes='axis', keepdims='keepdim'),
dict(axes=None, keepdims=1)],
'ReduceMin': ['paddle.min',
dict(axes='axis', keepdims='keepdim'),
                      dict(axes=None, keepdims=1)],
'ReduceMax': ['paddle.max',
dict(axes='axis', keepdims='keepdim'),
                      dict(axes=None, keepdims=1)],
'ReduceProd': ['paddle.prod',
dict(axes='axis', keepdims='keepdim'),
                       dict(axes=None, keepdims=1)],
# active function
'Relu': ['paddle.nn.ReLU'],
'LeakyRelu': ['paddle.nn.LeakyReLU',
dict(alpha='negative_slope'),
dict(negative_slope=.01)],
'Elu': ['paddle.nn.functional.elu',
dict(alpha='alpha'),
dict(alpha=1.)],
'ThresholdedRelu': ['paddle.nn.functional.thresholded_relu',
dict(alpha='threshold'),
dict(alpha=1.)],
'Tanh': ['paddle.nn.Tanh'],
'Sigmoid': ['paddle.nn.Sigmoid'],
'Softsign': ['paddle.nn.Softsign'],
'Softplus': ['paddle.nn.Softplus',
dict(threshold='threshold'),
dict(threshold=float(sys.maxsize))],
'Exp': ['paddle.exp'],
'Log': ['paddle.log'],
'LogSoftmax': ['paddle.nn.functional.log_softmax',
dict(axis='axis'),
dict(axis=1)],
'Softmax': ['paddle.nn.Softmax',
dict(axis='axis'),
dict(axis=1)],
'Sqrt': ['paddle.sqrt'],
'Floor': ['paddle.floor'],
'Abs': ['paddle.abs'],
'Erf': ['paddle.erf'],
}
def __init__(self, decoder, paddle_graph):
super(OpSet9, self).__init__()
self.graph = decoder.graph
self.paddle_graph = paddle_graph
self.input_index = 0
self.inputs_info = dict()
self.weights = dict()
self.nn_name2id = dict()
self.done_weight_list = list()
@print_mapping_info
def directly_map(self, node, *args, **kwargs):
inputs = node.layer.input
assert len(inputs) == 1, 'directly_map error with multi inputs'
input = self.graph.get_input_node(node, idx=0, copy=True)
onnx_attrs = node.attr_map
if '' in onnx_attrs:
onnx_attrs.pop('')
if '_' in onnx_attrs:
onnx_attrs.pop('_')
op_info = self.directly_map_ops[node.layer_type]
paddle_op = op_info[0]
layer_attrs = dict()
if len(op_info) > 1:
attrs_name_map_dict = op_info[1]
for onnx_attr_name, pd_attr_name in attrs_name_map_dict.items():
if onnx_attr_name in onnx_attrs:
layer_attrs[pd_attr_name] = onnx_attrs[onnx_attr_name]
else:
layer_attrs[pd_attr_name] = op_info[2][onnx_attr_name]
if paddle_op.startswith("paddle.nn") and 'functional' not in paddle_op:
op_name = paddle_op[10:].lower()
op_name = name_generator(op_name, self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
self.paddle_graph.add_layer(
kernel=paddle_op,
inputs={"x": input.name},
outputs=layer_outputs,
**layer_attrs)
else:
self.paddle_graph.add_layer(
kernel=paddle_op,
inputs={"x": input.name},
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def elementwise_map(self, node):
op_type = self.elementwise_ops[node.layer_type]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
inputs_dict = {'x': val_x.name,
'y': val_y.name}
self.paddle_graph.add_layer(
op_type,
inputs=inputs_dict,
outputs=[node.name])
@print_mapping_info
def place_holder(self, node):
shape = node.out_shapes[0]
for i, dim_shape in enumerate(shape):
if dim_shape == 0 and i == 0:
shape[i] = 1
if dim_shape == 0 and i != 0:
                raise Exception('shape of input is not assigned')
self.paddle_graph.add_layer(
kernel="paddle.to_tensor",
inputs={},
outputs=[node.name],
data="x{}".format(self.input_index))
self.inputs_info["x{}".format(self.input_index)] = [shape, node.dtype]
self.input_index += 1
@print_mapping_info
def create_parameter(self, node, parameter=None):
if parameter is not None:
node = parameter
dtype = node.dtype
shape = node.out_shapes[0]
if hasattr(node.weight, "shape") and len(node.weight.shape) == 0:
self.paddle_graph.add_layer(
"paddle.full",
inputs={},
outputs=[node.name],
dtype=string(dtype),
shape=[1],
fill_value=node.weight)
else:
self.weights[node.name] = node.weight
self.paddle_graph.add_layer(
"self.create_parameter",
inputs={},
outputs=[node.name],
shape=shape,
attr=string(node.name),
dtype=string(dtype),
default_initializer="paddle.nn.initializer.Constant(value=0.0)")
def _pad_if_asymmetric(self, node, pads, val_name): # pads: SSEE
assert len(pads) & 1 == 0
symmetric = True
ndims = len(pads) // 2
for idx_dim in range(ndims):
if pads[idx_dim] != pads[ndims + idx_dim]:
symmetric = False
break
if symmetric:
return pads[:ndims], val_name
val_padded = self.Pad(node, op_independent=False)
return [0] * ndims, val_padded
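    # For example, pads [1, 1, 1, 1] (start/start/end/end order) are symmetric and come
    # back as ([1, 1], val_name); asymmetric pads such as [0, 0, 1, 1] are folded into an
    # explicit Pad layer and the padded tensor name is returned instead.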
def _interpolate(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
inputs = {'x': val_x.name}
attrs = dict()
if node.layer_type == 'Resize':
if len(node.layer.input) == 2:
# opset 10
val_scales = self.graph.get_input_node(node, idx=1, copy=True)
# TODO(syf): paddle.nn.functional.interpolate will support the length
# which is the same as the rank of input.
attrs['scale_factor'] = self.weights[val_scales.name].tolist()[2:]
elif len(node.layer.input) == 3:
# opset 11
val_scales = self.graph.get_input_node(node, idx=2, copy=True)
# TODO(syf): paddle.nn.functional.interpolate will support the length
# which is the same as the rank of input.
attrs['scale_factor'] = self.weights[val_scales.name].tolist()[2:]
elif len(node.layer.input) == 4:
# opset 11
val_sizes = self.graph.get_input_node(node, idx=3, copy=True)
var_nc, var_hw = val_sizes.name + '_nc', val_sizes.name + '_hw'
self.paddle_graph.add_layer(
'paddle.split',
inputs={"x": val_sizes.name},
outputs=[var_nc, var_hw],
num_or_sections=[2, 2],
axis=0)
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": var_hw},
outputs=[var_hw],
dtype=string('int32'))
inputs['size'] = var_hw
attrs = {"align_corners": False,
"mode": string(node.get_attr('mode', 'nearest'))}
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.interpolate",
inputs=inputs,
outputs=[node.name],
**attrs)
return
elif node.layer_type == 'Upsample':
val_scales = self.graph.get_input_node(node, idx=1, copy=True)
self.paddle_graph.add_layer(
"paddle.slice",
inputs={"input": val_scales.name},
outputs=[val_scales.name],
axes=[0],
starts=[2],
ends=[4])
inputs['scale_factor'] = val_scales.name
mode = node.get_attr('mode', 'nearest')
attrs.update({"align_corners": False,
"mode": string(mode),
"align_mode": 1})
val_x_shape = val_x.out_shapes[0]
if mode == "linear" and len(val_x_shape) == 4:
attrs["mode"] = string("bilinear")
attrs["align_corners"] = True
self.paddle_graph.add_layer(
kernel="paddle.nn.functional.interpolate",
inputs=inputs,
outputs=[node.name],
**attrs)
@print_mapping_info
def HardSigmoid(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
alpha = node.get_attr('alpha', 0.2)
beta = node.get_attr('beta', 0.5)
self.paddle_graph.add_layer(
kernel="paddle.scale",
inputs={"x": val_x.name},
outputs=[node.name + "_val"],
scale=alpha,
bias=beta)
self.paddle_graph.add_layer(
kernel="paddle.clip",
inputs={"x": node.name + "_val"},
outputs=[node.name],
min=0.0,
max=1.0)
@print_mapping_info
def Shape(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
kernel="paddle.shape",
inputs={"input": val_x.name},
outputs=[node.name])
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": node.name},
outputs=[node.name],
dtype=string('int64'))
@print_mapping_info
def RoiAlign(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_rois = self.graph.get_input_node(node, idx=1, copy=True)
pooled_height = node.get_attr('output_height')
pooled_width = node.get_attr('output_width')
spatial_scale = node.get_attr('spatial_scale')
sampling_ratio = node.get_attr('sampling_ratio')
layer_attrs = {
'pooled_height': pooled_height,
'pooled_width': pooled_width,
'spatial_scale': spatial_scale,
'sampling_ratio': sampling_ratio,
}
self.paddle_graph.add_layer(
'paddle.fluid.layers.roi_align',
inputs={'input': val_x.name,
'rois': val_rois.name},
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def MaxRoiPool(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_rois = self.graph.get_input_node(node, idx=1, copy=True)
spatial_scale = node.get_attr('spatial_scale')
pooled_height, pooled_width = node.get_attr('pooled_shape')
layer_attrs = {
'pooled_height': pooled_height,
'pooled_width': pooled_width,
'spatial_scale': spatial_scale,
}
self.paddle_graph.add_layer(
'paddle.fluid.layers.roi_pool',
inputs={'input': val_x.name,
'rois': val_rois.name},
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def Pad(self, node, op_independent=True):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
pads = node.get_attr('pads')
is_pads_attr = True
if pads is None:
val_pad = self.graph.get_input_node(node, idx=1, copy=True)
pad_shape = val_pad.out_shapes[0]
is_pads_attr = False
pads = _const_weight_or_none(val_pad)
if pads is not None:
is_pads_attr = True
mode = node.get_attr('mode', 'constant')
value = node.get_attr('value', 0.)
data_shape = val_x.out_shapes[0]
output_shape = node.out_shapes[0]
assume_pad = False
layer_attrs = {}
layer_attrs['mode'] = string(mode)
layer_attrs['value'] = value
if not op_independent:
output_name = node.name + '_paded'
else:
output_name = node.name
nn_op_name = name_generator("pad", self.nn_name2id)
layer_outputs = [nn_op_name, output_name]
if is_pads_attr:
paddings = []
if len(pads) in [2, 4, 6]:
if data_shape:
assume_pad |= data_shape and 2 * (len(data_shape) - 2) == len(pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * (len(output_shape) - 2) == len(pads) # NCHW
if assume_pad:
paddle_op = 'paddle.nn.Pad{}D'.format(len(output_shape) - 2)
paddings = np.array(pads).reshape(
(2, -1)).transpose().astype("int32")
paddings = np.flip(paddings, axis=0).flatten().tolist()
layer_attrs['padding'] = paddings
else:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW
if assume_pad:
paddle_op = 'paddle.nn.functional.pad'
paddings = np.array(pads).reshape(
(2, -1)).transpose().astype("int32").flatten().tolist()
layer_attrs['pad'] = paddings
else:
raise Exception("The padding value {} is wrong!".format(pads))
elif len(pads) == 8:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == len(pads) # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == len(pads) # NCHW
if assume_pad:
paddle_op = 'paddle.nn.Pad2D'
paddings = np.array(pads).reshape(
(2, -1)).transpose().astype("int32")
paddings = np.flip(paddings, axis=0).flatten().tolist()
if sum(paddings[:4]) == 0:
paddings = paddings[4:]
layer_attrs['padding'] = paddings
else:
layer_attrs["pad"] = paddings
paddle_op = "custom_layer:PadAllDim4WithOneInput"
else:
raise Exception("The padding value {} is wrong!".format(pads))
self.paddle_graph.add_layer(
paddle_op,
inputs={'x': val_x.name},
outputs=layer_outputs[1:] if paddle_op == 'paddle.nn.functional.pad' else layer_outputs,
**layer_attrs)
if not op_independent:
return node.name + '_paded'
else:
pads_len = val_pad.out_shapes[0][0]
if pads_len in [2, 4, 6]:
if data_shape:
assume_pad |= data_shape and 2 * (len(data_shape) - 2) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * (len(output_shape) - 2) == pads_len # NCHW
if assume_pad:
if pads_len == 2:
data_format = "NCL"
elif pads_len == 4:
data_format = "NCHW"
else:
data_format = "NCDHW"
self.paddle_graph.add_layer(
"custom_layer:PadWithTwoInput",
inputs={'x': val_x.name, 'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode),
data_format=string(data_format))
else:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == pads_len # NCHW
if assume_pad:
if pads_len == 4:
self.paddle_graph.add_layer(
"custom_layer:PadAllDim2",
inputs={'x': val_x.name, 'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode))
else:
raise Exception("The padding value is wrong!")
elif pads_len == 8:
if data_shape:
assume_pad |= data_shape and 2 * len(data_shape) == pads_len # NCHW
if output_shape:
assume_pad |= output_shape and 2 * len(output_shape) == pads_len # NCHW
if assume_pad:
self.paddle_graph.add_layer(
"custom_layer:PadAllDim4",
inputs={'x': val_x.name, 'pad': val_pad.name},
outputs=layer_outputs,
value=value,
mode=string(mode))
else:
raise Exception("The padding value is wrong!")
if not op_independent:
return node.name + '_paded'
@print_mapping_info
def Unsqueeze(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axes = node.get_attr('axes')
layer_attrs = {'axis': axes}
if len(val_x.out_shapes[0]) == 0:
if node.name:
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=[1])
else:
self.paddle_graph.add_layer(
'paddle.unsqueeze',
inputs={"x": val_x.name},
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def Shrink(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
bias = node.get_attr('bias')
lambd = node.get_attr('lambd')
assert bias == 0.0, 'not support bias!=0'
self.paddle_graph.add_layer(
'paddle.nn.functional.hardshrink',
inputs={"x": val_x.name},
outputs=[node.name],
threshold=lambd)
@print_mapping_info
def Constant(self, node):
val_output = self.graph.get_node(node.layer.output[0], copy=True)
value = node.get_attr('value')
dtype = np.dtype(value.dtype)
output_dtype = val_output.dtype
if output_dtype:
            assert dtype == output_dtype, 'tensor dtype does not match storage dtype'
shape = node.get_attr('shape', None)
if shape is None:
shape = val_output.out_shapes[0]
if shape is None:
shape = list(value.shape)
_logger.warning('in (Constant -> %s): '
'attribute "shape" of %s not inferred, '
'using value as 1-D tensor may lead to fails',
val_output.name, val_output.name)
if len(value) == 1:
value = value.tolist()
value = value[0]
self.paddle_graph.add_layer(
"paddle.full",
inputs={},
outputs=[node.name],
dtype=string(dtype),
shape=[1],
fill_value=value)
else:
value = np.reshape(value, shape)
self.weights[node.name] = value
self.paddle_graph.add_layer(
"self.create_parameter",
inputs={},
outputs=[node.name],
shape=shape,
attr=string(node.name),
dtype=string(dtype),
default_initializer="paddle.nn.initializer.Constant(value=0.0)")
@print_mapping_info
def Resize(self, node):
self._interpolate(node)
@print_mapping_info
def Upsample(self, node):
self._interpolate(node)
@print_mapping_info
def InstanceNormalization(self, node):
op_name = name_generator("instanse_norm", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_scale = self.graph.get_input_node(node, idx=1, copy=True)
val_b = self.graph.get_input_node(node, idx=2, copy=True)
epsilon = node.get_attr('epsilon', 1e-5)
self.weights[op_name+'.scale'] = self.weights[val_scale.name]
self.weights[op_name+'.bias'] = self.weights[val_b.name]
layer_attrs = {
'num_features': node.out_shapes[0][1],
'epsilon': epsilon,
}
dim = len(val_x.out_shapes[0])
if dim == 3:
paddle_op = "paddle.nn.InstanceNorm1D"
elif dim == 4:
paddle_op = "paddle.nn.InstanceNorm2D"
elif dim == 5:
paddle_op = "paddle.nn.InstanceNorm3D"
else:
raise Exception("The paddle only support 2D, 3D, 4D or 5D input in InstanceNormalization.")
self.paddle_graph.add_layer(
paddle_op,
inputs={"x": val_x.name},
outputs=layer_outputs,
**layer_attrs)
@print_mapping_info
def Expand(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_shape = self.graph.get_input_node(node, idx=1, copy=True)
val_x_dtype = val_x.dtype
name_ones = node.name + '_ones'
attr_ones = {
'shape': val_shape.name,
'dtype': string(val_x_dtype),
'fill_value': 1
}
self.paddle_graph.add_layer(
'paddle.full',
inputs={},
outputs=[name_ones],
**attr_ones)
inputs_dict = {'x': name_ones,
'y': val_x.name}
self.paddle_graph.add_layer(
'paddle.multiply',
inputs=inputs_dict,
outputs=[node.name])
@print_mapping_info
def Gather(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
indices = self.graph.get_input_node(node, idx=1, copy=True)
indices_shape = indices.out_shapes[0]
axis = node.get_attr('axis', 0)
#assert len(
# indices_shape) <= 2, "Gather op don't support dim of indice >2 "
if axis == 0 and len(indices_shape) <= 1:
if len(val_x.out_shapes[0]) <= 1:
self.paddle_graph.add_layer(
'paddle.gather',
inputs={'x': val_x.name,
'index': indices.name},
outputs=[node.name])
elif len(val_x.out_shapes[0]) > 1:
if len(indices_shape) == 0:
gather_ = node.name + '_1'
self.paddle_graph.add_layer(
'paddle.gather',
inputs={'x': val_x.name,
'index': indices.name},
outputs=[gather_])
self.paddle_graph.add_layer(
'paddle.squeeze',
inputs={'x': gather_},
outputs=[node.name],
axis=[0])
else:
self.paddle_graph.add_layer(
'paddle.gather',
inputs={'x': val_x.name,
'index': indices.name},
outputs=[node.name])
elif axis > 0 and len(indices_shape) <= 1:
perm = list(range(len(val_x.out_shapes[0])))
perm = [axis] + perm[:axis] + perm[axis + 1:]
name_trans = val_x.name + '_trans'
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": val_x.name},
outputs=[name_trans],
perm=perm)
self.paddle_graph.add_layer(
'paddle.gather',
inputs={'x': name_trans,
'index': indices.name},
outputs=[node.name])
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[node.name],
perm=perm)
if len(indices_shape) < 1:
self.paddle_graph.add_layer(
'paddle.squeeze',
inputs={'x': node.name},
outputs=[node.name],
axis=[axis])
elif axis == 0 and len(indices_shape) > 1:
if val_x.out_shapes[0] is not None and isinstance(
val_x, ONNXGraphDataNode):
indices_cast = indices.name + '_cast'
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": indices.name},
outputs=[indices_cast],
dtype=string('int64'))
op_name = name_generator("embedding", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
self.weights[op_name + '.weight'] = _const_weight_or_none(val_x)
self.paddle_graph.add_layer(
'paddle.nn.Embedding',
inputs={"x": indices_cast},
outputs=layer_outputs,
num_embeddings=val_x.out_shapes[0][0],
embedding_dim=val_x.out_shapes[0][1])
else:
from functools import reduce
reshape_shape = reduce(lambda x, y: x * y, indices_shape)
indices_reshape = indices.name + '_shape'
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": indices.name},
outputs=[indices_reshape],
shape=[reshape_shape, ])
perm = list(range(len(val_x.out_shapes[0])))
self.paddle_graph.add_layer(
'paddle.gather',
inputs={'x': val_x.name,
'index': indices_reshape},
outputs=[node.name])
val_x_shape = val_x.out_shapes[0]
reshaped_shape = []
for i in perm:
reshaped_shape.append(indices_shape[i])
for i in val_x_shape[:axis] + val_x_shape[axis + 1:]:
reshaped_shape.append(i)
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": node.name},
outputs=[node.name],
shape=reshaped_shape)
elif axis > 0 and len(indices_shape) > 1:
from functools import reduce
reshape_shape = reduce(lambda x, y: x * y, indices_shape)
indices_reshape = indices.name + '_shape'
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": indices.name},
outputs=[indices_reshape],
shape=[reshape_shape, ])
perm = list(range(len(val_x.out_shapes[0])))
perm = [axis] + perm[:axis] + perm[axis + 1:]
name_trans = val_x.name + '_transpose'
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": val_x.name},
outputs=[name_trans],
perm=perm)
self.paddle_graph.add_layer(
'paddle.gather',
inputs={'x': name_trans,
'index': indices_reshape},
outputs=[node.name])
input_transpose = node.name + '_transpose'
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": node.name},
outputs=[input_transpose],
perm=perm)
val_x_shape = val_x.out_shapes[0]
reshaped_shape = []
for i in perm:
reshaped_shape.append(indices_shape[i])
for i in val_x_shape[:axis] + val_x_shape[axis + 1:]:
reshaped_shape.append(i)
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": input_transpose},
outputs=[node.name],
shape=reshaped_shape)
@print_mapping_info
def ScatterND(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
indices = self.graph.get_input_node(node, idx=1, copy=True)
updates = self.graph.get_input_node(node, idx=2, copy=True)
if len(indices.out_shapes[0]) == 1:
self.paddle_graph.add_layer(
'paddle.scatter',
inputs={'x': val_x.name,
'index': indices.name,
'updates': updates.name},
outputs=[node.name])
else:
input_inner_indices = node.name + '_input_inner_indices'
shape = val_x.out_shapes[0]
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": indices.name},
outputs=[indices.name],
shape=indices.out_shapes[0])
zeros_like_val_x = val_x.name + '_zeros'
self.paddle_graph.add_layer(
'paddle.zeros_like',
inputs={"x": val_x.name},
outputs=[zeros_like_val_x])
self.paddle_graph.add_layer(
'paddle.scatter_nd_add',
inputs={
'x': zeros_like_val_x,
'index': indices.name,
'updates': updates.name
},
outputs=[input_inner_indices])
indices_mask = node.name + '_indices_mask'
constant_minus_one = node.name + '_constant_minus_one'
# full_like support create tensor shape like input tensor
self.paddle_graph.add_layer(
'paddle.full_like',
inputs={"x": updates.name},
outputs=[constant_minus_one],
dtype=string(updates.dtype),
fill_value=-1)
self.paddle_graph.add_layer(
'paddle.scatter_nd_add',
inputs={
'x': zeros_like_val_x,
'index': indices.name,
'updates': constant_minus_one
},
outputs=[indices_mask])
constant_one = node.name + '_constant_1'
# full_like support create tensor shape like input tensor
self.paddle_graph.add_layer(
'paddle.full_like',
inputs={"x": val_x.name},
outputs=[constant_one],
dtype=string(val_x.dtype),
fill_value=1)
input_out_indices_mask = node.name + '_input_out_indices_mask'
self.paddle_graph.add_layer(
"paddle.add",
inputs={"x": indices_mask,
"y": constant_one},
outputs=[input_out_indices_mask])
input_out_indices = node.name + '_input_out_indices'
self.paddle_graph.add_layer(
"paddle.multiply",
inputs={"x": val_x.name,
"y": input_out_indices_mask},
outputs=[input_out_indices])
self.paddle_graph.add_layer(
"paddle.add",
inputs={"x": input_inner_indices,
"y": input_out_indices},
outputs=[node.name])
@print_mapping_info
def Range(self, node):
val_start = self.graph.get_input_node(node, idx=0, copy=True)
val_limit = self.graph.get_input_node(node, idx=1, copy=True)
val_delta = self.graph.get_input_node(node, idx=2, copy=True)
dtype = val_start.dtype
inputs = {'start': val_start.name,
'end': val_limit.name,
'step': val_delta.name}
self.paddle_graph.add_layer(
'paddle.arange',
inputs=inputs,
outputs=[node.name],
dtype=string(dtype))
@print_mapping_info
def Slice(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
starts, ends, axes, steps = None, None, None, None
layer_attrs = {}
if len(node.inputs) > 1:
starts = self.graph.get_input_node(node, idx=1, copy=True)
ends = self.graph.get_input_node(node, idx=2, copy=True)
starts_value = _const_weight_or_none(starts)
if starts_value is not None:
starts_value = starts_value.tolist()
ends_value = _const_weight_or_none(ends)
if ends_value is not None:
ends_value = ends_value.tolist()
if len(node.inputs) > 2:
s_len = len(val_x.out_shapes[0])
axes = list(range(s_len))
if len(node.inputs) > 3:
axes_node = self.graph.get_input_node(node, idx=3, copy=True)
axes = _const_weight_or_none(axes_node, necessary=True).tolist()
if len(node.inputs) > 4:
steps = self.graph.get_input_node(node, idx=4, copy=True)
steps = _const_weight_or_none(steps).tolist()
layer_attrs = {
"axes": axes,
"starts": starts.name,
"ends": ends.name
}
if starts_value is not None and ends_value is not None and axes is not None:
starts_value = starts_value.copy()
ends_value = ends_value.copy()
for idx in range(len(ends_value)):
if starts_value[idx] >= val_x.out_shapes[0][axes[idx]] and val_x.out_shapes[0][axes[idx]] > 0:
starts_value[idx] = val_x.out_shapes[0][axes[idx]] - 1
ends_value[idx] = val_x.out_shapes[0][axes[idx]]
elif ends_value[idx] > 2**31 - 1:
ends_value[idx] = 2**31 - 1
layer_attrs = {
"axes": axes,
"starts": starts_value,
"ends": ends_value
}
else:
if starts.dtype != 'int32':
starts_cast = starts.name + '_cast'
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": starts.name},
outputs=[starts_cast],
dtype=string('int32'))
layer_attrs['starts'] = starts_cast
if ends.dtype != 'int32':
ends_cast = ends.name + '_cast'
else:
ends_cast = ends.name
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": ends.name},
outputs=[ends_cast],
dtype=string('int32'))
layer_attrs['ends'] = ends_cast
else:
starts = node.get_attr('starts')
ends = node.get_attr('ends')
axes = node.get_attr('axes')
for idx in range(len(ends)):
if ends[idx] > 2**31 - 1:
ends[idx] = 2**31 - 1
layer_attrs = {"axes": axes, "starts": starts, "ends": ends}
if steps is not None:
layer_attrs['strides'] = steps
self.paddle_graph.add_layer(
'paddle.strided_slice',
inputs={"x": val_x.name},
outputs=[node.name],
**layer_attrs)
else:
self.paddle_graph.add_layer(
'paddle.slice',
inputs={"input": val_x.name},
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def ConstantOfShape(self, node):
val_shape = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_node(node.layer.output[0], copy=True)
value = node.get_attr('value')
dtype = value.dtype
value = value.tolist()
assert len(value) == 1, ('given value not Scalar, shape of value > 1, '
'this is not supported')
if len(value) == 1:
value = value[0]
layer_attrs = {
'dtype': string(dtype),
'fill_value': value
}
self.paddle_graph.add_layer(
"paddle.full",
inputs={'shape': val_shape.name},
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def Clip(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_node(node.layer.output[0], copy=True)
max_value, min_value = None, None
if len(node.inputs) == 1:
max_value = node.get_attr('max')
min_value = node.get_attr('min')
layer_attrs = {
'max': max_value,
'min': min_value,
}
self.paddle_graph.add_layer(
'paddle.clip',
inputs={"x": val_x.name},
outputs=[node.name],
**layer_attrs)
else:
min_ipt = self.graph.get_input_node(node, idx=1, copy=True)
max_ipt = self.graph.get_input_node(node, idx=2, copy=True)
min_value = _const_weight_or_none(min_ipt)
max_value = _const_weight_or_none(max_ipt)
            if max_value is not None and max_value.shape == (1, ):
                max_value = max_value[0]
            if min_value is not None and min_value.shape == (1, ):
                min_value = min_value[0]
if max_value is not None and min_value is not None:
layer_attrs = {'max': max_value, 'min': min_value}
self.paddle_graph.add_layer(
'paddle.clip',
inputs={"x": val_x.name},
outputs=[node.name],
**layer_attrs)
else:
raise
@print_mapping_info
def Split(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
paddle_op = 'split'
split = node.get_attr('split')
axis = node.get_attr('axis', 0)
layer_attrs = {
'num_or_sections': split,
'axis': axis,
}
outputs_list = list()
if isinstance(split, list) or isinstance(split, tuple):
if len(split) == 1:
outputs_list.append(node.name)
else:
for i in range(len(split)):
outputs_list.append("{}_p{}".format(node.layer_name, i))
else:
outputs_list.append(node.name)
self.paddle_graph.add_layer(
'paddle.split',
inputs={"x": val_x.name},
outputs=outputs_list,
**layer_attrs)
@print_mapping_info
def Reshape(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_shape = self.graph.get_input_node(node, idx=1, copy=True)
val_reshaped = self.graph.get_node(node.layer.output[0], copy=True)
shape_value = _const_weight_or_none(val_shape)
shape_dims = len(val_shape.out_shapes[0])
if shape_value is not None:
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={'x': val_x.name},
outputs=[node.name],
shape=shape_value.tolist())
elif len(node.out_shapes[0]) > 0 and _is_static_shape(node.out_shapes[
0]):
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={'x': val_x.name},
outputs=[node.name],
shape=node.out_shapes[0])
else:
# shape may be [], come form Gather by scalar indices
if len(val_shape.out_shapes[0]) > 0:
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={'x': val_shape.name},
outputs=[val_shape.name],
shape=val_shape.out_shapes[0])
if val_shape.dtype != "int32":
self.paddle_graph.add_layer(
'paddle.cast',
inputs={'x': val_shape.name},
outputs=[val_shape.name],
dtype=string("int32"))
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={'x': val_x.name,
'shape': val_shape.name},
outputs=[node.name])
@print_mapping_info
def Cast(self, node):
val_input = self.graph.get_input_node(node, idx=0, copy=True)
val_output = self.graph.get_node(node.layer.output[0], copy=True)
dtype = node.get_attr('to')
if not isinstance(dtype, np.dtype):
dtype = TENSOR_TYPE_TO_NP_TYPE[dtype]
output_dtype = val_output.dtype
if output_dtype:
            assert dtype == output_dtype, "dtype of 'to' does not match the output dtype"
self.paddle_graph.add_layer(
'paddle.cast',
inputs={'x': val_input.name},
outputs=[node.name],
dtype=string(dtype))
@print_mapping_info
def Not(self, node):
val_input = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer('paddle.logical_not',
inputs={'x': val_input.name},
outputs=[node.name])
@print_mapping_info
def AveragePool(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
auto_pad = node.get_attr('auto_pad', 'NOTSET')
kernel_shape = node.get_attr("kernel_shape")
poolnd = len(kernel_shape)
strides = node.get_attr("strides")
pad_mode = node.get_attr("pads")
ceil_mode = bool(node.get_attr('ceil_mode', 0))
pads = node.get_attr('pads', [0] * (poolnd * 2))
paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
input_shape = val_x.out_shapes[0]
pad_h = _get_same_padding(input_shape[2], kernel_shape[0],
strides[0])
pad_w = _get_same_padding(input_shape[3], kernel_shape[1],
strides[1])
paddings = pad_h + pad_w
op_name = name_generator("pool", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
paddle_op = 'paddle.nn.AvgPool{}D'.format(poolnd)
assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
layer_attrs = {
"kernel_size": kernel_shape,
"stride": strides,
"padding": paddings,
"ceil_mode": ceil_mode,
"exclusive": 'True',
}
self.paddle_graph.add_layer(
paddle_op,
inputs={'x': val_x if isinstance(val_x, str) else val_x.name},
outputs=layer_outputs,
**layer_attrs)
@print_mapping_info
def Concat(self, node):
inputs_list = []
dtypes = set()
for i in range(len(node.layer.input)):
ipt = self.graph.get_input_node(node, idx=i, copy=True)
inputs_list.append(ipt.name)
dtypes.add(ipt.dtype)
if len(dtypes) > 1:
            raise Exception('Unsupported situation happened, please create issue on https://github.com/PaddlePaddle/X2Paddle/issues.')
axis = node.get_attr('axis')
self.paddle_graph.add_layer(
'paddle.concat',
inputs={"x": inputs_list},
outputs=[node.name],
axis=axis)
@print_mapping_info
def Flatten(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
output_shape = node.out_shapes[0]
axis = node.get_attr('axis', 1)
shape_list = [1, 1]
if axis == 0:
for s in output_shape:
shape_list[1] *= s
else:
for s in output_shape[:axis]:
shape_list[0] *= s
for s in output_shape[axis:]:
shape_list[1] *= s
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": val_x.name},
outputs=[node.name],
shape=shape_list)
@print_mapping_info
def Gemm(self, node):
val_a = self.graph.get_input_node(node, idx=0, copy=True)
val_b = self.graph.get_input_node(node, idx=1, copy=True)
val_c = self.graph.get_input_node(node, idx=2, copy=True)
alpha = node.get_attr('alpha', 1.) # optional
beta = node.get_attr('beta', 1.) # optional
trans_a = bool(node.get_attr('transA', 0)) # optional
trans_b = bool(node.get_attr('transB', 0)) # optional
val_mm = node.name + '_mm'
matmul_inputs = {"x": val_a.name,
"y": val_b.name}
attr_matmul = {
"transpose_x": trans_a,
"transpose_y": trans_b,
}
self.paddle_graph.add_layer(
'paddle.matmul',
inputs=matmul_inputs,
outputs=[val_mm],
**attr_matmul)
self.paddle_graph.add_layer(
"paddle.scale",
inputs={"x": val_mm},
outputs=[val_mm],
scale=alpha)
if beta != 0:
if beta == 1.:
add_inputs = {"x": val_mm,
"y": val_c.name}
self.paddle_graph.add_layer(
"paddle.add",
inputs=add_inputs,
outputs=[node.name])
else:
var_beta = node.name + '_beta'
self.paddle_graph.add_layer(
"paddle.scale",
inputs={"x": val_c.name},
outputs=[var_beta],
scale=beta)
add_inputs = {"x": val_mm, "y": var_beta}
self.paddle_graph.add_layer(
"paddle.add",
inputs=add_inputs,
outputs=[node.name])
@print_mapping_info
def Sum(self, node):
val_inps = node.layer.input
inputs_dict = {
"x": self.graph.get_input_node(
node, idx=0, copy=True).name,
"y": self.graph.get_input_node(
node, idx=1, copy=True).name,
}
self.paddle_graph.add_layer("paddle.add",
inputs=inputs_dict,
outputs=[node.name])
for idx, ipt in enumerate(val_inps[2:]):
            y = self.graph.get_input_node(node, idx=idx + 2, copy=True)
inputs_dict = {
"x": node.name,
"y": y.name,
}
self.paddle_graph.add_layer(
"paddle.add",
inputs=inputs_dict,
outputs=[node.name])
@print_mapping_info
def MatMul(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
x_shape = val_x.out_shapes[0]
y_shape = val_y.out_shapes[0]
inputs_dict = {"x": val_x.name,
"y": val_y.name}
if y_shape[0] == 1 and x_shape[-1] != 1 and x_shape[0] != 1:
y_squeeze = val_y.name + '_squeeze'
self.paddle_graph.add_layer(
"paddle.squeeze",
inputs={"x": val_y.name},
outputs=[y_squeeze],
axis=[0])
inputs_dict['y'] = y_squeeze
self.paddle_graph.add_layer(
"paddle.matmul",
inputs=inputs_dict,
outputs=[node.name])
else:
self.paddle_graph.add_layer(
"paddle.matmul",
inputs=inputs_dict,
outputs=[node.name])
@print_mapping_info
def BatchNormalization(self, node):
op_name = name_generator("batchnorm", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_scale = self.graph.get_input_node(node, idx=1, copy=True)
val_b = self.graph.get_input_node(node, idx=2, copy=True)
val_mean = self.graph.get_input_node(node, idx=3, copy=True)
val_var = self.graph.get_input_node(node, idx=4, copy=True)
momentum = node.get_attr('momentum', .9)
epsilon = node.get_attr('epsilon', 1e-5)
c = val_x.out_shapes[0][1]
_rename_or_remove_weight(self.weights, val_scale.name, op_name+'.weight')
_rename_or_remove_weight(self.weights, val_b.name, op_name+'.bias')
_rename_or_remove_weight(self.weights, val_var.name, op_name+'._variance')
_rename_or_remove_weight(self.weights, val_mean.name, op_name+'._mean')
# Attribute: spatial is used in BatchNormalization-1,6,7
spatial = bool(node.get_attr('spatial'))
layer_attrs = {
"num_channels": c,
"momentum": momentum,
"epsilon": epsilon,
"is_test": True,
"use_global_stats": False,
}
self.paddle_graph.add_layer(
"paddle.nn.BatchNorm",
inputs={"x": val_x.name},
outputs=layer_outputs,
**layer_attrs)
@print_mapping_info
def Transpose(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
s_len = len(val_x.out_shapes[0])
perm_default = list(range(s_len))
perm_default.reverse()
perm = node.get_attr('perm', perm_default)
self.paddle_graph.add_layer(
"paddle.transpose",
inputs={"x": val_x.name},
outputs=[node.name],
perm=perm)
@print_mapping_info
def PRelu(self, node):
op_name = name_generator("prelu", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_slope = self.graph.get_input_node(node, idx=1, copy=True)
mode = 'channel'
shape_slope = val_slope.out_shapes[0]
if shape_slope == [1] * len(shape_slope):
mode = 'all'
if mode == "element":
self.paddle_graph.add_layer(
"paddle.zeros",
inputs={},
outputs=[output_name + "__zeros"],
shape=shape_slope,
dtype=string(node.dtype))
self.paddle_graph.add_layer(
"paddle.maximum",
inputs={"x": val_x.name,
"y": output_name + "__zeros"},
outputs=[output_name + "__max"])
self.paddle_graph.add_layer(
"paddle.minimum",
inputs={"x": val_x.name,
"y": output_name + "__zeros"},
outputs=[output_name + "__max"])
self.paddle_graph.add_layer(
"paddle.multiply",
inputs={"x": val_slope.name,
"y": output_name + "__min"},
outputs=[output_name + "__mul"])
self.paddle_graph.add_layer(
"paddle.add",
inputs={"x": output_name + "__max",
"y": output_name + "__mul"},
outputs=[output_name])
else:
if mode == 'channel':
slope_data = _const_weight_or_none(val_slope)
if slope_data is None:
self.paddle_graph.add_layer(
"paddle.reshape",
inputs={"x": val_slope.name},
outputs=[val_slope.name],
shape=[shape_slope[0]])
self.paddle_graph.add_layer(
"paddle.nn.functional.prelu",
inputs={"x": val_x.name,
"weight": val_slope.name},
outputs=[node.name])
return
_rename_or_remove_weight(self.weights, val_slope.name)
if len(shape_slope) > 1:
self.weights[op_name+'._weight'] = np.reshape(slope_data, shape_slope[0])
num_parameters = val_x.out_shapes[0][1]
else:
num_parameters = 1
                slope_value = self.weights[val_slope.name]
                _rename_or_remove_weight(self.weights, val_slope.name)
                self.weights[op_name+'._weight'] = np.reshape(slope_value, [1])
self.paddle_graph.add_layer(
"paddle.nn.PReLU",
inputs={"x": val_x.name},
outputs=layer_outputs,
num_parameters=num_parameters)
@print_mapping_info
def Squeeze(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axes = node.get_attr('axes')
if len(val_x.out_shapes[0]) == 1:
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": val_x.name},
outputs=[node.name],
dtype=string(val_x.dtype))
else:
self.paddle_graph.add_layer(
"paddle.squeeze",
inputs={"x": val_x.name},
outputs=[node.name],
axis=axes)
@print_mapping_info
def Equal(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
self.paddle_graph.add_layer(
"paddle.equal",
inputs={'x': val_x.name,
'y': val_y.name},
outputs=[node.name])
@print_mapping_info
def Greater(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_y = self.graph.get_input_node(node, idx=1, copy=True)
self.paddle_graph.add_layer(
"paddle.greater_than",
inputs={'x': val_x.name,
'y': val_y.name},
outputs=[node.name],
param_attr=None)
@print_mapping_info
def Where(self, node):
condition = self.graph.get_input_node(node, idx=0, copy=True)
val_x = self.graph.get_input_node(node, idx=1, copy=True)
val_y = self.graph.get_input_node(node, idx=2, copy=True)
not_condition = condition.name + '_not'
self.paddle_graph.add_layer(
"paddle.logical_not",
inputs={"x": condition.name},
outputs=[not_condition])
cast_not_condition = not_condition + '_cast'
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": not_condition},
outputs=[cast_not_condition],
dtype=string(val_x.dtype))
cast_condition = condition.name + '_cast'
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": condition.name},
outputs=[cast_condition],
dtype=string(val_x.dtype))
mul_val_x = val_x.name + '_mul'
self.paddle_graph.add_layer(
"paddle.multiply",
inputs={'x': val_x.name,
'y': cast_condition},
outputs=[mul_val_x])
mul_val_y = val_y.name + '_mul'
self.paddle_graph.add_layer(
"paddle.multiply",
inputs={'x': val_y.name,
'y': cast_not_condition},
outputs=[mul_val_y])
self.paddle_graph.add_layer(
"paddle.add",
inputs={'x': mul_val_x,
'y': mul_val_y},
outputs=[node.name])
@print_mapping_info
def NonZero(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_x_dim = len(val_x.out_shapes[0])
if val_x_dim == 1:
self.paddle_graph.add_layer(
"paddle.nonzero",
inputs={"x": val_x.name},
outputs=[val_x.name])
self.paddle_graph.add_layer(
"paddle.transpose",
inputs={"x": val_x.name},
outputs=[node.layer_name],
perm=[1, 0])
if val_x_dim > 1:
self.paddle_graph.add_layer(
"paddle.nonzero",
inputs={"x": val_x.name},
outputs=[val_x.name])
self.paddle_graph.add_layer(
"paddle.split",
inputs={"x": val_x.name},
outputs=[val_x.name],
num_or_sections=1,
axis=val_x_dim)
self.paddle_graph.add_layer(
"paddle.concat",
inputs={"x": val_x.name},
outputs=[node.name])
@print_mapping_info
def Identity(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
"paddle.assign",
inputs={"x": val_x.name},
outputs=[node.name])
@print_mapping_info
def Tile(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_repeats = self.graph.get_input_node(node, idx=1, copy=True)
repeats = _const_weight_or_none(val_repeats)
if repeats is None:
repeats = val_repeats.name
if val_repeats.dtype != 'int32':
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": repeats},
outputs=["{}.tmp".format(repeats)],
dtype=string("int32"))
repeats = "{}.tmp".format(repeats)
elif isinstance(repeats, int):
repeats = [repeats]
attr = {
'expand_times': repeats,
"name": string(node.name),
}
self.paddle_graph.add_layer(
"paddle.tile",
inputs={"x": val_x.name},
outputs=[node.name],
repeat_times=repeats)
@print_mapping_info
def MaxPool(self, node):
op_name = name_generator("pool", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
auto_pad = node.get_attr('auto_pad', 'NOTSET')
assert node.get_attr(
"dilations") is None, 'only dilations = 0 is supported' # optional
kernel_shape = node.get_attr("kernel_shape")
poolnd = len(kernel_shape)
strides = node.get_attr("strides")
pad_mode = node.get_attr("pads")
ceil_mode = bool(node.get_attr('ceil_mode', 0)) # optional
pads = node.get_attr('pads', [0] * (poolnd * 2)) # optional
paddle_op = 'paddle.nn.MaxPool{}D'.format(poolnd)
assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
input_shape = val_x.out_shapes[0]
pad_h = _get_same_padding(input_shape[2], kernel_shape[0],
strides[0])
pad_w = _get_same_padding(input_shape[3], kernel_shape[1],
strides[1])
paddings = pad_h + pad_w
layer_attrs = {
"kernel_size": kernel_shape,
"stride": strides,
"padding": paddings,
"ceil_mode": ceil_mode,
}
self.paddle_graph.add_layer(
paddle_op,
inputs={'x': val_x if isinstance(val_x, str) else val_x.name},
outputs=layer_outputs,
**layer_attrs)
@print_mapping_info
def GlobalMaxPool(self, node):
op_name = name_generator("pool", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
input_shape = val_x.out_shapes[0]
if len(input_shape) == 4:
poolnd = 2
elif len(input_shape) == 5:
poolnd = 3
elif len(input_shape) == 3:
poolnd = 1
paddle_op = 'paddle.nn.AdaptiveMaxPool{}D'.format(poolnd)
assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
output_shape = node.out_shapes[0]
self.paddle_graph.add_layer(
paddle_op,
inputs={'x': val_x.name},
outputs=layer_outputs,
output_size=output_shape[2:])
@print_mapping_info
def GlobalAveragePool(self, node):
op_name = name_generator("pool", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
input_shape = val_x.out_shapes[0]
if len(input_shape) == 4:
poolnd = 2
elif len(input_shape) == 5:
poolnd = 3
elif len(input_shape) == 3:
poolnd = 1
paddle_op = 'paddle.nn.AdaptiveAvgPool{}D'.format(poolnd)
assert 1 <= poolnd <= 3, 'only Pool1D, Pool2D and Pool3D are supported'
output_shape = node.out_shapes[0]
self.paddle_graph.add_layer(
paddle_op,
inputs={'x': val_x.name},
outputs=layer_outputs,
output_size=output_shape[2:])
@print_mapping_info
def Conv(self, node):
op_name = name_generator("conv", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_w = self.graph.get_input_node(node, idx=1, copy=True)
has_bias = len(node.layer.input) == 3
if has_bias:
val_b = self.graph.get_input_node(node, idx=2, copy=True)
auto_pad = node.get_attr('auto_pad', 'NOTSET')
kernel_shape = node.get_attr('kernel_shape')
convnd = len(kernel_shape)
        assert 2 <= convnd <= 3, 'only Conv2D and Conv3D are supported'
num_out_channels = val_w.out_shapes[0][0]
num_in_channels = val_w.out_shapes[0][1]
paddle_op = 'paddle.nn.Conv{}D'.format(convnd)
num_groups = node.get_attr('group', 1)
strides = node.get_attr('strides', [1] * convnd)
dilations = node.get_attr('dilations', [1] * convnd)
pads = node.get_attr('pads', [0] * (convnd * 2))
input_shape = val_x.out_shapes[0]
paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
if auto_pad == "SAME_UPPER" or auto_pad == "SAME_LOWER":
pad_h = _get_same_padding(input_shape[2], kernel_shape[0],
strides[0])
pad_w = _get_same_padding(input_shape[3], kernel_shape[1],
strides[1])
paddings = pad_h + pad_w
layer_inputs = {'x': val_x if isinstance(val_x, str) else val_x.name}
layer_attrs = {
"in_channels": num_in_channels * num_groups,
"out_channels": num_out_channels,
"kernel_size": kernel_shape,
"stride": strides,
"padding": paddings,
"dilation": dilations,
"groups": num_groups,
}
remove_weight = True if val_w.name in self.done_weight_list else False
if remove_weight:
self.done_weight_list.append(val_w.name)
_rename_or_remove_weight(self.weights, val_w.name, op_name+'.weight', remove_weight)
if has_bias:
remove_bias = True if val_b.name in self.done_weight_list else False
if remove_bias:
                self.done_weight_list.append(val_b.name)
_rename_or_remove_weight(self.weights, val_b.name, op_name+'.bias', remove_bias)
else:
layer_attrs["bias_attr"] = False
if reduce(lambda x,y:x*y, input_shape) in [1, -1] and 1 not in input_shape:
input_shape[1] = num_in_channels * num_groups
input_shape[0] = 0
input_shape[2] = 0
self.paddle_graph.add_layer(
"paddle.reshape",
inputs=layer_inputs,
outputs=[layer_inputs["x"]],
shape=input_shape)
self.paddle_graph.add_layer(
paddle_op,
inputs=layer_inputs,
outputs=layer_outputs,
**layer_attrs)
@print_mapping_info
def ConvTranspose(self, node):
op_name = name_generator("conv_trans", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_w = self.graph.get_input_node(node, idx=1, copy=True)
val_b = None
if len(node.layer.input) > 2:
val_b = self.graph.get_input_node(node, idx=2, copy=True)
auto_pad = node.get_attr('auto_pad', 'NOTSET')
out_padding = node.get_attr('output_padding', [0, 0])
kernel_shape = node.get_attr('kernel_shape')
assert kernel_shape, 'kernel_shape not inferred'
convnd = len(kernel_shape)
        assert 2 <= convnd <= 3, 'only Conv2DTranspose and Conv3DTranspose are supported'
num_in_channels = val_w.out_shapes[0][0]
num_out_channels = val_w.out_shapes[0][1]
paddle_op = 'paddle.nn.Conv{}DTranspose'.format(convnd)
num_groups = node.get_attr('group', 1)
strides = node.get_attr('strides', [1] * convnd)
dilations = node.get_attr('dilations', [1] * convnd)
output_size = node.get_attr('output_shape', [])
pads = node.get_attr('pads', [0] * (convnd * 2))
        paddings, val_x = self._pad_if_asymmetric(node, pads, val_x)
output_size = [0, 0]
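        # output spatial size follows the standard transposed-conv formula:
        # (in - 1) * stride - 2 * padding + dilation * (kernel - 1) + 1 + output_padding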
output_size[0] = (val_x.out_shapes[0][2] - 1
) * strides[0] - 2 * paddings[0] + dilations[0] * (
kernel_shape[0] - 1) + 1 + out_padding[0]
output_size[1] = (val_x.out_shapes[0][3] - 1
) * strides[1] - 2 * paddings[1] + dilations[1] * (
kernel_shape[1] - 1) + 1 + out_padding[1]
        # Conv2DTranspose lacks an output_size argument; output_size can only be passed in at forward time
inputs_dict = {'x': val_x if isinstance(val_x, str) else val_x.name}
layer_attrs = {
"in_channels": num_in_channels,
"out_channels": num_out_channels * num_groups,
"kernel_size": kernel_shape,
"stride": strides,
"dilation": dilations,
"padding": paddings,
"groups": num_groups,
"output_padding":out_padding}
_rename_or_remove_weight(self.weights, val_w.name, op_name+'.weight',)
if val_b is not None:
_rename_or_remove_weight(self.weights, val_b.name, op_name+'.bias')
self.paddle_graph.add_layer(
kernel=paddle_op,
inputs=inputs_dict,
outputs=layer_outputs,
**layer_attrs)
@print_mapping_info
def ArgMax(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
axis = node.get_attr('axis')
keepdims = False if node.get_attr('keepdims') == 0 else True
layer_attrs = {'axis': axis,
'keepdim': keepdims}
self.paddle_graph.add_layer(
'paddle.argmax',
inputs={"x": val_x.name},
outputs=[node.name],
**layer_attrs)
@print_mapping_info
def Size(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
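        # ONNX Size is the total element count, computed here as prod(shape(x)) cast to int64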
self.paddle_graph.add_layer(
"paddle.shape",
inputs={"input": val_x.name},
outputs=[node.name])
self.paddle_graph.add_layer(
'paddle.cast',
inputs={"x": node.name},
outputs=[node.name],
dtype=string('int64'))
self.paddle_graph.add_layer(
"paddle.prod",
inputs={"x": node.name},
outputs=[node.name])
@print_mapping_info
def Sign(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
if node.dtype not in ["float16", "float32", "float64"]:
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": val_x.name},
outputs=[val_x.name],
dtype=string("float32"))
self.paddle_graph.add_layer(
"paddle.sign",
inputs={"x": val_x.name},
outputs=[node.name])
if node.dtype not in ["float16", "float32", "float64"]:
self.paddle_graph.add_layer(
"paddle.cast",
inputs={"x": node.name},
outputs=[node.name],
dtype=string(node.dtype))
@print_mapping_info
def OneHot(self, node):
nn_op_name = name_generator("onehot", self.nn_name2id)
output_name = node.name
layer_outputs = [nn_op_name, output_name]
indices = self.graph.get_input_node(node, idx=0, copy=True)
depth = self.graph.get_input_node(node, idx=1, copy=True)
values = self.graph.get_input_node(node, idx=2, copy=True)
axis = node.get_attr('axis', -1)
self.paddle_graph.add_layer(
"custom_layer:OneHot",
inputs={"indices": indices.name,
"depth": depth.name,
"values": values.name},
outputs=layer_outputs,
axis=axis)
@print_mapping_info
def Reciprocal(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
self.paddle_graph.add_layer(
"paddle.reciprocal",
inputs={"x": val_x.name},
outputs=[node.name])
@print_mapping_info
def LSTM(self, node):
x = self.graph.get_input_node(node, idx=0, copy=True)
input_weight = self.graph.get_input_node(node, idx=1, copy=True)
hidden_weight = self.graph.get_input_node(node, idx=2, copy=True)
input_nums = len(node.layer.input)
exist_input_nums = 3
have_bias = False
if input_nums > 3 and node.layer.input[3] != '':
bias = self.graph.get_input_node(node, idx=exist_input_nums, copy=True)
have_bias = True
exist_input_nums += 1
if input_nums > 4 and node.layer.input[4] != '':
sequence_lens = self.graph.get_input_node(node, idx=exist_input_nums, copy=True)
exist_input_nums += 1
if input_nums > 5 and node.layer.input[5] != '':
init_h = self.graph.get_input_node(node, idx=exist_input_nums, copy=True)
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": init_h.name},
outputs=[init_h.name],
shape=init_h.out_shapes[0]
)
exist_input_nums += 1
if input_nums > 6 and node.layer.input[6] != '':
init_c = self.graph.get_input_node(node, idx=exist_input_nums, copy=True)
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": init_c.name},
outputs=[init_c.name],
shape=init_c.out_shapes[0]
)
input_weight_np = _const_weight_or_none(input_weight)
_rename_or_remove_weight(self.weights, input_weight.name)
        hidden_size = node.get_attr('hidden_size', input_weight_np.shape[1] // 4)
input_size = input_weight_np.shape[2]
hidden_weight_np = _const_weight_or_none(hidden_weight)
_rename_or_remove_weight(self.weights, hidden_weight.name)
bias_np = _const_weight_or_none(bias)
_rename_or_remove_weight(self.weights, bias.name)
input_bias_np = bias_np[:, :4*hidden_size]
hidden_bias_np = bias_np[:, 4*hidden_size:]
# parameters order in paddle:lstm:
# 1. gate order in paddle is: input, forget, cell, output.
        # 2. gate order in onnx is: input, output, forget, cell.
def reform_weights(w, n, intervals):
slices = [w[:,x * n: y * n] for x, y in intervals]
return np.concatenate(slices, axis=1)
def transform_weight_with_bias(weights, n, intervals):
return [reform_weights(w, n, intervals) for w in weights]
reform_permutation = [(0, 1), (2, 4), (1, 2)]
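        # e.g. reform_permutation = [(0, 1), (2, 4), (1, 2)] reorders the ONNX gate blocks
        # [i, o, f, c] into Paddle's expected order [i, f, c, o]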
weights = transform_weight_with_bias(
[input_weight_np, hidden_weight_np, input_bias_np, hidden_bias_np],
hidden_size, reform_permutation)
op_name = name_generator("lstm", self.nn_name2id)
y_out = node.output(0)
yh_out = node.output(1)
yc_out = node.output(2)
direction = node.get_attr('direction', 'forward')
def generate_paddle_param_names(op_name, suffix=''):
param_names = []
param_names.extend(['{}.weight_ih_l0{}', '{}.weight_hh_l0{}'])
            if have_bias:
                param_names.extend(['{}.bias_ih_l0{}', '{}.bias_hh_l0{}'])
param_names = [x.format(op_name, suffix) for x in param_names]
return param_names
def assign_params(op_name, weights, weight_idx=0, suffix=''):
param_names = generate_paddle_param_names(op_name, suffix)
for param_name, weight in zip(param_names, weights):
self.weights[param_name] = weight[weight_idx]
if direction == 'backward':
raise Exception("LSTM support 'forward' or 'bidirectional', except '{}'.".format(direction))
else:
assign_params(op_name, weights)
if direction == 'bidirectional':
assign_params(op_name, weights, 1, '_reverse')
self.paddle_graph.add_layer(
'paddle.nn.LSTM',
inputs={'input': x.name, 'initial_states': (init_h.name, init_c.name)},
outputs=[op_name, y_out, yh_out, yc_out],
input_size=input_size,
hidden_size=hidden_size,
num_layers=1,
direction=string(direction),
time_major=True)
self.paddle_graph.add_layer(
'paddle.reshape',
inputs={"x": y_out},
outputs=[y_out],
shape=[0, 0, -1, hidden_size]
)
self.paddle_graph.add_layer(
'paddle.transpose',
inputs={"x": y_out},
outputs=[y_out],
perm=[0,2,1,3]
)
@print_mapping_info
def TopK(self, node):
val_x = self.graph.get_input_node(node, idx=0, copy=True)
val_k = self.graph.get_input_node(node, idx=1, copy=True)
layer_attrs = dict()
layer_attrs["axis"] = node.get_attr('axis', -1)
layer_attrs["largest"] = True if node.get_attr('largest', 1) == 1 else False
layer_attrs["sorted"] = True if node.get_attr('sorted', 1) == 1 else False
self.paddle_graph.add_layer(
"paddle.topk",
inputs={"x": val_x.name,
"k": val_k.name},
outputs=["{}_p{}".format(node.layer_name, 0), "{}_p{}".format(node.layer_name, 1)],
**layer_attrs)
@print_mapping_info
def LRN(self, node):
op_name = name_generator("lrn", self.nn_name2id)
output_name = node.name
layer_outputs = [op_name, output_name]
val_x = self.graph.get_input_node(node, idx=0, copy=True)
alpha = node.get_attr('alpha', 0.0001)
beta = node.get_attr('beta', 0.75)
bias = node.get_attr('bias', 1.0)
size = node.get_attr('size')
layer_attrs = {
'size': size,
'alpha': alpha,
'beta': beta,
'k': bias
}
self.paddle_graph.add_layer(
"custom_layer:LocalResponseNorm",
inputs={"x": val_x.name},
outputs=layer_outputs,
**layer_attrs)
| python |
#!/usr/bin/env python3
# authors: RocaPiedra
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
from __future__ import print_function
import subprocess
import glob
import os
import sys
import random
import time
import numpy as np
import cv2
import pygame
from pygame.locals import KMOD_CTRL
from pygame.locals import K_ESCAPE
from pygame.locals import K_q
# ==============================================================================
# -- Find CARLA module ---------------------------------------------------------
# ==============================================================================
try:
sys.path.append(glob.glob('../carla/dist/carla-*%d.%d-%s.egg' % (
sys.version_info.major,
sys.version_info.minor,
'win-amd64' if os.name == 'nt' else 'linux-x86_64'))[0])
except IndexError:
pass
# ==============================================================================
# -- Add PythonAPI for release mode --------------------------------------------
# ==============================================================================
try:
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + '/carla')
except IndexError:
pass
import carla
# Launch server
def serverLauncher():
bashCommand = "cd ../.. && CarlaUE4.exe"
    # 'cd' and '&&' are shell constructs, so the command must run through a shell
    process = subprocess.Popen(bashCommand, shell=True, stdout=subprocess.PIPE)
output, error = process.communicate() | python |
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import logging
from copy import deepcopy
from functools import partial
import pytest
import torch
from nncf.common.utils.logger import logger as nncf_logger
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.base_handler import SEHBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_depth import EDBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_kernel import EKBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elastic_width import EWBuilderStateNames
from nncf.experimental.torch.nas.bootstrapNAS.elasticity.elasticity_dim import ElasticityDim
from nncf.torch.model_creation import create_nncf_network
from tests.torch.helpers import BasicConvTestModel
from tests.torch.helpers import get_empty_config
from tests.torch.nas.creators import build_elastic_model_from_handler
from tests.torch.nas.descriptors import ElasticityDesc
from tests.torch.nas.helpers import do_conv2d
from tests.torch.nas.helpers import move_model_to_cuda_if_available
from tests.torch.nas.test_elastic_depth import BASIC_ELASTIC_DEPTH_PARAMS
from tests.torch.nas.test_elastic_depth import BasicTestSuperNet
from tests.torch.nas.test_elastic_depth import DepthBasicConvTestModel
from tests.torch.nas.test_elastic_kernel import BASIC_ELASTIC_KERNEL_PARAMS
from tests.torch.nas.test_elastic_width import BASIC_ELASTIC_WIDTH_PARAMS
from tests.torch.nas.test_elastic_width import TwoConvAddConvTestModel
from tests.torch.nas.test_elastic_width import TwoSequentialConvBNTestModel
@pytest.fixture()
def _nncf_caplog(caplog):
nncf_logger.propagate = True
yield caplog
nncf_logger.propagate = False
def ref_width_output_fn(model, x):
return model.get_minimal_subnet_output_without_reorg(x)
COMMON_WIDTH_STATE_DESCS = [
ElasticityDesc(
ElasticityDim.WIDTH,
model_cls=TwoConvAddConvTestModel,
params=BASIC_ELASTIC_WIDTH_PARAMS,
ref_state={
'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
'grouped_node_names_to_prune': [
['TwoConvAddConvTestModel/NNCFConv2d[conv1]/conv2d_0',
'TwoConvAddConvTestModel/NNCFConv2d[conv2]/conv2d_0']
]
},
ref_output_fn=ref_width_output_fn
),
ElasticityDesc(
ElasticityDim.WIDTH,
model_cls=TwoSequentialConvBNTestModel,
params=BASIC_ELASTIC_WIDTH_PARAMS,
ref_state={
'elasticity_params': BASIC_ELASTIC_WIDTH_PARAMS,
'grouped_node_names_to_prune': [
['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[0]/conv2d_0'],
['TwoSequentialConvBNTestModel/Sequential[all_layers]/NNCFConv2d[3]/conv2d_0']
]
},
ref_output_fn=ref_width_output_fn
),
]
def ref_kernel_output_fn(model, x):
conv = model.conv
ref_padding = 1
ref_weights = conv.weight[:, :, 1:4, 1:4]
return do_conv2d(conv, x, weight=ref_weights, padding=ref_padding)
COMMON_KERNEL_DESC = ElasticityDesc(
ElasticityDim.KERNEL,
model_cls=partial(BasicConvTestModel, 1, out_channels=1, kernel_size=5),
params=BASIC_ELASTIC_KERNEL_PARAMS,
ref_output_fn=ref_kernel_output_fn,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: ['BasicConvTestModel/NNCFConv2d[conv]/conv2d_0']
},
input_size=[1, 1, 5, 5]
)
COMMON_DEPTH_SUPERNET_DESC = ElasticityDesc(
ElasticityDim.DEPTH,
model_cls=BasicTestSuperNet,
params={
'mode': 'auto',
'min_block_size': 2
},
ref_state={
'elasticity_params': {
'allow_linear_combination': False,
'allow_nested_blocks': False,
'max_block_size': 50,
'min_block_size': 2,
'skipped_blocks': None
},
EDBuilderStateNames.SKIPPED_BLOCKS: [
{
'start_node_name': 'BasicTestSuperNet/NNCFConv2d[conv1]/conv2d_0',
'end_node_name': 'BasicTestSuperNet/__add___0'
}
],
EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: {0: [0]},
EDBuilderStateNames.OrdinalIds: [[1, 3]],
},
ref_search_space=[[0], []]
)
def ref_depth_output_fn(model, x):
model.set_skipped_layers(['conv1'])
return model(x)
COMMON_DEPTH_BASIC_DESC = ElasticityDesc(
ElasticityDim.DEPTH,
model_cls=DepthBasicConvTestModel,
params=BASIC_ELASTIC_DEPTH_PARAMS,
ref_output_fn=ref_depth_output_fn,
ref_search_space=[[0], []],
ref_state={
'elasticity_params': {
'allow_linear_combination': False,
'allow_nested_blocks': False,
'max_block_size': 50,
'min_block_size': 6,
'skipped_blocks': [['DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv0]/conv2d_0',
'DepthBasicConvTestModel/Sequential[branch_with_blocks]/NNCFConv2d[conv1]/conv2d_0']]
},
EDBuilderStateNames.SKIPPED_BLOCKS: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_state'],
EDBuilderStateNames.SKIPPED_BLOCKS_DEPENDENCIES: BASIC_ELASTIC_DEPTH_PARAMS['skipped_blocks_dependencies'],
EDBuilderStateNames.OrdinalIds: None,
}
)
LIST_STATE_AFTER_BUILD_DESCS = [
*COMMON_WIDTH_STATE_DESCS,
COMMON_DEPTH_SUPERNET_DESC,
COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_AFTER_BUILD_DESCS, ids=map(str, LIST_STATE_AFTER_BUILD_DESCS))
def test_can_get_builder_state_after_build(desc):
_, builder = desc.build_handler()
actual_state = builder.get_state()
assert actual_state == desc.ref_state
ELASTIC_WIDTH_PARAMS_BB = {'filter_importance': 'L2', **BASIC_ELASTIC_WIDTH_PARAMS}
LIST_STATE_BEFORE_BUILD_DESCS = [
ElasticityDesc(
ElasticityDim.WIDTH,
params=ELASTIC_WIDTH_PARAMS_BB,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: ELASTIC_WIDTH_PARAMS_BB,
EWBuilderStateNames.GROUPED_NODE_NAMES_TO_PRUNE: []
}
),
ElasticityDesc(
ElasticityDim.KERNEL,
params=BASIC_ELASTIC_KERNEL_PARAMS,
ref_state={
SEHBuilderStateNames.ELASTICITY_PARAMS: BASIC_ELASTIC_KERNEL_PARAMS,
EKBuilderStateNames.NODE_NAMES_TO_MAKE_ELASTIC: []
}
),
COMMON_DEPTH_BASIC_DESC
]
@pytest.mark.parametrize('desc', LIST_STATE_BEFORE_BUILD_DESCS, ids=map(str, LIST_STATE_BEFORE_BUILD_DESCS))
class TestBeforeBuild:
def test_can_get_builder_state_before_build(self, desc: ElasticityDesc):
builder = desc.create_builder()
actual_state = builder.get_state()
assert actual_state == desc.ref_state
def test_output_warning_when_state_overrides_params(self, desc: ElasticityDesc, _nncf_caplog):
old_builder = desc.create_builder_with_config({})
old_state = old_builder.get_state()
new_params = desc.params
new_builder = desc.create_builder_with_config(new_params)
new_builder.load_state(old_state)
record = next(iter(_nncf_caplog.records))
assert record.levelno == logging.WARNING
def test_no_warning_when_state_and_params_are_the_same(self, desc: ElasticityDesc, _nncf_caplog):
old_builder = desc.create_builder()
old_state = old_builder.get_state()
new_params = desc.params.copy()
new_builder = desc.create_builder_with_config(new_params)
new_builder.load_state(old_state)
assert not _nncf_caplog.records
LIST_LOAD_STATE_DESCS = [
COMMON_DEPTH_BASIC_DESC,
*COMMON_WIDTH_STATE_DESCS,
COMMON_KERNEL_DESC
]
@pytest.mark.parametrize('desc', LIST_LOAD_STATE_DESCS, ids=map(str, LIST_LOAD_STATE_DESCS))
def test_can_load_handler_state(desc: ElasticityDesc):
model = desc.model_cls()
move_model_to_cuda_if_available(model)
model_copy = deepcopy(model)
device = next(iter(model.parameters())).device
dummy_input = torch.ones(model.INPUT_SIZE).to(device)
input_size = desc.input_size
if not input_size:
input_size = model.INPUT_SIZE
config = get_empty_config(input_sample_sizes=input_size)
old_nncf_network = create_nncf_network(model, config)
old_builder = desc.create_builder()
old_handler = old_builder.build(old_nncf_network)
elastic_model = build_elastic_model_from_handler(old_nncf_network, old_handler)
old_handler.activate_minimum_subnet()
old_output = elastic_model(dummy_input)
ref_output = desc.ref_output_fn(model, dummy_input)
assert torch.allclose(old_output, ref_output)
new_nncf_network = create_nncf_network(model_copy, config)
builder_state = old_builder.get_state()
    # no config is needed to restore the builder state
new_builder = desc.create_builder_with_config({})
new_builder.load_state(builder_state)
new_handler = new_builder.build(new_nncf_network)
elastic_model = build_elastic_model_from_handler(new_nncf_network, new_handler)
new_handler.activate_minimum_subnet()
new_output = elastic_model(dummy_input)
assert torch.allclose(old_output, new_output)
| python |
# -*- coding: utf-8 -*-
"""CquenceR.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1iywElgfFio7e8VN1yZOemHm8hRV4IJHy
# Clone CquenceR and PatchBundle
"""
!git clone https://github.com/SecureThemAll/PatchBundle.git
!git clone https://github.com/SecureThemAll/CquenceR.git
"""# Install python 3.7"""
!apt-get install python3.7 python3.7-dev
"""# Initialize CquenceR
Version of OpenNMT needs to be under 2.0.0, or some dependencies will not work later, such as OpenNMT's preprocess script and other inputs. Also, the Python version check in the init script might fail; just comment it out.
"""
! CquenceR/init.sh
"""# Install python 3.7 dependencies"""
!curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py
!python3.7 get-pip.py
!python3.7 -m pip install pandas
!python3.7 -m pip install sklearn
!python3.7 -m pip install python-Levenshtein
!python3.7 -m pip install PyGithub
!python3.7 -m pip install matplotlib
!python3.7 -m pip install ipykernel
# !python3.7 -m pip freeze > requirements.txt
"""# Create new dataset"""
# Commented out IPython magic to ensure Python compatibility.
# %cd PatchBundle/tool/
!echo 'asd' > token.txt
!python3.7 ./PatchBundle.py filter --datasets nvd secbench mozilla secretpatch msr20 -m -v
# %cd ../..
"""# Preprocess Dataset Only into Source and Target Sets for Stats"""
!python3.7 ./CquenceR/CquenceR.py preprocess -op /tmp/dataset --no_truncation --no_onmt -v
"""# Plot Stats for Dataset"""
!python3.7 CquenceR/CquenceR.py stats -v -sp /tmp/dataset --save /content/CquenceR/plots
"""# Clean Data"""
!python3.7 ./CquenceR/CquenceR.py clean -v
"""# Preprocess Dataset for Training and Testing """
!python3.7 ./CquenceR/CquenceR.py preprocess -s train_val_test -v
!python3.7 -m pip install --upgrade torchvision==0.6.0
# Commented out IPython magic to ensure Python compatibility.
# %cd CquenceR
!git pull
# %cd ..
!which onmt_preprocess
"""# Train with GPU and Plot results (train_plots is the output folder)
If you cannot run with the GPU, update torch. To do that, just uncomment the next cell and run it.
"""
#!python3.7 -m pip install torch==1.6.0+cu101 torchvision==0.7.0+cu101 -f https://download.pytorch.org/whl/torch_stable.html
!python3.7 CquenceR/CquenceR.py train -v --plot --gpu
"""# Test and Plot Results (test_plots is the output folder)"""
!python3.7 CquenceR/CquenceR.py test -v --plot
import torch
# setting device on GPU if available, else CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print('Using device:', device)
print()
#Additional Info when using cuda
if device.type == 'cuda':
print(torch.cuda.get_device_name(0))
print('Memory Usage:')
print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
print('Cached: ', round(torch.cuda.memory_reserved(0)/1024**3,1), 'GB')
| python |
"""Data Analysis
================
"""
from os.path import exists
import nixio as nix
import numpy as np
import numpy.linalg
from typing import Dict, List, Tuple, Type, Union, Set, Any, Optional, Iterator
import pandas as pd
from collections import defaultdict
from kivy_garden.collider import Collide2DPoly, CollideEllipse
from kivy_garden.painter import PaintCircle, PaintEllipse, PaintPolygon, \
PaintFreeformPolygon, PaintPoint, PaintShape
from glitter2.storage.data_file import DataFile
__all__ = (
'default_value', 'not_cached', 'AnalysisFactory', 'AnalysisSpec',
'FileDataAnalysis', 'AnalysisChannel', 'TemporalAnalysisChannel',
'EventAnalysisChannel', 'PosAnalysisChannel', 'ZoneAnalysisChannel',
'get_variable_type_optional')
def _sort_dict(d: dict) -> List[tuple]:
return list(sorted(d.items(), key=lambda x: x[0]))
def _get_flat_types(type_hint: Type) -> Tuple[Type]:
if hasattr(type_hint, '__origin__') and type_hint.__origin__ is Union:
return type_hint.__args__
return type_hint,
def _filter_default(type_hint: Type) -> List[Type]:
types = _get_flat_types(type_hint)
return [t for t in types if t != DefaultType]
known_arg_types = {
int, float, str, List[int], List[float], List[str], type(None)}
known_ret_types = {
int, float, str, List[int], List[float], List[str], Tuple[int],
Tuple[float], Tuple[str]}
def is_type_unknown(known_types, query):
return set(query) - known_types
def get_variable_type_optional(type_hint: List[Type]) -> Tuple[Type, bool]:
if len(type_hint) == 1:
return type_hint[0], False
if type(None) not in type_hint:
raise ValueError('Expected to contain none type if more than one type')
type_hint.remove(type(None))
if len(type_hint) == 1:
return type_hint[0], True
raise ValueError('Expected only one type')
class default_value(int):
pass
DefaultType = Type[default_value]
DefaultFloat = Union[float, DefaultType]
DefaultStr = Union[str, DefaultType]
not_cached = object()
class AnalysisFactory:
analysis_classes: Set[Type['AnalysisChannel']] = set()
by_name: Dict[str, Type['AnalysisChannel']] = {}
@classmethod
def register_analysis_class(cls, analysis_class: Type['AnalysisChannel']):
cls.analysis_classes.add(analysis_class)
name = f'{analysis_class.__module__}\0{analysis_class.__qualname__}'
cls.by_name[name] = analysis_class
@classmethod
def get_class_from_method(
cls, method) -> Tuple[Type['AnalysisChannel'], str]:
mod = method.__module__
cls_name, method_name = method.__qualname__.rsplit('.', maxsplit=1)
name = f'{mod}\0{cls_name}'
if name not in cls.by_name:
raise ValueError(
f'Unrecognized class {cls_name} of method {method}')
return cls.by_name[name], method_name
@classmethod
def get_classes_from_type(
cls, analysis_type: str) -> List[Type['AnalysisChannel']]:
return [c for c in cls.analysis_classes
if c.analysis_type == analysis_type]
@classmethod
def get_variables(
cls, global_vars=True, local_vars=True
) -> Dict[
str,
Tuple[List[Type['AnalysisChannel']], str, Tuple[Type, bool], Any]]:
variables = {}
all_variables = {}
for c in cls.analysis_classes:
special_args = c.spec_get_special_arg_type()
for key, (doc, tp) in c.spec_get_compute_variables().items():
if key in all_variables:
doc_, tp_ = all_variables[key]
# we allow empty doc, in which case non-empty is used
if doc and doc_ and doc != doc_ or tp != tp_:
raise ValueError(
f'Variable "{key}" of class {c} was previously '
f'defined with type "{tp_}" and doc "{doc_}", but '
f'we now got type "{tp}" and doc "{doc}"')
if doc:
all_variables[key] = doc, tp
else:
all_variables[key] = doc, tp
is_global = c.spec_get_is_global_arg(key)
if is_global and global_vars or not is_global and local_vars:
if key not in variables:
special_arg = special_args.get(key, None)
variables[key] = [c], doc, tp, special_arg
else:
classes, doc_, tp_, special_arg = variables[key]
classes.append(c)
# just in case previously we had empty doc
if doc:
variables[key] = classes, doc, tp, special_arg
return variables
@classmethod
def _get_methods_from_type(
cls, analysis_type: str, creating_methods
) -> Dict[str, Tuple[Type['AnalysisChannel'], str, Type]]:
methods = {}
for c in cls.analysis_classes:
if c.analysis_type != analysis_type:
continue
special_type = c.spec_get_channel_creating_methods()
for key, (doc, tp) in c.spec_get_compute_methods().items():
if creating_methods:
if key in special_type:
methods[key] = c, doc, tp
else:
if key not in special_type:
methods[key] = c, doc, tp
return methods
@classmethod
def get_channel_creating_methods_from_type(
cls, analysis_type: str
) -> Dict[str, Tuple[Type['AnalysisChannel'], str, Type]]:
return cls._get_methods_from_type(analysis_type, True)
@classmethod
def get_compute_methods_from_type(
cls, analysis_type: str
) -> Dict[str, Tuple[Type['AnalysisChannel'], str, Type]]:
return cls._get_methods_from_type(analysis_type, False)
@classmethod
def get_channel_creating_method_spec(
cls, analysis_cls: Type['AnalysisChannel'], name: str
) -> Tuple[str, Type, str, Dict[str, Tuple[Tuple[Type, bool], str]]]:
create_type = analysis_cls.spec_get_channel_creating_methods()[name]
doc, ret_type = analysis_cls.spec_get_compute_methods()[name]
special_args = analysis_cls.spec_get_special_arg_type()
variables = {}
for var, (_, tp) in analysis_cls.spec_get_compute_method_args(
name).items():
variables[var] = tp, special_args.get(var, None)
return doc, ret_type, create_type, variables
@classmethod
def get_compute_method_spec(
cls, analysis_cls: Type['AnalysisChannel'], name: str
) -> Tuple[str, Type, Dict[str, Tuple[Tuple[Type, bool], str]]]:
doc, ret_type = analysis_cls.spec_get_compute_methods()[name]
special_args = analysis_cls.spec_get_special_arg_type()
variables = {}
for var, (_, tp) in analysis_cls.spec_get_compute_method_args(
name).items():
variables[var] = tp, special_args.get(var, None)
return doc, ret_type, variables
class AnalysisSpec:
_default_args: Dict[Type['AnalysisChannel'], Dict[str, Any]] = {}
_new_channels: List[
Tuple[str, str, Type['AnalysisChannel'], str, tuple, dict]] = []
_computations: List[
Tuple[Optional[List[str]], str, Type['AnalysisChannel'], str, tuple,
dict]] = []
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._default_args = defaultdict(dict)
self._new_channels = []
self._computations = []
def add_arg_default(
self, cls: Type['AnalysisChannel'], name: str, value: Any):
self._default_args[cls][name] = value
def add_new_channel_computation(
self, channel: str, new_channel_name: str, compute_method,
*args, **kwargs):
cls, method_name = AnalysisFactory.get_class_from_method(
compute_method)
self._new_channels.append(
(channel, new_channel_name, cls, method_name, args, kwargs))
def add_computation(
self, channels: List[str], compute_method, *args,
compute_key: str = '', **kwargs):
cls, method_name = AnalysisFactory.get_class_from_method(
compute_method)
self._computations.append(
(channels, compute_key, cls, method_name, args, kwargs))
def compute_create_channels(self, analysis_object: 'FileDataAnalysis'):
default_args = self._default_args
cls_cache = {}
for channel, new_name, cls, method_name, args, kwargs in \
self._new_channels:
cache_key = cls, channel
if cache_key not in cls_cache:
obj = cls_cache[cache_key] = cls(
name=channel, analysis_object=analysis_object)
for name, value in default_args.get(cls, {}).items():
setattr(obj, name, value)
analysis_channel = cls_cache[cache_key]
brief_name = method_name
if brief_name.startswith('compute_'):
brief_name = brief_name[8:]
# get the type of channel created
create_map = \
analysis_channel.spec_get_channel_creating_methods()
ret_type = create_map[brief_name]
f = getattr(analysis_channel, method_name)
res = f(*args, **kwargs)
# add the channel to the data analysis object
add = getattr(analysis_object, f'add_{ret_type}_channel')
add(new_name, *res)
def compute(self, analysis_object: 'FileDataAnalysis') -> list:
output = []
default_args = self._default_args
cls_cache = {}
for channels, compute_key, cls, method_name, args, kwargs in \
self._computations:
if not channels:
if cls.analysis_type == 'event':
channels = analysis_object.event_channels_data.keys()
elif cls.analysis_type == 'pos':
channels = analysis_object.pos_channels_data.keys()
elif cls.analysis_type == 'zone':
channels = analysis_object.zone_channels_shapes.keys()
for channel in channels:
cache_key = cls, channel
if cache_key not in cls_cache:
obj = cls_cache[cache_key] = cls(
name=channel, analysis_object=analysis_object)
for name, value in default_args.get(cls, {}).items():
setattr(obj, name, value)
analysis_channel = cls_cache[cache_key]
brief_name = method_name
if brief_name.startswith('compute_'):
brief_name = brief_name[8:]
f = getattr(analysis_channel, method_name)
res = f(*args, **kwargs)
output.append(
(analysis_channel.analysis_type, channel, brief_name,
compute_key, res))
return output
def clear_arg_defaults(self):
self._default_args = defaultdict(dict)
def clear_new_channel_computation(self):
self._new_channels = []
def clear_computation(self):
self._computations = []
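# A minimal usage sketch (the file name is illustrative, and it assumes the analysis
# classes below are registered with AnalysisFactory): register a computation for every
# event channel and run it on an opened data file.
#
#     spec = AnalysisSpec()
#     spec.add_computation([], EventAnalysisChannel.compute_event_count)
#     with FileDataAnalysis('session.h5') as analysis:
#         analysis.load_file_data()
#         rows = analysis.compute_data_summary(spec)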
class FileDataAnalysis:
filename: str = ''
data_file: DataFile = None
nix_file: Optional[nix.File] = None
metadata: Dict = {}
video_metadata: Dict = {}
timestamps: np.ndarray = None
event_channels_data: Dict[str, Optional[np.ndarray]] = {}
pos_channels_data: Dict[str, Optional[np.ndarray]] = {}
zone_channels_shapes: Dict[str, Optional[PaintShape]] = {}
channels_metadata: Dict[str, dict] = {}
normalized_names_map: Dict[str, str] = {}
missed_timestamps = False
missing_timestamp_values = []
pixels_per_meter = 0
def __init__(self, filename, **kwargs):
super(FileDataAnalysis, self).__init__(**kwargs)
self.filename = filename
self.event_channels_data = {}
self.pos_channels_data = {}
self.zone_channels_shapes = {}
self.channels_metadata = {}
self.normalized_names_map = {}
def flatten_data(self, data_arrays) -> np.ndarray:
ordered_indices = self.data_file.timestamp_intervals_ordered_keys
if len(data_arrays) > 1:
data = [data_arrays[i] for i in ordered_indices]
return np.concatenate(data)
else:
return np.array(data_arrays[0])
def __enter__(self):
self.open_data_file()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close_data_file()
def open_data_file(self):
self.nix_file = nix.File.open(self.filename, nix.FileMode.ReadOnly)
self.data_file = DataFile(nix_file=self.nix_file)
def load_file_metadata(self, channels: Set[str] = None):
data_file = self.data_file
data_file.open_file()
self.video_metadata = data_file.video_metadata_dict
self.metadata = metadata = {}
metadata['saw_all_timestamps'] = data_file.saw_all_timestamps
metadata['glitter2_version'] = data_file.glitter2_version
metadata['ffpyplayer_version'] = data_file.ffpyplayer_version
metadata['pixels_per_meter'] = data_file.pixels_per_meter
self.pixels_per_meter = data_file.pixels_per_meter
self.missed_timestamps = not data_file.saw_all_timestamps
if self.missed_timestamps:
data_arrays_order = data_file.timestamp_intervals_ordered_keys
data = [data_file.timestamps_arrays[i] for i in data_arrays_order]
if not data:
raise ValueError('No data found in the file')
missing = [float(item[-1]) for item in data[:-1]]
if not data_file._saw_first_timestamp:
missing.insert(0, float(data[0][0]))
if not data_file._saw_last_timestamp:
missing.append(float(data[-1][-1]))
self.missing_timestamp_values = missing
else:
self.missing_timestamp_values = []
metadata = self.channels_metadata
normalized_names_map = self.normalized_names_map
for channels_data, src_channels in (
(self.event_channels_data, data_file.event_channels),
(self.pos_channels_data, data_file.pos_channels),
(self.zone_channels_shapes, data_file.zone_channels)):
for _, channel in _sort_dict(src_channels):
m = channel.channel_config_dict
name = m['name']
if channels and name not in channels:
continue
normalized_names_map[name.lower()] = name
metadata[name] = m
channels_data[name] = None
def load_file_data(self, channels: Set[str] = None):
self.load_file_metadata(channels)
data_file = self.data_file
self.timestamps = self.flatten_data(data_file.timestamps_arrays)
zone_channels_shapes = self.zone_channels_shapes
shape_cls_map = {
'PaintCircle': PaintCircle, 'PaintEllipse': PaintEllipse,
'PaintPolygon': PaintPolygon,
'PaintFreeformPolygon': PaintFreeformPolygon,
'PaintPoint': PaintPoint
}
for channels_data, src_channels in (
(self.event_channels_data, data_file.event_channels),
(self.pos_channels_data, data_file.pos_channels),
(None, data_file.zone_channels)):
for _, channel in _sort_dict(src_channels):
m = channel.channel_config_dict
name = m['name']
if channels and name not in channels:
continue
if channels_data is None:
state = m['shape_config']
cls = shape_cls_map[state['cls']]
shape = cls.create_shape_from_state(state)
zone_channels_shapes[name] = shape
else:
channels_data[name] = self.flatten_data(
channel.data_arrays)
def close_data_file(self):
if self.nix_file is None:
return
self.nix_file.close()
self.nix_file = None
def compute_data_summary(self, spec: AnalysisSpec) -> list:
# export_computed_statistics provides the header
rows = []
filename = self.filename
video_head = self.video_metadata['filename_head']
video_tail = self.video_metadata['filename_tail']
missed_timestamps = self.missed_timestamps
row = [filename, video_head, video_tail, missed_timestamps]
# first create all new data channels
spec.compute_create_channels(self)
# now compute any stats
for stat in spec.compute(self):
rows.append(row + list(stat))
return rows
@staticmethod
def export_computed_data_summary(filename: str, data: list):
"""Adds .xlsx to the name.
:param filename:
:param data:
:return:
"""
if not filename.endswith('.xlsx'):
filename += '.xlsx'
if exists(filename):
raise ValueError('"{}" already exists'.format(filename))
excel_writer = pd.ExcelWriter(filename, engine='xlsxwriter')
header = [
'data file', 'video path', 'video filename', 'missed timestamps',
'channel_type', 'channel', 'measure', 'measure_key', 'value']
df = pd.DataFrame(data, columns=header)
df.to_excel(excel_writer, sheet_name='statistics', index=False)
excel_writer.save()
def export_raw_data_to_excel(self, filename, dump_zone_collider=False):
if not filename.endswith('.xlsx'):
filename += '.xlsx'
if exists(filename):
raise ValueError('"{}" already exists'.format(filename))
excel_writer = pd.ExcelWriter(filename, engine='xlsxwriter')
if self.missed_timestamps:
# if we have timestamp discontinuities, indicate it
data = [
'Not all video frames were watched - timestamps are missing']
if self.missing_timestamp_values:
data.append('timestamps around where frames are missing:')
data.extend(self.missing_timestamp_values)
df = pd.DataFrame(data)
df.to_excel(
excel_writer, sheet_name='missing_timestamps', index=False)
file_metadata = dict(self.metadata)
file_metadata.update(self.video_metadata)
file_metadata = _sort_dict(file_metadata)
df = pd.DataFrame(file_metadata, columns=['Property', 'Value'])
df.to_excel(excel_writer, sheet_name='file_metadata', index=False)
# add sheet for all the channels metadata
metadata = []
channels_metadata = self.channels_metadata
for channel_name in self.event_channels_data:
metadata.append(('event_channel', channel_name))
metadata.extend(_sort_dict(channels_metadata[channel_name]))
for channel_name in self.pos_channels_data:
metadata.append(('pos_channel', channel_name))
metadata.extend(_sort_dict(channels_metadata[channel_name]))
for channel_name in self.zone_channels_shapes:
metadata.append(('zone_channel', channel_name))
# shape info is saved in the zone channels sheet
d = dict(channels_metadata[channel_name])
d.pop('shape_config', None)
metadata.extend(_sort_dict(d))
df = pd.DataFrame(metadata, columns=['Property', 'Value'])
df.to_excel(excel_writer, sheet_name='channels_metadata', index=False)
# add timestamps
df = pd.DataFrame(self.timestamps, columns=['timestamp'])
df.to_excel(excel_writer, sheet_name='timestamps', index=False)
# add event channels data
columns_header = []
columns = []
for channel_name, data in self.event_channels_data.items():
columns_header.append(channel_name)
columns.append(data)
df = pd.DataFrame(columns).T
df.columns = columns_header
df.to_excel(excel_writer, sheet_name='event_channels', index=False)
# add pos channels data
colliders = {}
if dump_zone_collider:
for channel_name, shape in self.zone_channels_shapes.items():
colliders[channel_name] = \
ZoneAnalysisChannel.collider_from_shape(shape)
columns_header = []
columns = []
for channel_name, data in self.pos_channels_data.items():
columns_header.append(f'{channel_name}:x')
columns_header.append(f'{channel_name}:y')
columns.append(data[:, 0])
columns.append(data[:, 1])
for zone_name, collider in colliders.items():
valid_points = data[:, 0] != -1
columns_header.append(f'{channel_name}:--:{zone_name}')
valid_points[valid_points] = collider.collide_points(
data[valid_points, :].tolist())
columns.append(valid_points)
df = pd.DataFrame(columns).T
df.columns = columns_header
df.to_excel(excel_writer, sheet_name='pos_channels', index=False)
# add zone channels metadata
shape_config = []
for channel_name in self.zone_channels_shapes:
shape_config.append(('zone_channel', channel_name))
# only save shape info
d = channels_metadata[channel_name].get('shape_config', {})
shape_config.extend(_sort_dict(d))
df = pd.DataFrame(shape_config, columns=['Property', 'Value'])
df.to_excel(excel_writer, sheet_name='zone_channels', index=False)
excel_writer.save()
def add_event_channel(self, name: str, data: np.ndarray, metadata: dict):
if name in self.channels_metadata:
raise ValueError(f'name "{name}" already exists as a channel')
d = {'name': name}
d.update(metadata)
self.channels_metadata[name] = d
self.event_channels_data[name] = data
self.normalized_names_map[name.lower()] = name
def add_pos_channel(self, name: str, data: np.ndarray, metadata: dict):
if name in self.channels_metadata:
raise ValueError(f'name "{name}" already exists as a channel')
d = {'name': name}
d.update(metadata)
self.channels_metadata[name] = d
self.pos_channels_data[name] = data
self.normalized_names_map[name.lower()] = name
def add_zone_channel(self, name: str, shape: PaintShape, metadata: dict):
if name in self.channels_metadata:
raise ValueError(f'name "{name}" already exists as a channel')
d = {'name': name, 'shape_config': shape.get_state()}
d.update(metadata)
self.channels_metadata[name] = d
self.zone_channels_shapes[name] = shape
self.normalized_names_map[name.lower()] = name
def normalized_name(self, name):
normalized_name = name.lower()
names = self.normalized_names_map
if normalized_name not in names:
raise KeyError(f'No channel named "{name}"')
return names[normalized_name]
class AnalysisChannel:
"""compute_variables and compute_methods are per-class."""
analysis_type: str = ''
analysis_object: FileDataAnalysis = None
name: str = ''
metadata: Dict = {}
_compute_variables_: Dict[str, str] = {}
"""Dict of variables names to their brief docs shown to the user.
"""
_compute_variables_cache: Dict[str, Tuple[str, Tuple[Type, bool]]] = {}
_compute_methods_: Dict[str, str] = {}
"""Dict of compute method names to their brief docs shown to the user.
The keys must exist as methods prefixed with ``compute_``.
"""
_compute_methods_cache: Dict[str, Tuple[str, Type]] = {}
_channel_creating_methods_: Dict[str, str] = {}
"""Dict for each method that returns a new channel, mapping to the type
of channel created.
"""
_special_arg_type_: Dict[str, str] = {}
"""Dict for each arg that accepts a special type, indicating what the arg
means. E.g. whether it's a event channel name etc.
"""
_compute_method_args_cache: Dict[
str, Dict[str, Tuple[str, Tuple[Type, bool]]]] = {}
def __init__(self, name: str, analysis_object: FileDataAnalysis, **kwargs):
self.analysis_object = analysis_object
self.name = name
self.metadata = analysis_object.channels_metadata[
analysis_object.normalized_name(name)]
def normalized_name(self, name):
return self.analysis_object.normalized_name(name)
@classmethod
def spec_get_compute_variables(
cls) -> Dict[str, Tuple[str, Tuple[Type, bool]]]:
if cls.__dict__.get('_compute_variables_cache', None) is not None:
return cls._compute_variables_cache
cls._compute_variables_cache = variables = {}
if '_compute_variables_' not in cls.__dict__:
return variables
annotations = cls.__annotations__
for name, value in cls._compute_variables_.items():
if name not in annotations:
raise ValueError(
f'No type annotation found for variable {name} of {cls}')
annotated_type = _filter_default(annotations[name])
unknown = is_type_unknown(known_arg_types, annotated_type)
special_arg_type = cls.spec_get_special_arg_type()
if name not in special_arg_type and unknown:
raise ValueError(
f'Type {unknown} for {name} of {cls} is not recognized')
variables[name] = value, get_variable_type_optional(annotated_type)
return variables
@classmethod
def spec_get_compute_methods(cls) -> Dict[str, Tuple[str, Type]]:
if cls.__dict__.get('_compute_methods_cache', None) is not None:
return cls._compute_methods_cache
cls._compute_methods_cache = methods = {}
if '_compute_methods_' not in cls.__dict__:
return methods
for name, value in cls._compute_methods_.items():
annotations = getattr(
getattr(cls, f'compute_{name}'), '__annotations__', {})
if 'return' not in annotations:
raise ValueError(
f'No return type annotation found for {name} of {cls}')
annotated_type = _filter_default(annotations['return'])
unknown = is_type_unknown(known_ret_types, annotated_type)
channel_methods = cls.spec_get_channel_creating_methods()
# if it doesn't create a channel and we don't recognize the type...
if name not in channel_methods and unknown:
raise ValueError(
f'Return type {unknown} for {name} of {cls} is not '
f'a understood type')
methods[name] = value, annotated_type
return methods
@classmethod
def spec_get_is_global_arg(cls, name: str) -> bool:
"""Returns whether the argument is a global argument for all methods
of the class (i.e. it was defined as a class variable), or it is
method specific with no global default value.
"""
return name in cls.__dict__
@classmethod
def spec_get_channel_creating_methods(cls) -> Dict[str, str]:
if '_channel_creating_methods_' not in cls.__dict__:
return {}
return cls._channel_creating_methods_
@classmethod
def spec_get_special_arg_type(cls) -> Dict[str, str]:
if '_special_arg_type_' not in cls.__dict__:
return {}
return cls._special_arg_type_
@classmethod
def spec_get_compute_method_args(
cls, name) -> Dict[str, Tuple[str, Tuple[Type, bool]]]:
if '_compute_method_args_cache' not in cls.__dict__:
cls._compute_method_args_cache = {}
cache = cls._compute_method_args_cache
if name not in cache:
variables = cache[name] = {}
known_variables = cls.spec_get_compute_variables()
f = getattr(cls, f'compute_{name}')
annotations = getattr(f, '__annotations__', {})
for var_name, var_type in annotations.items():
if var_name in {'return', 'self'}:
continue
if var_name not in known_variables:
raise ValueError(
f'Variable {var_name} of method {name} is not '
f'documented in the _compute_variables_ dictionary')
doc, (var_type_, optional_) = known_variables[var_name]
var_type, optional = get_variable_type_optional(
_filter_default(var_type))
if var_type != var_type_:
raise ValueError(
f'Variable {var_name} of method {name} was documented '
f'as both {var_type} and {var_type_}')
variables[var_name] = doc, (var_type, optional)
return cache[name]
def get_args(self, **kwargs) -> list:
res = []
for name, value in kwargs.items():
if value is not default_value:
res.append(value)
else:
res.append(getattr(self, name, None))
return res
def get_cache(self, prop: str, **kwargs) -> Tuple:
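        # resolve default args, then return (cached value, resolved args); the first
        # element is ``not_cached`` if nothing is cached yet or the cached args differ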
args = tuple(self.get_args(**kwargs))
prop_val = getattr(self, prop)
if prop_val is not None and prop_val[1] == args:
return prop_val[0], args
return not_cached, args
def get_cache_these_args(self, prop: str, **kwargs) -> Any:
args = tuple(kwargs.values())
prop_val = getattr(self, prop)
if prop_val is not None and prop_val[1] == args:
return prop_val[0]
return not_cached
class TemporalAnalysisChannel(AnalysisChannel):
data: np.ndarray = None
timestamps: np.ndarray = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.timestamps = self.analysis_object.timestamps
norm = self.analysis_object.normalized_name
self.data = getattr(
self.analysis_object,
f'{self.analysis_type}_channels_data')[norm(self.name)]
@staticmethod
def _get_active_intervals(
data: np.ndarray, timestamps: np.ndarray,
start: Optional[float] = None,
end: Optional[float] = None
) -> Dict[str, np.ndarray]:
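        # Convert a boolean ON/OFF mask into [start, end] timestamp pairs: a 0 -> 1
        # transition opens an interval and a 1 -> 0 transition closes it. For example,
        # mask [0, 1, 1, 0, 1] with timestamps [0, 1, 2, 3, 4] yields intervals
        # [[1, 3], [4, 4]].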
s = 0
if start is not None:
s = np.searchsorted(timestamps, start, side='left')
e = timestamps.shape[0]
if end is not None:
            e = np.searchsorted(timestamps, end, side='right')
data = data[s:e]
timestamps = timestamps[s:e]
if data.shape[0] <= 1:
intervals = np.empty((0, 2))
indices = np.arange(0)
return {'intervals': intervals, 'timestamps': timestamps,
'mask': data, 'indices': indices, 'start': s, 'end': e}
arange = np.arange(data.shape[0])
signed_data = data.astype(np.int8)
diff = signed_data[1:] - signed_data[:-1]
pos_diff = diff == 1
starts = timestamps[1:][pos_diff]
starts_indices = arange[1:][pos_diff]
neg_diff = diff == -1
ends = timestamps[1:][neg_diff]
ends_indices = arange[1:][neg_diff]
        # do we need the first index as the start (if array starts with 1)
# # of intervals is same as number of start positions
n = starts.shape[0]
if data[0] == 1:
n += 1
intervals = np.empty((n, 2))
indices = np.empty((n, 2), dtype=arange.dtype)
        # the mask starts ON, so the first interval starts at the first timestamp
if data[0] == 1:
intervals[1:, 0] = starts
intervals[0, 0] = timestamps[0]
indices[1:, 0] = starts_indices
indices[0, 0] = 0
else:
intervals[:, 0] = starts
indices[:, 0] = starts_indices
if data[-1] == 1:
intervals[:-1, 1] = ends
intervals[-1, 1] = timestamps[-1]
indices[:-1, 1] = ends_indices
indices[-1, 1] = arange[-1]
else:
intervals[:, 1] = ends
indices[:, 1] = ends_indices
return {'intervals': intervals, 'timestamps': timestamps,
'mask': data, 'indices': indices, 'start': s, 'end': e}
@staticmethod
def _compute_active_duration(intervals: np.ndarray) -> float:
return np.sum(
intervals[:, 1] - intervals[:, 0]) if intervals.shape[0] else 0.
@staticmethod
def _compute_delay_to_first(
timestamps: np.ndarray, intervals: np.ndarray) -> float:
return intervals[0, 0] - timestamps[0] if intervals.shape[0] else -1.
@staticmethod
def _compute_scored_duration(timestamps: np.ndarray) -> float:
return timestamps[-1] - timestamps[0] if timestamps.shape[0] else 0.
@staticmethod
def _compute_event_count(intervals: np.ndarray) -> int:
return intervals.shape[0]
class EventAnalysisChannel(TemporalAnalysisChannel):
analysis_type: str = 'event'
_active_duration: Tuple[float, Tuple] = None
_delay_to_first: Tuple[float, Tuple] = None
_scored_duration: Tuple[float, Tuple] = None
_event_count: Tuple[int, Tuple] = None
_active_interval: Tuple[Dict[str, np.ndarray], Tuple] = None
start: Optional[float] = None
end: Optional[float] = None
event_channels: List[str]
_compute_variables_: Dict[str, str] = {
'start': '',
'end': '',
'event_channels': '',
}
_compute_methods_: Dict[str, str] = {
'active_duration':
'The total duration, in seconds, that the event was ON/active',
'delay_to_first':
'The delay, relative to the start of the video, of the first '
'occurrence of the event',
'scored_duration':
'The duration of the video or the section that was analyzed, if '
'only a interval of the data is exported',
'event_count': 'The number of times the event occurred',
'combine_events_and':
'Creates a new event channel from the listed event channels, '
'where the new channel is active if "all" of the listed channels '
'are active',
'combine_events_or':
'Creates a new event channel from the listed event channels, '
'where the new channel is active if "any" of the listed channels '
'are active',
'event_intervals':
'The list of timestamps of the start and end of each active '
'interval. Given as [s1, e1, s2, e2, ...], where s and e indicate '
'the start and end timestamps of the intervals, if any',
}
_channel_creating_methods_: Dict[str, str] = {
'combine_events_and': 'event',
'combine_events_or': 'event',
}
_special_arg_type_: Dict[str, str] = {'event_channels': 'event'}
def get_active_intervals(
self, start: Optional[float] = None,
end: Optional[float] = None) -> Dict[str, np.ndarray]:
val = self.get_cache_these_args(
'_active_interval', start=start, end=end)
if val is not not_cached:
return val
intervals = self._get_active_intervals(
self.data, self.timestamps, start=start, end=end)
self._active_interval = intervals, (start, end)
return intervals
def compute_active_duration(
self, start: Optional[DefaultFloat] = default_value,
end: Optional[DefaultFloat] = default_value) -> float:
val, (start, end) = self.get_cache(
'_active_duration', start=start, end=end)
if val is not not_cached:
return val
intervals = self.get_active_intervals(start, end)['intervals']
val = self._compute_active_duration(intervals)
self._active_duration = val, (start, end)
return val
def compute_delay_to_first(
self, start: Optional[DefaultFloat] = default_value,
end: Optional[DefaultFloat] = default_value) -> float:
val, (start, end) = self.get_cache(
'_delay_to_first', start=start, end=end)
if val is not not_cached:
return val
active_intervals = self.get_active_intervals(start, end)
val = self._compute_delay_to_first(
active_intervals['timestamps'], active_intervals['intervals'])
self._delay_to_first = val, (start, end)
return val
def compute_scored_duration(
self, start: Optional[DefaultFloat] = default_value,
end: Optional[DefaultFloat] = default_value) -> float:
val, (start, end) = self.get_cache(
'_scored_duration', start=start, end=end)
if val is not not_cached:
return val
timestamps = self.get_active_intervals(start, end)['timestamps']
val = self._compute_scored_duration(timestamps)
self._scored_duration = val, (start, end)
return val
def compute_event_count(
self, start: Optional[DefaultFloat] = default_value,
end: Optional[DefaultFloat] = default_value) -> int:
val, (start, end) = self.get_cache(
'_event_count', start=start, end=end)
if val is not not_cached:
return val
intervals = self.get_active_intervals(start, end)['intervals']
val = self._compute_event_count(intervals)
self._event_count = val, (start, end)
return val
def compute_event_intervals(
self, start: Optional[DefaultFloat] = default_value,
end: Optional[DefaultFloat] = default_value) -> List[float]:
start, end = self.get_args(start=start, end=end)
intervals = self.get_active_intervals(start, end)['intervals']
items = np.reshape(intervals, intervals.shape[0] * 2)
return items.tolist()
def compute_combine_events_and(
self, event_channels: List[str]) -> Tuple[np.ndarray, dict]:
channels_data = self.analysis_object.event_channels_data
norm = self.analysis_object.normalized_name
arr = [channels_data[norm(name)] for name in event_channels]
arr.append(self.data)
return np.logical_and.reduce(arr, axis=0), {}
def compute_combine_events_or(
self, event_channels: List[str]) -> Tuple[np.ndarray, dict]:
channels_data = self.analysis_object.event_channels_data
norm = self.analysis_object.normalized_name
arr = [channels_data[norm(name)] for name in event_channels]
arr.append(self.data)
return np.logical_or.reduce(arr, axis=0), {}
class PosAnalysisChannel(TemporalAnalysisChannel):
analysis_type: str = 'pos'
_mean_center_distance: Tuple[float, Tuple] = None
_distance_traveled: Tuple[float, Tuple] = None
_mean_speed: Tuple[float, Tuple] = None
_active_interval: Tuple[Dict[str, np.ndarray], Tuple] = None
_colliders: Dict[str, Union[Collide2DPoly, CollideEllipse]]
start: Optional[float] = None
end: Optional[float] = None
event_channel: Optional[str]
event_channels: List[str]
zone_channel: Optional[str]
zone_channels: List[str]
_compute_variables_: Dict[str, str] = {
'start': 'The start time in video time, or nothing to start from '
'the beginning of the video',
'end': 'The end time in video time, or nothing to end at '
'the end of the video',
'event_channel': 'The event channel to use',
'event_channels': 'The listed event channels to use',
'zone_channel': 'The zone channel to use',
'zone_channels': 'The listed zone channels to use',
}
_compute_methods_: Dict[str, str] = {
'event_from_pos':
'Creates a new event channel from the pos channel, where '
'the new channel is active for time "t" if the channel was coded '
'with a position for time t',
'pos_in_any_zone':
'Creates a new event channel where the new channel is active for '
'time "t" if the position is in any of the listed zones for '
'time "t"',
'mean_center_distance':
            'The mean distance of the channel to the centroid of the named '
            'zone, while the event channel is active, if an event channel '
            'was selected',
'distance_traveled':
'The total distance the channel traveled in pixels while the '
'event channel is active, if an event channel was selected',
'mean_speed':
'The mean speed of the channel in pixels per second while the '
'event channel is active, if an event channel was selected',
}
_channel_creating_methods_: Dict[str, str] = {
'event_from_pos': 'event', 'pos_in_any_zone': 'event'}
_special_arg_type_: Dict[str, str] = {
'event_channel': 'event', 'event_channels': 'event',
'zone_channel': 'zone', 'zone_channels': 'zone'}
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._colliders = {}
def get_collider(
self, zone_name: str) -> Union[Collide2DPoly, CollideEllipse]:
if zone_name not in self._colliders:
norm = self.analysis_object.normalized_name
shape = self.analysis_object.zone_channels_shapes[norm(zone_name)]
self._colliders[zone_name] = \
ZoneAnalysisChannel.collider_from_shape(shape)
return self._colliders[zone_name]
def get_active_intervals(
self, event_channel: Optional[str] = None,
start: Optional[float] = None,
end: Optional[float] = None) -> Dict[str, np.ndarray]:
val = self.get_cache_these_args(
'_active_interval', event_channel=event_channel, start=start,
end=end)
if val is not not_cached:
return val
norm = self.analysis_object.normalized_name
data = self.data[:, 0] != -1
if event_channel:
data = np.logical_and(
data,
self.analysis_object.event_channels_data[norm(event_channel)])
intervals = self._get_active_intervals(
data, self.timestamps, start=start, end=end)
self._active_interval = intervals, (start, end)
return intervals
def compute_event_from_pos(
self, event_channels: List[str]) -> Tuple[np.ndarray, dict]:
norm = self.analysis_object.normalized_name
channels_data = self.analysis_object.event_channels_data
arr = [channels_data[norm(name)] for name in event_channels]
arr.append(self.data[:, 0] != -1)
return np.logical_or.reduce(arr, axis=0), {}
def compute_pos_in_any_zone(
self, zone_channels: List[str]) -> Tuple[np.ndarray, dict]:
arr = []
valid_points = self.data[:, 0] != -1
points = self.data[valid_points, :].tolist()
for zone in zone_channels:
collider = self.get_collider(zone)
arr.append(collider.collide_points(points))
valid_points[valid_points] = np.logical_or.reduce(arr, axis=0)
return valid_points, {}
def compute_mean_center_distance(
self, zone_channel: DefaultStr,
event_channel: Optional[DefaultStr] = default_value,
start: Optional[DefaultFloat] = default_value,
end: Optional[DefaultFloat] = default_value) -> float:
val, (zone_channel, event_channel, start, end) = self.get_cache(
'_mean_center_distance', zone_channel=zone_channel,
event_channel=event_channel, start=start, end=end)
if val is not not_cached:
return val
intervals = self.get_active_intervals(event_channel, start, end)
collider = self.get_collider(zone_channel)
data = self.data[intervals['start']:intervals['end'], :]
data = data[intervals['mask'], :] - collider.get_centroid()
        val = float(np.mean(np.linalg.norm(data, axis=1)))
self._mean_center_distance = val, (
zone_channel, event_channel, start, end)
return val
def compute_distance_traveled(
self, event_channel: Optional[DefaultStr] = default_value,
start: Optional[DefaultFloat] = default_value,
end: Optional[DefaultFloat] = default_value) -> float:
val, (event_channel, start, end) = self.get_cache(
'_distance_traveled', event_channel=event_channel, start=start,
end=end)
if val is not not_cached:
return val
intervals = self.get_active_intervals(event_channel, start, end)
indices = intervals['indices']
data = self.data[intervals['start']:intervals['end'], :]
val = 0
for s, e in indices:
val += np.sum(
np.linalg.norm(data[s + 1:e + 1, :] - data[s:e, :], axis=1))
val = float(val)
self._distance_traveled = val, (event_channel, start, end)
return val
def compute_mean_speed(
self, event_channel: Optional[DefaultStr] = default_value,
start: Optional[DefaultFloat] = default_value,
end: Optional[DefaultFloat] = default_value) -> float:
val, (event_channel, start, end) = self.get_cache(
'_mean_speed', event_channel=event_channel, start=start,
end=end)
if val is not not_cached:
return val
intervals = self.get_active_intervals(event_channel, start, end)
indices = intervals['indices']
interval_times = intervals['intervals']
data = self.data[intervals['start']:intervals['end'], :]
dist = 0
for s, e in indices:
dist += np.sum(
np.linalg.norm(data[s + 1:e + 1, :] - data[s:e, :], axis=1))
dt = np.sum(interval_times[:, 1] - interval_times[:, 0])
val = 0.
if dt:
val = float(dist / dt)
self._mean_speed = val, (event_channel, start, end)
return val
class ZoneAnalysisChannel(AnalysisChannel):
analysis_type: str = 'zone'
shape: PaintShape = None
_collider = None
_compute_methods_: Dict[str, str] = {
'area': 'The area of the zone in pixels',
'centroid': 'The centroid of the zone in pixels',
}
def __init__(self, **kwargs):
super().__init__(**kwargs)
norm = self.analysis_object.normalized_name
self.shape = self.analysis_object.zone_channels_shapes[norm(self.name)]
@staticmethod
def collider_from_shape(
shape: PaintShape) -> Union[Collide2DPoly, CollideEllipse]:
if isinstance(shape, PaintPolygon):
return Collide2DPoly(points=shape.points, cache=True)
elif isinstance(shape, PaintCircle):
x, y = shape.center
r = shape.radius
return CollideEllipse(x=x, y=y, rx=r, ry=r)
elif isinstance(shape, PaintEllipse):
x, y = shape.center
rx, ry = shape.radius_x, shape.radius_y
return CollideEllipse(
x=x, y=y, rx=rx, ry=ry, angle=shape.angle)
elif isinstance(shape, PaintPoint):
x, y = shape.position
return CollideEllipse(x=x, y=y, rx=1, ry=1)
        else:
            raise TypeError(f'Unsupported shape type: {type(shape)}')
@property
def collider(self):
collider = self._collider
if collider is not None:
return collider
self._collider = self.collider_from_shape(self.shape)
return self._collider
def compute_area(self) -> float:
return self.collider.get_area()
    def compute_centroid(self) -> Tuple[float, float]:
return self.collider.get_centroid()
AnalysisFactory.register_analysis_class(EventAnalysisChannel)
AnalysisFactory.register_analysis_class(PosAnalysisChannel)
AnalysisFactory.register_analysis_class(ZoneAnalysisChannel)
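# A minimal usage sketch (hypothetical channel names and setup): once the
# analysis framework has created channels with their `data` and `timestamps`
# populated, the metrics registered above can be queried directly, e.g.:
#
#     active_s = event_channel.compute_active_duration(start=0.0, end=60.0)
#     n_events = event_channel.compute_event_count(start=0.0, end=60.0)
#     spans = event_channel.compute_event_intervals()  # [s1, e1, s2, e2, ...]
#     speed = pos_channel.compute_mean_speed(event_channel='moving')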
| python |
#!../env/bin/python
from db_models import db, ColorScheme
# define columns
columns = ['ColorSchemeName', 'NumCategories', 'CriticalValue', 'CategoryNumber', 'RedValue', 'GreenValue', 'BlueValue', 'SchemeType']
# open file
f = open('../assets/colorbrewer.csv','r')
# generate inserts for each line
for r in f.readlines():
datadict = dict(zip(columns,r.strip().split(',')))
# insert color info
color = ColorScheme(datadict['ColorSchemeName'],
int(datadict['NumCategories']) if datadict['NumCategories'] else None,
float(datadict['CriticalValue']) if datadict['CriticalValue'] else None,
int(datadict['CategoryNumber']) if datadict['CategoryNumber'] else None,
int(datadict['RedValue']) if datadict['RedValue'] else None,
int(datadict['GreenValue']) if datadict['GreenValue'] else None,
int(datadict['BlueValue']) if datadict['BlueValue'] else None,
datadict['SchemeType'])
db.session.add(color)
db.session.commit()
# close file
f.close()
| python |
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import ode
def f(t, current_concentrations):
# use simpler variable names
s1 = current_concentrations[0]
s2 = current_concentrations[1]
v0 = 5.0
k1 = 3.0
k2 = 2.0
change_in_s1 = v0 - k1 * s1
change_in_s2 = k1 * s1 - k2*s2
return [change_in_s1, change_in_s2]
initial_concentrations = [1.0, 0.0]
solver = ode(f).set_integrator('dopri5') # Runge-Kutta, equiv. to ode45() in MATLAB
solver.set_initial_value(initial_concentrations)
timestep = 0.01
t_end = 5.0
number_of_timepoints = int(t_end / timestep)
timepoints = np.linspace(0, t_end, number_of_timepoints)
s1 = np.zeros(number_of_timepoints)
s2 = np.zeros(number_of_timepoints)
for i in range(number_of_timepoints):
current_concentrations = solver.integrate(timepoints[i])
s1[i] = current_concentrations[0]
s2[i] = current_concentrations[1]
plt.figure()
plt.plot(timepoints,s1)
plt.plot(timepoints,s2)
plt.xlabel('Time')
plt.ylabel('Concentration')
plt.legend(['s_1', 's_2'], loc='upper left')
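# Optional sanity check: setting the derivatives in f() to zero gives the
# analytic steady state s1 = v0/k1 and s2 = v0/k2, which the trajectories
# should approach well before t = 5.
print('expected steady state: s1 = %.3f, s2 = %.3f' % (5.0 / 3.0, 5.0 / 2.0))
print('simulated final values: s1 = %.3f, s2 = %.3f' % (s1[-1], s2[-1]))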
plt.show()
| python |
"""Tools that interact with Ilab's REST database."""
import re
import copy
import traceback
from bs4 import BeautifulSoup
from ua_ilab_tools import extract_custom_forms, ilab_api, api_types
ONLY_INT_FIELDS = [
"Concentration_each_sample", "Concentration", "Volume (uL)",
"Initial_Number_Slides_or_Punches_each_sample", "Template Length",
"Template_Length_each_sample"]
SKIP_FORM_PATTERNS = [r"REQUEST A QUOTE.*", r".*NQ.*"]
class IlabConfigError(Exception):
"""The request or form has been configured or altered incorrectly."""
class IlabTools():
def __init__(self, core_id, token):
if "Bearer" not in token:
token = "Bearer " + token
auth_creds = {
"Authorization": f"{token}",
"Content-Type": "application/xml"
}
self.api = ilab_api.IlabApi(core_id, auth_creds)
def get_service_requests(self, status="processing", specific_uri=None):
"""Get the service requests with the given status from ilab's REST DB.
Keyword Arguments:
status (string):
If you want service requests with a certain status. By default,
it is 'processing'.
specific_uri (string):
If you want a specific endpoint.
Returns:
req_uri_to_soup (dict):
The dictionary that holds all of the
{service req uris:request soup}. If no service requests are
found, returns an empty dict.
"""
req_uri_to_soup = {}
if specific_uri:
get_responses = self.api.get(
f"service_requests/{specific_uri}.xml", get_all=False)
requests_soup = BeautifulSoup(get_responses[0].text, "xml")
requests_soup = requests_soup.find("service-request")
req_uri_to_soup[requests_soup.find("id").string] = requests_soup
else:
get_responses = self.api.get(
"service_requests.xml",
parameters={"states": status},
get_all=True)
# Soup all get responses (multiple pages or not).
req_paged_soups = [
BeautifulSoup(response.text, "xml") for response in get_responses]
# Get every service-request in every page.
for get_soup in req_paged_soups:
for req_soup in get_soup.find_all("service-request"):
req_uri_to_soup[req_soup.find("id").string] = req_soup
return req_uri_to_soup
def get_service_cost(self, price_id):
"""Get the cost associated with the given service_id.
Arguments:
price_id (string):
The id associated with a price.
Returns:
service_price (namedtuple):
The calculated price of the service, or None if not found.
"""
service_price = None
get_responses = self.api.get("services.xml")
for response in get_responses:
services_page_soup = BeautifulSoup(response.text, "xml")
id_soup = services_page_soup.find(string=price_id)
if id_soup:
service_soup = id_soup.find_parent("service")
price_soup = service_soup.find("price")
current_price = price_soup.find("price").string
unit = price_soup.find("unit").find("description").string
service_price = api_types.Service_Price(
price=float(current_price),
samples_per_unit=unit)
return service_price
def get_request_charges(self, req_id):
"""Get all of the charges of the req_id passed in.
Arguments:
req_id(string):
The unique string of ints that map to a request.
Returns:
charges_uri_soup (dict):
The dict of uri_to_soup of all the charges associated with that
request. Returns an empty dict if not found.
"""
get_responses = self.api.get(f"service_requests/{req_id}/charges.xml")
charge_paged_soups = [
BeautifulSoup(response.text, "xml") for response in get_responses]
charges_uri_soup = dict()
for get_soup in charge_paged_soups:
for charge in get_soup.find_all("charge"):
charges_uri_soup[charge.find("id").string] = charge
return charges_uri_soup
def get_milestones(self, request_id):
"""Get all of the milestones associated with a service request.
Arguments:
request_id (string):
The unique string of ints that map to a request.
Returns:
milestone_name_soup (dict):
Holds all {milestone name : soup of milestone}. Returns an
empty dict if not found.
"""
get_responses = self.api.get(
f"service_requests/{request_id}/milestones.xml")
milestone_paged_soups = [
BeautifulSoup(response.text, "xml") for response in get_responses]
milestone_name_soup = {}
for get_soup in milestone_paged_soups:
for milestone in get_soup.find_all("milestone"):
name_tag = milestone.find("name")
if name_tag:
milestone_name_soup[name_tag.string] = milestone
return milestone_name_soup
def get_custom_forms(self, req_id):
"""Get all of the custom forms of the req_id passed in.
Arguments:
req_id (string):
The unique string of ints that map to a request.
Returns:
forms_uri_to_soup (dict):
The dictionary that holds all of the
{custom form uris: form_soup}. Returns an empty dict if not
found.
"""
get_responses = self.api.get(
f"service_requests/{req_id}/custom_forms.xml")
form_paged_soups = [
BeautifulSoup(response.text, "xml") for response in get_responses]
forms_uri_to_soup = {}
for get_soup in form_paged_soups:
for form in get_soup.find_all("custom-form"):
forms_uri_to_soup[form.find("id").string] = form
return forms_uri_to_soup
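# A minimal usage sketch (hypothetical core id and token): the tools object
# wraps the iLab REST endpoints queried by the methods above.
#
#     tools = IlabTools("1234", "my-secret-token")
#     for req_id, req_soup in tools.get_service_requests("processing").items():
#         forms = tools.get_custom_forms(req_id)
#         charges = tools.get_request_charges(req_id)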
def extract_project_info(req_soup, full_name=False):
"""Extract the relevant project info from a request.
Arguments:
req_soup (BS4 soup object):
The soup of the request.
full_name (boolean):
Whether or not to capture the entire project name or just the last
hyphenated element.
Returns:
prj_info (Project):
The required info to post a project.
"""
if full_name:
prj_name = req_soup.find("name").string
else:
prj_name = req_soup.find("name").string.split('-')[-1]
res_name = req_soup.find("owner").find("name").string
email = req_soup.find("owner").find("email").string
# NOTE: Change this line to your own institution's email domain.
if "email.arizona.edu" in email:
res_lab = "internal"
else:
res_lab = "external"
    # Replace all non-ASCII chars with ASCII ones, and any symbols with '-'.
prj_res = api_types.Researcher(
extract_custom_forms._sanitize_text(res_name.split()[0]),
extract_custom_forms._sanitize_text(res_name.split()[-1]),
extract_custom_forms._sanitize_text(res_lab),
email,
"")
prj_info = api_types.Project(prj_name, prj_res)
return prj_info
def extract_custom_form_info(req_id, form_id, form_soup):
"""Extract all of the fields passed into the form.
Arguments:
req_id (String):
The unique string of ints that map to a request (URI).
form_id (String):
The unique string of ints that map to a form.
form_soup (BeautifulSoup object):
The soup of the form you want to parse.
Returns:
form_info (CustomForm):
The CustomForm object with all of the form's fields initialized.
Raises:
TypeError:
The form has no fields configured.
ValueError:
The form has duplicate samples.
"""
# If we need any of these types, we can make new methods.
skip_types = ["charges", "file", "table", "help", "file_no_upload"]
field_strategy = {
"handsontable_grid": extract_custom_forms.grid_type,
"checkbox": extract_custom_forms.checkbox_type,
"all_others": extract_custom_forms.all_other_types}
# Find the desired custom form out of all of the form_soup.
target_form = form_soup.find(string=form_id)
target_form = target_form.find_parent("custom-form")
form_soup = target_form
form_name = form_soup.find("name").string
fields_soup = form_soup.find("fields")
form_info = api_types.CustomForm(form_name, req_id, form_id)
# Get all of the field information.
for field_soup in fields_soup.find_all("field"):
field_type = field_soup.find("type").string
if field_type in skip_types:
# Do nothing with the field types that we don't yet care about.
continue
try:
field_strategy[field_type](field_soup, form_info)
except KeyError:
field_strategy["all_others"](field_soup, form_info)
except TypeError:
raise TypeError(
f"The grid in the {form_info.name} form in request"
f" {form_info.req_id} has been filled out incorrectly. The"
f" error message is: {traceback.format_exc()}")
    # Nothing more to do if the form doesn't have samples.
if not form_info.samples:
return form_info
if form_info.field_to_values.get("duplicate_samples"):
if form_info.field_to_values["duplicate_samples"] == "Yes":
b_samples = copy.deepcopy(form_info.samples)
for a_sample, b_sample in zip(form_info.samples, b_samples):
a_sample.name += "A"
b_sample.name += "B"
form_info.samples = form_info.samples + b_samples
extract_custom_forms.bind_container_info(form_info)
# Allows duplicate names if they have different well locations in a
# plate.
if form_info.con_type != "96 well plate":
sample_names = [sample.name for sample in form_info.samples]
if len(set(sample_names)) != len(sample_names):
raise ValueError(
f"There are two or more samples named the same thing in"
f" request {form_info.req_id}. Please review and edit your"
f" sample names.")
for name, value in form_info.field_to_values.items():
if name in ONLY_INT_FIELDS:
value = re.sub(r"[^.0-9]", "", value)
if "_each_sample" in name:
udf_name = name.replace("_each_sample", "").replace("_", " ")
for sample in form_info.samples:
sample.udf_to_value[udf_name] = value
return form_info
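# A minimal usage sketch (hypothetical IDs, with `req_soup` and `forms`
# fetched via IlabTools as above): each request's soups can be turned into
# the api_types objects used downstream.
#
#     prj = extract_project_info(req_soup)
#     for form_id, form_soup in forms.items():
#         form = extract_custom_form_info(req_id, form_id, form_soup)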
| python |
__copyright__ = '''
Copyright 2017 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'David Turanski'
import os, sys
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.path.abspath('.'))
from springcloudstream.grpc.stream import Processor
def echo(data):
return data
args = [
    '--port', '9999',
    '--debug'
]
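# Start a gRPC stream Processor on port 9999 (with debug logging) that applies
# `echo` to every incoming message, returning each payload unchanged.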
Processor(echo, args).start()
| python |
# George Adamson
# 05/19/2020
fhand1 = open('dijkstraRoute_oceanEN_RD_50km_50.txt')
fhand_out = open('dijkstraRoute_oceanEN_RD_50km_50.pg','w')
# Read in Route
lats = []
lons = []
for line in fhand1:
route_data = line.split(',')
lats.append(route_data[1])
lons.append(route_data[2].rstrip())
# Header Information
fhand_out.write('stk.v.11.7')
fhand_out.write('\n\tBEGIN GreatArc')
fhand_out.write('\n\t\tMethod DetTimeAccFromVel')
fhand_out.write('\n\t\tTimeOfFirstWaypoint 19 May 2020 16:00:00.000000000')
fhand_out.write('\n\t\tArcGranularity 5.729577951308e-001')
fhand_out.write('\n\t\tAltRef WGS84')
fhand_out.write('\n\t\tAltInterpMethod EllipsoidHeight')
fhand_out.write('\n\t\tNumberOfWaypoints ' + str(len(lats)))
fhand_out.write('\n\t\tBEGIN Waypoints')
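# Write one waypoint per route point: waypoints are spaced 1.016507057765e4 s
# apart at a constant altitude of 18288 m (60,000 ft), and longitudes are
# written with their sign flipped.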
for i in range(0,len(lats)):
time = i * 1.016507057765e4
alt = 18288.00000
fhand_out.write('\n\t\t' + str(time) + " " + str(lats[i]) + " " + str(-1*float(lons[i])) + " " + str(alt) + " " + str(0.59944444444444) + " " + str(0.000000000000e0))
fhand_out.write('\n\t\tEND Waypoints')
fhand_out.write('\n\tEND GreatArc')
# Close Files
fhand1.close()
fhand_out.close()
| python |