max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score
---|---|---|---|---|---|---
Application/AdminModule.py | nimitpatel26/Book-Fetch | 0 | 12799851 | from random import randint
import datetime
import pymysql
import cgi
def getConnection():
return pymysql.connect(host='localhost',
user='root',
password='<PASSWORD>',
db='BookFetch')
def newBook():
Title = input("Enter the title of the new book: ")
ISBN = input("Enter the isbn of the new book: ")
ISBN13 = input ("Enter the isbn 13: ")
DPublished = input("Enter date published: ")
Quantity = input("Enter quantity: ")
Publisher = input("Enter publisher: ")
Edition = input("Edition: ")
Language = input("Language: ")
Category = input("Category: ")
Author = input("Author: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into BookDetails values (\"" + Title + "\", " + ISBN
+ ", " + ISBN13 + ", \"" + DPublished + "\", " + Quantity
+ ", \"" + Publisher + "\", " + Edition + ", \"" + Language
+ "\", \"" + Category + "\", \"" + Author + "\");")
cursor.execute(sql)
finally:
connection.close()
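# Hedged sketch (not part of the original module): the inserts in this file build
# SQL by string concatenation, which breaks on quotes in user input and is open to
# SQL injection. A parameterized variant for newBook() could look like the function
# below; the BookDetails column order is assumed from the concatenated statement above.
def newBookParameterized(title, isbn, isbn13, date_published, quantity,
                         publisher, edition, language, category, author):
    connection = getConnection()
    connection.autocommit(True)
    try:
        with connection.cursor() as cursor:
            sql = ("insert into BookDetails values "
                   "(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)")
            cursor.execute(sql, (title, isbn, isbn13, date_published, quantity,
                                 publisher, edition, language, category, author))
    finally:
        connection.close()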
def newUniversity():
Name = input("Enter the name of the university: ")
RFName = input("First name of the representative: ")
RLName = input("Last name of the representative: ")
Street = input("Street: ")
City = input("City: ")
State = input("State: ")
Country = input("Country: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into Universities values(\"" + Name
+ "\", \"" + RFName + "\", \"" + RLName + "\", \""
+ Street + "\", \"" + City + "\", \"" + State
+ "\", \"" + Country + "\");")
cursor.execute(sql)
finally:
connection.close()
def newDepartment():
UniversityName = input("Enter the name of the university: ")
DeptName = input("Enter the name of the department: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into Departments values(\"" + UniversityName
+ "\", \"" + DeptName + "\");")
cursor.execute(sql)
finally:
connection.close()
def newCourses():
CourseName = input("Enter the name of the course: ")
UniversityName = input("Enter the name of the university: ")
DeptName = input("Enter the name of the department: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into Courses values(\"" + CourseName
+ "\", \"" + UniversityName + "\", \"" + DeptName + "\");")
cursor.execute(sql)
finally:
connection.close()
def newBAssociation():
print("All of these are foreign key constraints: ")
CourseName = input("Enter the name of the course: ")
UniversityName = input("Enter the name of the university: ")
ISBN = input("Enter the isbn of the book: ")
connection = getConnection()
connection.autocommit(True)
try:
with connection.cursor() as cursor:
sql = ("insert into CourseReq values(" + ISBN
+ ", \"" + CourseName + "\", \"" + UniversityName + "\");")
cursor.execute(sql)
finally:
connection.close()
def adminModuleMain():
welcomeMsg = ("---------------------\nAdmin Module\n---------------------")
mainOptionsMsg = (
"""Here are your options:
1) Create a new book with inventory
2) Create a new university
3) Create a new department
4) Create a new course
5) Create a new book association
6) Return
7) Quit
Enter [1-7]: """)
invalidInputMsg = "Invalid input, please enter a valid input."
print(welcomeMsg)
userInput = int(input(mainOptionsMsg))
print("\n")
while(userInput < 1 or userInput > 7):
print(invalidInputMsg)
userInput = int(input(mainOptionsMsg))
print("\n")
if (userInput == 1):
newBook()
elif (userInput == 2):
newUniversity()
elif (userInput == 3):
newDepartment()
elif (userInput == 4):
newCourses()
elif (userInput == 5):
newBAssociation()
elif (userInput == 6):
return
elif (userInput == 7):
quit()
adminModuleMain() | 3.234375 | 3 |
pages/clean_box/images/sims_for_hoopla/pot_ext_shears_kappa.py | linan7788626/linan7788626.github.io | 0 | 12799852 | import numpy as np
def deflection_sub_pJaffe(x0, y0, re, rc, a, x, y):
r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0)
res = re / r * (np.sqrt(rc * rc + r * r) - rc - np.sqrt(a * a + r * r) + a)
return res * (x - x0) / r, res * (y - y0) / r
def deflection_nie(x0, y0, theta, ql, re, rc, ext_shears, ext_angle, ext_kappa, x, y): # SIE lens model
tr = np.pi * (theta / 180.0) # + np.pi / 2.0
sx = x - x0
sy = y - y0
cs = np.cos(tr)
sn = np.sin(tr)
sx_r = sx * cs + sy * sn
sy_r = -sx * sn + sy * cs
psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0)
dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \
np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi + rc))
dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \
np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi + rc * ql**2.0))
dx = dx_tmp * cs - dy_tmp * sn
dy = dx_tmp * sn + dy_tmp * cs
# external shear
tr2 = np.pi * (ext_angle / 180.0)
cs2 = np.cos(2.0 * tr2)
sn2 = np.sin(2.0 * tr2)
dx2 = ext_shears * (cs2 * sx + sn2 * sy)
dy2 = ext_shears * (sn2 * sx - cs2 * sy)
# external kappa
dx3 = ext_kappa * sx
dy3 = ext_kappa * sy
return dx + dx2 + dx3, dy + dy2 + dy3
def potential_nie(x0, y0, theta, ql, re, rc, ext_shears,
ext_angle, ext_kappa, x, y):
tr = np.pi * (theta / 180.0) # + np.pi / 2.0
sx = x - x0
sy = y - y0
cs = np.cos(tr)
sn = np.sin(tr)
sx_r = sx * cs + sy * sn
sy_r = -sx * sn + sy * cs
psi = np.sqrt(ql**2.0 * (rc**2.0 + sx_r**2.0) + sy_r**2.0)
dx_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \
np.arctan(np.sqrt(1.0 - ql**2.0) * sx_r / (psi + rc))
dy_tmp = (re * np.sqrt(ql) / np.sqrt(1.0 - ql**2.0)) * \
np.arctanh(np.sqrt(1.0 - ql**2.0) * sy_r / (psi + rc * ql**2.0))
pot_SIE = sx_r * dx_tmp + sy_r * dy_tmp - 0.5 * re * \
np.sqrt(ql) * rc * np.log((psi + rc)**2.0 +
(1.0 - (ql**2.0)) * (sx_r**2.0))
# external shear
tr2 = np.pi * (ext_angle / 180.0)
cs2 = np.cos(2.0 * tr2)
sn2 = np.sin(2.0 * tr2)
pot_exts = ext_shears * (sn2 * sx * sy + 0.5 * cs2 * (sx**2.0 - sy**2.0))
# external kappa
pot_kaps = ext_kappa * (sx**2.0 + sy**2.0) * 0.5
return pot_SIE + pot_exts + pot_kaps
def pot_sub_pJaffe(x0, y0, re, a, x, y):
# res =
# re*(np.sqrt(s*s+r*r)-s-np.sqrt(a*a+r*r)+a)-re*s*np.log((s+np.sqrt(s*s+r*r)/(2.0*s)))+re*a*np.log((a+np.sqrt(a*a+r*r)/(2.9*a)))
r = np.sqrt((x - x0)**2.0 + (y - y0)**2.0)
res = re * (r - np.sqrt(a * a + r * r) + a) + re * a * \
np.log((a + np.sqrt(a * a + r * r)) / (2.0 * a))
return res
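# Hedged consistency check (not in the original file): for a circular pseudo-Jaffe
# profile the deflection should equal the gradient of the lensing potential. The
# helper below verifies this numerically with central differences for the rc = 0
# case implemented by pot_sub_pJaffe; grid points and tolerances are arbitrary.
def _check_pjaffe_deflection_is_potential_gradient(re_lens=1.0, a=1.5, eps=1e-5):
    x = np.linspace(0.5, 3.0, 7)
    y = np.linspace(0.4, 2.5, 7)
    alpha_x, alpha_y = deflection_sub_pJaffe(0.0, 0.0, re_lens, 0.0, a, x, y)
    dpsi_dx = (pot_sub_pJaffe(0.0, 0.0, re_lens, a, x + eps, y)
               - pot_sub_pJaffe(0.0, 0.0, re_lens, a, x - eps, y)) / (2.0 * eps)
    dpsi_dy = (pot_sub_pJaffe(0.0, 0.0, re_lens, a, x, y + eps)
               - pot_sub_pJaffe(0.0, 0.0, re_lens, a, x, y - eps)) / (2.0 * eps)
    return np.allclose(alpha_x, dpsi_dx, atol=1e-4) and np.allclose(alpha_y, dpsi_dy, atol=1e-4)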
#if __name__ == '__main__':
#import pylab as pl
#r = np.linspace(0.0, 3.0, 100)
#y = 1.0 / r * (np.sqrt(r * r) - np.sqrt(1.0 * 1.0 + r * r) + 1.0)
#pl.plot(r,y,'k-')
#pl.show()
| 1.820313 | 2 |
earsie_eats_blog/users/views.py | genomics-geek/earsie-eats.com | 0 | 12799853 | from django.contrib.auth import get_user_model
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse
from django.views.generic import DetailView, ListView, RedirectView, UpdateView
from allauth.socialaccount.providers.facebook.views import FacebookOAuth2Adapter
from allauth.socialaccount.providers.github.views import GitHubOAuth2Adapter
from allauth.socialaccount.providers.google.views import GoogleOAuth2Adapter
from allauth.socialaccount.providers.instagram.views import InstagramOAuth2Adapter
from allauth.socialaccount.providers.twitter.views import TwitterOAuthAdapter
from rest_auth.registration.views import SocialLoginView
from rest_auth.social_serializers import TwitterLoginSerializer
User = get_user_model()
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_detail_view = UserDetailView.as_view()
class UserListView(LoginRequiredMixin, ListView):
model = User
slug_field = "username"
slug_url_kwarg = "username"
user_list_view = UserListView.as_view()
class UserUpdateView(LoginRequiredMixin, UpdateView):
model = User
fields = ["name"]
def get_success_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
def get_object(self):
return User.objects.get(username=self.request.user.username)
user_update_view = UserUpdateView.as_view()
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail", kwargs={"username": self.request.user.username})
user_redirect_view = UserRedirectView.as_view()
# Social Apps
class FacebookLogin(SocialLoginView):
adapter_class = FacebookOAuth2Adapter
class GitHubLogin(SocialLoginView):
adapter_class = GitHubOAuth2Adapter
class GoogleLogin(SocialLoginView):
adapter_class = GoogleOAuth2Adapter
class InstagramLogin(SocialLoginView):
adapter_class = InstagramOAuth2Adapter
class TwitterLogin(SocialLoginView):
serializer_class = TwitterLoginSerializer
adapter_class = TwitterOAuthAdapter
| 2.125 | 2 |
duplicateOverSurface/duplicateOverSurface.py | minoue/miMayaPlugins | 32 | 12799854 |
#
# duplicaeOnSurface.py
#
#
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# <<EMAIL>> wrote this file. As long as you retain this
# notice you can do whatever you want with this stuff. If we meet some day,
# and you think this stuff is worth it, you can buy me a beer in return.
# -<NAME>
# ----------------------------------------------------------------------------
#
from maya import OpenMaya
from maya import OpenMayaUI
from maya import OpenMayaMPx
from maya import cmds
try:
from PySide.QtGui import QApplication
from PySide import QtCore
except ImportError:
from PySide2.QtWidgets import QApplication
from PySide2 import QtCore
import math
import sys
DRAGGER = "duplicateOverSurfaceDragger"
UTIL = OpenMaya.MScriptUtil()
kPluginCmdName = "duplicateOverSurface"
kRotationFlag = "-r"
kRotationFlagLong = "-rotation"
kDummyFlag = "-d"
kDummyFlagLong = "-dummy"
kInstanceFlag = "-ilf"
kInstanceFlagLong = "-instanceLeaf"
# Syntax creator
def syntaxCreator():
syntax = OpenMaya.MSyntax()
syntax.addArg(OpenMaya.MSyntax.kString)
syntax.addFlag(
kDummyFlag,
kDummyFlagLong,
OpenMaya.MSyntax.kBoolean)
syntax.addFlag(
kRotationFlag,
kRotationFlagLong,
OpenMaya.MSyntax.kBoolean)
syntax.addFlag(
kInstanceFlag,
kInstanceFlagLong,
OpenMaya.MSyntax.kBoolean)
return syntax
class DuplicateOverSurface(OpenMayaMPx.MPxCommand):
def __init__(self):
super(DuplicateOverSurface, self).__init__()
self.ANCHOR_POINT = None
self.DUPLICATED = None
self.SOURCE = None
self.SCALE_ORIG = None
self.MATRIX_ORIG = None
self.TARGET_FNMESH = None
self.MOD_FIRST = None
self.MOD_POINT = None
self.SPACE = OpenMaya.MSpace.kWorld
self.ROTATION = True
self.InstanceFlag = False
self.SHIFT = QtCore.Qt.ShiftModifier
self.CTRL = QtCore.Qt.ControlModifier
def doIt(self, args):
# Parse the arguments.
argData = OpenMaya.MArgDatabase(syntaxCreator(), args)
self.SOURCE = argData.commandArgumentString(0)
if argData.isFlagSet(kRotationFlag) is True:
self.ROTATION = argData.flagArgumentBool(kRotationFlag, 0)
if argData.isFlagSet(kInstanceFlag) is True:
self.InstanceFlag = argData.flagArgumentBool(kInstanceFlag, 0)
cmds.setToolTo(self.setupDragger())
def setupDragger(self):
""" Setup dragger context command """
try:
cmds.deleteUI(DRAGGER)
except:
pass
dragger = cmds.draggerContext(
DRAGGER,
pressCommand=self.pressEvent,
dragCommand=self.dragEvent,
releaseCommand=self.releaseEvent,
space='screen',
projection='viewPlane',
undoMode='step',
cursor='hand')
return dragger
def pressEvent(self):
button = cmds.draggerContext(DRAGGER, query=True, button=True)
# Leave the tool by middle click
if button == 2:
cmds.setToolTo('selectSuperContext')
return
# Get clicked point in viewport screen space
pressPosition = cmds.draggerContext(DRAGGER, query=True, ap=True)
x = pressPosition[0]
y = pressPosition[1]
self.ANCHOR_POINT = [x, y]
# Convert
point_in_3d, vector_in_3d = convertTo3D(x, y)
# Get MFnMesh of snap target
targetDagPath = getDagPathFromScreen(x, y)
# If draggin outside of objects
if targetDagPath is None:
return
# Get origianl scale information
self.SCALE_ORIG = cmds.getAttr(self.SOURCE + ".scale")[0]
self.MATRIX_ORIG = cmds.xform(self.SOURCE, q=True, matrix=True)
self.TARGET_FNMESH = OpenMaya.MFnMesh(targetDagPath)
transformMatrix = self.getMatrix(
point_in_3d,
vector_in_3d,
self.TARGET_FNMESH,
self.SCALE_ORIG,
self.MATRIX_ORIG)
if transformMatrix is None:
return
# Create new object to snap
self.DUPLICATED = self.getNewObject()
# Reset transform of current object
cmds.setAttr(self.DUPLICATED + ".translate", *[0, 0, 0])
location = [-i for i
in cmds.xform(self.DUPLICATED, q=True, ws=True, rp=True)]
cmds.setAttr(self.DUPLICATED + ".translate", *location)
# Can't apply freeze to instances
if self.InstanceFlag is not True:
cmds.makeIdentity(self.DUPLICATED, apply=True, t=True)
# Apply transformMatrix to the new object
cmds.xform(self.DUPLICATED, matrix=transformMatrix)
def getNewObject(self):
return cmds.duplicate(self.SOURCE, ilf=self.InstanceFlag)[0]
def dragEvent(self):
""" Event while dragging a 3d view """
if self.TARGET_FNMESH is None:
return
dragPosition = cmds.draggerContext(
DRAGGER,
query=True,
dragPoint=True)
x = dragPosition[0]
y = dragPosition[1]
modifier = cmds.draggerContext(
DRAGGER,
query=True,
modifier=True)
if modifier == "none":
self.MOD_FIRST = True
qtModifier = QApplication.keyboardModifiers()
if qtModifier == self.CTRL or qtModifier == self.SHIFT:
# If this is the first click of dragging
if self.MOD_FIRST is True:
self.MOD_POINT = [x, y]
# global MOD_FIRST
self.MOD_FIRST = False
length, degree = self.getDragInfo(x, y)
if qtModifier == self.CTRL:
length = 1.0
if qtModifier == self.SHIFT:
degree = 0.0
# Convert
point_in_3d, vector_in_3d = convertTo3D(
self.MOD_POINT[0],
self.MOD_POINT[1])
else:
point_in_3d, vector_in_3d = convertTo3D(x, y)
length = 1.0
degree = 0.0
# Get new transform matrix for new object
transformMatrix = self.getMatrix(
point_in_3d,
vector_in_3d,
self.TARGET_FNMESH,
self.SCALE_ORIG,
self.MATRIX_ORIG,
length,
degree
)
if transformMatrix is None:
return
# Apply new transform
cmds.xform(self.DUPLICATED, matrix=transformMatrix)
cmds.setAttr(self.DUPLICATED + ".shear", *[0, 0, 0])
cmds.refresh(currentView=True, force=True)
def releaseEvent(self):
self.MOD_FIRST = True
def getDragInfo(self, x, y):
""" Get distance and angle in screen space. """
start_x = self.MOD_POINT[0]
start_y = self.MOD_POINT[1]
end_x = x
end_y = y
cathetus = end_x - start_x
opposite = end_y - start_y
# Get distance using Pythagorean theorem
length = math.sqrt(
math.pow(cathetus, 2) + math.pow(opposite, 2))
try:
theta = cathetus / length
degree = math.degrees(math.acos(theta))
if opposite < 0:
degree = -degree
return cathetus, degree
except ZeroDivisionError:
return None, None
def getIntersection(self, point_in_3d, vector_in_3d, fnMesh):
""" Return a point Position of intersection..
Args:
point_in_3d (OpenMaya.MPoint)
vector_in_3d (OpenMaya.mVector)
Returns:
OpenMaya.MFloatPoint : hitPoint
"""
hitPoint = OpenMaya.MFloatPoint()
hitFacePtr = UTIL.asIntPtr()
idSorted = False
testBothDirections = False
faceIDs = None
triIDs = None
accelParam = None
hitRayParam = None
hitTriangle = None
hitBary1 = None
hitBary2 = None
maxParamPtr = 99999
# intersectPoint = OpenMaya.MFloatPoint(
result = fnMesh.closestIntersection(
OpenMaya.MFloatPoint(
point_in_3d.x,
point_in_3d.y,
point_in_3d.z),
OpenMaya.MFloatVector(vector_in_3d),
faceIDs,
triIDs,
idSorted,
self.SPACE,
maxParamPtr,
testBothDirections,
accelParam,
hitPoint,
hitRayParam,
hitFacePtr,
hitTriangle,
hitBary1,
hitBary2)
faceID = UTIL.getInt(hitFacePtr)
if result is True:
return hitPoint, faceID
else:
return None, None
def getMatrix(self,
mPoint,
mVector,
targetFnMesh,
scale_orig,
matrix_orig,
scale_plus=1,
degree_plus=0.0):
""" Return a list of values which consist a new transform matrix.
Args:
mPoint (OpenMaya.MPoint)
mVector (OpenMaya.MVector)
Returns:
list : 16 values of the 4x4 transform matrix
"""
# Position of new object
OP, faceID = self.getIntersection(mPoint, mVector, targetFnMesh)
# If it doesn't intersect to any geometries, return None
if OP is None and faceID is None:
return None
qtMod = QApplication.keyboardModifiers()
if qtMod == (self.CTRL | self.SHIFT):
OP = getClosestVertex(OP, faceID, targetFnMesh)
# Get normal vector and tangent vector
if self.ROTATION is False:
NV = OpenMaya.MVector(
matrix_orig[4],
matrix_orig[5],
matrix_orig[6])
NV.normalize()
TV = OpenMaya.MVector(
matrix_orig[0],
matrix_orig[1],
matrix_orig[2])
TV.normalize()
else:
NV = self.getNormal(OP, targetFnMesh)
TV = self.getTangent(faceID, targetFnMesh)
# Ctrl-hold rotation
if qtMod == self.CTRL:
try:
rad = math.radians(degree_plus)
q1 = NV.x * math.sin(rad / 2)
q2 = NV.y * math.sin(rad / 2)
q3 = NV.z * math.sin(rad / 2)
q4 = math.cos(rad / 2)
TV = TV.rotateBy(q1, q2, q3, q4)
except TypeError:
pass
# Bitangent vector
BV = TV ^ NV
BV.normalize()
# 4x4 Transform Matrix
try:
x = scale_orig[0] * (scale_plus / 100 + 1.0)
y = scale_orig[1] * (scale_plus / 100 + 1.0)
z = scale_orig[2] * (scale_plus / 100 + 1.0)
TV *= x
NV *= y
BV *= z
except TypeError:
pass
finally:
matrix = [
TV.x, TV.y, TV.z, 0,
NV.x, NV.y, NV.z, 0,
BV.x, BV.y, BV.z, 0,
OP.x, OP.y, OP.z, 1
]
return matrix
def getTangent(self, faceID, targetFnMesh):
""" Return a tangent vector of a face.
Args:
faceID (int)
mVector (OpenMaya.MVector)
Returns:
OpenMaya.MVector : tangent vector
"""
tangentArray = OpenMaya.MFloatVectorArray()
targetFnMesh.getFaceVertexTangents(
faceID,
tangentArray,
self.SPACE)
numOfVtx = tangentArray.length()
x = sum([tangentArray[i].x for i in range(numOfVtx)]) / numOfVtx
y = sum([tangentArray[i].y for i in range(numOfVtx)]) / numOfVtx
z = sum([tangentArray[i].z for i in range(numOfVtx)]) / numOfVtx
tangentVector = OpenMaya.MVector()
tangentVector.x = x
tangentVector.y = y
tangentVector.z = z
tangentVector.normalize()
return tangentVector
def getNormal(self, pointPosition, targetFnMesh):
""" Return a normal vector of a face.
Args:
pointPosition (OpenMaya.MFloatPoint)
targetFnMesh (OpenMaya.MFnMesh)
Returns:
OpenMaya.MVector : normal vector
"""
ptr_int = UTIL.asIntPtr()
origin = OpenMaya.MPoint(pointPosition)
normal = OpenMaya.MVector()
targetFnMesh.getClosestNormal(
origin,
normal,
self.SPACE,
ptr_int)
normal.normalize()
return normal
# Creator
def cmdCreator():
return OpenMayaMPx.asMPxPtr(DuplicateOverSurface())
def initializePlugin(mObject):
mPlugin = OpenMayaMPx.MFnPlugin(mObject, "<NAME>")
try:
mPlugin.registerCommand(kPluginCmdName, cmdCreator)
mPlugin.setVersion("0.10")
except:
sys.stderr.write("Failed to register command: %s\n" % kPluginCmdName)
raise
def uninitializePlugin(mObject):
mPlugin = OpenMayaMPx.MFnPlugin(mObject)
try:
mPlugin.deregisterCommand(kPluginCmdName)
except:
sys.stderr.write("Failed to unregister command: %s\n" % kPluginCmdName)
def convertTo3D(screen_x, screen_y):
""" Return point and vector of clicked point in 3d space.
Args:
screen_x (int)
screen_y (int)
Returns:
OpenMaya.MPoint : point_in_3d
OpenMaya.MVector : vector_in_3d
"""
point_in_3d = OpenMaya.MPoint()
vector_in_3d = OpenMaya.MVector()
OpenMayaUI.M3dView.active3dView().viewToWorld(
int(screen_x),
int(screen_y),
point_in_3d,
vector_in_3d)
return point_in_3d, vector_in_3d
def getDagPathFromScreen(x, y):
""" Args:
x (int or float)
y (int or float)
Returns:
dagpath : OpenMaya.MDagPath
"""
# Select from screen
OpenMaya.MGlobal.selectFromScreen(
int(x),
int(y),
OpenMaya.MGlobal.kReplaceList,
OpenMaya.MGlobal.kSurfaceSelectMethod)
# Get dagpath, or return None if fails
tempSel = OpenMaya.MSelectionList()
OpenMaya.MGlobal.getActiveSelectionList(tempSel)
dagpath = OpenMaya.MDagPath()
if tempSel.length() == 0:
return None
else:
tempSel.getDagPath(0, dagpath)
return dagpath
def getClosestVertex(point_orig, faceID, fnMesh):
""" Args:
point_orig (OpenMaya.MFloatPoint)
faceID (int)
fnMesh (OpenMaya.MFnMesh)
Returns:
closestPoint : OpenMaya.MPoint
"""
vertexIndexArray = OpenMaya.MIntArray()
fnMesh.getPolygonVertices(faceID, vertexIndexArray)
basePoint = OpenMaya.MPoint(point_orig)
closestPoint = OpenMaya.MPoint()
length = 99999.0
for index in vertexIndexArray:
point = OpenMaya.MPoint()
fnMesh.getPoint(index, point, OpenMaya.MSpace.kWorld)
lengthVector = point - basePoint
if lengthVector.length() < length:
length = lengthVector.length()
closestPoint = point
return closestPoint
| 2.25 | 2 |
measure_mate/migrations/0015_merge.py | niche-tester/measure-mate | 15 | 12799855 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-02-26 06:25
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('measure_mate', '0011_auto_20160225_1305'),
('measure_mate', '0014_auto_20160224_0207'),
]
operations = [
]
| 1.375 | 1 |
vit/formatter/start_remaining.py | kinifwyne/vit | 179 | 12799856 | from vit.formatter.start import Start
class StartRemaining(Start):
def format_datetime(self, start, task):
return self.remaining(start)
| 2.09375 | 2 |
mpdshell.py | SirJson/mpdshell | 0 | 12799857 |
#!/usr/bin/env python3
import argparse
import asyncio
import selectors
import socket
import sys
import threading
import time
from datetime import datetime
from pathlib import Path
from threading import Lock
from typing import List
from prompt_toolkit import HTML
from prompt_toolkit import print_formatted_text as print
from prompt_toolkit.application import Application, application
from prompt_toolkit.completion import WordCompleter
from prompt_toolkit.contrib.regular_languages.compiler import compile
from prompt_toolkit.contrib.regular_languages.completion import \
GrammarCompleter
from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer
from prompt_toolkit.document import Document
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout.containers import (Float, FloatContainer, HSplit,
Window)
from prompt_toolkit.layout.controls import (Buffer, BufferControl,
FormattedTextControl)
from prompt_toolkit.layout.layout import Layout
from prompt_toolkit.layout.menus import CompletionsMenu
from prompt_toolkit.lexers import SimpleLexer
from prompt_toolkit.output import ColorDepth
from prompt_toolkit.styles import Style
from prompt_toolkit.widgets import SearchToolbar, TextArea
selector = selectors.SelectSelector()
loop = asyncio.SelectorEventLoop(selector)
asyncio.set_event_loop(loop)
RECV_BUFFER_SIZE = 4096
SCRIPT_HOME = Path.home() / 'mpdscripts'
DEBUGAPP = False
NOECHO = False
APP = None
mpdcmds = [
"add",
"addid",
"addtagid",
"albumart",
"channels",
"clear",
"clearerror",
"cleartagid",
"close",
"commands",
"config",
"consume",
"count",
"crossfade",
"currentsong",
"decoders",
"delete",
"deleteid",
"delpartition",
"disableoutput",
"enableoutput",
"find",
"findadd",
"getfingerprint",
"idle",
"kill",
"list",
"listall",
"listallinfo",
"listfiles",
"listmounts",
"listneighbors",
"listpartitions",
"listplaylist",
"listplaylistinfo",
"listplaylists",
"load",
"lsinfo",
"mixrampdb",
"mixrampdelay",
"mount",
"move",
"moveid",
"moveoutput",
"newpartition",
"next",
"notcommands",
"outputs",
"outputset",
"partition",
"password",
"pause",
"ping",
"play",
"playid",
"playlist",
"playlistadd",
"playlistclear",
"playlistdelete",
"playlistfind",
"playlistid",
"playlistinfo",
"playlistmove",
"playlistsearch",
"plchanges",
"plchangesposid",
"previous",
"prio",
"prioid",
"random",
"rangeid",
"readcomments",
"readmessages",
"readpicture",
"rename",
"repeat",
"replay_gain_mode",
"replay_gain_status",
"rescan",
"rm",
"save",
"search",
"searchadd",
"searchaddpl",
"seek",
"seekcur",
"seekid",
"sendmessage",
"setvol",
"shuffle",
"single",
"stats",
"status",
"sticker",
"stop",
"subscribe",
"swap",
"swapid",
"tagtypes",
"toggleoutput",
"unmount",
"unsubscribe",
"update",
"urlhandlers",
"volume"]
internalcmds = {
"exec": lambda s, x: s.runscript(x),
"scripts": lambda s, x: listscripts(s, x),
"help": lambda s, x: apphelp(s, x),
"mpchelp": lambda s, x: mpchelp(s, x),
"reset": lambda s, x: resetterm(s,x)
}
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.next_call = time.time()
self.start()
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self.next_call += self.interval
self._timer = threading.Timer(
self.next_call - time.time(), self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
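# Hedged usage sketch (not part of the original script): RepeatedTimer starts itself
# on construction and compensates for drift via next_call, so a periodic job only
# needs a callable and an interval. The interval and callback below are made up.
def _repeated_timer_demo():
    ticks = []
    timer = RepeatedTimer(0.5, lambda: ticks.append(time.time()))
    try:
        time.sleep(2.0)   # let the timer fire roughly four times
    finally:
        timer.stop()      # always cancel the underlying threading.Timer
    return ticks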
class MPDClient(object):
def __init__(self, hostname: str, port: int):
self.selector = selectors.DefaultSelector()
self._inbuffer = []
self._outbuffer = []
self._echobuffer = []
self.server = hostname
self.port = port
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((hostname, port))
data = self.socket.recv(RECV_BUFFER_SIZE)
self.initmsg = str(data, 'utf-8')
self.socket_lock = Lock()
self.state_lock = Lock()
self._io_lock = Lock()
self.socket.setblocking(False)
self.selector.register(
self.socket, selectors.EVENT_READ | selectors.EVENT_WRITE, self.onsocketready)
self._remote_closed = False
self.dbg_lastmask = 0x0
def data_available(self) -> bool:
with self._io_lock:
return len(self._inbuffer) > 0
def echo_available(self) -> bool:
with self._io_lock:
return len(self._echobuffer) > 0
def peek_inbuffer(self) -> int:
return len(self._inbuffer)
def peek_outbuffer(self) -> int:
return len(self._outbuffer)
def peek_echobuffer(self) -> int:
return len(self._echobuffer)
def pop_message(self) -> str:
with self._io_lock:
msg = self._inbuffer.pop()
if str(msg).strip() == 'OK':
return None
else:
return msg
def pop_echo(self) -> str:
with self._io_lock:
return self._echobuffer.pop()
def runscript(self, param):
params = param.split(' ')
file = params[0]
with open(SCRIPT_HOME / file) as mpcscript:
data = mpcscript.read()
return self.send(data)
def disconnect(self, *argv):
with self.socket_lock:
try:
self.socket.sendall(bytes('close', 'utf-8'))
except BaseException as ex:
print("Connection closed by remote: {}".format(ex))
finally:
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
def ping(self) -> bool:
self.send('ping')
def ping_unchecked(self):
try:
self.send('ping')
except BaseException:
with self.state_lock:
self._remote_closed = True
def force_closed(self):
with self.state_lock:
return self._remote_closed
def poll(self):
events = self.selector.select()
for key, mask in events:
callback = key.data
callback(key.fileobj, mask)
def send(self, message: str):
with self._io_lock:
self._outbuffer.append(message)
def onsocketready(self, connection, mask):
self.dbg_lastmask = mask
if mask & selectors.EVENT_READ:
self._receive(connection)
if mask & selectors.EVENT_WRITE:
self._transmit(connection)
def _receive(self, connection):
chunks = []
with self._io_lock:
data = connection.recv(RECV_BUFFER_SIZE)
if data:
chunks.append(data)
self._inbuffer.append(str(b''.join(chunks), 'utf-8'))
def _transmit(self, connection):
with self._io_lock:
while len(self._outbuffer) > 0:
msg = self._outbuffer.pop()
command = str(msg + '\n')
connection.sendall(bytes(command, 'utf-8'))
def local_echo(self, message):
with self._io_lock:
self._echobuffer.append(message)
def close(self):
with self.socket_lock:
self.socket.shutdown(socket.SHUT_WR)
self.socket.close()
def create_grammar():
return compile(
r"""
(?P<exec>\![a-z]+) |
((?P<exec>\![a-z]+)\s(?P<execparam>[a-zA-Z0-9.\/\\\-\_\s]+)\s*) |
(?P<func>[a-z]+) |
((?P<func>[a-z]+)\s(?P<params>\+?[a-zA-Z0-9.\/\:\\\-\_\s]+)\s*)
"""
)
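# Hedged illustration (not in the original source): the compiled grammar above drives
# lexing, completion and the accept handler. Matching a raw input line shows which
# named groups the handler will see; the sample strings below are arbitrary.
def _demo_grammar_match():
    grammar = create_grammar()
    for text in ("status", "setvol 80", "!exec myscript.ncs"):
        match = grammar.match(text)
        if match is None:
            print(f"{text!r}: no match")
            continue
        variables = match.variables()
        print(f"{text!r}: func={variables.get('func')} params={variables.get('params')} "
              f"exec={variables.get('exec')} execparam={variables.get('execparam')}")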
def mpchelp(mpd, _param):
output = ''
output += "=== MPC Commands ===\n"
for c in mpdcmds:
output += str(c) + "\n"
mpd.local_echo(output)
def apphelp(mpd, _param):
output = ''
output += "=== Shell Commands ===\n"
for c in internalcmds.keys():
output += str(c) + "\n"
mpd.local_echo(output)
def resetterm(mpd, _param):
APP.reset()
mpd.local_echo("Terminal reset!")
def listscripts(mpd, _param):
output = f'=== Available mpd shell scripts in "{SCRIPT_HOME}" ===\n'
files = list(SCRIPT_HOME.glob("*.ncs"))
for file in files:
output += ' - ' + file.name + "\n"
output += f'\n\n----\n=> Total: {len(files)}'
mpd.local_echo(output)
def gen_style() -> Style:
base00 = '#000000'
base01 = '#202020'
base02 = '#303030'
base03 = '#505050'
base04 = '#909090'
base05 = '#bfbfbf'
base06 = '#e0e0e0'
base07 = '#ffffff'
base08 = '#eb008a'
base09 = '#f29333'
base0A = '#f8ca12'
base0B = '#FF6236'
base0C = '#00aabb'
base0D = '#0e5a94'
base0E = '#b31e8d'
base0F = '#7a2d00'
baseA0 = '#242424'
baseA1 = '#06A191'
return Style.from_dict(
{
"function": base0D,
"parameter": base08,
"exec": base0E,
"execparam": base09,
"trailing-input": base0F,
"output": base0B,
"debug": f"bg:{base01} {base0A}",
"input": f"bg:{base01} {base04}",
"linetoken": base0C,
"line": base03,
"base": f"bg:{baseA0} {base05}",
"toolbar": f"bg:{base01} {baseA1}",
"title": f"bg:{base02} #90A4AE",
"c1": "#FF5722",
"c2": "#D4E157",
"c3": "#9575CD",
"c4": "#4CAF50",
"c5": "#9C27B0"
})
def invalid_input(msg="Invalid command"):
return msg
def get_line_prefix(lineno, wrap_count):
return HTML('<linetoken><b>»</b></linetoken> ')
def get_netdbg_prefix(lineno, wrap_count):
return HTML('<linetoken>NETTICK: </linetoken> ')
def get_socketdbg_prefix(lineno, wrap_count):
return HTML('<linetoken>SOCKET:</linetoken> ')
def get_echodbg_prefix(lineno, wrap_count):
return HTML('<linetoken>SYSECHO:</linetoken> ')
def main():
global DEBUGAPP, NOECHO, APP
parser = argparse.ArgumentParser()
parser.add_argument("host", help="The host of your MPD instance")
parser.add_argument("-p", "--port", help="The port on which MPD is running (default: 6600)",
type=int, default=6600, required=False)
parser.add_argument("-s", "--secret", help="Initialize connection with this password (default: None)",
type=str, required=False)
parser.add_argument("-d", "--debug", help="Show internal debug info (default: 0)",
type=bool, default=False, required=False)
parser.add_argument("-a", "--alive-tick", help="How many seconds between a keep a live should be waited. (default: 3)",
type=int, default=3, required=False)
parser.add_argument("-n", "--no-echo", help="Own commands don't get written into the output view (default: 0)",
type=bool, default=False, required=False)
parser.add_argument("-b", "--buffer-size", help="The size of one TCP buffer. A message might get broken into multiple buffer if the size isn't big enough or your network can't support it. For optimal performance choose a size with the power of two. (default: 4096)",
type=int, default=4096, required=False)
args = parser.parse_args()
DEBUGAPP = args.debug
alive_tick = args.alive_tick
port = args.port
print(f"Connecting to {args.host}@{port}...")
mpd = MPDClient(args.host, port)
grammar = create_grammar()
intro_text = HTML(f"Connected to: <c1>{mpd.server}@{mpd.port}</c1> ❯ <c2>{mpd.initmsg}</c2>")
client_settings = HTML(f"Keep alive tick: <c4>{alive_tick}</c4> | TCP buffer: <c4>{RECV_BUFFER_SIZE}</c4> | Echo enabled: <c4>{str(not NOECHO)}</c4>")
help_text = HTML(f"Exit: <c4>[Control-C]</c4> | Scroll up: <c4>[PageUp]</c4> | Scroll down: <c4>[PageDown]</c4> | App command prefix: <c4>[!]</c4> <b>(try !help)</b>")
lexer = GrammarLexer(
grammar,
lexers={
"func": SimpleLexer("class:function"),
"params": SimpleLexer("class:parameter"),
"exec": SimpleLexer("class:exec"),
"execparam": SimpleLexer("class:execparam"),
},
)
commands = []
commands.extend(mpdcmds)
keywords = WordCompleter(commands)
intern_keywords = WordCompleter(internalcmds.keys())
completer = GrammarCompleter(
grammar,
{
"func": keywords,
"exec": intern_keywords
},
)
search_field = SearchToolbar() # For reverse search.
output_field = Buffer()
netdbg_buffer = Buffer()
socketdbg_buffer = Buffer()
echodbg_buffer = Buffer()
input_field = TextArea(
height=1,
lexer=lexer,
completer=completer,
prompt="❯ ",
style="class:input",
multiline=False,
wrap_lines=False,
search_field=search_field,
)
lineup = Window(height=1, char="▁", style="class:line")
linedown = Window(height=1, char="▔", style="class:line")
debugnotice = Window(
FormattedTextControl(
HTML("<b>== Debug Info ==</b>")
),
height=1,
style="class:title",
)
nettickwnd = Window(
BufferControl(buffer=netdbg_buffer),
height=1,
get_line_prefix=get_netdbg_prefix,
wrap_lines=False,
style="class:debug")
socketwnd = Window(
BufferControl(buffer=socketdbg_buffer),
height=1,
get_line_prefix=get_socketdbg_prefix,
wrap_lines=False,
style="class:debug")
echownd = Window(
BufferControl(buffer=echodbg_buffer),
height=1,
get_line_prefix=get_echodbg_prefix,
wrap_lines=False,
style="class:debug")
debugzone = HSplit([])
if args.debug:
debugzone = HSplit([
lineup,
debugnotice,
lineup,
nettickwnd,
socketwnd,
echownd,
linedown])
container = FloatContainer(
content=HSplit(
[
Window(
FormattedTextControl(
intro_text
),
height=1,
style="class:title",
),
Window(
FormattedTextControl(
client_settings
),
height=1,
style="class:title",
),
linedown,
debugzone,
Window(
BufferControl(buffer=output_field),
get_line_prefix=get_line_prefix,
wrap_lines=False,
style="class:output"),
lineup,
input_field,
search_field,
linedown,
lineup,
Window(
FormattedTextControl(
help_text
),
height=1,
style="class:toolbar",
),
]
),
floats=[
Float(
xcursor=True,
ycursor=True,
content=CompletionsMenu(max_height=32, scroll_offset=1),
)
],
style="class:base"
)
def netdebug_print(msg):
if not DEBUGAPP:
return
netdbg_buffer.document = Document(
text=msg, cursor_position=0
)
def sockdebug_print(msg):
if not DEBUGAPP:
return
socketdbg_buffer.document = Document(
text=msg, cursor_position=0
)
def echodbg_print(msg):
if not DEBUGAPP:
return
echodbg_buffer.document = Document(
text=msg, cursor_position=0
)
def indent(text: str, spaces=2):
output = ''
for l in text.splitlines():
output += ' ' * spaces + l + '\n'
return output
def accept(buff):
if mpd.force_closed():
application.exit(result="Connection reset by peer")
try:
match = grammar.match(buff.text)
if match:
params = match.variables()
execcmd = params.get("exec")
if execcmd is not None:
params = params.get("execparam")
funcptr = internalcmds.get(
execcmd[1:], lambda s, x: invalid_input("Unknown internal command"))
funcptr(mpd, params)
else:
cmd = params.get("func")
if cmd not in mpdcmds:
mpd.local_echo(invalid_input())
else:
mpd.local_echo(buff.text)
mpd.send(buff.text)
if buff.text == "close":
application.exit()
else:
mpd.local_echo(invalid_input())
except BaseException as e:
tb = sys.exc_info()[2]
mpd.local_echo("\n\nError: {}\n\tFrame: {}\n\tInstruction: {}\n\tLine: {}".format(
e, tb.tb_frame, tb.tb_lasti, tb.tb_lineno))
input_field.accept_handler = accept
# The key bindings.
kb = KeyBindings()
@kb.add("pageup")
def onpageup(_event):
output_field.cursor_position -= 500
@kb.add("pagedown")
def onpagedown(_event):
output_field.cursor_position += 500
@kb.add("c-c")
@kb.add("c-q")
def _(event):
"""Pressing Ctrl-Q or Ctrl-C will exit the user interface."""
event.app.exit()
####
# Here happens the main loop sort of
####
def netpoll():
if not mpd:
return
sockdebug_print(
f"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}")
mpd.poll()
netdebug_print(
f"[{ datetime.now().isoformat()}] netbuffer: input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})")
echodbg_print(
f"[{ datetime.now().isoformat()}] echobuffer: {mpd.peek_echobuffer()}")
####################################
# SECTION: READBACK COLLECT
####################################
sockdebug_print(
f"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}")
recv_output = ''
while mpd.data_available():
message = mpd.pop_message()
if message:
isonow = datetime.now().isoformat(timespec='seconds')
recv_output += indent(f'\n[{isonow}] {message}\n')
netdebug_print(
f"[{ datetime.now().isoformat()}] netbuffer (DRAIN): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})")
####################################
# SECTION: READBACK ECHOs
####################################
sockdebug_print(
f"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}")
local_output = ''
while mpd.echo_available():
echodbg_print(
f"[{ datetime.now().isoformat()}] echobuffer (DRAIN): {mpd.peek_echobuffer()}")
echomsg = mpd.pop_echo()
if echomsg:
isonow = datetime.now().isoformat(timespec='seconds')
local_output += f'\n{echomsg}\n'
echodbg_print(
f"[{ datetime.now().isoformat()}] echobuffer (DRAIN): {mpd.peek_echobuffer()}")
####################################
# SECTION WRITE TO TTY
####################################
sockdebug_print(
f"[{ datetime.now().isoformat()}] mask: {mpd.dbg_lastmask}")
new_text = output_field.text
if recv_output != '':
netdebug_print(
f"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})")
echodbg_print(
f"[{ datetime.now().isoformat()}] echobuffer (DRAW): {mpd.peek_echobuffer()}")
new_text += recv_output
if local_output != '':
netdebug_print(
f"[{ datetime.now().isoformat()}] netbuffer (DRAW): input({mpd.peek_inbuffer()}) output({mpd.peek_outbuffer()})")
echodbg_print(
f"[{ datetime.now().isoformat()}] echobuffer (DRAW): {mpd.peek_echobuffer()}")
new_text += local_output
if recv_output != '' or local_output != '':
output_field.document = Document(
text=new_text, cursor_position=len(new_text))
application.invalidate()
####################################
# netpoll() end
####################################
autoping = RepeatedTimer(3.0, lambda x: x.ping_unchecked(), mpd)
autopoll = RepeatedTimer(1.0, netpoll)
# Run application.
application = Application(
layout=Layout(container, focused_element=input_field),
key_bindings=kb,
style=gen_style(),
mouse_support=True,
full_screen=True,
enable_page_navigation_bindings=False,
color_depth=ColorDepth.TRUE_COLOR
)
APP = application
if args.secret is not None:
mpd.send(f"password {args.secret}")
application.run()
autoping.stop()
autopoll.stop()
mpd.disconnect()
if __name__ == '__main__':
main()
| 1.765625 | 2 |
FlaskApp/utils.py | robertavram/project5 | 7 | 12799858 | import random
import string
import other_info
import re
from dicttoxml import dicttoxml
def make_csrf_state(size):
''' Makes a CSRF state by randomly choosing uppercase letters and digits '''
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in xrange(size))
def valid_item_name(item_name):
''' Test item name for bad words or format etc
-not fully implemented for this project'''
if len(item_name) > 50:
return False;
return True
def valid_item_description(item_description):
'''test item description for bad words or format etc
-not fully implemented for this project'''
if len(item_description) > 1000:
return False
return True
def allowed_file(filename):
''' Checks if an image file has the right extension '''
ALLOWED_EXTENSIONS = set(['jpg', 'jpeg', 'gif', "png"])
return '.' in filename and \
filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def valid_link(link):
    ''' Checks if the link provided is a valid link format
    - not fully implemented for this project'''
    if not link:
        return False
    pat = re.compile(r"^(https?:\/\/)?([\da-z\.-]+)\.([a-z\.]{2,6})([\/\w \.-]*)*\/?$")
    return pat.match(link) is not None
def test_new_item(item_dict):
''' takes an item dictionary and checks if all fields are properly filled
returns: tuple (Bool: Success, string: Error) '''
# for now just test the surface things
category = item_dict.get("category")
if not category in other_info.item_categories:
return (False, "Category is invalid")
name = item_dict.get("name")
if not name or not valid_item_name(name) == True:
return (False, "Name not valid")
description = item_dict.get("description")
if not valid_item_description(description) == True:
return (False, "Description not valid")
link = item_dict.get("link")
if link and not valid_link(item_dict["link"]):
return (False, "Link is not valid")
return (True, None)
def test_item_prop(item_dict):
''' Tests all the properties passed in the item_dict
and checks if they are valid for updating the item '''
my_valid_vars = ['name', 'category', 'description', 'link']
for kw in item_dict:
if kw not in my_valid_vars:
return (False, "You are trying to update a property that doesn't exist: %s"%kw)
if kw == 'name' and not valid_item_name(item_dict[kw]):
return (False, "Name not valid")
if kw == 'description' and not valid_item_description(item_dict[kw]):
return (False, "Description not valid")
if kw == 'category' and item_dict[kw] not in other_info.item_categories:
return (False, "Category not valid")
if kw == 'link' and not valid_link(item_dict[kw]):
return (False, "Link not valid")
return (True, None)
def remove_special_characters(my_string):
return ''.join(e for e in my_string if e.isalnum()).lower()
def get_cat_regex():
return "(?i)"+'|'.join(other_info.item_categories)
def makexml(my_dict):
return dicttoxml(my_dict)
| 2.921875 | 3 |
npde.py | marghetis/npde | 37 | 12799859 | import numpy as np
import tensorflow as tf
import tensorflow.contrib.distributions as tfd
from integrators import ODERK4, SDEEM
from kernels import OperatorKernel
from gpflow import transforms
from param import Param
float_type = tf.float64
jitter0 = 1e-6
class NPODE:
def __init__(self,Z0,U0,sn0,kern,jitter=jitter0,
summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False):
""" Constructor for the NPODE model
Args:
Z0: Numpy matrix of initial inducing points of size MxD, M being the
number of inducing points.
U0: Numpy matrix of initial inducing vectors of size MxD, M being the
number of inducing points.
sn0: Numpy vector of size 1xD for initial signal variance
kern: Kernel object for GP interpolation
jitter: Float of jitter level
whiten: Boolean. Currently we perform the optimization only in the
white domain
summ: Boolean for Tensorflow summary
fix_Z: Boolean - whether inducing locations are fixed or optimized
fix_U: Boolean - whether inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
"""
self.name = 'npode'
self.whiten = whiten
self.kern = kern
self.jitter = jitter
with tf.name_scope("NPDE"):
Z = Param(Z0,
name = "Z",
summ = False,
fixed = fix_Z)
U = Param(U0,
name = "U",
summ = False,
fixed = fix_U)
sn = Param(np.array(sn0),
name = "sn",
summ = summ,
fixed = fix_sn,
transform = transforms.Log1pe())
self.Z = Z()
self.U = U()
self.sn = sn()
self.D = U.shape[1]
self.integrator = ODERK4(self)
self.fix_Z = fix_Z
self.fix_sn = fix_sn
self.fix_U = fix_U
def f(self,X,t=[0]):
""" Implements GP interpolation to compute the value of the differential
function at location(s) X.
Args:
X: TxD tensor of input locations, T is the number of locations.
Returns:
TxD tensor of differential function (GP conditional) computed on
input locations
"""
U = self.U
Z = self.Z
kern = self.kern
N = tf.shape(X)[0]
M = tf.shape(Z)[0]
D = tf.shape(Z)[1] # dim of state
if kern.ktype == "id":
Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter
Lz = tf.cholesky(Kzz)
Kzx = kern.K(Z, X)
A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
if not self.whiten:
A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
f = tf.matmul(A, U, transpose_a=True)
# transformation for "id - rbf" kernel
if not kern.ktype == "id" and not kern.ktype == "kr" :
f = tf.reshape(f,[N,D])
return f
def build_prior(self):
if self.kern.ktype == "id" or self.kern.ktype == "kr":
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.U[:,0]))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.U[:,0]),
covariance_matrix=self.kern.K(self.Z,self.Z))
probs = tf.add_n([mvn.log_prob(self.U[:,d]) for d in range(self.kern.ndims)])
else:
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.U))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.U),
covariance_matrix=self.kern.K(self.Z,self.Z))
probs = tf.reduce_sum(mvn.log_prob(tf.squeeze(self.U)))
return probs
def forward(self,x0,ts):
return self.integrator.forward(x0=x0,ts=ts)
def predict(self,x0,t):
""" Computes the integral and returns the path
Args:
x0: Python/numpy array of initial value
t: Python/numpy array of time points the integral is evaluated at
Returns:
ODE solution computed at t, tensor of size [len(t),len(x0)]
"""
x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1))
t = [t]
integrator = ODERK4(self)
path = integrator.forward(x0,t)
path = path[0]
return path
def Kzz(self):
kern = self.kern
Z = self.Z
M = tf.shape(Z)[0]
D = tf.shape(Z)[1] # dim of state
if kern.ktype == "id":
Kzz = kern.K(Z) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Z) + tf.eye(M*D, dtype=float_type) * self.jitter
return Kzz
def U(self):
U = self.U
if self.whiten:
Lz = tf.cholesky(self.Kzz())
U = tf.matmul(Lz,U)
return U
def __str__(self):
rep = 'noise variance: ' + str(self.sn.eval()) + \
'\nsignal variance: ' + str(self.kern.sf.eval()) + \
'\nlengthscales: ' + str(self.kern.ell.eval())
return rep
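# Hedged construction sketch (not in the original file): a minimal 2-D NPODE with a
# small grid of inducing points. The OperatorKernel arguments mirror the call made in
# BrownianMotion below and the lengthscale/variance values are assumptions; building
# the model only assembles the TensorFlow graph, fitting still needs data and a session.
def _build_demo_npode(grid_points=5):
    grid = np.linspace(-2.0, 2.0, grid_points)
    xx, yy = np.meshgrid(grid, grid)
    Z0 = np.column_stack([xx.ravel(), yy.ravel()])   # M x 2 inducing locations
    U0 = np.zeros_like(Z0)                           # M x 2 inducing vectors
    sn0 = np.array([0.1, 0.1])                       # per-dimension noise variance
    kern = OperatorKernel(sf0=1.0, ell0=np.ones(2), ktype="id", name="Kernel",
                          summ=False, fix_ell=False, fix_sf=False)
    return NPODE(Z0, U0, sn0, kern)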
class NPSDE(NPODE):
def __init__(self,Z0,U0,sn0,kern,diffus,s=1,jitter=jitter0,
summ=False,whiten=True,fix_Z=False,fix_U=False,fix_sn=False):
""" Constructor for the NPSDE model
Args:
Z0: Numpy matrix of initial inducing points of size MxD, M being the
number of inducing points.
U0: Numpy matrix of initial inducing vectors of size MxD, M being the
number of inducing points.
sn0: Numpy vector of size 1xD for initial signal variance
kern: Kernel object for GP interpolation
diffus: BrownianMotion object for diffusion GP interpolation
s: Integer parameterizing how denser the integration points are
jitter: Float of jitter level
summ: Boolean for Tensorflow summary
whiten: Boolean. Currently we perform the optimization only in the
white domain
fix_Z: Boolean - whether inducing locations are fixed or optimized
fix_U: Boolean - whether inducing vectors are fixed or optimized
fix_sn: Boolean - whether noise variance is fixed or optimized
"""
super().__init__(Z0,U0,sn0,kern,jitter=jitter,
summ=summ,whiten=whiten,fix_Z=fix_Z,fix_U=fix_U,fix_sn=fix_sn)
self.name = 'npsde'
self.diffus = diffus
self.integrator = SDEEM(self)
def build_prior(self):
pf = super().build_prior()
pg = self.diffus.build_prior()
return pf + pg
def g(self,ts,Nw=1):
return self.diffus.g(ts=ts,Nw=Nw)
def forward(self,x0,ts,Nw=1):
return self.integrator.forward(x0=x0,ts=ts,Nw=Nw)
def sample(self,x0,t,Nw):
""" Draws random samples from a learned SDE system
Args:
Nw: Integer number of samples
x0: Python/numpy array of initial value
t: Python/numpy array of time points the integral is evaluated at
Returns:
Tensor of size [Nw,len(t),len(x0)] storing samples
"""
# returns (Nw, len(t), D)
x0 = np.asarray(x0,dtype=np.float64).reshape((1,-1))
t = [t]
path = self.integrator.forward(x0,t,Nw)
path = path[0]
return path
def __str__(self):
return super().__str__() + self.diffus.__str__()
class BrownianMotion:
def __init__(self,sf0,ell0,Z0,U0,whiten=False,summ=False,
fix_ell=True,fix_sf=True,fix_Z=True,fix_U=False):
with tf.name_scope('Brownian'):
Zg = Param(Z0,
name = "Z",
summ = False,
fixed = fix_Z)
Ug = Param(U0,
name = "U",
summ = False,
fixed = fix_U)
self.kern = OperatorKernel(sf0=sf0,
ell0=ell0,
ktype="id",
name='Kernel',
summ=summ,
fix_ell=fix_ell,
fix_sf=fix_sf)
self.Zg = Zg()
self.Ug = Ug()
self.jitter = 1e-6
self.whiten = whiten
self.fix_Z = fix_Z
self.fix_U = fix_U
def g(self,X,t):
""" generates state dependent brownian motion
Args:
X: current states (in rows)
t: current time (used if diffusion depends on time)
Returns:
A tensor of the same shape as X
"""
Ug = self.Ug
Zg = self.Zg
kern = self.kern
if not kern.ktype == "id":
raise NotImplementedError()
M = tf.shape(Zg)[0]
D = tf.shape(X)[1]
if kern.ktype == "id":
Kzz = kern.K(Zg) + tf.eye(M, dtype=float_type) * self.jitter
else:
Kzz = kern.K(Zg) + tf.eye(M*D, dtype=float_type) * self.jitter
Lz = tf.cholesky(Kzz)
Kzx = kern.K(Zg, X)
A = tf.matrix_triangular_solve(Lz, Kzx, lower=True)
if not self.whiten:
A = tf.matrix_triangular_solve(tf.transpose(Lz), A, lower=False)
g = tf.matmul(A, Ug, transpose_a=True)
dw = tf.random_normal(tf.shape(X),dtype=float_type)
return g*dw
def __str__(self):
rep = '\ndiff signal variance: ' + str(self.kern.sf.eval()) + \
'\ndiff lengthscales: ' + str(self.kern.ell.eval())
return rep
def build_prior(self):
if self.whiten:
mvn = tfd.MultivariateNormalDiag(
loc=tf.zeros_like(self.Ug))
else:
mvn = tfd.MultivariateNormalFullCovariance(
loc=tf.zeros_like(self.Ug),
covariance_matrix=self.kern.K(self.Zg,self.Zg))
return tf.reduce_sum(mvn.log_prob(self.Ug)) | 2.28125 | 2 |
python/raft/Leader.py | chenzhaoplus/vraft | 23 | 12799860 |
import time
from random import randrange
import grequests
from NodeState import NodeState
from client import Client
from cluster import HEART_BEAT_INTERVAL, ELECTION_TIMEOUT_MAX
import logging
from monitor import send_state_update, send_heartbeat
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.INFO)
class Leader(NodeState):
def __init__(self, candidate):
super(Leader, self).__init__(candidate.node)
self.current_term = candidate.current_term
self.commit_index = candidate.commit_index
self.last_applied_index = candidate.last_applied_index
self.entries = candidate.entries
self.stopped = False
self.followers = [peer for peer in self.cluster if peer != self.node]
self.election_timeout = float(randrange(ELECTION_TIMEOUT_MAX / 2, ELECTION_TIMEOUT_MAX))
def heartbeat(self):
while not self.stopped:
logging.info(f'{self} send heartbeat to followers')
logging.info('========================================================================')
send_heartbeat(self, HEART_BEAT_INTERVAL)
client = Client()
with client as session:
posts = [
grequests.post(f'http://{peer.uri}/raft/heartbeat', json=self.node, session=session)
for peer in self.followers
]
for response in grequests.map(posts, gtimeout=HEART_BEAT_INTERVAL):
if response is not None:
logging.info(f'{self} got heartbeat from follower: {response.json()}')
else:
logging.info(f'{self} got heartbeat from follower: None')
logging.info('========================================================================')
time.sleep(HEART_BEAT_INTERVAL)
def __repr__(self):
return f'{type(self).__name__, self.node.id, self.current_term}'
| 2.359375 | 2 |
getDOBBoilerData.py | dtom90/pygotham_boiler_miner | 0 | 12799861 | import urllib.request
from bs4 import BeautifulSoup
def getDOBBoilerData( boroNum, houseNum, houseStreet ):
url = requestToDOBUrl( boroNum, houseNum, houseStreet )
soup = urlToSoup( url )
if hasDOBData( soup ):
return extractDOBDataFromSoup( soup )
else:
return "Invalid Query"
def requestToDOBUrl( boroNum, houseNum, houseStreet ):
return ("http://a810-bisweb.nyc.gov/bisweb/PropertyProfileOverviewServlet" +
"?boro=" + str(boroNum) +
"&houseno=" + str(houseNum) +
"&street=" + houseStreet.replace(' ','+'))
def urlToSoup( url ):
"Takes in URL and returns a soup object of the contents."
webpage = urllib.request.urlopen( url )
soup = BeautifulSoup( webpage.read(), "html.parser" )
# soup.unicode
return soup
def hasDOBData( soup ):
"Checks to see whether DEP data exist for a given application number."
tables = soup.find_all("table")
return tables[1].get_text().find("NO RECORD") == -1
def extractDOBDataFromSoup( soup ):
"""
Takes in data structure from BeautifulSoup and parses for DOB Boiler Data.
We assume that the soup has been prescreened to ensure that data exist.
"""
allUrls = soup.find_all('a')
#get the url with the reference to the "BoilerComplianceQueryServlet".
#There should be exactly one such url.
for i in allUrls:
if i['href'].find("BoilerComplianceQueryServlet") != -1:
url = "http://a810-bisweb.nyc.gov/bisweb/" + i['href']
soup2 = urlToSoup(url)
boilerTables = soup2.find_all('table')
records = list()
for row in boilerTables[3].find_all('tr'): #grab the table with boiler data
records.append(row.get_text().strip('\n').split('\n'))
return records
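# Hedged usage sketch (not part of the original module): boroNum is assumed to follow
# the DOB BIS convention (1 = Manhattan ... 5 = Staten Island) and the address below
# is made up, so a real run would most likely return "Invalid Query".
if __name__ == '__main__':
    records = getDOBBoilerData(1, 100, "EXAMPLE STREET")
    print(records)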
| 3.515625 | 4 |
srunner/drl_code/scenario_utils/kinetics.py | liuyuqi123/scenario_runner | 0 | 12799862 | """
Some helper methods for vehicle kinetics.
"""
import carla
import numpy as np
import math
def get_speed(vehicle):
"""
Get speed considering only the 2D velocity.
"""
vel = vehicle.get_velocity()
return math.sqrt(vel.x ** 2 + vel.y ** 2) # + vel.z ** 2)
def set_vehicle_speed(vehicle, speed: float):
"""
Set vehicle to a target speed.
Velocity vector coincide vehicle x-axis.
:param:speed in m/s
"""
# set a initial speed for ego vehicle
transform = vehicle.get_transform()
# transform matrix from actor coord system to world system
trans_matrix = get_transform_matrix(transform) # actor2world
# target velocity in local coordinate system, in m/s
target_vel = np.array([[speed], [0.], [0.]])
# target velocity in world coordinate system
target_vel_world = np.dot(trans_matrix, target_vel)
target_vel_world = np.squeeze(target_vel_world)
# in carla.Vector3D
target_velocity = carla.Vector3D(
x=target_vel_world[0],
y=target_vel_world[1],
z=target_vel_world[2],
)
#
vehicle.set_target_velocity(target_velocity)
def angle_reg(angle):
"""
Regularize angle into certain bound.
default range is [-pi, pi]
"""
while True:
if -np.pi <= angle <= np.pi:
return angle
if angle < -np.pi:
angle += 2 * np.pi
else:
angle -= 2 * np.pi
def get_transform_matrix(transform: carla.Transform):
"""
Get and parse a transformation matrix by transform.
Matrix is from Actor coord system to the world coord system.
:param transform:
:return trans_matrix: transform matrix in ndarray
"""
# original trans matrix in list
_T = transform.get_matrix()
# transform matrix from Actor system to world system
trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],
[_T[1][0], _T[1][1], _T[1][2]],
[_T[2][0], _T[2][1], _T[2][2]]])
return trans_matrix
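# Hedged illustration (not in the original module): with zero pitch and roll the
# actor-to-world rotation returned above reduces to a plain yaw rotation, so a forward
# speed v maps to (v*cos(yaw), v*sin(yaw), 0) in world coordinates. Yaw is in degrees
# to match carla.Rotation; this is a numpy-only sketch, not part of the CARLA API.
def yaw_only_velocity(speed, yaw_deg):
    yaw = np.deg2rad(yaw_deg)
    rotation = np.array([[np.cos(yaw), -np.sin(yaw), 0.0],
                         [np.sin(yaw),  np.cos(yaw), 0.0],
                         [0.0,          0.0,         1.0]])
    local_velocity = np.array([speed, 0.0, 0.0])
    return rotation.dot(local_velocity)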
def get_inverse_transform_matrix(transform: carla.Transform):
"""
Get inverse transform matrix from a transform class.
Inverse transform refers to from world coord system to actor coord system.
"""
_T = transform.get_inverse_matrix()
# transform matrix from Actor system to world system
inverse_trans_matrix = np.array([[_T[0][0], _T[0][1], _T[0][2]],
[_T[1][0], _T[1][1], _T[1][2]],
[_T[2][0], _T[2][1], _T[2][2]]])
return inverse_trans_matrix
def vector2array(vector: carla.Vector3D):
"""
Transform carla.Vector3D instance to ndarray
"""
array = np.array([vector.x, vector.y, vector.z])
return array
def get_vehicle_kinetic(vehicle: carla.Vehicle):
"""
todo unfinished
Get kinetics of ego vehicle.
todo use a class to encapsulate all methods about getting kinetics
"""
kinetic_dict = {}
transform = vehicle.get_transform()
vehicle.get_acceleration()
vehicle.get_angular_velocity()
def get_distance_along_route(wmap, route, target_location):
"""
Calculate the distance of the given location along the route
Note: If the location is not along the route, the route length will be returned
:param wmap: carla.Map of current world
:param route: list of tuples, (carla.Transform, RoadOption)
:param target_location:
"""
covered_distance = 0
prev_position = None
found = False
# Don't use the input location, use the corresponding wp as location
target_location_from_wp = wmap.get_waypoint(target_location).transform.location
for trans, _ in route:
# input route is transform
position = trans.location
location = target_location_from_wp
# Don't perform any calculations for the first route point
if not prev_position:
prev_position = position
continue
# Calculate distance between previous and current route point
interval_length_squared = ((prev_position.x - position.x) ** 2) + ((prev_position.y - position.y) ** 2)
distance_squared = ((location.x - prev_position.x) ** 2) + ((location.y - prev_position.y) ** 2)
# Close to the current position? Stop calculation
if distance_squared < 1.0:
break
if distance_squared < 400 and not distance_squared < interval_length_squared:
# Check if a neighbor lane is closer to the route
# Do this only in a close distance to correct route interval, otherwise the computation load is too high
starting_wp = wmap.get_waypoint(location)
wp = starting_wp.get_left_lane()
while wp is not None:
new_location = wp.transform.location
new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (
(new_location.y - prev_position.y) ** 2)
if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):
break
if new_distance_squared < distance_squared:
distance_squared = new_distance_squared
location = new_location
else:
break
wp = wp.get_left_lane()
wp = starting_wp.get_right_lane()
while wp is not None:
new_location = wp.transform.location
new_distance_squared = ((new_location.x - prev_position.x) ** 2) + (
(new_location.y - prev_position.y) ** 2)
if np.sign(starting_wp.lane_id) != np.sign(wp.lane_id):
break
if new_distance_squared < distance_squared:
distance_squared = new_distance_squared
location = new_location
else:
break
wp = wp.get_right_lane()
if distance_squared < interval_length_squared:
# The location could be inside the current route interval, if route/lane ids match
# Note: This assumes a sufficiently small route interval
# An alternative is to compare orientations, however, this also does not work for
# long route intervals
curr_wp = wmap.get_waypoint(position)
prev_wp = wmap.get_waypoint(prev_position)
wp = wmap.get_waypoint(location)
if prev_wp and curr_wp and wp:
if wp.road_id == prev_wp.road_id or wp.road_id == curr_wp.road_id:
# Roads match, now compare the sign of the lane ids
if (np.sign(wp.lane_id) == np.sign(prev_wp.lane_id) or
np.sign(wp.lane_id) == np.sign(curr_wp.lane_id)):
# The location is within the current route interval
covered_distance += math.sqrt(distance_squared)
found = True
break
covered_distance += math.sqrt(interval_length_squared)
prev_position = position
return covered_distance, found
| 3.9375 | 4 |
src/oci/adm/models/application_dependency_vulnerability_summary.py | pabs3/oci-python-sdk | 0 | 12799863 | <reponame>pabs3/oci-python-sdk<filename>src/oci/adm/models/application_dependency_vulnerability_summary.py
# coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ApplicationDependencyVulnerabilitySummary(object):
"""
An Application Dependency Vulnerability represents a single dependency in our application.
An Application Dependency Vulnerability can be associated with eventual Vulnerabilities.
Each Application Dependency is uniquely defined by a nodeId and lists eventual dependencies that this element depends on.
"""
def __init__(self, **kwargs):
"""
Initializes a new ApplicationDependencyVulnerabilitySummary object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param gav:
The value to assign to the gav property of this ApplicationDependencyVulnerabilitySummary.
:type gav: str
:param node_id:
The value to assign to the node_id property of this ApplicationDependencyVulnerabilitySummary.
:type node_id: str
:param application_dependency_node_ids:
The value to assign to the application_dependency_node_ids property of this ApplicationDependencyVulnerabilitySummary.
:type application_dependency_node_ids: list[str]
:param vulnerabilities:
The value to assign to the vulnerabilities property of this ApplicationDependencyVulnerabilitySummary.
:type vulnerabilities: list[oci.adm.models.Vulnerability]
:param is_found_in_knowledge_base:
The value to assign to the is_found_in_knowledge_base property of this ApplicationDependencyVulnerabilitySummary.
:type is_found_in_knowledge_base: bool
"""
self.swagger_types = {
'gav': 'str',
'node_id': 'str',
'application_dependency_node_ids': 'list[str]',
'vulnerabilities': 'list[Vulnerability]',
'is_found_in_knowledge_base': 'bool'
}
self.attribute_map = {
'gav': 'gav',
'node_id': 'nodeId',
'application_dependency_node_ids': 'applicationDependencyNodeIds',
'vulnerabilities': 'vulnerabilities',
'is_found_in_knowledge_base': 'isFoundInKnowledgeBase'
}
self._gav = None
self._node_id = None
self._application_dependency_node_ids = None
self._vulnerabilities = None
self._is_found_in_knowledge_base = None
@property
def gav(self):
"""
**[Required]** Gets the gav of this ApplicationDependencyVulnerabilitySummary.
Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version).
:return: The gav of this ApplicationDependencyVulnerabilitySummary.
:rtype: str
"""
return self._gav
@gav.setter
def gav(self, gav):
"""
Sets the gav of this ApplicationDependencyVulnerabilitySummary.
Unique Group Artifact Version (GAV) identifier (Group:Artifact:Version).
:param gav: The gav of this ApplicationDependencyVulnerabilitySummary.
:type: str
"""
self._gav = gav
@property
def node_id(self):
"""
**[Required]** Gets the node_id of this ApplicationDependencyVulnerabilitySummary.
Unique identifier of an Application Dependency node.
:return: The node_id of this ApplicationDependencyVulnerabilitySummary.
:rtype: str
"""
return self._node_id
@node_id.setter
def node_id(self, node_id):
"""
Sets the node_id of this ApplicationDependencyVulnerabilitySummary.
Unique identifier of an Application Dependency node.
:param node_id: The node_id of this ApplicationDependencyVulnerabilitySummary.
:type: str
"""
self._node_id = node_id
@property
def application_dependency_node_ids(self):
"""
**[Required]** Gets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary.
List of (Application Dependencies) node identifiers on which this node depends.
:return: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary.
:rtype: list[str]
"""
return self._application_dependency_node_ids
@application_dependency_node_ids.setter
def application_dependency_node_ids(self, application_dependency_node_ids):
"""
Sets the application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary.
List of (Application Dependencies) node identifiers on which this node depends.
:param application_dependency_node_ids: The application_dependency_node_ids of this ApplicationDependencyVulnerabilitySummary.
:type: list[str]
"""
self._application_dependency_node_ids = application_dependency_node_ids
@property
def vulnerabilities(self):
"""
**[Required]** Gets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary.
List of vulnerabilities for the Application Dependency.
:return: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary.
:rtype: list[oci.adm.models.Vulnerability]
"""
return self._vulnerabilities
@vulnerabilities.setter
def vulnerabilities(self, vulnerabilities):
"""
Sets the vulnerabilities of this ApplicationDependencyVulnerabilitySummary.
List of vulnerabilities for the Application Dependency.
:param vulnerabilities: The vulnerabilities of this ApplicationDependencyVulnerabilitySummary.
:type: list[oci.adm.models.Vulnerability]
"""
self._vulnerabilities = vulnerabilities
@property
def is_found_in_knowledge_base(self):
"""
**[Required]** Gets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary.
Indicates if the artifact is found in the knowledge base.
:return: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary.
:rtype: bool
"""
return self._is_found_in_knowledge_base
@is_found_in_knowledge_base.setter
def is_found_in_knowledge_base(self, is_found_in_knowledge_base):
"""
Sets the is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary.
Indicates if the artifact is found in the knowledge base.
:param is_found_in_knowledge_base: The is_found_in_knowledge_base of this ApplicationDependencyVulnerabilitySummary.
:type: bool
"""
self._is_found_in_knowledge_base = is_found_in_knowledge_base
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
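# Illustrative construction (not part of the generated SDK file): the documented properties are
# passed as keyword arguments, which @init_model_state_from_kwargs applies to the instance, e.g.
#   summary = ApplicationDependencyVulnerabilitySummary(
#       gav="org.example:demo:1.0.0",
#       node_id="node-1",
#       application_dependency_node_ids=[],
#       vulnerabilities=[],
#       is_found_in_knowledge_base=False,
#   )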
| 2.25 | 2 |
scripts/typing-summary.py | AlexWaygood/typing | 1,145 | 12799864 | <gh_stars>1000+
#!/usr/bin/env python3
"""
Generate a summary of last week's issues tagged with "topic: feature".
The summary will include a list of new and changed issues and is sent each
Monday at 0200 CE(S)T to the typing-sig mailing list. Due to limitation
with GitHub Actions, the mail is sent from a private server, currently
maintained by @srittau.
"""
from __future__ import annotations
import datetime
from dataclasses import dataclass
from typing import Any, Iterable, Sequence
import requests
ISSUES_API_URL = "https://api.github.com/repos/python/typing/issues"
ISSUES_URL = "https://github.com/python/typing/issues?q=label%3A%22topic%3A+feature%22"
ISSUES_LABEL = "topic: feature"
SENDER_EMAIL = "Typing Bot <<EMAIL>>"
RECEIVER_EMAIL = "<EMAIL>"
@dataclass
class Issue:
number: int
title: str
url: str
created: datetime.datetime
user: str
pull_request: bool = False
def main() -> None:
since = previous_week_start()
issues = fetch_issues(since)
new, updated = split_issues(issues, since)
print_summary(since, new, updated)
def previous_week_start() -> datetime.date:
today = datetime.date.today()
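    # today.weekday() is 0 for Monday, so this yields the Monday of the previous week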
return today - datetime.timedelta(days=today.weekday() + 7)
def fetch_issues(since: datetime.date) -> list[Issue]:
"""Return (new, updated) issues."""
j = requests.get(
ISSUES_API_URL,
params={
"labels": ISSUES_LABEL,
"since": f"{since:%Y-%m-%d}T00:00:00Z",
"per_page": "100",
"state": "open",
},
headers={"Accept": "application/vnd.github.v3+json"},
).json()
assert isinstance(j, list)
return [parse_issue(j_i) for j_i in j]
def parse_issue(j: Any) -> Issue:
number = j["number"]
title = j["title"]
url = j["html_url"]
created_at = datetime.datetime.fromisoformat(j["created_at"][:-1])
user = j["user"]["login"]
pull_request = "pull_request" in j
assert isinstance(number, int)
assert isinstance(title, str)
assert isinstance(url, str)
assert isinstance(user, str)
return Issue(number, title, url, created_at, user, pull_request)
def split_issues(
issues: Iterable[Issue], since: datetime.date
) -> tuple[list[Issue], list[Issue]]:
new = []
updated = []
for issue in issues:
if issue.created.date() >= since:
new.append(issue)
else:
updated.append(issue)
new.sort(key=lambda i: i.number)
updated.sort(key=lambda i: i.number)
return new, updated
def print_summary(
since: datetime.date, new: Sequence[Issue], changed: Sequence[Issue]
) -> None:
print(f"From: {SENDER_EMAIL}")
print(f"To: {RECEIVER_EMAIL}")
print(f"Subject: Opened and changed typing issues week {since:%G-W%V}")
print()
print(generate_mail(new, changed))
def generate_mail(new: Sequence[Issue], changed: Sequence[Issue]) -> str:
if len(new) == 0 and len(changed) == 0:
s = (
"No issues or pull requests with the label 'topic: feature' were opened\n"
"or updated last week in the typing repository on GitHub.\n\n"
)
else:
s = (
"The following is an overview of all issues and pull requests in the\n"
"typing repository on GitHub with the label 'topic: feature'\n"
"that were opened or updated last week, excluding closed issues.\n\n"
"---------------------------------------------------\n\n"
)
if len(new) > 0:
s += "The following issues and pull requests were opened last week: \n\n"
s += "".join(generate_issue_text(issue) for issue in new)
s += "\n---------------------------------------------------\n\n"
if len(changed) > 0:
s += "The following issues and pull requests were updated last week: \n\n"
s += "".join(generate_issue_text(issue) for issue in changed)
s += "\n---------------------------------------------------\n\n"
s += (
"All issues and pull requests with the label 'topic: feature'\n"
"can be viewed under the following URL:\n\n"
)
s += ISSUES_URL
return s
def generate_issue_text(issue: Issue) -> str:
s = f"#{issue.number:<5} "
if issue.pull_request:
s += "[PR] "
s += f"{issue.title}\n"
s += f" opened by @{issue.user}\n"
s += f" {issue.url}\n"
return s
if __name__ == "__main__":
main()
| 2.84375 | 3 |
pi/ch08_rfid_read/SimpleMFRC522.py | simonmonk/hacking2 | 10 | 12799865 | import MFRC522
import RPi.GPIO as GPIO
class SimpleMFRC522:
READER = None;
TAG = { 'id' : None, 'text' : ''};
KEY = [0xFF,0xFF,0xFF,0xFF,0xFF,0xFF]
def __init__(self):
self.READER = MFRC522.MFRC522()
def read(self):
tag = self.read_no_block()
while not tag:
tag = self.read_no_block()
return tag
def read_no_block(self):
(status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL)
if status != self.READER.MI_OK:
return None
(status, uid) = self.READER.MFRC522_Anticoll()
if status != self.READER.MI_OK:
return None
self.TAG['id'] = self.uid_to_num(uid)
self.READER.MFRC522_SelectTag(uid)
status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid)
if status == self.READER.MI_OK:
text = self.READER.MFRC522_Read(8)
if text:
self.TAG['text'] = ''.join(chr(i) for i in text)
self.READER.MFRC522_StopCrypto1()
return self.TAG
def write(self, sector, text):
tag = self.write_no_block(8, text)
while not tag:
tag = self.write_no_block(8, text)
return tag
def write_no_block(self, sector, text):
(status, TagType) = self.READER.MFRC522_Request(self.READER.PICC_REQIDL)
if status != self.READER.MI_OK:
return None
(status, uid) = self.READER.MFRC522_Anticoll()
if status != self.READER.MI_OK:
return None
self.TAG['id'] = self.uid_to_num(uid)
self.READER.MFRC522_SelectTag(uid)
status = self.READER.MFRC522_Auth(self.READER.PICC_AUTHENT1A, 8, self.KEY, uid)
self.READER.MFRC522_Read(8)
if status == self.READER.MI_OK:
data = bytearray()
            data.extend(text.ljust(16).encode('ascii'))
self.READER.MFRC522_Write(8, data)
text = self.READER.MFRC522_Read(8)
if text:
self.TAG['text'] = ''.join(chr(i) for i in text)
self.READER.MFRC522_StopCrypto1()
return self.TAG
def uid_to_num(self, uid):
n = 0
for i in range(0, 5):
n = n * 256 + uid[i]
return n
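# Illustrative usage sketch (not part of the original module). Assumes an MFRC522 reader is
# wired over SPI; note that read()/write() always use block 8 regardless of the sector argument.
if __name__ == "__main__":
    reader = SimpleMFRC522()
    try:
        print("Hold a tag near the reader...")
        tag = reader.read()               # blocks until a tag is presented
        print(tag['id'], tag['text'])
        reader.write(8, "hello tag")      # stores up to 16 characters in block 8
    finally:
        GPIO.cleanup()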
| 2.8125 | 3 |
bigcode-fetcher/bigcode_fetcher/downloader.py | sourcery-ai-bot/bigcode-tools | 6 | 12799866 | <gh_stars>1-10
import os.path as path
from concurrent.futures import ThreadPoolExecutor
import subprocess
import logging
import json
from bigcode_fetcher.project import Project
def download_git_project(project, output_dir, full_fetch=False):
command = ["git", "clone"]
if not full_fetch:
command += ["--depth", "1"]
command += [project.clone_url, output_dir]
subprocess.run(command)
def download_project(project, output_base_dir, full_fetch=False):
try:
output_dir = path.join(output_base_dir, project.full_name)
if path.isdir(output_dir):
logging.info("%s already exists", project.full_name)
return False
download_git_project(project, output_dir, full_fetch=full_fetch)
return True
except Exception as e: # pylint: disable=broad-except
logging.warning("could not download %s: %s", project.full_name, e)
def download_projects(projects, output_dir, full_fetch=False):
with ThreadPoolExecutor() as executor:
executor.map(lambda p: download_project(p, output_dir, full_fetch=full_fetch), projects)
def load_projects_from_file(input_file):
with open(input_file, "r") as f:
return [Project(project) for project in json.load(f)]
def download_projects_command(args):
projects = load_projects_from_file(args.input_file)
download_projects(projects, args.output_dir)
| 2.515625 | 3 |
main_v2.py | armsp/covid19-vis | 5 | 12799867 | import os
import glob
import json
import logging as lg
from pathlib import Path
from datetime import date, datetime
import yaml
import requests
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.dates import date2num, DateFormatter
import matplotlib.transforms as transforms
from jinja2 import Environment, FileSystemLoader
from jhu_handler import melt_data, get_jhu_stats, get_india_stats_from_jhu
from mohfw_handler import mohfw_data_to_df, add_lat_lon, get_mohfw_stats, extract_clean_df
from chloropleth import make_chloropleth_json
from clean import add_clean_state_data
lg.basicConfig(level=lg.DEBUG, format=("[%(asctime)s] [%(levelname)8s] %(filename)s - %(message)s"), datefmt="%d-%b-%Y %I:%M:%S %p")#, filename='log.txt', filemode='a+'
template_loader = FileSystemLoader('./templates')
template_env = Environment(loader=template_loader)
TEMPLATE = "template.html"
template = template_env.get_template(TEMPLATE)
sns.set(style="ticks")
sns.set_context("paper", rc={"font.size":8,"axes.titlesize":9,"axes.labelsize":10,"lines.linewidth": 1.5,'lines.markersize':3})#paper,talk,notebook
fig, ax = plt.subplots()
covid_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_time_series')
cases_path = os.path.join(covid_data_path, 'time_series_covid19_confirmed_global.csv')
recoveries_path = os.path.join(covid_data_path, 'time_series_covid19_recovered_global.csv')
deaths_path = os.path.join(covid_data_path, 'time_series_covid19_deaths_global.csv')
Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'timeseries_records')).mkdir(parents=True, exist_ok=True)
Path(os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'statewise_distribution')).mkdir(parents=True, exist_ok=True)
mohfw_data_df = mohfw_data_to_df()
table_df = extract_clean_df(mohfw_data_df)
table_df = add_lat_lon(table_df)
#print("Table DF")
#print(table_df)
if not table_df.empty:
table_df.to_csv(f'./datasets/statewise_distribution/{str(date.today())}.csv', sep=',', encoding='utf-8', index=False)
else:
lg.warning("Failed to write statewise distribution file. Map will use old file even though new data is available")
in_cases_df, in_recoveries_df, in_deaths_df = get_india_stats_from_jhu(cases_path, recoveries_path, deaths_path)
# Transforming data to a format lineplot likes
final_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df)
final_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)
## Using data that is larger
live_cases = in_cases_df
live_recoveries = in_recoveries_df
live_deaths = in_deaths_df
date_today_str = date.today().strftime("%-m/%-d/%y")
print(f"Today's date is = {date_today_str}")
date_today = date.today()
print(date_today)
#check date in index
live_cases_latest_date = live_cases.columns[-1]
live_recoveries_latest_date = live_recoveries.columns[-1]
live_deaths_latest_date = live_deaths.columns[-1]
#get today's stats from mohfw
mohfw_stats = get_mohfw_stats(table_df)
print(mohfw_stats)
#compare dates
live_cases_latest_date = datetime.strptime(live_cases_latest_date, "%m/%d/%y").date()
live_recoveries_latest_date = datetime.strptime(live_recoveries_latest_date, "%m/%d/%y").date()
live_deaths_latest_date = datetime.strptime(live_deaths_latest_date, "%m/%d/%y").date()
print(live_cases_latest_date, live_recoveries_latest_date, live_deaths_latest_date)
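# Merge step: prefer the MoHFW numbers when they are more recent (append a new column) or
# larger than the latest JHU value (overwrite the last column)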
if date_today > live_cases_latest_date:
if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]):
print(mohfw_stats['in_stats']['cases'], int(live_cases.iloc[:,-1:].iloc[0]))
live_cases[date_today_str] = mohfw_stats['in_stats']['cases']# new column in live with mohfw value
elif date_today == live_cases_latest_date:
if mohfw_stats['in_stats']['cases'] > int(live_cases.iloc[:,-1:].iloc[0]):
        live_cases.iloc[0, -1] = mohfw_stats['in_stats']['cases']  # direct iloc assignment; chained indexing writes to a copy
if date_today > live_recoveries_latest_date:
print(mohfw_stats['in_stats']['recovered'], int(live_recoveries.iloc[:,-1:].iloc[0]))
if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]):
live_recoveries[date_today_str] = mohfw_stats['in_stats']['recovered']
elif date_today == live_recoveries_latest_date:
if mohfw_stats['in_stats']['recovered'] > int(live_recoveries.iloc[:,-1:].iloc[0]):
        live_recoveries.iloc[0, -1] = mohfw_stats['in_stats']['recovered']  # direct iloc assignment; chained indexing writes to a copy
if date_today > live_deaths_latest_date:
if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]):
live_deaths[date_today_str] = mohfw_stats['in_stats']['deaths']
elif date_today == live_deaths_latest_date:
if mohfw_stats['in_stats']['deaths'] > int(live_deaths.iloc[:,-1:].iloc[0]):
        live_deaths.iloc[0, -1] = mohfw_stats['in_stats']['deaths']  # direct iloc assignment; chained indexing writes to a copy
print(live_cases)
print(live_deaths)
print(live_recoveries)
plot_df = melt_data(live_cases, live_deaths, live_recoveries)
#plot_df['index'] = plot_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y'))
plot_df.to_csv(f'./datasets/timeseries_records/live_cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)
jhu_df = melt_data(in_cases_df, in_deaths_df, in_recoveries_df)
#jhu_df['index'] = jhu_df['index'].apply(lambda x: datetime.strptime(x, '%m/%d/%y'))
jhu_df.to_csv(f'./datasets/timeseries_records/cases_deaths_recoveries_timeseries.csv', sep=',', encoding='utf-8', index=False)
# Make plot
ax = plt.axes()
kwargs = {'markeredgewidth': 0.25}
sns.lineplot(x='index', y='value', hue='category', hue_order=['cases', 'recoveries', 'deaths'], style='category', palette={'cases': 'Red', 'recoveries': 'Green', 'deaths': 'Gray'}, dashes=False, data=plot_df, markers={'deaths': 'X', 'recoveries': 'd', 'cases': 'o'}, ax=ax, **kwargs)
# Draw horizontal lines at max values
cases_max = int(plot_df['value'].where(plot_df['category'] == 'cases').max())
deaths_max = int(plot_df['value'].where(plot_df['category'] == 'deaths').max())
recoveries_max = int(plot_df['value'].where(plot_df['category'] == 'recoveries').max())
ax.axhline(cases_max, ls='dotted', linewidth=0.5)
ax.axhline(deaths_max, ls='dotted', linewidth=0.5)
ax.axhline(recoveries_max, ls='dotted', linewidth=0.5)
#'-', '--', '-.', ':', 'None', ' ', '', 'solid', 'dashed', 'dashdot', 'dotted'
plt.title('COVID-19 Cases, Recoveries & Deaths Graph')
ax.set(xlabel='Time ->', ylabel='Cases / Deaths')
ax.xaxis.label.set_visible(False)
ax.yaxis.label.set_visible(False)
ax.legend(labels=['Confirmed Cases', 'Recoveries', 'Deaths'], frameon=False)#loc='upper left'
myFmt = DateFormatter("%d %b") #myFmt = DateFormatter("%d %b %y")
ax.xaxis.set_major_formatter(myFmt)
#ax.set(xticks=final_df['index'].values)
ax.grid(color='#f3f3f3', linestyle=':', linewidth=0.5)##cdcdcd #f3f3f3 #D3D3D3
ratio = 0.5
ax.set_aspect(1.0/ax.get_data_ratio()*ratio)
plt.xticks(fontsize=5, rotation=0)#, ha='right')
#plt.yticks(fontsize=6)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
#ax.spines['left'].set_edgecolor('gray')
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
ax.tick_params(axis="x", direction='in', length=3, width=0.5)
ax.get_yaxis().set_visible(False)
ax.spines['bottom'].set_linewidth(0.5)
ax.spines['left'].set_linewidth(0.5)
#trans = transforms.blended_transform_factory(ax.get_yticklabels()[0].get_transform(), ax.transData)
#ax.text(0, cases_max, color="red", s=cases_max, transform=trans, ha="right", va="center")
#ax.text(0, deaths_max, color="red", s=deaths_max, transform=trans, ha="right", va="center")
ax.text(0.01, cases_max, cases_max, color="red", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
ax.text(0.01, deaths_max, deaths_max, color="red", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
ax.text(0.01, recoveries_max, recoveries_max, color="green", transform=ax.get_yaxis_transform(), ha="left", va="bottom")
#ax.annotate(cases_max, [ax.get_xticks()[-1], cases_max], va='bottom', ha='right', color='red')
#ax.annotate(deaths_max, [ax.get_xticks()[-1], deaths_max], va='bottom', ha='left', color='red')
xt = ax.get_xticks().tolist()
last_x_tick = date2num(plot_df['index'].values[-1])
if xt[-1] > last_x_tick:
xt.pop(-1)
else:
if abs(xt[-1] - last_x_tick) < (xt[1] - xt[0])/2:
xt.pop(-1)
#xt = np.append(xt, last_x_tick)
xt.append(last_x_tick)
#xtl = xt.tolist()
ax.set_xticks(xt)
ax.axvline(last_x_tick, ls='dotted', linewidth=0.5)
plt.savefig("graph.svg", format='svg', dpi=1200, bbox_inches='tight')
#plt.show()
# Make index.html
# accquire latest statistics
covid_daily_reports_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid-data', 'csse_covid_19_data', 'csse_covid_19_daily_reports')
jhu_stats = get_jhu_stats(covid_daily_reports_path)
#Compare JHU Stats with MoHFW stats for india
if mohfw_stats['in_stats']['cases'] > jhu_stats['in_stats']['cases']:
in_cases_greater = mohfw_stats['in_stats']['cases']
else:
in_cases_greater = jhu_stats['in_stats']['cases']
if mohfw_stats['in_stats']['deaths'] > jhu_stats['in_stats']['deaths']:
in_deaths_greater = mohfw_stats['in_stats']['deaths']
else:
in_deaths_greater = jhu_stats['in_stats']['deaths']
if mohfw_stats['in_stats']['recovered'] > jhu_stats['in_stats']['recovered']:
in_recovered_greater = mohfw_stats['in_stats']['recovered']
else:
in_recovered_greater = jhu_stats['in_stats']['recovered']
#world stats
w_confirmed = jhu_stats['w_stats']['cases']
w_deaths = jhu_stats['w_stats']['deaths']
w_recovered = jhu_stats['w_stats']['recovered']
## read resource yaml
with open('resources.yaml') as fs:
resources = yaml.load(fs, yaml.SafeLoader)
# add clean datasets
state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets')
print("adding clean datasets")
add_clean_state_data(state_data_path)
#clean_state_data_path = os.path.join(os.environ['GITHUB_WORKSPACE'], 'covid19-in', 'datasets', 'clean_daily_statewise_distribution')
#map_json = make_chloropleth_json(clean_state_data_path)
# Get ready to pass data to template
stats_dict = {'w_cases': w_confirmed, 'w_deaths': w_deaths, 'w_recovered': w_recovered, 'i_cases': in_cases_greater, 'i_deaths': in_deaths_greater , 'i_recovered': in_recovered_greater}
commit_info_dict = {'current_time': datetime.now().strftime("%B %d, %Y at %I:%M %p"), 'commit_sha': os.environ['GITHUB_SHA']}
state_info = {'link': f"https://github.com/armsp/covid19.in/blob/master/datasets/statewise_distribution/{str(date.today())}.csv"}
namespace = {'statistics': stats_dict, 'safety_resources': resources['SAFETY & PREVENTION'], 'about': resources['Virus & the Disease'], 'fakes': resources['Fads, Fake News & Scams'], 'misc': resources['Miscellaneous'], 'commit_info': commit_info_dict, 'state_info': state_info}
#,'c_map': map_json
rendered_html = template.render(**namespace)
with open("index.html", "w+") as f:
f.write(rendered_html) | 1.929688 | 2 |
src/congram.py | marvintau/congram | 0 | 12799868 | <filename>src/congram.py
# -*- coding: utf-8 -*-
import os
import sys
import itertools
import numpy as np
from color_schemes import color_func
def get_term_size():
rows, columns = os.popen('stty size', 'r').read().split()
return int(rows), int(columns)
class Pos:
def __init__(self, row, col):
self.row = row
self.col = col
def __add__(self, pos):
return Pos(self.row + pos.row, self.col + pos.col)
def __mul__(self, pos_time):
if type(pos_time) is tuple:
return Pos(self.row * pos_time[0], self.col * pos_time[1])
        return Pos(self.row * pos_time.row, self.col * pos_time.col)
def __str__(self):
return "{%d, %d}" % (self.row, self.col)
class Color:
def __init__(self, r, g, b):
self.r = int(r)
self.g = int(g)
self.b = int(b)
def __add__(self, inc):
        if type(inc) == tuple and len(inc) == 3:
return Color(self.r + inc[0], self.g + inc[1], self.b + inc[2])
elif type(inc) == Color:
return Color(self.r + inc.r, self.g + inc.g, self.b + inc.b)
elif type(inc) == int:
return Color(self.r + inc, self.g + inc, self.b + inc)
else:
raise TypeError("operand type must be either 3-tuple or Color")
def __mul__(self, inc):
if type(inc) == tuple and len(inc) == 3:
return Color(self.r * inc[0], self.g * inc[1], self.b * inc[2])
elif type(inc) == float:
return Color(int(self.r * inc), int(self.g * inc), int(self.b * inc))
else:
raise TypeError("operand type must be either 3-tuple or int")
def __str__(self):
return "{%d, %d, %d}" % (self.r, self.g, self.b)
class CharColor:
def __init__(self, fore, back=None):
if type(fore) == tuple and len(fore) == 3:
self.fore = Color(*fore)
else:
self.fore = fore
        if back is None:
self.back = Color(0,0,0)
elif type(back) == tuple and len(back) == 3:
self.back = Color(*back)
else:
self.back = back
def __add__(self, inc):
if type(inc) == tuple:
if len(inc) == 2:
return CharColor(self.fore + inc[0], self.back + inc[1])
elif len(inc) == 3:
return CharColor(self.fore + inc, self.back + inc)
else:
raise TypeError("operand type must be either 3-tuple or 2-tuple")
elif type(inc) is int:
return CharColor(self.fore + inc, self.back + inc)
else:
raise TypeError("operand type must be tuple")
def __mul__(self, inc):
if type(inc) == tuple:
if len(inc) == 2:
return CharColor(self.fore * inc[0], self.back * inc[1])
elif len(inc) == 3:
return CharColor(self.fore * inc, self.back * inc)
else:
raise TypeError("operand type must be either 3-tuple or 2-tuple")
elif type(inc) is float:
return CharColor(self.fore * inc, self.back * inc)
else:
raise TypeError("operand type must be tuple")
def __str__(self):
return str(self.fore) + " " + str(self.back)
class Rect:
"""
Rect: Draw a rectangle area with given fore/back color and text content
records position, size, color and content only.
"""
def __init__(self, pos, color, text):
self.pos = pos
self.color = color
self.text = text
class Canvas:
rows, cols = get_term_size()
# graphic elements hold by Canvas.
elems = []
# for successively adding elements
current_line = 0
def add_text(self, text, color, anchor=None):
if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(text)) // 2)
color = color * (0.5, 1.)
self.add_empty_line(anchor)
self.elems.append(Rect(anchor, color, text))
self.current_line += 2
def add_empty_line(self, pos):
self.elems.append(Rect(Pos(pos.row, 0), CharColor((0,0,0)), " "*self.cols))
def add_frame(self, size, anchor,
sides=("left", "right", "top", "bottom"),
x_tick_range=None,
y_tick_range=None,
x_rep=None,
y_rep=None,
x_off=None,
y_off=None):
color = CharColor((255, 255, 255))
for l in range(size.row+1):
self.add_empty_line(Pos(l, 0) + anchor)
tick_char = u"│"
if x_off is not None and x_rep is not None:
if (l + x_off) % x_rep == 0:
tick_char = u"├"
if "left" in sides:
self.elems.append(Rect(Pos(l, 0)+anchor, color, tick_char))
if "right" in sides:
self.elems.append(Rect(Pos(l, size.col)+anchor, color, u"│"))
for l in range(1,size.col):
tick_char = u"─"
if y_off is not None and y_rep is not None:
if (l + y_off) % y_rep == 0:
tick_char = u"┴"
if "top" in sides:
self.elems.append(Rect(Pos(0, l)+anchor, color, u"─"))
if "bottom" in sides:
self.elems.append(Rect(Pos(size.row, l)+anchor, color, tick_char))
self.elems.append(Rect(anchor, color, u"┌"))
self.elems.append(Rect(anchor+Pos(size.row, 0), color, u"└"))
self.elems.append(Rect(anchor+Pos(size.row, size.col), color, u"┘"))
self.elems.append(Rect(anchor+Pos(0, size.col), color, u"┐"))
def add_grid(self, table, color_func, anchor=None):
cell_size = 0
min_cell = min([min(c) for c in table])
max_cell = max([max(c) for c in table])
# Get reformed string and calculate max length
for row in table:
for cell in row:
try:
new_cell = "%1.2f" % cell
except TypeError:
new_cell = cell
if cell_size < len(new_cell):
cell_size = len(new_cell)
cell_size += 2
if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(table[0]) * cell_size - 7) // 3)
self.add_frame(Pos(len(table)*3+3, len(table[0])*cell_size+5), anchor,
x_rep=3, x_off=0, y_rep=cell_size, y_off=0)
def add_cell(cell, anchor, pos, isBlank=False):
pos = pos * (1, cell_size) + anchor + Pos(0, 2)
back = color_func((cell-min_cell)/(max_cell-min_cell))
color = CharColor(back, back) * (1., 0.5)
cell = "" if isBlank else "%1.2f" % cell if type(cell) is float else cell
cell = cell.rjust(cell_size - 2)
self.elems.append(Rect(pos, color, " " + cell + " "))
# Add each cell into element table
# and calculates the max cell length
cell_anchor = anchor + Pos(2, 1)
for [row_num, row] in enumerate(table):
for [col_num, cell] in enumerate(row):
add_cell(cell, cell_anchor, Pos(row_num*3+0, col_num), True)
add_cell(cell, cell_anchor, Pos(row_num*3+1, col_num))
add_cell(cell, cell_anchor, Pos(row_num*3+2, col_num), True)
# Add a thermometer on the right side
thermo_left = len(table[0]) * cell_size + 10
for line in range(1, len(table) * 3 + 6):
pos = Pos(line, thermo_left) + anchor
back = color_func(1.0 - 1.0 * line / (len(table) * 3+3))
color = CharColor(Color(0, 0, 0), back)
self.elems.append(Rect(pos, color, " "))
self.current_line += len(table)*3 + 4
def add_hist(self, hist, color_func, anchor=None):
max_val = max(hist[0])
height = 30
bar_width = 5
if anchor is None:
            anchor = Pos(self.current_line, (self.cols - len(hist[0]) * bar_width) // 2)
self.add_frame(Pos(height + 3, len(hist[0])*bar_width + 5), anchor,
x_rep=3, x_off=0, y_rep=bar_width, y_off=0)
hist_anchor = anchor + Pos(2, 3)
for line in range(height):
for ith, val in enumerate(hist[0]):
pos = Pos(line, ith*bar_width) + hist_anchor
if height * (1 - val/max_val) < line:
color = color_func(val/max_val)
self.elems.append(Rect(pos, CharColor(color, color*2), " "))
self.current_line += 30
def render_line(self, line_num, is_reset=False):
"""
render elements in single line
"""
# Find all elements to be rendered in current line
elems_inline = [elem for elem in self.elems if elem.pos.row == line_num]
visible_parts = []
        def visible_check(bound_new, bound_old):
            A_left, A_right, _ = bound_new
            B_left, B_right, B_id = bound_old
# compare the left/right bound of new element with each
# existing bound.
A_left_shaded = A_left <= B_left
A_right_shaded = A_right >= B_right
A_left_dodged = A_right < B_left
A_right_dodged = A_left > B_right
# Four cases of shading:
# 1. dodged: the fore and back element doesn't overlap
# 2. shaded: fore element shaded at left or right bound of back
# element.
# 3. split: fore element splits back element into two visible
# parts.
if A_left_dodged or A_right_dodged: # dodged
return ((B_left, B_right, B_id),)
elif not (A_left_shaded or A_right_shaded): # splitted
return ((B_left, A_left, B_id),(A_right, B_right, B_id))
elif (A_left_shaded and A_right_shaded): # fully shaded
return []
else: # partially shaded
if A_left_shaded:
return ((A_right, B_right, B_id),)
if A_right_shaded:
return ((B_left, A_left, B_id),)
for elem_i, elem in enumerate(elems_inline):
elem_bound = (elem.pos.col, elem.pos.col + len(elem.text), elem_i)
            for i, part_group in enumerate(visible_parts):
                # clip each still-visible bound in the group against the new element's bound
                visible_parts[i] = tuple(b for part in part_group for b in visible_check(elem_bound, part))
            visible_parts.append((elem_bound,))
# list flatten operation by itertools.chain flatten both list and
# tuple (and all iterables), thus we have to coat it with one more
# tuple in order to maintain the form.
visible_parts = list(itertools.chain.from_iterable(visible_parts))
visible_parts = sorted(visible_parts, key=lambda x:x[0])
# handles if no elements in this line
strokes = "" if visible_parts == [] else " " * visible_parts[0][0]
COLOR_RESET = '\x01\x1b[0m\x02'
for part in visible_parts:
elem = elems_inline[part[2]]
color = elem.color
text = elem.text[part[0] - elem.pos.col : part[1] - elem.pos.col]
strokes += self.stroke(text, color)
strokes += COLOR_RESET if is_reset else ""
sys.stdout.write(strokes + COLOR_RESET)
sys.stdout.write("\n")
def render(self, is_reset=False):
sys.stdout.flush()
sys.stdout.write("\n")
for line in range(self.rows):
self.render_line(line, is_reset)
def stroke(self, text, c):
COLOR_FORE = 38
COLOR_BACK = 48
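        # 24-bit ANSI SGR escape: 38 selects the foreground colour, 48 the background colour;
        # \x01/\x02 are readline-style markers that flag the escape as non-printing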
color_seq = '\x01\x1b[{z};2;{r};{g};{b}m\x02'
fore = color_seq.format(z=COLOR_FORE, r=c.fore.r, g=c.fore.g, b=c.fore.b)
back = color_seq.format(z=COLOR_BACK, r=c.back.r, g=c.back.g, b=c.back.b)
return fore+back+text
if __name__ == "__main__":
c = Canvas()
grid = np.random.random_sample(((7, 10)))
hist = np.random.random_sample(((15, 1)))
c.add_text("This is a heatmap example", CharColor(color_func["Plum"](0.9)))
c.add_grid(grid.tolist(), color_func["Plum"])
c.add_text("This is a histogram example", CharColor(color_func["BlueGreenYellow"](0.9)))
#c.add_hist(grid.tolist(), color_func["BlueGreenYellow"])
c.render(True)
| 3.09375 | 3 |
webui/chart.py | twmarshall/tbd | 0 | 12799869 | <reponame>twmarshall/tbd
import matplotlib
# prevents pyplot from trying to connect to x windowing
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
webui_root = "webui/"
points = []
for i in range(1, len(sys.argv)):
points.append(sys.argv[i])
plt.plot(points)
plt.ylabel('some numbers')
plt.savefig(webui_root + "tasks.png")
| 2.421875 | 2 |
5-1.py | xeno14/advent_of_code2018 | 0 | 12799870 | with open("input/5.txt") as f:
#with open("input/5.test") as f:
poly = f.read().strip()
def is_reactable(x,y):
return x.lower()==y.lower() and x.islower() != y.islower()
assert(not is_reactable("a", "a"))
assert(not is_reactable("a", "B"))
assert(is_reactable("a", "A"))
print(len(poly))
result = ""
for p in poly:
if len(result)==0:
result += p
continue
q = result[-1] # tail
if is_reactable(p, q):
result = result[:-1] # remove tail
else:
result += p
print(len(result))
| 3.53125 | 4 |
luizalabs/core/migrations/0011_auto_20190911_0550.py | LucasSRocha/django_rest_llabs | 0 | 12799871 | <gh_stars>0
# Generated by Django 2.1.12 on 2019-09-11 05:50
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0010_auto_20190910_1022'),
]
operations = [
migrations.AlterUniqueTogether(
name='product',
unique_together={('wishlist', 'product_id')},
),
]
| 1.375 | 1 |
scripts/scraper.py | brainglobe/brainglobe-web | 1 | 12799872 | <reponame>brainglobe/brainglobe-web<filename>scripts/scraper.py
from loguru import logger
from rich import print
from rich.table import Table
from mdutils.mdutils import MdUtils
import semanticscholar as sch
from myterial import pink, blue_light
'''
Searches google scholar for papers using brainglobe's tools
'''
AUTHORS = (
'34308754', # <NAME>
'3853277', # <NAME>
'8668066', # <NAME>
)
KEYWORDS = ('brainglobe', 'brainrender', 'cellfinder', 'brainreg')
def fetch_citations():
'''
Fetches citations semantic scholar, for each author in the list
get all publications and only keep the ones relevant for brainglobe.
Then, use these publications to find papers citing them
'''
citations = []
brainglobe_papers = dict(
id = [],
year = [],
title = [],
authors = [],
link=[],
)
citing_brainglobe = dict(
id = [],
year = [],
title = [],
authors = [],
link=[],
)
# loop over authors
logger.info('Getting brainglobe papers')
for author_n, author_id in enumerate(AUTHORS):
added = 0
logger.debug(f'Fetching for author {author_n+1}/{len(AUTHORS)}')
author = sch.author(author_id)
        if not author.keys():
            raise ValueError('Could not fetch author data, probably an API timeout error, wait a bit.')
        logger.debug(f'Found {len(author["papers"])} papers for {author["name"]}')
# loop over papers
for paper in author['papers']:
paper = sch.paper(paper['paperId'])
if not paper or paper['abstract'] is None:
logger.debug(f' skipping paper {paper["title"]} because it has not abstract')
continue
matched_keywords = [kw for kw in KEYWORDS if kw in paper['abstract'].lower()]
# add it to the list of brainglobe papers
if matched_keywords:
if paper['corpusId'] in brainglobe_papers['id']:
logger.debug(f' skipping paper: {paper["title"]} to avoid duplicates')
continue # skip duplicates
logger.debug(f'Found brainglobe paper: "{paper["title"]}" @ "{paper["venue"]}" with |{paper["numCitedBy"]}| citations')
brainglobe_papers['id'].append(paper['corpusId'])
brainglobe_papers['year'].append(str(paper['year']))
brainglobe_papers['authors'].append([auth['name'] for auth in paper['authors']])
brainglobe_papers['title'].append(paper['title'])
brainglobe_papers['link'].append(paper['url'])
citations.append(paper['citations'])
added += 1
else:
logger.debug(f'Paper NOT belonging to brainglobe: "{paper["title"]}" @ "{paper["venue"]}" with |{paper["numCitedBy"]}| citations')
logger.debug(f'Added {added}/{len(author["papers"])} papers for {author["name"]}')
logger.info(f'Found {len(brainglobe_papers["id"])} brainglobe papers')
logger.info('Getting papers citing our work')
for paper_citations in citations:
for paper in paper_citations:
if paper['paperId'] in citing_brainglobe['id']:
continue # avoid duplicates
citing_brainglobe['id'].append(paper['paperId'])
citing_brainglobe['year'].append(str(paper['year']))
citing_brainglobe['title'].append(paper['title'])
citing_brainglobe['authors'].append([auth['name'] for auth in paper['authors']])
citing_brainglobe['link'].append(paper['url'])
logger.info(f'Found {len(citing_brainglobe["id"])} papers citing brainglobe')
return {**brainglobe_papers, **citing_brainglobe}
def print_citations(citations):
'''
prints a list of citations as a rich tble
'''
tb = Table(box=None, header_style=f'bold {pink}')
tb.add_column('Year', justify='right', style='dim')
tb.add_column('Title', style=blue_light)
tb.add_column('Authors')
for n in range(len(citations['id'])):
tb.add_row(
citations['year'][n],
citations['title'][n],
', '.join(citations['authors'][n]),
)
print(tb)
def make_citations_markdown(citations):
'''
Replaces ./_pages/references.md to update with the most recent
citations of papers using/citing brainglobe
'''
logger.debug('Updating markdown file')
# create markdown file
mdFile = MdUtils(file_name='_pages/references.md')
# add metadata & header
mdFile.write(text="""
---
permalink: /references
author_profile: true
title: "References"
---
""")
mdFile.new_header(level=1, title='Papers citing BrainGlobe tools ')
years = sorted(set(citations['year']))
for adding_year in years:
mdFile.new_header(level=2, title=adding_year)
# add papers
for n in range(len(citations['id'])):
year = citations['year'][n]
link = citations['link'][n]
if year != adding_year:
continue
mdFile.new_header(level=3, title=
mdFile.new_inline_link(link=link, text=citations['title'][n])
)
# add 'in the press'
mdFile.write("""
# BrainGlobe reported in press/online
### [Why These Python Coders are Joining the napari Community](https://cziscience.medium.com/why-these-python-coders-are-joining-the-napari-community-c0af6bb6ee3a)
_Chan Zuckerberg Science Initiative (Medium), June 2021_
### [Using deep learning to aid 3D cell detection in whole brain microscopy images](https://www.sainsburywellcome.org/web/blog/using-deep-learning-aid-3d-cell-detection-whole-brain-microscopy-images)
_Sainsbury Wellcome Centre Blog, June 2021_
### [Brainrender: visualising brain data in 3D](https://www.sainsburywellcome.org/web/blog/brainrender-visualising-brain-data-3d)
_Sainsbury Wellcome Centre Blog, March 2021_
### [Cellfinder: Harnessing the power of deep learning to map the brain](https://www.sainsburywellcome.org/web/blog/cellfinder-harnessing-power-deep-learning-map-brain)
_Sainsbury Wellcome Centre Blog, April 2020_
### [The best neuroscience stories from April 2020](https://www.scientifica.uk.com/neurowire/the-best-neuroscience-stories-from-april-2020)
_NeuroWire (Scientifica), April 2020_
""")
# save
mdFile.create_md_file()
# remove extra empty lines at top of file
with open('_pages/references.md', 'r') as fin:
content = fin.read()
with open('_pages/references.md', 'w') as fout:
fout.write(content.replace('\n\n\n\n', ''))
if __name__ == '__main__':
citations = fetch_citations()
# print_citations(citations)
make_citations_markdown(citations) | 2.515625 | 3 |
src/Sample_DataAnalysis/data_generator.py | techmentry/techmentry-python-bootcamp | 0 | 12799873 | import requests
from requests.compat import urljoin
import json
import os
import datetime
# https://www.covid19api.dev/#intro
# creating a static dictionary for all the month in the 3 letter format. as this is the only
# sure way of getting it correct without having to do a lot of date parsing.
months = {
1: "jan",
2: "feb",
3: "mar",
4: "apr",
5: "may",
6: "jun",
7: "jul",
8: "aug",
9: "sep",
10: "oct",
11: "nov",
12: "dec"
}
# create a global variable for the bearer token.
# what is a bearer token? in simple words, it is a token we get when we authenticate with the server. when we send it to the server with the
bearer_token = ""
api_def = None
def read_api_def():
# we store this API definition in a file just so that whenever there is a change to the API we don't have to touch the code. We can just
# change the API endpoint, and the code should work the same way.
global api_def
# read this API definition from the file in the config folder, and then store it for later use.
api_def_file_path = os.path.join(
os.path.dirname(__file__), "config", "api-def.json")
with open(api_def_file_path, "r") as f:
api_def = json.load(f)
def generate_token(force=False):
# The covid 19 tracking API we want to use requires us to authenticate with some form of username and password. To this request,
# the API returns a bearer token, which in simpler terms is a way for it to know who is making a request and if that person can
# use that endpoint. It also helps keep a track of the number of requests a user has made and also manage telemetry.
global bearer_token
# the token that the server sends us has a lifetime of ~55 hours. Hence, we don't need to regenerate it. We can just store the token
# and load it the next time we bring up our script. However, it is to be noted that once 55 hours are up, we need to regenerate the token.
# you can write some code trivially by storing the date and time the token was generated on in the json file itself, and then using it with the code below
# to check if the token present is valid or not. If it is not, then you can refresh it. See a simple example below
token_file_path = os.path.join(
os.path.dirname(__file__), "config", "token.json")
# check if we have a valid token already in the file
if force == False and os.path.exists(token_file_path):
with open(token_file_path) as token_file:
# is the time difference between now and the date time the token was fetched > 50 hours? if no, then continue using this token
token_details = json.load(token_file)
token_load_dt_tm = datetime.datetime.strptime(
token_details["timestamp"], "%m/%d/%Y, %H:%M:%S")
            if (datetime.datetime.now() - token_load_dt_tm).total_seconds() < (50 * 3600):
                # token is still valid: load it into the module-level variable and reuse it
                bearer_token = token_details["token"]
                return
# okay we either need to fetch a token from scratch or need a new one since the old one expired
auth_params = {
"username": api_def["username"],
"password": api_def["password"]
}
auth_response = requests.post(
url=urljoin(api_def["root_url"], api_def["api_defs"]["gen_token"]), data=auth_params)
# please take a look at REST response codes - https://developer.mozilla.org/en-US/docs/Web/HTTP/Status
if auth_response.status_code == 200:
bearer_token = json.loads(
auth_response.content.decode("utf-8"))["Document"]
auth_token = {
"token": bearer_token,
"timestamp": datetime.datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
}
with open(token_file_path, "w") as f:
json.dump(auth_token, f, indent=4)
else:
print("A problem occurred. Code: {}, Message: {}".format(
auth_response.status_code, auth_response.content.decode("utf-8")))
raise Exception("Problem with auth")
def get_global_daily_report(month: int, year: int):
# get today's date information
today = datetime.datetime.today()
# check if the month is valid
if month not in months.keys():
raise Exception(
"Invalid month range! please choose a month range between 1-12")
# check if the date range supplied actually makes sense. Covid data is tabulated from Jan 2020 till today.
if year < 2020 or year > today.year or (year == today.year and month > today.month):
raise Exception(
"Invalid date range! No valid data prior to Jan 2020 or in the future. Please choose a month and year between and including, Jan 2020 and current month and year")
# connect to the server to get the data. we also need to
api_req_param = api_def["api_defs"]["global_daily_reports"].format(
mon=str(months[month]), yyyy=year)
auth_token = {
"Authorization": "Bearer {0}".format(bearer_token)
}
stats_response = requests.get(
url=urljoin(api_def["root_url"], api_req_param), headers=auth_token)
return (
stats_response.status_code,
stats_response.content.decode("utf-8")
)
def init():
read_api_def()
generate_token()
def main(mon=0, year=0):
init()
mon = datetime.datetime.today().month if mon == 0 else mon
year = datetime.datetime.today().year if year == 0 else year
response = get_global_daily_report(mon, year)
if response[0] == 200:
with open(os.path.join(os.path.dirname(__file__), "data", "data_{}{}.json".format(mon, year)), "w") as f:
json.dump(json.loads(response[1]), f, indent=4)
if __name__ == "__main__":
main()
| 3.25 | 3 |
src/gui.py | LambWolf777/Pathfinding | 0 | 12799874 | <filename>src/gui.py<gh_stars>0
""" This module handles the user interface (GUI) for the pathfinding visualizer.
It handles the function for clicking on buttons, using input buttons, displaying stats, popups and setting
start/end nodes
It also creates all buttons by the init_gui function
"""
from pickle import load, dump
from os import getcwd, mkdir
from os.path import join, exists
import tkinter
from tkinter import filedialog
from random import randrange
from typing import *
import pygame as pg
import config as cfg
import constants as cst
from classes import *
folder_path = getcwd()
grid_path = join(folder_path, "Grids")
if not exists(grid_path):
mkdir(grid_path)
def remove_from_root(*attributes, **kwargs) -> None:
""" Removes the popup_gui from the root/parent Gui.
Disable the grid from receiving input and redraw it to cover the popup
necessary kwargs:
- 'root': Parent/root Gui
- 'child': Child Gui to remove from Parent
:param attributes: Used to specify "grid" if it needs to be redrawn
:param kwargs: See the necessary kwargs above
:return: None
"""
try:
kwargs["root"].objects.remove(kwargs["child"])
kwargs["root"].draw_all(*attributes)
    except KeyError:
        pass
class Gui:
text_input = ""
def __init__(self, dict_object: Dict[str, Any], **kwargs: Any) -> None:
""" Creates a gui window or screen, if a Gui object is added to its objects, it will pass down its click
events to it recursively until used.
Specific kwargs:
- external=True Allows clicks outside the Gui's objects
- ext_close=True Removes Gui from parent Gui on external clicks
:param dict_object: all objects in dict_object MUST have a display() method and can have an is_clicked() method
to handle clicks on Buttons (see classes.py for prefabricated classes to use)
:param kwargs: add attributes to the gui, used for dependency injection.
"""
self.objects = []
self.events = []
for name, obj in dict_object.items():
setattr(self, name, obj)
setattr(self, f"{obj.__class__}_group", [])
self.objects.append(obj)
for obj in self.objects:
self.__dict__[f"{obj.__class__}_group"].append(obj)
self.__dict__.update(kwargs)
def draw_all(self, *attributes: str) -> None:
""" Call display() method on all of its objects.
:param attributes: Call the display() method on additional attributes of the gui
:return: None
"""
for obj in self.objects:
obj.display()
for key in attributes:
self.__dict__[key].display()
def handle_events(self, *additional: Any) -> None:
""" Handle click and keyboard input events by redistributing to handle click or handle input methods.
TYPING: *additional: (event.type, function(event, *args) -> None , *args), ...
:param additional: Allows entering specific (event.type, function, (*args)) tuples to handle other events. The
function will receive parameters (event, args). (I couldn't get the typing right...)
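        e.g. gui.handle_events((pg.VIDEORESIZE, lambda event: print(event.size)))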
:return: None
"""
for event in self.events:
if event.type == pg.MOUSEBUTTONDOWN:
self.handle_clicks(pg.mouse.get_pos())
elif event.type == pg.KEYDOWN:
self.handle_input(event)
for user_event, func, *args in additional:
if event.type == user_event:
func(event, *args)
self.events.clear()
def handle_input(self, event: pg.event.Event):
""" Process Keyboard user input, the entered text is stored as a class attribute.
A TextInputButton must be activated for this function to run, once the Enter key is pressed,
it's confirm_input(self.text_input) method will be called with the injected input.
:param event: must be of type pg.KEYDOWN event
:return: None
"""
for button in self.objects:
if button.__class__ is TextInputButton and button.is_activated:
if event.key == pg.K_BACKSPACE:
if len(Gui.text_input) <= 1:
Gui.text_input = ""
else:
Gui.text_input = Gui.text_input[:-1]
button.dict["value"] = Gui.text_input
elif event.key == pg.K_RETURN:
button.confirm_input(Gui.text_input)
Gui.text_input = ""
else:
Gui.text_input += event.unicode
button.dict["value"] = Gui.text_input
button.display()
def handle_clicks(self, mouse_pos: Tuple[int, int], root: 'Gui' = None) -> bool:
""" Handle clicking events, will recursively pass down click events to child Gui if one is in its objects
(LIMITED TO ONE CHILD GUI). If any of its objects is clicked, the object's is_clicked() method will be called
if it has one.
:param mouse_pos: Coordinates of the cursor
:param root: parent Gui, allows the child to remove itself from the parent's objects once terminated
:return: True if click was used, else False
"""
def check_priority() -> Union[DropDownButton, Gui]:
""" Check if any of the Gui's objects require priority on clicks (currently only for DropDownButton and
any child Gui that might be spawned during the program
:return: object with priority
"""
priority_to = None
for obj in self.objects:
if isinstance(obj, Gui):
priority_to = obj
return priority_to
def handle_priority(priority_obj: Union[DropDownButton, Gui]) -> bool:
""" If priority object was found look if any clicks affect it, clicking outside of a DropDownButton's rect
is allowed and clicks will be registered, but is forbidden for child Gui
:param priority_obj: The object with priority
:return: True if click was used, else False
"""
used = False
if isinstance(priority_obj, Gui):
# Inject parent Gui dependency as root, to allow the child Gui to remove itself
# from the parent's objects when it is terminated
used = priority_obj.handle_clicks(mouse_pos, root=self)
try:
if not used and not priority_obj.external:
used = True
if not used and priority_obj.ext_close:
priority_obj.src_butt.is_activated = False
remove_from_root(root=self, child=priority_obj)
except AttributeError:
pass
return used
click = pg.Rect(mouse_pos, (1, 1))
click_used = False
prio = check_priority()
if prio:
click_used = handle_priority(prio)
# Clicking outside the child-most Gui is forbidden
if not click_used:
for button in self.objects:
try:
if not button.is_disabled:
if click.colliderect(button.rect):
click_used = True
button.is_clicked(gui=self, root=root)
elif button.is_activated and button.__class__ is TextInputButton:
button.confirm_input(self.text_input)
except AttributeError:
pass
return click_used
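# Illustrative sketch (not part of the original file): any object exposing display() (and
# optionally rect, is_clicked(), is_disabled) can be managed by a Gui. "DemoLabel" below is a
# hypothetical stand-in for the real widgets defined in classes.py.
#
#   class DemoLabel:
#       def __init__(self, rect):
#           self.rect = rect
#           self.is_disabled = False
#       def display(self): ...
#       def is_clicked(self, gui=None, root=None): ...
#
#   demo_gui = Gui({"label": DemoLabel(pg.Rect(0, 0, 120, 40))})
#   demo_gui.draw_all()
#   demo_gui.handle_clicks(pg.mouse.get_pos())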
# TODO: alot of setter functions could be shifted into a get_data(gui) method by the pathfinder to unclutter this
# module and remove some LOC...
def init_gui(pathfinder_obj: Any, grid_obj: Grid) -> Gui:
""" Initialise the main Gui for the visualizer module, most dependency issues are fixed by injecting the
necessary objects as arguments.
First define the necessary functions for all buttons that will be added to the Gui.
Second, create a dict of all the objects to be added to the Gui as "attribute": object.
The dict is defined one line at a time because all Button's position depend on the previous ones
Last we create the Gui from the dict, with pathfinder and grid added as kwargs.
:param pathfinder_obj: Pathfinder object to link to the Gui (class is not typed to avoid import)
:param grid_obj: Grid object to link to the Gui
:return: Gui object
"""
# Button functions for particular cases:
def random_walls(self: GridButton) -> None:
""" Function for the random walls button, 10% of the nodes in the grid will become walls
:param self: random_walls button object. For GridButton, this parameter is always injected in is_clicked
:return: None
"""
self.is_activated = False
for column in grid_obj.all_nodes:
for node in column:
if randrange(11) == 0:
if node is not grid_obj.start and node is not grid_obj.end:
node.status |= Node.WALL
cst.dirty_fills.append(node.get_fill())
def disp_moves_func(arg: bool) -> None:
""" Function for the display moves Checkbox. Switches the bool of pathfinder.display_steps attribute.
Disables the run interval and wait time buttons of the main_gui if display_steps if False, and display
the buttons the show change.
:param arg: display_moves_button.is_activated, For Checkboxes this parameter is always injected in is_clicked
:return: None
"""
pathfinder_obj.display = arg
# if "wait_time_button" in gui.__dict__.keys() and "run_interval_button" in gui.__dict__.keys():
try:
main_gui_handler.wait_time_button.is_disabled = not arg
main_gui_handler.run_interval_button.is_disabled = not arg
main_gui_handler.wait_time_button.display()
main_gui_handler.run_interval_button.display()
except KeyError:
pass
def diago_func(arg: bool) -> None:
""" Function for the diago_allowed Checkbox. Switches the bool of pathfinder.diago attribute.
:param arg: diago_button.is_activated, For Checkboxes this parameter is always injected in is_clicked
:return: None
"""
pathfinder_obj.diago = arg
def apply_rsr_func(arg: bool) -> None:
""" Function for the apply_rsr Checkbox. Switches the bool of pathfinder.apply_rsr attribute.
:param arg: apply_rsr_button.is_activated, For Checkboxes this parameter is always injected in is_clicked
:return: None
"""
pathfinder_obj.apply_rsr = arg
def set_algo(self: AlgoButton) -> None:
""" Set the pathfinder.algo attribute to the algorithm associated with the AlgoButton
:param self: inject reference to self. For AlgoButton, this parameter is always injected in is_clicked
:return: None
"""
pathfinder_obj.algo = self.algo
main_gui_handler.dropdown_algo.is_activated = False
remove_from_root(root=main_gui_handler, child=algo_gui)
def generate() -> None:
""" Calls the generate method of the grid object, and injects the n_wide and n_high dependencies from
the main_gui's grid_n_wide and grid_n_high TextInputButtons' values
:return: None
"""
grid_obj.generate(main_gui_handler.grid_n_wide_button.dict["value"],
main_gui_handler.grid_n_high_button.dict["value"])
def play_pause(arg: bool = None) -> None:
""" Switches the pathfinder.running attribute on and off on every press if run conditions are met.
        Calls the pathfinder.init_search method if pathfinder.search_is_init is False.
Disable Buttons that cannot be used during pathfinding
:param arg: Not needed, but is included for the functions of other StateButtons
:return: None
"""
def disable_buttons() -> None:
""" Disable Buttons that cannot be used during pathfinding
:return: None
"""
for obj in main_gui_handler.objects:
if obj.__class__ is not StateButton and obj is not main_gui_handler.exit_button:
try:
obj.is_disabled = True
except AttributeError:
continue
obj.display()
def check_conditions() -> bool:
""" Check that an algorithm is defined, the grid has a starting node and an ending node.
If no end or start node is defined, adds a popup Gui to the main_gui
:return: True if pathfinder is ready to run
"""
if pathfinder_obj.algo:
if grid_obj.start:
if grid_obj.end:
return True
else:
pg.event.post(pg.event.Event(cst.NO_END, announcement="No end Node!"))
else:
pg.event.post(pg.event.Event(cst.NO_START, announcement="No start Node!"))
return False
if check_conditions():
pathfinder_obj.running = not pathfinder_obj.running
# could add pause/unpause timers...
if not pathfinder_obj.search_is_init:
disable_buttons()
if not pathfinder_obj.display or main_gui_handler.run_interval_button.dict["value"] == -1:
# update display to show disabled buttons before pathfinder starts looping
handle_display()
pg.display.flip()
pathfinder_obj.init_search()
def reset(partial: bool = False) -> None:
""" Stops the pathfinder, and reset all necessary attributes, also reset the grid.
If partial, leaves walls, start and end nodes as is
:param partial: True if resetting search, False if resetting grid
:return: None"""
pathfinder_obj.running = False
if not partial:
if grid_obj.start is not None:
temp = grid_obj.start
grid_obj.start = None
temp.is_start = False
cst.dirty_fills.append(temp.get_fill())
if grid_obj.end is not None:
temp = grid_obj.end
grid_obj.end = None
temp.is_end = False
cst.dirty_fills.append(temp.get_fill())
pathfinder_obj.search_is_init = False
pathfinder_obj.dijkstra_cost_so_far = 0
pathfinder_obj.running = False
pathfinder_obj.path_found = False
pathfinder_obj.frontier = []
pathfinder_obj.queue.clear()
pathfinder_obj.to_be_removed = []
pathfinder_obj.shortest_path = []
pathfinder_obj.run_timer = 0
pathfinder_obj.start_time = 0
pathfinder_obj.end_time = 0
pathfinder_obj.neighbors_prep_dt = 0
pathfinder_obj.rsr_prep_dt = 0
pathfinder_obj.algo_dt = 0
for column in grid_obj.all_nodes:
for node in column:
node.neighbors = None
node.came_from = None
if node.update_color() is not cst.BLACK:
if not partial:
node.status &= ~(Node.WALL | Node.END | Node.START)
node.status &= ~(Node.SYM_RECT | Node.BORDER |
Node.VISITED | Node.PATH)
cst.dirty_fills.append(node.get_fill())
for obj in main_gui_handler.objects:
try:
obj.is_disabled = False
obj.display()
except AttributeError: # (Backgrounds)
continue
        # re-apply the disabled state of both speed buttons according to the "display moves" checkbox
        main_gui_handler.run_interval_button.is_disabled = \
            main_gui_handler.wait_time_button.is_disabled = \
                not main_gui_handler.display_moves_button.is_activated
# TODO: try resetting the focus to pygame
def save() -> None:
""" Save the Grid object as a Pickle file in the Grids folder (or other)
:return: None
"""
tkinter.Tk().withdraw()
direct = filedialog.asksaveasfilename(initialdir=grid_path, defaultextension=".pickle")
if direct:
save_object = {"start": grid_obj.start, "end": grid_obj.end, "grid": grid_obj.all_nodes}
with open(direct, "wb") as file:
dump(save_object, file)
def load_grid() -> None:
""" Load a grid object from the Grids folder (or other), update values, scale the grid and
show all changes
:return: None
"""
def scale_and_draw() -> None:
""" Scale the grid object to fit current screen size, draw the grid
:return: None
"""
# scale grid to screen, as well as possible, might make grid go out of borders
nodes_width = grid_obj.width / main_gui_handler.grid_n_wide_button.dict["value"]
nodes_height = grid_obj.height / main_gui_handler.grid_n_high_button.dict["value"]
start_height = 25
            # Subtracting the first because it will be incremented during the loop
position_y = start_height - nodes_height
position_x = cfg.button_background_rect.width - nodes_width
for column in grid_obj.all_nodes:
position_x += nodes_width
for node in column:
position_y += nodes_height
node.height = nodes_height
node.width = nodes_width
node.position = (position_x, position_y)
node.rect = pg.rect.Rect(node.position, (node.width, node.height))
position_y = start_height - nodes_height
grid_obj.display()
def update_values(save_object: dict) -> None:
""" Updates the attributes of the grid object and the values of the grid_n_wide and grid_n_high buttons
and display changes
:param save_object: save object loaded from pickle file
:return: None
"""
grid_obj.all_nodes = save_object["grid"]
grid_obj.start = save_object["start"]
grid_obj.end = save_object["end"]
main_gui_handler.grid_n_wide_button.dict["value"] = len(grid_obj.all_nodes)
main_gui_handler.grid_n_high_button.dict["value"] = len(grid_obj.all_nodes[0])
main_gui_handler.grid_n_wide_button.display()
main_gui_handler.grid_n_high_button.display()
tkinter.Tk().withdraw()
direct = filedialog.askopenfilename(initialdir=grid_path)
if direct:
with open(direct, "rb") as file:
save_object_ = load(file)
update_values(save_object_)
scale_and_draw()
def exit_func() -> None:
""" Exit program.
:return: None
"""
pg.event.post(pg.event.Event(pg.QUIT))
# creating GUI #####################################################################################################
main_gui: Dict[str, Union[pg.Rect, GridButton, AlgoButton, Checkbox, DropDownButton, TextInputButton, StateButton,
SystemButton, Background]] = dict()
# It's a bit ugly doing it like this but it's the only way I know to keep reference to the previous entry.
# Also I wanted to make a flexible GUI object to be able to use it elsewhere (pop-ups)
main_gui["button_background_rect"] = Background(cst.LIGHT_GREY, cfg.button_background_rect)
# grid placement buttons
main_gui["start_node_button"] = GridButton((15, 25), "Place Start")
main_gui["end_node_button"] = GridButton((main_gui["start_node_button"].rect.right + 5,
main_gui["start_node_button"].rect.top), "Place End")
main_gui["draw_walls_button"] = GridButton((15, main_gui["start_node_button"].rect.bottom + 10),
"Draw walls")
main_gui["erase_walls_button"] = GridButton((main_gui["draw_walls_button"].rect.right + 5,
main_gui["draw_walls_button"].rect.top), "Erase walls")
main_gui["random_walls_button"] = GridButton((15, main_gui["draw_walls_button"].rect.bottom + 10),
"Random walls", func=random_walls)
# algo buttons
algo_buttons = [AlgoButton((0, 0), "Flood Fill", "bfs", active_color=cst.BLACK, rounded=False, func=set_algo),
AlgoButton((0, 0), "A*", "astar", active_color=cst.BLACK, rounded=False, func=set_algo),
AlgoButton((0, 0), "Dijkstra", "dijkstra", active_color=cst.BLACK, rounded=False, func=set_algo)]
algo_buttons[0].is_activated = True
algo_gui = Gui({f"{button.algo}": button for button in algo_buttons}, external=True, ext_close=True)
main_gui["dropdown_algo"] = DropDownButton((15, main_gui["random_walls_button"].rect.bottom + 30), "Algo: ",
algo_buttons, child_gui=algo_gui)
main_gui["diago_button"] = Checkbox("Diagonal moves", (15, main_gui["dropdown_algo"].rect.bottom + 10),
False, diago_func)
main_gui["apply_rsr_button"] = Checkbox("Apply RSR", (15, main_gui["diago_button"].rect.bottom + 10),
False, apply_rsr_func)
main_gui["display_moves_button"] = Checkbox("Display moves", (15, main_gui["apply_rsr_button"].rect.bottom + 10),
True, disp_moves_func)
main_gui["run_interval_button"] = TextInputButton({"min": -1, "max": 9999, "default": 0, "value": 0},
(15, main_gui["display_moves_button"].rect.bottom + 10), 40,
"Run: ")
main_gui["wait_time_button"] = TextInputButton({"min": 0, "max": 9999, "default": 0, "value": 0},
(main_gui["run_interval_button"].rect.right + 5,
main_gui["display_moves_button"].rect.bottom + 10), 40, "Wait: ")
main_gui["reset_button"] = StateButton((15, main_gui["run_interval_button"].rect.bottom + 30),
"Reset Grid", reset)
main_gui["reset_search_button"] = StateButton((main_gui["reset_button"].rect.right + 5,
main_gui["reset_button"].rect.top), "Reset Search", reset, True)
main_gui["play_pause_button"] = StateButton((15, main_gui["reset_search_button"].rect.bottom + 10),
"Play/Pause", play_pause)
main_gui["grid_n_wide_button"] = TextInputButton({"min": 3, "max": cfg.window.get_width() - 205 - 25,
"default": 100, "value": 100},
(15, main_gui["play_pause_button"].rect.bottom + 30), 50,
"Nodes in width: ", func=generate)
main_gui["grid_n_high_button"] = TextInputButton({"min": 3, "max": cfg.window.get_height() - 125 - 25,
"default": 100, "value": 100},
(15, main_gui["grid_n_wide_button"].rect.bottom + 10), 40,
"Nodes in height: ", func=generate)
main_gui["brush_size_button"] = TextInputButton({"min": 1, "max": 200, "default": 1, "value": 1},
(15, main_gui["grid_n_high_button"].rect.bottom + 10), 30,
"Brush size: ")
main_gui["save_grid_button"] = SystemButton((15, main_gui["brush_size_button"].rect.bottom + 30),
"Save Grid", save)
main_gui["load_grid_button"] = SystemButton((main_gui["save_grid_button"].rect.right + 5,
main_gui["save_grid_button"].rect.top), "Load Grid", load_grid)
main_gui["exit_button"] = SystemButton((15, main_gui["load_grid_button"].rect.bottom + 30), "Exit", exit_func)
main_gui_handler = Gui(main_gui, pathfinder=pathfinder_obj, grid=grid_obj)
return main_gui_handler
def handle_display() -> None:
""" Does all the fills and blits to the window then clear channel, fills are made before blits.
All the program's fill and blits orders are appended to one of the lists in cst.to_display
(see constants.py module), for special cases there is an early and a late channel.
:return: None
"""
for group in cst.to_display:
for i, j in group:
if i.__class__ is pg.Surface:
cfg.window.blit(i, j)
else:
cfg.window.fill(i, j)
group.clear()
def pop_up(announcement: str) -> Gui:
""" Creates a Pop-up window Gui with a single OK button to dismiss the message and remove the Gui from its parent
Gui.
Use as follows: from the main Gui, on event: main_Gui.objects.append(pop_up("hello"))
:param announcement: Text to be displayed on the popup window
:return: A Gui object representing the popup window
"""
def ok_func(root: Gui, child: Gui) -> None:
""" Removes the popup_gui from the root/parent Gui.
Disable the grid from receiving input and redraw it to cover the popup
:param root: Parent/root Gui
:param child: Child Gui to remove from parent
:return: None
"""
root.grid.disabled = pg.time.get_ticks() + 100
remove_from_root("grid", root=root, child=child)
text_surf = cst.big_text_font.render(announcement, True, cst.RED)
bg_width = 2 * text_surf.get_width()
bg_height = 4 * text_surf.get_height()
text_obj = (text_surf, ((bg_width - text_surf.get_width()) / 2, (bg_height - text_surf.get_height()) / 3))
dimension_butt = Button((0, 0), "OK")
ok_button = OkButton(((cfg.window.get_width() - dimension_butt.rect.w) / 2,
(cfg.window.get_height() - dimension_butt.rect.h) / 2 + 100 / 4), "OK", func=ok_func)
background = Background(cst.DARK_GREY, pg.Rect(((cfg.window.get_width() - bg_width) / 2,
(cfg.window.get_height() - bg_height) / 2), (bg_width, bg_height)),
text_obj)
popup = Gui({"popup_bg": background, "ok_butt": ok_button})
popup.draw_all()
return popup
class StatsHandler:
def __init__(self, background: Background, increment: int = 200, **kwargs: Stat) -> None:
""" Creates a Singleton Stats handler for displaying Stat objects on a Background
        (a Background is important so the anti-aliased text does not become opaque).
:param background: Background object where the stats will be displayed (positioning is not automatic)
:param increment: Delay between updates of the stats in ms
:param kwargs: add stat objects as attributes of the stat handler ("attribute" = object) and in a list of stats
"""
self.__dict__.update(kwargs)
self.stats = [obj for obj in self.__dict__.values() if obj.__class__ is Stat]
self.background = background
self.chrono = 0
self.increment = increment
def display(self) -> None:
""" Display all Stat object in self.stats.
:return: None
"""
self.background.display()
for stat in self.stats:
stat.display()
def timer(self) -> bool:
""" Handles timing of the stats handler
:return: True if it's time to display
"""
if pg.time.get_ticks() >= self.chrono:
self.chrono += self.increment
return True
return False
def main(self) -> None:
""" Main loop of the stats handler, it's the only thing that needs to be called once it has been initialised
:return: None
"""
if self.timer():
self.display()
def init_stats(pathfinder: Any) -> StatsHandler:
""" Initialise the StatsHandler object, with injected dependency to the pathfinder to access stats values.
First define the getter functions for the Stat objects
Then Instantiate the Stat object as kwargs for the StatsHandler
:param pathfinder: Pathfinder object of the program (Singleton) (class not typed to avoid import)
:return: StatsHandler object
"""
# defining stats getters
def get_algo_dt() -> float:
""" Get algorithm process time from the pathfinder or the time since it started processing"""
return round(pathfinder.algo_dt, 2)
def get_neighbor_dt() -> float:
""" Get the time taken for preprocessing the grid's nodes' neighbors from the pathfinder"""
return round(pathfinder.neighbors_prep_dt, 2)
def get_rsr_dt() -> float:
""" Get the time taken for preprocessing Rectangular Symmetry Reduction from the pathfinder"""
return round(pathfinder.rsr_prep_dt, 2)
def get_path_len() -> float:
""" Get the lenght of the shortest path found by the pathfinder"""
return len(pathfinder.shortest_path)
def get_fps() -> float:
""" Get the fps of the program"""
return round(cfg.clock.get_fps(), 1)
stat_handler = StatsHandler(
background=Background(cst.LIGHT_GREY, cfg.stats_background_rect), increment=200,
process_time=Stat("Process time (ms): ", cst.BLACK,
(cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 15), get_algo_dt),
neighbor_prep_time=Stat("Neighbors Preprocess (ms): ", cst.BLACK, (cfg.stats_background_rect.x + 15,
cfg.stats_background_rect.y + 35), get_neighbor_dt),
rsr_prep_time=Stat("RSR Preprocess (ms): ", cst.BLACK,
(cfg.stats_background_rect.x + 15, cfg.stats_background_rect.y + 55), get_rsr_dt),
fps_stat=Stat("FPS: ", cst.BLACK,
(cfg.stats_background_rect.x + 300, cfg.stats_background_rect.y + 15), get_fps),
path_length=Stat("Path length: ", cst.BLACK,
(cfg.stats_background_rect.x + 300, cfg.stats_background_rect.y + 35), get_path_len))
return stat_handler
| 3.1875 | 3 |
src/examples/aircraft.py | pillmuncher/hornet | 0 | 12799875 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 <NAME> <<EMAIL>>
__version__ = '0.2.5a'
__date__ = '2016-08-11'
__author__ = '<NAME> <<EMAIL>>'
__license__ = 'MIT'
import pprint
from hornet import *
from hornet.symbols import (
side, left, right, wing, segment, segments, section, sections, point,
Side, Id, S, Ss, W
)
def make_wing(db):
db.tell(
wing(Side, wing(side(Side), Ss)) <<
segments(Side, Ss),
segments(Side, segments(Ss)) <<
findall(segment(Id, S), segment(Side, Id) & sections(Id, S), Ss),
sections(Id, sections(Ss)) <<
findall(section(S), section(Id, S), Ss),
segment(left, 1),
segment(left, 2),
segment(right, 3),
segment(right, 4),
section(1, [point(1, 2), point(3, 4)]),
section(1, [point(5, 6), point(7, 8)]),
section(2, [point(2, 3), point(4, 5)]),
section(2, [point(6, 7), point(8, 9)]),
section(3, [point(11, 12), point(13, 14)]),
section(3, [point(15, 16), point(17, 18)]),
section(4, [point(12, 13), point(14, 15)]),
section(4, [point(16, 17), point(18, 19)]),
)
def ask_wing(db, side):
for subst in db.ask(wing(side, W)):
pprint.pprint(subst[W])
db = Database()
make_wing(db)
ask_wing(db, left)
ask_wing(db, right)
| 2.515625 | 3 |
random/random_util_test.py | ljszalai/pyaster | 0 | 12799876 | import unittest
import random_util
class MyTestCase(unittest.TestCase):
def test_visualize_results(self):
column_width = 20
print("Generated id:".rjust(column_width, ' ') + random_util.generate_id())
print("Generated uuid:".rjust(column_width, ' ') + random_util.generate_uuid())
print("Generated token:".rjust(column_width, ' ') + random_util.generate_token())
if __name__ == '__main__':
unittest.main()
| 2.90625 | 3 |
accounts/migrations/0001_initial.py | UniversitaDellaCalabria/IdM | 2 | 12799877 | <filename>accounts/migrations/0001_initial.py
# Generated by Django 2.1.4 on 2019-02-18 14:37
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0009_alter_user_last_name_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('dn', models.CharField(blank=True, default='', max_length=254)),
('is_active', models.BooleanField(default=True, verbose_name='attivo')),
('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='email address')),
('matricola', models.CharField(blank=True, max_length=6, null=True, verbose_name='Matricola/Codice di identificazione')),
('first_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='Nome')),
('last_name', models.CharField(blank=True, max_length=30, null=True, verbose_name='Cognome')),
('codice_fiscale', models.CharField(blank=True, max_length=16, null=True, verbose_name='Codice Fiscale')),
('gender', models.CharField(blank=True, choices=[('male', 'Maschio'), ('female', 'Femmina'), ('other', 'Altro')], max_length=12, null=True, verbose_name='Genere')),
('location', models.CharField(blank=True, max_length=30, null=True, verbose_name='Place of birth')),
('birth_date', models.DateField(blank=True, null=True, verbose_name='Date of birth')),
('access_notification', models.BooleanField(default=True, help_text='enable email send', verbose_name='Send Email notification accesses')),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'ordering': ['username'],
'verbose_name_plural': 'Utenti Unical ID',
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
]
| 2.15625 | 2 |
utility/runner.py | theBraindonor/chicago-crime-arrests | 1 | 12799878 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
A common training and evaluation runner to allow for easy and consistent model creation and evaluation
"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "Copyright 2019, <NAME>"
__license__ = "Creative Commons Attribution-ShareAlike 4.0 International License"
__version__ = "1.0"
import pandas as pd
from collections import Counter
from skopt import BayesSearchCV
from sklearn.base import clone
from sklearn.externals.joblib import Parallel, delayed
from sklearn.model_selection import StratifiedKFold, train_test_split
from sklearn.utils import shuffle
from utility import batch_predict, batch_predict_proba, EvaluationFrame, Evaluator, Logger, use_project_path, batch_fit_classifier
def crossfold_classifier(estimator, transformer, x_train, y_train, train_index, test_index,
record_predict_proba, verbose, fit_increment, warm_start, max_iters, random_state):
"""
This method allows for training to be done using the joblib parallelism in scikit learn. Overall a hacky
method to allow for incremental training. Really needs to be refactored into a cleaner form.
"""
if hasattr(x_train, 'iloc'):
x_fold_train, x_fold_test = x_train.iloc[train_index], x_train.iloc[test_index]
else:
x_fold_train, x_fold_test = x_train[train_index], x_train[test_index]
if hasattr(y_train, 'iloc'):
y_fold_train, y_fold_test = y_train.iloc[train_index], y_train.iloc[test_index]
else:
y_fold_train, y_fold_test = y_train[train_index], y_train[test_index]
if fit_increment is not None:
if max_iters is not None:
for iter in range(max_iters):
x_fold_train, y_fold_train = shuffle(x_fold_train, y_fold_train, random_state=random_state)
batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
batch_fit_classifier(estimator, x_fold_train, y_fold_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
if transformer is not None:
x_fold_train = transformer.transform(x_fold_train)
estimator.fit(x_fold_train, y_fold_train)
y_fold_test_predict = batch_predict(estimator, x_fold_test, transformer=transformer, verbose=False)
fold_predict_frame = EvaluationFrame(y_fold_test, y_fold_test_predict)
fold_predict_proba_frame = None
if record_predict_proba:
y_fold_test_predict_proba = batch_predict_proba(estimator, x_fold_test, transformer=transformer, verbose=False)
fold_predict_proba_frame = EvaluationFrame(y_fold_test, y_fold_test_predict_proba)
return Evaluator.evaluate_classifier_fold(fold_predict_frame, fold_predict_proba_frame)
class Runner:
"""
    The runner supports bare estimator fitting and search-based fitting. By default it will make use of
    BayesSearchCV to perform hyperparameter tuning. Ensures everything is cleanly logged, evaluated, and pickled.
"""
def __init__(
self,
name,
df,
target,
estimator,
hyper_parameters=None):
self.name = name
self.df = df
self.target = target
self.estimator = estimator
self.hyper_parameters = hyper_parameters
self.trained_estimator = None
def run_classification_experiment(
self,
sample=None,
random_state=None,
test_size=0.20,
multiclass=False,
record_predict_proba=False,
sampling=None,
cv=5,
verbose=True,
transformer=None,
fit_increment=None,
warm_start=False,
max_iters=None,
n_jobs=-1):
use_project_path()
logger = Logger('%s.txt' % self.name)
evaluator = Evaluator(logger)
data_frame = self.df
if sample is not None:
data_frame = data_frame.sample(n=sample, random_state=random_state)
x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size)
if transformer is not None:
logger.time_log('Fitting Transformer...')
transformer.fit(x_train)
logger.time_log('Transformer Fit Complete.\n')
if sampling is not None:
logger.time_log('Starting Data Re-Sampling...')
logger.log('Original Training Shape is %s' % Counter(y_train))
x_new, y_new = sampling.fit_resample(x_train, y_train)
logger.log('Balanced Training Shape is %s' % Counter(y_new))
if hasattr(x_train, 'columns'):
x_new = pd.DataFrame(x_new, columns=x_train.columns)
x_train, y_train = x_new, y_new
logger.time_log('Re-Sampling Complete.\n')
logger.time_log('Shuffling Re-Sampled Data.\n')
x_train, y_train = shuffle(x_train, y_train, random_state=random_state)
logger.time_log('Shuffling Complete.\n')
if self.hyper_parameters is not None:
self.estimator.set_params(**self.hyper_parameters.params)
if cv is not None:
kfold = StratifiedKFold(n_splits=cv, random_state=random_state)
logger.time_log('Cross Validating Model...')
fold_scores = Parallel(n_jobs=n_jobs, verbose=3)(
delayed(crossfold_classifier)(
clone(self.estimator),
transformer,
x_train, y_train,
train_index, test_index,
record_predict_proba, verbose,
fit_increment, warm_start, max_iters, random_state
)
for train_index, test_index in kfold.split(x_train, y_train)
)
logger.time_log('Cross Validation Complete.\n')
logger.time_log('Training Model...')
if fit_increment is not None:
if max_iters is not None:
for iter in range(max_iters):
x_iter_train, y_iter_train = shuffle(x_train, y_train, random_state=random_state)
batch_fit_classifier(self.estimator, x_iter_train, y_iter_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
batch_fit_classifier(self.estimator, x_train, y_train, transformer=transformer, increment=fit_increment, verbose=verbose)
else:
if transformer is not None:
x_train_transformed = transformer.transform(x_train)
self.estimator.fit(x_train_transformed, y_train)
else:
self.estimator.fit(x_train, y_train)
logger.time_log('Training Complete.\n')
logger.time_log('Testing Training Partition...')
y_train_predict = batch_predict(self.estimator, x_train, transformer=transformer, verbose=verbose)
logger.time_log('Testing Complete.\n')
train_evaluation_frame = EvaluationFrame(y_train, y_train_predict)
logger.time_log('Testing Holdout Partition...')
y_test_predict = batch_predict(self.estimator, x_test, transformer=transformer, verbose=verbose)
logger.time_log('Testing Complete.\n')
test_evaluation_frame = EvaluationFrame(y_test, y_test_predict)
test_evaluation_frame.save('%s_predict.p' % self.name)
test_proba_evaluation_frame = None
if record_predict_proba:
logger.time_log('Testing Holdout Partition (probability)...')
y_test_predict_proba = batch_predict_proba(self.estimator, x_test, transformer=transformer, verbose=verbose)
test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba)
test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name)
logger.time_log('Testing Complete.\n')
if cv is not None:
evaluator.evaluate_fold_scores(fold_scores)
evaluator.evaluate_classifier_result(
self.estimator,
test_evaluation_frame,
train=train_evaluation_frame,
test_proba=test_proba_evaluation_frame,
multiclass=multiclass
)
logger.close()
if self.hyper_parameters is not None:
self.hyper_parameters.save('%s_params.p' % self.name)
self.trained_estimator = self.estimator
def run_classification_search_experiment(
self,
scoring,
sample=None,
random_state=None,
test_size=0.20,
n_jobs=-1,
n_iter=2,
cv=5,
verbose=3,
multiclass=False,
record_predict_proba=False,
sampling=None):
use_project_path()
logger = Logger('%s.txt' % self.name)
search = BayesSearchCV(
self.estimator,
self.hyper_parameters.search_space,
n_jobs=n_jobs,
n_iter=n_iter,
cv=cv,
verbose=verbose,
scoring=scoring,
return_train_score=True
)
data_frame = self.df
if sample is not None:
data_frame = data_frame.sample(n=sample, random_state=random_state)
x_train, x_test, y_train, y_test = train_test_split(data_frame, data_frame[self.target], test_size=test_size)
if sampling is not None:
logger.time_log('Starting Data Re-Sampling...')
logger.log('Original Training Shape is %s' % Counter(y_train))
x_new, y_new = sampling.fit_resample(x_train, y_train)
logger.log('Balanced Training Shape is %s' % Counter(y_new))
if hasattr(x_train, 'columns'):
x_new = pd.DataFrame(x_new, columns=x_train.columns)
x_train, y_train = x_new, y_new
logger.time_log('Re-Sampling Complete.\n')
logger.time_log('Shuffling Re-Sampled Data.\n')
x_train, y_train = shuffle(x_train, y_train, random_state=random_state)
logger.time_log('Shuffling Complete.\n')
logger.time_log('Starting HyperParameter Search...')
results = search.fit(x_train, y_train)
logger.time_log('Search Complete.\n')
logger.time_log('Testing Training Partition...')
y_train_predict = batch_predict(results.best_estimator_, x_train)
logger.time_log('Testing Complete.\n')
train_evaluation_frame = EvaluationFrame(y_train, y_train_predict)
logger.time_log('Testing Holdout Partition...')
y_test_predict = batch_predict(results.best_estimator_, x_test)
logger.time_log('Testing Complete.\n')
test_evaluation_frame = EvaluationFrame(y_test, y_test_predict)
test_evaluation_frame.save('%s_predict.p' % self.name)
test_proba_evaluation_frame = None
if record_predict_proba:
logger.time_log('Testing Holdout Partition (probability)...')
y_test_predict_proba = batch_predict_proba(results.best_estimator_, x_test)
test_proba_evaluation_frame = EvaluationFrame(y_test, y_test_predict_proba)
test_proba_evaluation_frame.save('%s_predict_proba.p' % self.name)
logger.time_log('Testing Complete.\n')
evaluator = Evaluator(logger)
evaluator.evaluate_classifier_result(
results,
test_evaluation_frame,
train=train_evaluation_frame,
test_proba=test_proba_evaluation_frame,
multiclass=multiclass
)
logger.close()
self.hyper_parameters.params = results.best_params_
self.hyper_parameters.save('%s_params.p' % self.name)
self.trained_estimator = results.best_estimator_
| 2.40625 | 2 |
almanak/file/__init__.py | clausjuhl/almanak | 0 | 12799879 | from almanak.file import compress, decompress, extract, fileinfo
__all__ = ['compress', 'decompress', 'extract', 'fileinfo'] | 1.34375 | 1 |
src/user_lib/connection_manager.py | crehmann/CO2Logger | 0 | 12799880 | from utime import ticks_ms
import network
import time
from umqtt.simple import MQTTClient
STATE_DISCONNECTED = 0
STATE_WLAN_CONNECTING = 1
STATE_WLAN_CONNECTED = 2
STATE_MQTT_CONNECTING = 3
STATE_MQTT_CONNECTED = 4
WLAN_CONNECTION_TIMEOUT_MS = 30 * 1000
MQTT_CONNECTION_TIMEOUT_MS = 30 * 1000
class ConnectionManager:
def __init__(self):
self._wlan = network.WLAN(network.STA_IF)
self._wlanSsid = None
self._wlanPassword = None
self._wlanConnectingTimestamp = None
self._mqtt = None
self._mqttConnectingTimestamp = None
self._state = STATE_DISCONNECTED
self._data = {}
def configureWlan(self, ssid, password):
self._wlanSsid = ssid
self._wlanPassword = password
def configureMqtt(self, mqttClientId, mqttServer, mqttUsername, mqttPassword):
self._mqtt = MQTTClient(mqttClientId, mqttServer, 0, mqttUsername, mqttPassword)
def initConnection(self):
if self._state == STATE_DISCONNECTED:
self.__connectWlan()
def publish(self, topic, data):
# keeping only the latest value
self._data[topic] = data
self.__flush()
def update(self):
if self._state > STATE_WLAN_CONNECTING \
            and not self._wlan.isconnected():
self._state = STATE_DISCONNECTED
if self._state == STATE_WLAN_CONNECTING:
self.__updateWlanConnectingState()
if self._state == STATE_WLAN_CONNECTED:
self.__updateWlanConnectedState()
if self._state == STATE_MQTT_CONNECTING:
self.__updateMqttConnectingState()
def __connectWlan(self):
if self._wlanSsid:
print("connecting to wlan...")
self._wlanConnectingTimestamp = ticks_ms()
self._state = STATE_WLAN_CONNECTING
try:
self._wlan.active(True)
self._wlan.disconnect()
self._wlan.connect(self._wlanSsid, self._wlanPassword)
except Exception as ex:
self.__printException(ex)
def __updateWlanConnectingState(self):
if ticks_ms() - self._wlanConnectingTimestamp > WLAN_CONNECTION_TIMEOUT_MS:
print("Could not connect to wlan. Falling back to disconnected state")
self._state = STATE_DISCONNECTED
elif self._wlan.isconnected() \
and not self._wlan.ifconfig()[0]=='0.0.0.0':
self._state = STATE_WLAN_CONNECTED
print("wlan connected")
def __updateWlanConnectedState(self):
if self._mqtt:
print("connecting to mqtt")
self._state = STATE_MQTT_CONNECTING
self._mqttConnectingTimestamp = ticks_ms()
try:
self._mqtt.connect()
except Exception as ex:
self.__printException(ex)
def __updateMqttConnectingState(self):
if ticks_ms() - self._mqttConnectingTimestamp > MQTT_CONNECTION_TIMEOUT_MS:
print("MQTT connection failed.")
self._state = STATE_WLAN_CONNECTED
else:
try:
self._mqtt.ping()
self._state = STATE_MQTT_CONNECTED
self.__flush()
print("mqtt connection established")
except Exception as ex:
self.__printException(ex)
def __flush(self):
if self._state == STATE_MQTT_CONNECTED:
try:
for key in list(self._data):
self._mqtt.publish(key, self._data[key])
del self._data[key]
except Exception as ex:
self._state = STATE_WLAN_CONNECTED
self.__printException(ex)
def __printException(self, ex):
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
print(message)
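# Hedged usage sketch (editor-added, not part of the original file; all credentials and topic names below
# are placeholder assumptions):
#
#   manager = ConnectionManager()
#   manager.configureWlan("<ssid>", "<password>")
#   manager.configureMqtt("<client-id>", "<broker>", "<user>", "<password>")
#   manager.initConnection()
#   while True:
#       manager.update()                       # drives the WLAN -> MQTT state machine
#       manager.publish(b"co2/ppm", b"415")    # queued until STATE_MQTT_CONNECTED, then flushed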
| 2.9375 | 3 |
setup.py | karlcow/ymir | 1 | 12799881 | <reponame>karlcow/ymir<filename>setup.py<gh_stars>1-10
import distutils.core
distutils.core.setup(
name='Ymir',
author='<NAME>',
author_email='<EMAIL>',
version='0.1dev',
packages=['ymir', ],
license='LICENSE.txt',
url='http://pypi.python.org/pypi/Ymir/',
description='script to manage La Grange blog http://www.la-grange.net/',
long_description=open('README.txt').read(),
)
| 1.164063 | 1 |
snli/wae-stochastic/gl.py | yq-wen/probabilistic_nlg | 28 | 12799882 | <reponame>yq-wen/probabilistic_nlg
config_fingerprint = None
config = None
log_writer = None
isTrain = True | 0.855469 | 1 |
code/ch08-outbound-text-messages/db/__all_models.py | rcastleman/twilio-and-sendgrid-python-course | 29 | 12799883 | <reponame>rcastleman/twilio-and-sendgrid-python-course<gh_stars>10-100
# noinspection PyUnresolvedReferences
from db import order
# noinspection PyUnresolvedReferences
from db import user
| 1.015625 | 1 |
MyInfo/models.py | hhauer/myinfo | 2 | 12799884 | <reponame>hhauer/myinfo
from django.db import models
from localflavor.us.models import USStateField, PhoneNumberField
from MyInfo.validators import validate_psu_phone
import logging
logger = logging.getLogger(__name__)
# PSU Mailcode
class Mailcode(models.Model):
code = models.CharField(max_length=40)
description = models.CharField(max_length=255)
def __str__(self):
return self.code + " -- " + self.description
# For departmental dropdown choices.
class Department(models.Model):
name = models.CharField(max_length=255, unique=True)
def __str__(self):
return self.name
# Buildings
class Building(models.Model):
name = models.CharField(max_length=255, unique=True)
code = models.CharField(max_length=10, unique=True)
def __str__(self):
return self.name
# Directory information for users with psuPublish = y. Upstream logic assumes that
# all fields except psu_uuid are to be rendered and editable.
class DirectoryInformation(models.Model):
COMPANY_CHOICES = (
('Portland State University', 'Portland State University'),
('Portland State University Foundation', 'PSU Foundation'),
)
psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True, editable=False)
company = models.CharField(max_length=50, choices=COMPANY_CHOICES, null=True, blank=True,
default="Portland State University")
telephone = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone])
fax = models.CharField(max_length=32, null=True, blank=True, validators=[validate_psu_phone])
job_title = models.CharField(max_length=128, null=True, blank=True)
department = models.ForeignKey(Department, null=True, blank=True)
office_building = models.ForeignKey(Building, null=True, blank=True)
office_room = models.CharField(max_length=10, null=True, blank=True)
mail_code = models.ForeignKey(Mailcode, null=True, blank=True)
street_address = models.CharField(max_length=150, null=True, blank=True, default="1825 SW Broadway")
city = models.CharField(max_length=50, null=True, blank=True, default="Portland")
state = USStateField(blank=True, null=True, default="OR")
zip_code = models.CharField(max_length=10, null=True, blank=True, default="97201")
def __str__(self): # pragma: no cover
return self.psu_uuid
# Password reset contact information.
class ContactInformation(models.Model):
psu_uuid = models.CharField(unique=True, max_length=36, primary_key=True)
cell_phone = PhoneNumberField(blank=True, null=True)
alternate_email = models.EmailField(max_length=254, blank=True, null=True)
def __str__(self):
return self.psu_uuid
# Maintenance notice.
class MaintenanceNotice(models.Model):
start_display = models.DateTimeField()
end_display = models.DateTimeField()
message = models.TextField()
def __str__(self): # pragma: no cover
return "Maintenance starting: " + str(self.start_display) | 2.5 | 2 |
shapeutils/django/serializers.py | slavas62/shape-utils | 0 | 12799885 | <reponame>slavas62/shape-utils
from django.contrib.gis.geos import GEOSGeometry
from django.db import models
import json
import datetime
import decimal
class JSONEncoder(json.JSONEncoder):
def default(self, obj):
        # check datetime before date (datetime is a subclass of date)
        if isinstance(obj, datetime.datetime):
            return obj.isoformat()
        if isinstance(obj, datetime.date):
            return obj.isoformat()
if isinstance(obj, decimal.Decimal):
return str(obj)
if isinstance(obj, GEOSGeometry):
return json.JSONDecoder().decode(obj.geojson)
if isinstance(obj, models.Model):
return str(obj)
return super(JSONEncoder, self).default(obj)
def geojsondata(features, geometry_field='geometry'):
data = {
'type': 'FeatureCollection',
'features': [],
}
for entry in features:
feature = {
'type': 'Feature',
'geometry': entry.get(geometry_field, None),
'properties': {},
}
for k, v in entry.iteritems():
if k != geometry_field:
feature['properties'][k] = v
data['features'].append(feature)
return data
def geojson(*args, **kwargs):
encoder = JSONEncoder()
data = geojsondata(*args, **kwargs)
return encoder.encode(data)
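# Hedged usage sketch (editor-added, not part of the original file; the feature dict is an illustrative assumption):
#
#   from django.contrib.gis.geos import Point
#   features = [{'name': 'sample point', 'geometry': Point(30.0, 50.0)}]
#   geojson(features)   # -> FeatureCollection JSON with 'name' under each feature's properties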
| 2.34375 | 2 |
c3po/__init__.py | yashchau1303/C-3PO | 34 | 12799886 | """__init__ module for project root."""
| 1.179688 | 1 |
app/helpers/pointdeletion.py | B-tronics/KinemAutomation | 0 | 12799887 | import cv2
import math
POINTS = []
class PointFilter:
def __init__(self, points):
self._points = points
def deletePoints(self, event, xCoordinate, yCoordinate, flags, params):
if event == cv2.EVENT_RBUTTONDOWN:
diff = list()
for point in self._points:
xd = math.pow((point[0] - xCoordinate), 2)
yd = math.pow((point[1] - yCoordinate), 2)
d = math.sqrt(xd + yd)
diff.append(d)
pointToDelete = diff.index(min(diff))
self._points.pop(pointToDelete) | 2.890625 | 3 |
doc/examples/nonlinear_from_rest/submit_multiple_check_RB.py | snek5000/snek5000-cbox | 0 | 12799888 | import numpy as np
from fluiddyn.clusters.legi import Calcul2 as Cluster
from critical_Ra_RB import Ra_c_RB as Ra_c_RB_tests
prandtl = 1.0
dim = 2
dt_max = 0.005
end_time = 30
nb_procs = 10
nx = 8
order = 10
stretch_factor = 0.0
Ra_vert = 1750
x_periodicity = False
z_periodicity = False
cluster = Cluster()
cluster.commands_setting_env = [
"PROJET_DIR=/fsnet/project/meige/2020/20CONVECTION",
"source /etc/profile",
"source $PROJET_DIR/miniconda3/etc/profile.d/conda.sh",
"conda activate env-snek",
"export NEK_SOURCE_ROOT=$HOME/Dev/snek5000/lib/Nek5000",
"export PATH=$PATH:$NEK_SOURCE_ROOT/bin",
"export FLUIDSIM_PATH=$PROJET_DIR/numerical/",
]
for aspect_ratio, Ra_c_test in Ra_c_RB_tests.items():
ny = int(nx * aspect_ratio)
if nx * aspect_ratio - ny:
continue
Ra_vert_nums = np.logspace(np.log10(Ra_c_test), np.log10(1.04 * Ra_c_test), 4)
for Ra_vert_num in Ra_vert_nums:
command = (
f"run_simul_check_from_python.py -Pr {prandtl} -nx {nx} --dim {dim} "
f"--order {order} --dt-max {dt_max} --end-time {end_time} -np {nb_procs} "
f"-a_y {aspect_ratio} --stretch-factor {stretch_factor} "
f"--Ra-vert {Ra_vert_num}"
)
if x_periodicity:
command += " --x-periodicity"
elif z_periodicity:
command += " --z-periodicity"
print(command)
name_run = f"RB_asp{aspect_ratio:.3f}_Ra{Ra_vert_num:.3e}_Pr{prandtl:.2f}_msh{nx*order}x{round(nx*aspect_ratio)*order}"
cluster.submit_script(
command,
name_run=name_run,
nb_cores_per_node=nb_procs,
omp_num_threads=1,
ask=False,
)
| 1.882813 | 2 |
cicada2/operator/daemon/types.py | herzo175/cicada-2 | 11 | 12799889 | <reponame>herzo175/cicada-2
from typing import Dict, List, TypedDict
class Dependency(TypedDict):
name: str
labels: Dict[str, str]
statuses: List[str]
class SetupConfig(TypedDict):
pvc: str
mountPath: str
remotePath: str
localPath: str
class Spec(TypedDict):
dependencies: List[Dependency]
ioConfig: Dict[str, str]
engineConfig: Dict[str, str]
tests: SetupConfig
reports: SetupConfig
class Metadata(TypedDict):
name: str
namespace: str
class TestEngineBody(TypedDict):
metadata: Metadata
spec: Spec
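# Hedged example (editor-added, not part of the original file): a body matching these TypedDicts could look
# roughly like the following; every concrete value is an assumption.
#
#   body: TestEngineBody = {
#       "metadata": {"name": "smoke-tests", "namespace": "default"},
#       "spec": {
#           "dependencies": [{"name": "api", "labels": {"app": "api"}, "statuses": ["Running"]}],
#           "ioConfig": {}, "engineConfig": {},
#           "tests": {"pvc": "tests-pvc", "mountPath": "/tests", "remotePath": "", "localPath": "tests"},
#           "reports": {"pvc": "reports-pvc", "mountPath": "/reports", "remotePath": "", "localPath": "reports"},
#       },
#   }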
| 2.140625 | 2 |
programs/foldtest_magic.py | yamamon75/PmagPy | 2 | 12799890 | #!/usr/bin/env python
import os
import sys
import numpy as np
import matplotlib
if matplotlib.get_backend() != "TKAgg":
matplotlib.use("TKAgg")
import pandas as pd
from matplotlib import pyplot as plt
import pmagpy.pmag as pmag
import pmagpy.pmagplotlib as pmagplotlib
from pmag_env import set_env
import operator
OPS = {'<' : operator.lt, '<=' : operator.le,
'>' : operator.gt, '>=': operator.ge, '=': operator.eq}
def main():
"""
NAME
foldtest_magic.py
DESCRIPTION
does a fold test (Tauxe, 2010) on data
INPUT FORMAT
pmag_specimens format file, er_samples.txt format file (for bedding)
SYNTAX
foldtest_magic.py [command line options]
OPTIONS
-h prints help message and quits
-f sites formatted file [default for 3.0 is sites.txt, for 2.5, pmag_sites.txt]
-fsa samples formatted file
-fsi sites formatted file
-exc use criteria to set acceptance criteria (supported only for data model 3)
-n NB, set number of bootstraps, default is 1000
-b MIN, MAX, set bounds for untilting, default is -10, 150
-fmt FMT, specify format - default is svg
-sav saves plots and quits
-DM NUM MagIC data model number (2 or 3, default 3)
OUTPUT
Geographic: is an equal area projection of the input data in
original coordinates
Stratigraphic: is an equal area projection of the input data in
tilt adjusted coordinates
% Untilting: The dashed (red) curves are representative plots of
maximum eigenvalue (tau_1) as a function of untilting
The solid line is the cumulative distribution of the
% Untilting required to maximize tau for all the
bootstrapped data sets. The dashed vertical lines
are 95% confidence bounds on the % untilting that yields
the most clustered result (maximum tau_1).
Command line: prints out the bootstrapped iterations and
finally the confidence bounds on optimum untilting.
If the 95% conf bounds include 0, then a pre-tilt magnetization is indicated
If the 95% conf bounds include 100, then a post-tilt magnetization is indicated
If the 95% conf bounds exclude both 0 and 100, syn-tilt magnetization is
possible as is vertical axis rotation or other pathologies
"""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
kappa = 0
dir_path = pmag.get_named_arg("-WD", ".")
nboot = int(float(pmag.get_named_arg("-n", 1000))) # number of bootstraps
fmt = pmag.get_named_arg("-fmt", "svg")
data_model_num = int(float(pmag.get_named_arg("-DM", 3)))
if data_model_num == 3:
infile = pmag.get_named_arg("-f", 'sites.txt')
orfile = 'samples.txt'
site_col = 'site'
dec_col = 'dir_dec'
inc_col = 'dir_inc'
tilt_col = 'dir_tilt_correction'
dipkey, azkey = 'bed_dip', 'bed_dip_direction'
crit_col = 'criterion'
critfile = 'criteria.txt'
else:
infile = pmag.get_named_arg("-f", 'pmag_sites.txt')
orfile = 'er_samples.txt'
site_col = 'er_site_name'
dec_col = 'site_dec'
inc_col = 'site_inc'
tilt_col = 'site_tilt_correction'
dipkey, azkey = 'sample_bed_dip', 'sample_bed_dip_direction'
crit_col = 'pmag_criteria_code'
critfile = 'pmag_criteria.txt'
if '-sav' in sys.argv:
plot = 1
else:
plot = 0
if '-b' in sys.argv:
ind = sys.argv.index('-b')
untilt_min = int(sys.argv[ind+1])
untilt_max = int(sys.argv[ind+2])
else:
untilt_min, untilt_max = -10, 150
if '-fsa' in sys.argv:
orfile = pmag.get_named_arg("-fsa", "")
elif '-fsi' in sys.argv:
orfile = pmag.get_named_arg("-fsi", "")
if data_model_num == 3:
dipkey, azkey = 'bed_dip', 'bed_dip_direction'
else:
dipkey, azkey = 'site_bed_dip', 'site_bed_dip_direction'
else:
if data_model_num == 3:
orfile = 'sites.txt'
else:
orfile = 'pmag_sites.txt'
orfile = pmag.resolve_file_name(orfile, dir_path)
infile = pmag.resolve_file_name(infile, dir_path)
critfile = pmag.resolve_file_name(critfile, dir_path)
df = pd.read_csv(infile, sep='\t', header=1)
# keep only records with tilt_col
data = df.copy()
data = data[data[tilt_col].notnull()]
data = data.where(data.notnull(), "")
# turn into pmag data list
data = list(data.T.apply(dict))
# get orientation data
if data_model_num == 3:
# often orientation will be in infile (sites table)
if os.path.split(orfile)[1] == os.path.split(infile)[1]:
ordata = df[df[azkey].notnull()]
ordata = ordata[ordata[dipkey].notnull()]
ordata = list(ordata.T.apply(dict))
# sometimes orientation might be in a sample file instead
else:
ordata = pd.read_csv(orfile, sep='\t', header=1)
ordata = list(ordata.T.apply(dict))
else:
ordata, file_type = pmag.magic_read(orfile)
if '-exc' in sys.argv:
crits, file_type = pmag.magic_read(critfile)
SiteCrits = []
for crit in crits:
if crit[crit_col] == "DE-SITE":
SiteCrits.append(crit)
#break
# get to work
#
PLTS = {'geo': 1, 'strat': 2, 'taus': 3} # make plot dictionary
if not set_env.IS_WIN:
pmagplotlib.plot_init(PLTS['geo'], 5, 5)
pmagplotlib.plot_init(PLTS['strat'], 5, 5)
pmagplotlib.plot_init(PLTS['taus'], 5, 5)
if data_model_num == 2:
GEOrecs = pmag.get_dictitem(data, tilt_col, '0', 'T')
else:
GEOrecs = data
if len(GEOrecs) > 0: # have some geographic data
num_dropped = 0
DIDDs = [] # set up list for dec inc dip_direction, dip
for rec in GEOrecs: # parse data
dip, dip_dir = 0, -1
Dec = float(rec[dec_col])
Inc = float(rec[inc_col])
orecs = pmag.get_dictitem(
ordata, site_col, rec[site_col], 'T')
if len(orecs) > 0:
if orecs[0][azkey] != "":
dip_dir = float(orecs[0][azkey])
if orecs[0][dipkey] != "":
dip = float(orecs[0][dipkey])
if dip != 0 and dip_dir != -1:
if '-exc' in sys.argv:
keep = 1
for site_crit in SiteCrits:
crit_name = site_crit['table_column'].split('.')[1]
if crit_name and crit_name in rec.keys() and rec[crit_name]:
# get the correct operation (<, >=, =, etc.)
op = OPS[site_crit['criterion_operation']]
# then make sure the site record passes
if op(float(rec[crit_name]), float(site_crit['criterion_value'])):
keep = 0
if keep == 1:
DIDDs.append([Dec, Inc, dip_dir, dip])
else:
num_dropped += 1
else:
DIDDs.append([Dec, Inc, dip_dir, dip])
if num_dropped:
print("-W- Dropped {} records because each failed one or more criteria".format(num_dropped))
else:
print('no geographic directional data found')
sys.exit()
pmagplotlib.plot_eq(PLTS['geo'], DIDDs, 'Geographic')
data = np.array(DIDDs)
D, I = pmag.dotilt_V(data)
TCs = np.array([D, I]).transpose()
pmagplotlib.plot_eq(PLTS['strat'], TCs, 'Stratigraphic')
if plot == 0:
pmagplotlib.draw_figs(PLTS)
Percs = list(range(untilt_min, untilt_max))
Cdf, Untilt = [], []
plt.figure(num=PLTS['taus'])
print('doing ', nboot, ' iterations...please be patient.....')
for n in range(nboot): # do bootstrap data sets - plot first 25 as dashed red line
if n % 50 == 0:
print(n)
Taus = [] # set up lists for taus
PDs = pmag.pseudo(DIDDs)
if kappa != 0:
for k in range(len(PDs)):
d, i = pmag.fshdev(kappa)
dipdir, dip = pmag.dodirot(d, i, PDs[k][2], PDs[k][3])
PDs[k][2] = dipdir
PDs[k][3] = dip
for perc in Percs:
tilt = np.array([1., 1., 1., 0.01*perc])
D, I = pmag.dotilt_V(PDs*tilt)
TCs = np.array([D, I]).transpose()
ppars = pmag.doprinc(TCs) # get principal directions
Taus.append(ppars['tau1'])
if n < 25:
plt.plot(Percs, Taus, 'r--')
# tilt that gives maximum tau
Untilt.append(Percs[Taus.index(np.max(Taus))])
Cdf.append(float(n) / float(nboot))
plt.plot(Percs, Taus, 'k')
plt.xlabel('% Untilting')
plt.ylabel('tau_1 (red), CDF (green)')
Untilt.sort() # now for CDF of tilt of maximum tau
plt.plot(Untilt, Cdf, 'g')
lower = int(.025*nboot)
upper = int(.975*nboot)
plt.axvline(x=Untilt[lower], ymin=0, ymax=1, linewidth=1, linestyle='--')
plt.axvline(x=Untilt[upper], ymin=0, ymax=1, linewidth=1, linestyle='--')
tit = '%i - %i %s' % (Untilt[lower], Untilt[upper], 'Percent Unfolding')
print(tit)
plt.title(tit)
if plot == 0:
pmagplotlib.draw_figs(PLTS)
ans = input('S[a]ve all figures, <Return> to quit \n ')
if ans != 'a':
print("Good bye")
sys.exit()
files = {}
for key in list(PLTS.keys()):
files[key] = ('foldtest_'+'%s' % (key.strip()[:2])+'.'+fmt)
pmagplotlib.save_plots(PLTS, files)
if __name__ == "__main__":
main()
| 2.515625 | 3 |
demo.py | solaaa/signal_processing_by_GPU | 0 | 12799891 | # coding=utf-8
import numpy as np
import reikna.cluda as cluda
from reikna.fft import FFT, FFTShift
import pyopencl.array as clarray
from pyopencl import clmath
from reikna.core import Computation, Transformation, Parameter, Annotation, Type
from reikna.algorithms import PureParallel
from matplotlib import cm
import time as t
import matplotlib.pyplot as plt
import statistic_functions4 as sf
#import mylog as Log
np.set_printoptions(threshold=np.inf)
batch = 100
N = 1024
api = cluda.any_api()
thr = api.Thread.create()
data = np.load('8psk_data.npy')
data = np.reshape(data, (batch*4, N)) # a total of batch*4 = 400 runs
t1 = t.clock()
data0 = data[0:batch, :].astype(np.complex128)
data_g = thr.to_device(data0)
print(t.clock()-t1)
#compile
fft = FFT(data_g, (0,1))
fftc = fft.compile(thr)
data_f = thr.array(data0.shape, dtype=np.complex128)
shift = FFTShift(data_f, (0,1))
shiftc = shift.compile(thr)
data_shift = thr.array(data0.shape, dtype=np.complex128)
sum = sf.stat(thr)
logg10 = sf.logg10(thr)
def myfft(data):
'''
input:
data: cluda-Array (100, 1024)
-----------------------------------------------
output:
TS_gpu: cluda-Array (1000, 1024)
'''
#FFT
t_fft = t.clock()
data_f = thr.array(data.shape, dtype=np.complex128)
STAT_gpu = thr.array(data.shape, dtype=np.complex128)
fftc(data_f, data)
shiftc(STAT_gpu, data_f)
#log
t_log = t.clock()
STAT_gpu = abs(STAT_gpu)
logg10(STAT_gpu, global_size = (N, batch))
#统计,插值
t_st = t.clock()
TS_gpu = cluda.ocl.Array(thr, shape=(1000, N), dtype=np.int)
sum(TS_gpu, STAT_gpu, global_size = (N,batch))
print('fft: %f, log: %f, stat: %f'%(t_log-t_fft, t_st-t_log, t.clock()-t_st))
print('total: %f'%(t.clock()-t_fft))
return TS_gpu
i=0
j=0
fig=plt.figure()
#fig, ax = plt.subplots()
summ = 0
while i<100:
t1 = t.clock()
    data0 = data[j*batch:(j+1)*batch, :].astype(np.complex128)  # take the j-th block of batch rows
data_g = thr.to_device(data0)
out = myfft(data_g)
out = out.get()
t2 = t.clock()
#nipy_spectral
plt.clf()
#plt.imshow(out, cmap = cm.hot)
plt.imshow(out, cmap = 'nipy_spectral')
plt.ylim(0,1000)
plt.pause(0.00000001)
print('No. %d, transmission+compute: %f, plot: %f'%(i, t2-t1, t.clock()-t2))
summ = summ + t2-t1
j = j + 1
i = i + 1
if j == 4:
j=0
print('avg compute: %f'%(summ/100))
| 2.140625 | 2 |
doc/source/EXAMPLES/allskyf25.py | kapteyn-astro/kapteyn | 3 | 12799892 | <gh_stars>1-10
from kapteyn import maputils
import numpy
from service import *
fignum = 25
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
title = r"Polyconic projection (PCO). (Cal. fig.29)"
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---PCO',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -5.0,
'CTYPE2' : 'DEC--PCO',
'CRVAL2' : 0.0, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 5.0
}
X = polrange()
Y = numpy.arange(-75,90,15.0)
# !!!!!! Let the world coordinates for constant latitude run from -180,180
# instead of 0,360. Then one prevents the connection between the two points
# 179.9999 and 180.0001 which is a jump, but smaller than the definition of
# a rejected jump in the wcsgrat module.
# Also we need to increase the value of 'gridsamples' to
# increase the relative size of a jump.
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2),
wylim=(-90,90.0), wxlim=(-180,180),
startx=X, starty=Y, gridsamples=2000)
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
# Remove the left 180 deg and print the right 180 deg instead
w1 = numpy.arange(0,151,30.0)
w2 = numpy.arange(180,360,30.0)
w2[0] = 180 + epsilon
lon_world = numpy.concatenate((w1, w2))
lat_world = [-60, -30, 30, 60]
labkwargs0 = {'color':'r', 'va':'bottom', 'ha':'right'}
labkwargs1 = {'color':'b', 'va':'bottom', 'ha':'right'}
doplot(frame, fignum, annim, grat, title,
lon_world=lon_world, lat_world=lat_world,
labkwargs0=labkwargs0, labkwargs1=labkwargs1,
markerpos=markerpos)
| 2.328125 | 2 |
questionbank/comments/filters.py | SyafiqTermizi/questionbank | 1 | 12799893 | <gh_stars>1-10
import django_filters
from .models import QuestionComment, ExamComment
class QuestionCommentFilter(django_filters.FilterSet):
is_resolved = django_filters.BooleanFilter()
class Meta:
model = QuestionComment
fields = ('is_resolved',)
class ExamCommentFilter(django_filters.FilterSet):
is_resolved = django_filters.BooleanFilter()
class Meta:
model = ExamComment
fields = ('is_resolved',)
| 1.96875 | 2 |
botorch/test_functions/__init__.py | cnheider/botorch | 0 | 12799894 | <reponame>cnheider/botorch
#!/usr/bin/env python3
from .branin import neg_branin
from .eggholder import neg_eggholder
from .hartmann6 import neg_hartmann6
from .holder_table import neg_holder_table
from .michalewicz import neg_michalewicz
from .styblinski_tang import neg_styblinski_tang
__all__ = [
"neg_branin",
"neg_eggholder",
"neg_hartmann6",
"neg_holder_table",
"neg_michalewicz",
"neg_styblinski_tang",
]
| 1.320313 | 1 |
bot/db/repos/HomeworkManager.py | WizzardHub/EcoleDirecteOrtBot | 0 | 12799895 | from bot.db.entities.Homework import Homework
class HomeworkManager:
def __init__(self, db):
self._db = db
def getAll(self):
cur = self._db.cursor()
cur.execute('SELECT * FROM Homework')
homeworks = []
for homework in cur.fetchall():
homeworks.append(Homework(homework[1], homework[2], homework[3], homework[4], homework[5], homework[6], homework[7], homework[8], homework[9], homework[10]))
return homeworks
def insert(self, homework):
cur = self._db.cursor()
cur.execute('INSERT INTO Homework (date, matiere, codeMatiere, aFaire, idDevoir'
', documentsAFaire, donneLe, effectue, interrogation, rendreEnLigne'
') values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)',
(homework.getDate(), homework.getMatiere(), homework.getCodeMatiere(),
homework.getAFaire(), homework.getIdDevoir(), homework.getDocumentAFaire(),
homework.getDonneLe(), homework.getEffectue(), homework.getInterrogation(),
homework.getRendreEnLigne(),))
self._db.commit() | 2.8125 | 3 |
dataconverter.py | zhang96/CSVToJSON | 1 | 12799896 | <gh_stars>1-10
# Simple Python program that converts CSV to JSON for my MongoDB project.
# - It converts all the csv files under the directory at once.
import csv
import json
import glob
for files in glob.glob("*.csv"):
csvfile = open(files, 'r')
jsonfile = open(files[:-4] + '.json', 'w')
reader = csv.reader(open(files, 'rU'))
fieldnames = ()
out = 0
for row in reader:
temp = []
# print row
fieldnames = row
break
counter = 0
out = ""
for row in reader:
if counter == 0:
# skip
print "0"
else:
# print row
itemCounter = 0
temp = "{"
for item in row:
a = item
# print a
nameCounter = 0
for item in fieldnames:
if itemCounter == nameCounter:
# print item
temp += ' "'
temp += item
temp += '"'
temp += ': "'
temp += a
temp += '",'
nameCounter += 1
itemCounter += 1
temp = temp[:-1]
temp += "}"
# print temp
out += temp
out += ", "
counter += 1
out = out[:-1]
jsonfile.write(out)
print "End of Execution"
| 3.609375 | 4 |
encrypt_decrypt_app/atbash_cipher_tests.py | Chika-Jinanwa/chikas-cipher | 0 | 12799897 | import unittest
from atbash_cipher import AtbashCipher
test = AtbashCipher() #instantiate test atbash cipher class
class AtbashCipherEncryptTests(unittest.TestCase):
def test_empty_string(self):
self.assertMultiLineEqual(test.encrypt(''), '')
def test_string_with_only_spaces(self):
self.assertMultiLineEqual(test.encrypt(' '), ' ')
def test_string_no_wrap_around(self):
self.assertMultiLineEqual(test.encrypt('abc'), 'zyx')
def test_string_wrap_around(self):
self.assertMultiLineEqual(
test.encrypt('wvu'),
'def')
def test_multi_word(self):
self.assertMultiLineEqual(test.encrypt('abc wvu'), 'zyx def')
#for values of key less than 0, JavaScript frontend is tasked with validating
#that key must be >= 0. Hence the test is skipped here
class AtbashCipherDecryptTests(unittest.TestCase):
def test_empty_string(self):
self.assertMultiLineEqual(test.decrypt(''), '')
def test_string_with_only_spaces(self):
self.assertMultiLineEqual(test.decrypt(' '), ' ')
def test_string_no_wrap_around(self):
self.assertMultiLineEqual(test.decrypt('zyx'), 'abc')
def test_string_wrap_around(self):
self.assertMultiLineEqual(
test.decrypt('def'),
'wvu')
def test_multi_word(self):
self.assertMultiLineEqual(test.decrypt('zyx def'), 'abc wvu')
if __name__ == '__main__':
    unittest.main()
| 3.25 | 3 |
Pi-SMS.py | Femi123p/Pi-Sms | 1 | 12799898 | <filename>Pi-SMS.py
from firebase import firebase
import RPi.GPIO as GPIO
import plivo
from time import sleep # this lets us have a time delay (see line 15)
GPIO.setmode(GPIO.BCM) # set up BCM GPIO numbering
GPIO.setup(25, GPIO.IN) # set GPIO25 as input (button)
GPIO.setup(24, GPIO.OUT)
#plivo setup
srcPhoneNo="<mobile number>" #phone number associated with your account
dstPhoneNo="<mobile number>" #phone number where you want to send sms
smsText= u"leds glow" #message which you want to send
msgObj={ 'src': srcPhoneNo, 'dst': dstPhoneNo, 'text': smsText}
#get auth_id and auth_token from plivo console
auth_id="<id>"
#auth_id key which you will get on plivio app
auth_token="<token>"
#auth_token which you will get on plivio app
pSMS = plivo.RestAPI(auth_id, auth_token)
#firebase setup
firebaseURL='< your firebase link>'
fBase = firebase.FirebaseApplication(firebaseURL, None)
led_is_on = False  # tracks the LED state so the SMS is only sent once per button press

def LedOn():
    global led_is_on
    print(fBase.put('/data/user_1/', "LedOn", "1"))
    print(pSMS.send_message(msgObj))
    led_is_on = True

def LedOff():
    global led_is_on
    if fBase.get('/data/user_1/', 'LedOn') == "1":
        print(fBase.put('/data/user_1/', "LedOn", "0"))
    led_is_on = False

try:
    while True:
        if GPIO.input(25):
            print("Port 25 is 1/HIGH/True - LED ON")
            GPIO.output(24, 1)
            if not led_is_on:  # only notify on the rising edge of the button press
                LedOn()
            # result=firebase.put('/data/','user_1',{'gasleakage':'1'})
        else:
            print("Port 25 is 0/LOW/False - LED OFF")
            GPIO.output(24, 0)  # set port/pin value to 0/LOW/False
            if led_is_on:
                LedOff()
        sleep(0.1)
finally:
GPIO.cleanup()
| 3.3125 | 3 |
dps/common.py | Kel0/django-parrallel-sessions | 0 | 12799899 | from typing import Protocol
class VEnvProtocol(Protocol):
path = None
def activate(self):
"""
Activate virtual environment
"""
class VEnvTypeProtocol(Protocol):
_manager = None
def validate(self, *args, **kwargs):
"""
Make sure that venv is exist
"""
def get_activate_string(self):
pass
class ShellExecutorProtocol(Protocol):
def add_to_exec_chain(self, activate_str):
pass
def exec(self):
pass
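# --- Illustrative sketch (added for clarity; not part of the original module) ---
# A concrete object that structurally satisfies VEnvProtocol; the shell command in
# the comment is an assumption about typical virtualenv activation, not this
# project's actual logic.
class ExampleVEnv:
    def __init__(self, path):
        self.path = path

    def activate(self):
        # A real implementation might run `source <path>/bin/activate` in a shell.
        print(f"source {self.path}/bin/activate")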
| 2.671875 | 3 |
tests/fields/test_integer.py | Ennkua/wtforms | 1,197 | 12799900 | from tests.common import DummyPostData
from wtforms.fields import IntegerField
from wtforms.form import Form
class F(Form):
a = IntegerField()
b = IntegerField(default=48)
def test_integer_field():
form = F(DummyPostData(a=["v"], b=["-15"]))
assert form.a.data is None
assert form.a.raw_data == ["v"]
assert form.a() == """<input id="a" name="a" type="number" value="v">"""
assert form.b.data == -15
assert form.b() == """<input id="b" name="b" type="number" value="-15">"""
assert not form.a.validate(form)
assert form.b.validate(form)
form = F(DummyPostData(a=[], b=[""]))
assert form.a.data is None
assert form.a.raw_data == []
assert form.b.data is None
assert form.b.raw_data == [""]
assert not form.validate()
assert len(form.b.process_errors) == 1
assert len(form.b.errors) == 1
form = F(b=9)
assert form.b.data == 9
assert form.a._value() == ""
assert form.b._value() == "9"
form = F(DummyPostData(), data=dict(b="v"))
assert form.b.data is None
assert form.a._value() == ""
assert form.b._value() == ""
assert not form.validate()
assert len(form.b.process_errors) == 1
assert len(form.b.errors) == 1
| 2.71875 | 3 |
modules/shellhelper.py | pilgun/app-run-and-log | 1 | 12799901 | <filename>modules/shellhelper.py
from loguru import logger
import subprocess
from modules import config
from modules.exceptions import AbsentPackageException, ErrorInstallingException, ErrorUninstallingException, NotEnoughSpaceException
import os
def install(new_apk_path):
cmd = '"{}" install -r "{}"'.format(config.ADB_PATH, new_apk_path)
try:
out = request_pipe(cmd)
except Exception as e:
if 'not enough space' in str(e):
raise NotEnoughSpaceException()
raise ErrorInstallingException
    if 'Exception occurred while dumping' in out:
        raise ErrorInstallingException
def uninstall(package):
cmd = '"{}" uninstall "{}"'.format(config.ADB_PATH, package)
try:
request_pipe(cmd)
except Exception:
raise ErrorUninstallingException
def request_pipe(cmd):
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
out, err = pipe.communicate()
res = out
if not out:
res = err
if pipe.returncode > 0 :
raise Exception("----------------------------------------------------\n\
Out: %s\nError: %s" % (out, err))
return res.decode('utf-8')
def start_activity_explicitly(package_name, activity_name):
# adb shell am start -n com.package.name/com.package.name.ActivityName
logger.debug("Starting activity [%s] of the package [%s]..." % (activity_name, package_name))
run_string = package_name + '/' + activity_name
cmd = "{0} shell am start -n {1}".format(config.ADB_PATH, run_string)
request_pipe(cmd)
def clean_log():
cmd = "{0} logcat -c".format(config.ADB_PATH)
request_pipe(cmd)
def dump_log(path):
cmd = "{0} logcat -d *:E > {1}".format(config.ADB_PATH, path)
request_pipe(cmd)
def save_log(logs_dir, app):
file_path = os.path.join(logs_dir, "{}.txt".format(app))
dump_log(file_path)
return file_path
def read_log(path):
with open(path, 'r') as file:
data = file.read()
return data
def get_api_level():
cmd = "{} shell getprop ro.build.version.sdk".format(config.ADB_PATH)
api_level = int(request_pipe(cmd))
return api_level
def run_monkey(package, seed, throttle, event_num):
    cmd = '"{}" shell monkey -p {} -s {} --throttle {} {}'
    request_pipe(cmd.format(config.ADB_PATH, package, seed, throttle, event_num))
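# --- Hedged usage sketch (added for clarity; not part of the original module) ---
# Typical call sequence, assuming config.ADB_PATH points at a working adb binary and
# a device or emulator is attached; the package/activity names are placeholders.
#
#   from modules import shellhelper
#
#   shellhelper.install('/tmp/app-debug.apk')
#   shellhelper.start_activity_explicitly('com.example.app', 'com.example.app.MainActivity')
#   shellhelper.clean_log()
#   shellhelper.run_monkey('com.example.app', seed=42, throttle=200, event_num=500)
#   log_path = shellhelper.save_log('/tmp/logs', 'com.example.app')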
| 2.234375 | 2 |
models/classification/train_on_flir.py | Lindronics/honours_project_dissertation | 2 | 12799902 | import os
import argparse
import numpy as np
import tensorflow as tf
import tensorflow.keras as K
from sklearn.metrics import classification_report
from dataset import FLIRDataset
def grid_search(train_labels: str,
test_labels: str,
output:str,
res:tuple=(120, 160),
lazy:bool=True,
batch_size:int=16,
epochs:int=20):
"""
    Trains a ResNet50V2 classifier on the FLIR dataset (despite the function name, no grid search is performed).
Params
------
train_labels: str
Path to training labels
test_labels: str
Path to testing labels
output: str
Path to output directory
res: tuple
Input resolution of network
lazy: bool
Whether to load data lazily in batches during training
batch_size: int
Batch size in case of lazy loading
epochs: int
Training epochs
"""
# Data
print("=> Loading data.")
train = FLIRDataset(train_labels, res=res, batch_size=batch_size)
test = FLIRDataset(test_labels, res=res, batch_size=batch_size)
# In eager loading mode, train on everything.
if not lazy:
X_train, y_train = train.get_all()
X_test, y_test = test.get_all()
X_train = np.concatenate([X_train, X_test], axis=0)
y_train = np.concatenate([y_train, y_test], axis=0)
def net(x, num_classes=1):
x = K.applications.resnet_v2.ResNet50V2(include_top=False, weights=None, input_shape=x.shape[1:])(x)
x = K.layers.Flatten()(x)
x = K.layers.Dense(num_classes, activation="softmax")(x)
return x
print("\n=> Training model.")
input_tensor = K.layers.Input((160, 120, 1))
output_tensor = net(input_tensor, num_classes=train.num_classes())
model = K.Model(input_tensor, output_tensor)
model.compile(optimizer="sgd",
loss="categorical_crossentropy",
metrics=["accuracy"])
# Train model
if lazy:
model.fit(x=train,
epochs=epochs,
validation_data=train,
verbose=2)
else:
model.fit(x=X_train,
y=y_train,
epochs=epochs,
batch_size=batch_size,
verbose=2)
# Save weights
model.save_weights(os.path.join(output, "flir_pretrained_weights.h5"))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train model on FLIR dataset.")
parser.add_argument("train", help="Directory containing training labels")
parser.add_argument("test", help="Directory containing testing labels")
parser.add_argument("out", help="Output directory for results")
parser.add_argument("epochs", help="Number of epochs")
parser.add_argument("-l", "--lazy", dest="lazy", help="Load data lazily", action="store_true")
args = vars(parser.parse_args())
grid_search(args["train"],
args["test"],
args["out"],
res=(120, 160),
lazy=bool(args["lazy"]),
epochs=int(args["epochs"]))
print("\n=> Finished.") | 2.609375 | 3 |
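# Example invocation of train_on_flir.py (illustrative; paths are placeholders): the
# positional arguments are the training labels, the testing labels, the output
# directory and the epoch count, with -l/--lazy enabling batched lazy loading:
#   python train_on_flir.py train_labels.txt test_labels.txt results/ 20 --lazy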
src/reader.py | DavidRivasPhD/mrse | 0 | 12799903 | """
Module to read the query and other inputs
"""
from Bio import Entrez
from filter import filter_selector
def inputnow():
"""
Reads the inputs' values
    :return: query, results_number
"""
# the email must be the user's individual/personal email (NOT an institutional email or a default email
# as this could lead to exceeding the maximum allowable frequency of requests per user, of 3 per second)
Entrez.email = "<EMAIL>"
# The maximum number of search results to be displayed could be the following default value or an input value < 100
results_number = 5
query = input("enter your search query: ")
filter_option = input("would you like to use advanced search filter? (yes/no): ")
if filter_option == "yes":
query = filter_selector(query)
return query, results_number
| 3.78125 | 4 |
jarbas_hive_mind/settings.py | flo-mic/HiveMind-core | 43 | 12799904 | <gh_stars>10-100
from os import makedirs
from os.path import isdir, join, expanduser
DATA_PATH = expanduser("~/jarbasHiveMind")
if not isdir(DATA_PATH):
makedirs(DATA_PATH)
CERTS_PATH = join(DATA_PATH, "certs")
if not isdir(CERTS_PATH):
makedirs(CERTS_PATH)
DB_PATH = join(DATA_PATH, "database")
if not isdir(DB_PATH):
makedirs(DB_PATH)
CLIENTS_DB = "sqlite:///" + join(DB_PATH, "clients.db")
DEFAULT_PORT = 5678
USE_SSL = True
LOG_BLACKLIST = []
MYCROFT_WEBSOCKET_CONFIG = {
"host": "0.0.0.0",
"port": 8181,
"route": "/core",
"ssl": False
}
| 1.867188 | 2 |
assignment4/merge.py | zhaoze1991/cs6200 | 1 | 12799905 | #!/usr/bin/env python
file1_name = 'a'
file2_name = 'titanic'
class Item(object):
    """docstring for Item"""
def __init__(self):
super(Item, self).__init__()
l1 = open(file1_name, 'r').readlines()
l2 = open(file2_name, 'r').readlines()
res = open('res','w')
def run():
for i in range(len(l1)):
line1 = l1[i].split()
line2 = l2[i].split()
val = (int(line1[3]) + int(line2[3])) / 2
res.writelines(line1[0] + ' ' + line1[1] + ' ' + line1[2] + ' ' + str(val) + ' \n')
run()
res.close()
| 3.453125 | 3 |
kernal/FileSystem.py | FAWC-bupt/OS-Course-Design | 1 | 12799906 | <filename>kernal/FileSystem.py<gh_stars>1-10
"""
Directory logical structure: tree
Directory physical structure: contiguous allocation
TODO: how should external fragmentation on the disk be handled?
TODO: add interrupts for disk I/O
"""
import math
from enum import Enum
from kernal import Tool
class FileOperation(Enum):
Read = 0
Write = 1
Create = 2
Rename = 3
Delete = 4
Redirect = 5
class FileAuthority(Enum):
Default = 0
ReadOnly = 1
WriteOnly = 2
class Folder:
def __init__(self, folder_name: str, parent_folder, child_nodes: list):
"""
        Folder data structure. Note that a folder is a purely logical structure, so it does not occupy physical disk space.
        :param folder_name: folder name
        :param parent_folder: parent node. Every parent node is a folder; note that the root node (root folder) has no parent, in which case this attribute is None
        :param child_nodes: child nodes. There may be several, and each may be either a folder or a file
"""
self.id = Tool.uniqueNum()
self.folder_name = folder_name
self.parent_node = parent_folder
self.child_nodes = child_nodes
def __str__(self):
return self.folder_name
class UserFile:
def __init__(self, file_name: str, parent_folder, data, authority: FileAuthority = FileAuthority.Default):
"""
        File data structure
        :param file_name: file name
        :param parent_folder: parent folder node; every file must have a value for this attribute
        :param data: file contents
        :param authority: file permissions
"""
self.id = Tool.uniqueNum()
self.file_name = file_name
self.parent_node = parent_folder
self.data = data
self.size = math.ceil(len(data) / 10)
"""
        size is the amount of disk space the file occupies. For convenience of the front end the unit is 10 bytes (i.e. one disk block holds 10 bytes), and one English character takes one byte. Remember to update this value whenever the file content changes.
        e.g. 1: "yes" has three characters, i.e. three bytes; dividing by 10 and rounding up, the file occupies 1 block of disk space.
        e.g. 2: "I am sure I am very handsome." has 29 characters (punctuation and spaces included), so the file occupies 3 blocks.
"""
        self.disk_position = -1  # position of the file on the disk
self.authority = authority
def __str__(self):
return self.file_name
def contiguousAllocation(file_to_allocated: UserFile, Disk: list):
"""
    Contiguous allocation of a file on the disk
    :param Disk: the file-system disk
    :param file_to_allocated: the file that needs disk space allocated
    :return: the disk position (starting index) where the file is stored; -1 means there is not enough disk space
"""
start_index = -1
space_counter = 0
for i in range(len(Disk)):
        # The main idea is to find a run of contiguous free blocks large enough for the file,
        # scanning from the head of the disk, which helps reduce external fragmentation.
if Disk[i] == -1:
if start_index == -1:
start_index = i
space_counter += 1
if space_counter >= file_to_allocated.size:
for j in range(start_index, start_index + file_to_allocated.size):
Disk[j] = file_to_allocated.id
return start_index
else:
start_index = -1
space_counter = 0
return -1
def writeDiskToTXT():
    # TODO: write the result out to a TXT file?
pass
def creatFileOrFolder(is_folder: bool, name: str, parent_folder: Folder, file_table: list, Disk: list, data,
child_nodes=None):
"""
    Create a file or a folder
    :param Disk: the file-system disk
    :param file_table: the file table
    :param is_folder: whether the object to create is a folder
    :param name: file/folder name
    :param parent_folder: parent folder object
    :param child_nodes: folder contents
    :param data: file data
    :return: the file/folder object; -1 if the name already exists under the same path, -2 on a disk allocation error
"""
if child_nodes is None:
child_nodes = []
if is_folder:
if parent_folder is not None:
for node in parent_folder.child_nodes:
if str(node) == name and isinstance(node, Folder):
return -1
new_folder = Folder(name, parent_folder, child_nodes)
if not name == 'root':
parent_folder.child_nodes.append(new_folder)
return new_folder
else:
for node in parent_folder.child_nodes:
if str(node) == name and isinstance(node, UserFile):
                # duplicate name under the same path
return -1
new_file = UserFile(name, parent_folder, data)
file_table.append(new_file)
new_file.disk_position = contiguousAllocation(new_file, Disk)
if new_file.disk_position == -1:
            print('Disk space allocation error')  # TODO: proper exception handling
return -2
parent_folder.child_nodes.append(new_file)
return new_file
def getPath(is_folder: bool, target_folder: Folder = None, target_file: UserFile = None):
"""
    Get the path of a file/folder recursively
    :param is_folder: whether the object whose path is requested is a folder
    :param target_folder: target folder
    :param target_file: target file
    :return: the path of the target object
"""
if is_folder and target_folder.folder_name == 'root':
return '/root'
if not is_folder:
path_now = target_file.file_name
parent_node = target_file.parent_node
else:
path_now = target_folder.folder_name
parent_node = target_folder.parent_node
path = getPath(True, target_folder=parent_node) + '/' + path_now
return path
"""
A path has the form: /root/aaa/w
The path above denotes the file/folder named w inside folder aaa under the root folder
"""
def pathToObj(path: str, IR: dict, file_table: list, Disk: list, root: Folder):
"""
    Find a file/folder from a path
    :param root: root node of the file system
    :param Disk: the file-system disk
    :param file_table: the file table
    :param IR: the instruction to execute directly
    :param path: path string
    :return: the file/folder object; returns 0 if the lookup fails
"""
path = path.replace(" ", "")
path_node_list = path.split('/')
if path_node_list[0] == "":
path_node_list = path_node_list[1:]
if len(path_node_list) < 1 or path_node_list[0] != 'root':
return 0
    # Start from root
    parent_node = root
    # Refreshed with the current node's children at every step
    child_node_names = list(map(str, parent_node.child_nodes))
for i in range(1, len(path_node_list)):
if i == len(path_node_list) - 1:
            # Plain lookup of the file/directory tree
if IR is None:
return parent_node.child_nodes[child_node_names.index(path_node_list[i])]
elif IR["operator"] == "createFile":
return creatFileOrFolder(False, path_node_list[i], parent_node, data=IR['content'], Disk=Disk,
file_table=file_table)
elif IR["operator"] == "createFolder":
return creatFileOrFolder(True, path_node_list[i], parent_node, data=None, Disk=Disk,
file_table=file_table)
else:
                # Handle the case where the target does not exist
if not path_node_list[i] in child_node_names:
return 0
target = parent_node.child_nodes[child_node_names.index(path_node_list[i])]
                # Read a file
                if IR["operator"] == "readFile":
                    # Not enough permission
                    if target.authority == FileAuthority.WriteOnly:
                        return -1
                    # Read the data
                    else:
                        return target.data
                # Write a file
                elif IR["operator"] == "writeFile":
                    # Not enough permission
                    if target.authority == FileAuthority.ReadOnly:
                        return -1
                    # Write the data
                    else:
clearFileInDisk(target, Disk)
target.data = IR["content"]
target.size = math.ceil(len(IR["content"]) / 10)
target.disk_position = contiguousAllocation(target, Disk)
return 1
elif IR["operator"] == "delFile":
if isinstance(target, Folder):
return 0
else:
clearFileInDisk(target, Disk)
file_table.remove(target)
target.parent_node.child_nodes.remove(target)
return 1
elif IR["operator"] == "renameFile":
if IR["newName"] in child_node_names:
                        print('The new name conflicts with an existing name under the same path')
return -1
else:
target.file_name = IR["newName"]
return 1
elif IR["operator"] == "renameFolder":
if IR["newName"] in child_node_names:
                        print('The new name conflicts with an existing name under the same path')
return -1
else:
target.folder_name = IR["newName"]
return 1
elif path_node_list[i] in child_node_names:
parent_node = parent_node.child_nodes[child_node_names.index(path_node_list[i])]
child_node_names = list(map(str, parent_node.child_nodes))
else:
return 0
def clearFileInDisk(target_file: UserFile, Disk: list):
"""
    Delete the file's information from the physical disk
    :param Disk: the file-system disk
    :param target_file: the file to delete
"""
for i in range(target_file.disk_position, target_file.disk_position + target_file.size):
Disk[i] = -1
def findFileById(file_id: int, file_table: list):
"""
    Return the file object for a given file id
    This function is typically used when the disk indexes a file: in this project the disk only needs to read the file identifier (id) to locate the file object
    :param file_table: the file table
    :param file_id: file identifier
    :return: the file object; -1 means no file with that identifier was found
"""
for f in file_table:
if f.id == file_id:
return f
return -1
def findObjByName(name: str, parent_node):
"""
    Recursively look up any file-system object other than the root folder
    :param name: file/folder name
    :param parent_node: used for the recursion; pass the root file-system node when calling
    :return: None if no such object exists, otherwise the file-system object
"""
if not parent_node.child_nodes:
return None
child_node_names = list(map(str, parent_node.child_nodes))
if name in child_node_names:
return parent_node.child_nodes[child_node_names.index(name)]
else:
for child_node in parent_node.child_nodes:
if isinstance(child_node, Folder):
result = findObjByName(name, child_node)
if result is not None:
return result
return None
def renameFolder(old_name: str, new_name: str, root: Folder):
"""
    Rename a folder
    :param root: root node of the file system
    :param old_name: old name
    :param new_name: new name
    :return: 0 if the folder cannot be found, -1 if the new name already exists, 1 on success
"""
folder_obj = findObjByName(old_name, root)
if folder_obj is None:
        print('Folder not found')  # TODO: proper exception handling
return 0
child_node_names = list(map(str, folder_obj.parent_node.child_nodes))
if new_name in child_node_names:
        print('The new name conflicts with an existing name under the same path')
return -1
else:
folder_obj.folder_name = new_name
return 1
def renameFile(old_name: str, new_name: str, root: Folder):
"""
    Rename a file
    :param root: root node of the file system
    :param old_name: old name
    :param new_name: new name
    :return: 0 if the file cannot be found, -1 if the new name already exists, 1 on success
"""
file_obj = findObjByName(old_name, root)
if file_obj is None:
        print('File not found')  # TODO: proper exception handling
return 0
child_node_names = list(map(str, file_obj.parent_node.child_nodes))
if new_name in child_node_names:
        print('The new name conflicts with an existing name under the same path')
return -1
else:
file_obj.file_name = new_name
return 1
def writeFile(file_name: str, content: str, root: Folder, Disk: list):
"""
    Write file contents (the previous contents are discarded)
    :param Disk: the file-system disk
    :param root: root node of the file system
    :param file_name: file name
    :param content: new content
    :return: 1 if the write succeeded, 0 if it failed, -1 if the file has no write permission
"""
target_file = findObjByName(file_name, root)
if target_file is None or isinstance(target_file, Folder):
        print('The file does not exist')  # TODO: proper exception handling
return 0
assert isinstance(target_file, UserFile)
if target_file.authority == FileAuthority.ReadOnly:
        print('Insufficient file permissions')  # TODO: proper exception handling
return -1
else:
clearFileInDisk(target_file, Disk)
target_file.data = content
target_file.size = math.ceil(len(content) / 10)
target_file.disk_position = contiguousAllocation(target_file, Disk)
return 1
def readFile(file_name: str, root: Folder):
"""
    Read a file
    :param root: root node of the file system
    :param file_name: file name
    :return: the file data string; 0 means the file does not exist, -1 means insufficient file permissions
"""
target_file = findObjByName(file_name, root)
if target_file is None or isinstance(target_file, Folder):
        print('The file does not exist')  # TODO: proper exception handling
return 0
assert isinstance(target_file, UserFile)
if target_file.authority == FileAuthority.WriteOnly:
        print('Insufficient file permissions')  # TODO: proper exception handling
return -1
else:
return target_file.data
def delFile(file_name: str, file_table: list, root: Folder, Disk: list):
"""
    Completely delete a file, including its disk blocks and its file-table record
    :param Disk: the file-system disk
    :param root: root node of the file system
    :param file_table: the file table
    :param file_name: file name
    :return: 0 if the file cannot be found, 1 if the deletion succeeded
"""
target_file = findObjByName(file_name, root)
if target_file is None or isinstance(target_file, Folder):
        print('The file does not exist')  # TODO: proper exception handling
return 0
assert isinstance(target_file, UserFile)
clearFileInDisk(target_file, Disk)
file_table.remove(target_file)
target_file.parent_node.child_nodes.remove(target_file)
return 1
def redirectFile(file_name: str, target_folder_name: str, root: Folder):
"""
    Redirect a file's path without deleting the file; combined with other operations this can implement copy, cut and similar features
    :param root: root node of the file system
    :param file_name: name of the file to redirect
    :param target_folder_name: name of the destination folder
    :return: 0 if the file/folder cannot be found, 1 if the redirection succeeded
"""
target_file = findObjByName(file_name, root)
if target_file is None or isinstance(target_file, Folder):
        print('The file does not exist')  # TODO: proper exception handling
return 0
target_folder = findObjByName(target_folder_name, root)
if target_folder is None or isinstance(target_folder, UserFile):
        print('The folder does not exist')  # TODO: proper exception handling
return 0
assert isinstance(target_file, UserFile)
assert isinstance(target_folder, Folder)
target_file.parent_node.child_nodes.remove(target_file)
target_file.parent_node = target_folder
target_folder.child_nodes.append(target_file)
return 1
def initFileSystem(DiskSize: int = 1000, state: bool = False):
"""
    Initialise the file system
    :param DiskSize: size of the file-system disk
    :param state: state flag
    :return: state flag, root node of the file tree, file-system disk, file table
"""
    disk = [-1 for _ in range(DiskSize)]  # the disk; stores file ids
    f_table = []  # the file table; stores every file that has been created
root_node = creatFileOrFolder(True, 'root', None, data=None, Disk=disk, file_table=f_table)
if not state:
state = True
default_folder_1 = creatFileOrFolder(True, 'default_folder_1', root_node, data=None, Disk=disk,
file_table=f_table)
default_folder_2 = creatFileOrFolder(True, 'default_folder_2', root_node, data=None, Disk=disk,
file_table=f_table)
default_folder_3 = creatFileOrFolder(True, 'default_folder_3', root_node, data=None, Disk=disk,
file_table=f_table)
creatFileOrFolder(False, 'test', default_folder_1, data='This is a file for test', Disk=disk,
file_table=f_table)
root_node.child_nodes = [default_folder_1, default_folder_2, default_folder_3]
return state, root_node, disk, f_table
def FileTree(parent_node):
    # It is a folder (directory)
if isinstance(parent_node, Folder):
data = []
child_nodes = list(parent_node.child_nodes)
for child in child_nodes:
data.append(FileTree(child))
return {parent_node.__str__(): data}
elif isinstance(parent_node, UserFile):
return {parent_node.__str__(): 0}
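if __name__ == '__main__':
    # --- Hedged usage sketch (added for illustration; not part of the original module) ---
    # Exercises the helpers above using the demo tree built by initFileSystem();
    # the file name 'notes' and its contents are arbitrary example values.
    state, root_node, disk, f_table = initFileSystem()
    creatFileOrFolder(False, 'notes', findObjByName('default_folder_2', root_node),
                      file_table=f_table, Disk=disk, data='hello file system')
    print(readFile('notes', root_node))   # -> 'hello file system'
    writeFile('notes', 'some longer replacement content', root_node, disk)
    print(readFile('notes', root_node))   # -> 'some longer replacement content'
    print(FileTree(root_node))            # nested dict view of the directory tree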
| 2.40625 | 2 |
bin/karyon.py | Gabaldonlab/karyon | 0 | 12799907 | <reponame>Gabaldonlab/karyon
desc="""Karyon pipeline.
More info at: https://github.com/Gabaldonlab/karyon
"""
epilog="""Author: <NAME> (<EMAIL>) Worcester MA, 04/Nov/2021"""
import sys, os, re
import argparse
import psutil
import pysam
import pandas as pd
import string
import random
from spades_recipee import call_SPAdes
from prepare_libraries import preparation
from trimming_libraries import trimming
from varcall_recipee import var_call
from datetime import datetime
parser = argparse.ArgumentParser(description=desc, epilog=epilog, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-d', '--output_directory', required=True, help='Directory where all the output files will be generated. Required.')
parser.add_argument('-o', '--output_name', default=False, help='Prefix name for all the output files. If omitted, it will generate a random string. This random string will be the same as the identifier for intermediate files.')
parser.add_argument('-l', '--libraries', required=True, nargs='+', help="Fastq libraries to use for assembly and variant calling. Unsuitable libraries for any of the steps will be ignored. Required.")
parser.add_argument('-F', '--favourite', default=False, help='Sets one library as the prefered one for the variant calling analysis. Otherwise, karyon will select the largest library for performing the variant calling protocol.')
parser.add_argument('-c', '--configuration', default=False, help="Configuration file. By default will use ./configuration.txt as the configuration file.")
parser.add_argument('-g', '--genome_assembler', default="dipspades", choices=['dipspades','dipSPAdes','spades', 'SPAdes','platanus','Platanus', 'soapdenovo', 'SOAPdenovo'], help="Genome assembly software to use. By default it will use dipSPAdes. Options are: dipSPADEs, SPAdes, SOAPdenovo or Platanus.")
parser.add_argument('-T', '--no_trimming', action='store_true', default=False, help='If this tag is active, the program will skip the trimming step.')
parser.add_argument('-A', '--no_assembly', default=False, help='If this tag is active it will skip the assembly step. It requires a reference assembly.')
parser.add_argument('-R', '--no_reduction', action='store_true', default=False, help='If this tag is active, the program will not launch the reduction step of redundans. Remember that the step is used to perform many downstream analyses. If you skip it, the analyses may not make much sense.')
parser.add_argument('-V', '--no_varcall', nargs='+', default=False, help="If this tag is active, the program will skip the variant calling step. Many downstream analyses require this and won't be possible if you skip it.")
parser.add_argument('-B', '--no_busco', default=False, action='store_true', help='If this tag is active, BUSCO analysis will be ommited.')
parser.add_argument('-P', '--no_plot', action='store_true', default=False, help="If this tag is active, the program will omit the plots at the end of the the variant calling step.")
parser.add_argument('-w', '--window_size', default=1000, help='Window size used for some of the analyses. Default is 1000 (1Kb)')
parser.add_argument('-x', '--max_scaf2plot', default=20, help="Maximum number of scaffolds to plot for scaffold-specific plots. Default is 20.")
parser.add_argument('-s', '--scafminsize', default=False, help="Will ignore scaffolds with length below the given threshold")
parser.add_argument('-S', '--scafmaxsize', default=False, help="Will ignore scaffolds with length above the given threshold")
parser.add_argument('-a', '--try_again', default=False, action='store_true', help='Use previous karyon results and skips already computed steps.')
parser.add_argument('-K', '--keep_tmp', action='store_true', default=False, help='If this tag is active, the program will not remove all intermediary files in the folder tmp after it has finished')
parser.add_argument('-i', '--job_id', default=False, help='Identifier of the intermediate files generated by the different programs. If false, the program will assign a name consisting of a string of 6 random alphanumeric characters.')
parser.add_argument('-m', '--memory_limit', default=False, help='Memory limit for all the programs set in Gb. By default it will try to use all memory available.')
parser.add_argument('-M', '--memory_fraction', default=1, help='Proportion of total memory to use by all programs. By default it will use all available memory (default=1), but it may be useful to reduce the percent to avoid freezing other tasks of the computer during peaks.')
parser.add_argument('-n', '--nodes', default=False, help='Number of computation nodes to use. If set a number higher than total, it will use total. If set a number lower than total, it will calculate memory usage based on the fraction of nodes set with respect to total existing nodes.')
args = parser.parse_args()
def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
###Parses the config file in order to check the parameters of all the programs.###
def parse_config(config):
config_dict = {}
prev = 0
for line in open(config):
if line[0] == "#": continue
elif line[0] == "+":
prev = line[1:-1]
config_dict[prev] = ["","",""]
elif line[0] == "@":
if config_dict[prev][0] != "": continue
config_dict[prev][0] = line[1:-1]
elif line[0] == ">":
config_dict[prev][1] = config_dict[prev][1] + line[1:-1] + " "
elif line[0] == "?":
if config_dict[prev][2] != "": continue
config_dict[prev][2] = line[1:-1] + " "
return config_dict
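# Illustrative sketch of the configuration syntax implied by parse_config() above
# (the real configuration.txt shipped with karyon may differ): '+' opens a tool
# section, '@' gives its path, '>' lines accumulate extra parameters and '?' adds
# one optional field, e.g.
#
#   +SPAdes
#   @/opt/SPAdes/bin/
#   >--careful
#
# which parse_config() would return as {'SPAdes': ['/opt/SPAdes/bin/', '--careful ', '']}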
###Selects the main library to use. This is set to accelerate the assembly process and improve the results
def select_champion(fastq):
parse_dict = {}
for i in open(fastq):
chunk = i.split()
if chunk[5] == "2": continue
else:
parse_dict[chunk[0]] = chunk[1:]
champion=[0,'']
if args.favourite == False:
for element in parse_dict:
if int(parse_dict[element][2]) > champion[0]:
champion = [int(parse_dict[element][2]), element]
else:
champion = [0,args.favourite]
return champion
def exit_program(message):
sys.stderr.write("\n%s\n\n"%message)
sys.exit(1)
def main():
###Defines the location of configuration.txt if setting by default###
config_path = args.configuration
if not args.configuration:
selfpath = os.path.dirname(os.path.realpath(sys.argv[0]))
config_path = selfpath[:selfpath.rfind('/')]
config_path = selfpath[:selfpath.rfind('/')]+"/configuration.txt"
true_output = os.path.abspath(args.output_directory)
if true_output[-1] != "/":
true_output=true_output+"/"
    print("Output directory:", true_output)
###Sets RAM usage options###
total_nodes = n_nodes = psutil.cpu_count()
if args.nodes and int(args.nodes) < total_nodes:
n_nodes = int(args.nodes)
if not args.memory_limit:
ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction))
if n_nodes < total_nodes:
ram_limit = int(psutil.virtual_memory()[0]/1000000000 * float(args.memory_fraction) * (float(n_nodes)/total_nodes))
else:
        ram_limit = int(float(args.memory_limit) * float(args.memory_fraction))
counter = int(args.max_scaf2plot)
###Sets the job ID and the prefix name for the job. If job ID is not user defined, it produces a random 6 character string. If prefix name is not defined, it uses job ID###
job_ID = args.job_id if args.job_id else id_generator()
name = args.output_name if args.output_name else job_ID
print ('###############')
print ('Config. path: '+str(config_path))
print ("RAM Limit: "+str(ram_limit)+"Gb")
print ("Nodes: "+str(n_nodes))
print ("Job ID: "+str(job_ID))
print ("Job name: "+str(name))
print ('###############')
config_dict = parse_config(config_path)
home = config_dict["karyon"][0]
if home[-1] != "/": home = home + "/"
prepared_libs = home + "tmp/" + job_ID + "/prepared_libraries.txt"
path_tmp_jobid = os.path.join(home, "tmp", job_ID)
if not os.path.exists(os.path.join(home, "tmp")):
os.mkdir(os.path.join(home, "tmp"))
prepared_libs = os.path.join(path_tmp_jobid, "prepared_libraries.txt")
###Checks that the output is not a file. If it does not exist, it creates it.###
if not os.path.isdir(args.output_directory):
        if os.path.isfile(args.output_directory):
            message = "Path is a file"  # Should raise an exception and exit the program
exit_program(message)
else:
os.mkdir(args.output_directory)
elif args.try_again == False:
os.rmdir(args.output_directory)
os.mkdir(args.output_directory)
os.mkdir(path_tmp_jobid)
from karyonplots import katplot, allplots
katplot("", "", config_dict["KAT"][0], "")
###Parses the libraries and checks their parameters for downstream analyses. Also performs trimming.###
print ('###############')
print ('Preparing libraries')
print ('###############')
libs = ''
for i in args.libraries:
libs = libs + " " + os.path.abspath(i)
preparation(libs.split(), 10000, prepared_libs)
libs_parsed = ''
if not args.no_trimming:
print ('###############')
print ('Trimmomatic')
print ('###############')
if config_dict['trimmomatic'][1] == '':
trimmo_commands = ''
else:
trimmo_commands = " -c " + config_dict['trimmomatic'][1]
trimming(prepared_libs, config_dict["trimmomatic"][0], trimmo_commands, home + "tmp/"+job_ID+"/trimmomatic.job", true_output, False)
os.system("bash " + home + "tmp/"+job_ID+"/trimmomatic.job")
for i in os.listdir(args.output_directory):
if i.find("parsed_") > -1:
libs_parsed = libs_parsed + " " + true_output + i
preparation(libs_parsed.split(), 10000, prepared_libs)
###Parsing library names, including putting absolute paths #
libstring = ''
backstring = ''
for i in open(prepared_libs):
chunk = i.split()
if chunk[5] == "1":
libstring = libstring + os.path.abspath(chunk[0]) + " " + os.path.abspath(chunk[6]) + " "
elif chunk[5] == "2": continue
else: backstring = backstring + os.path.abspath(chunk[0]) + " "
libstring = libstring + backstring
champion = select_champion(prepared_libs)
print ('###############')
print ('Params')
print ('###############')
print (args.window_size)
print (true_output+name+".raw.vcf")
print (true_output+"redundans_output/scaffolds.filled.fa")
print (true_output+name+".sorted.bam")
print (true_output+name+".mpileup")
print (champion[-1])
print (config_dict['nQuire'][0])
print (config_dict["KAT"][0])
print (home + "tmp/"+job_ID+"/")
print (true_output)
print (counter)
print ('###############')
###Calling spades_recipee.py to generate the assembly job. In the future it should use config file to select the assembly program to use###
karyonjobfile = open(true_output+name+"_karyon.job", 'a')
karyonjobfile.write("\n")
switch = False
if args.no_assembly == False:
if args.genome_assembler == "dipspades" or args.genome_assembler == 'dipSPAdes':
call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], False, ram_limit, n_nodes)
assembly = true_output+"dipspades/consensus_contigs.fasta"
no_red_assembly = true_output+"dipspades/consensus_contigs.fasta"
elif args.genome_assembler == "spades" or args.genome_assembler == 'SPAdes':
call_SPAdes(prepared_libs, config_dict['SPAdes'][0], true_output, name, config_dict['SPAdes'][1], True, ram_limit, n_nodes)
assembly = true_output+"spades/scaffolds.fasta"
no_red_assembly = true_output+"spades/scaffolds.fasta"
elif args.genome_assembler == "platanus" or args.genome_assembler == "Platanus":
if args.no_reduction == True:
karyonjobfile.write("python2 "+config_dict['redundans'][0]+"redundans.py"+ " -o "+true_output+"redundans_output -i "+libstring+" -t "+str(n_nodes)+" "+config_dict["redundans"][1] + " --noreduction")
no_red_assembly = true_output+"redundans_output/scaffolds.filled.fa"
else:
karyonjobfile.write("python2 "+config_dict['redundans'][0]+"redundans.py"+ " -o "+true_output+"redundans_output -i "+libstring+" -t "+str(n_nodes)+" "+config_dict["redundans"][1])
no_red_assembly = true_output+"redundans_output/contigs.fa"
assembly = true_output+"redundans_output/scaffolds.filled.fa"
switch = True
elif args.genome_assembler == "soapdenovo" or args.genome_assembler == "SOAPdenovo":
from soap_recipee import soap_recipee
soap_recipee(prepared_libs, name, true_output+"soapdenovo/", config_dict['SOAPdeNovo'][1], karyonjobfile, config_dict['SOAPdeNovo'][0])
print ("python3 "+os.path.dirname(__file__)+"/soap_recipee.py -r "+prepared_libs+" -n "+name+" -o "+true_output+"soapdenovo "+ "-j "+true_output+name+"_karyon.job")
os.system("python3 "+os.path.dirname(__file__)+"/soap_recipee.py -r "+prepared_libs+" -n "+name+" -o "+true_output+"soapdenovo "+ "-j "+true_output+name+"_karyon.job")
assembly = true_output+"soapdenovo/"+name+".scafSeq"
else:
pass
else:
no_red_assembly = args.no_assembly
assembly = args.no_assembly
if args.no_reduction == False and switch == False:
karyonjobfile.write("python2 "+config_dict['redundans'][0]+"redundans.py"+" -f "+ assembly + " -o "+true_output+"redundans_output -i "+libstring+" -t "+str(n_nodes)+" "+config_dict["redundans"][1])
reduced_assembly = true_output+"redundans_output/scaffolds.filled.fa"
elif args.no_reduction == False and switch == True:
reduced_assembly = assembly
else:
reduced_assembly = assembly
busco_options = ""
if args.no_busco == False:
for i in config_dict['BUSCO'][1:]:
busco_options = busco_options + " " + i[:-1]
karyonjobfile.write("\n")
karyonjobfile.write(config_dict['BUSCO'][0]+"busco " + "-i " + reduced_assembly + " -o " + name + busco_options + "\n")
karyonjobfile.write("mv " + name + " " + true_output+name+"_busco\n")
#karyonjobfile.write("cp " + true_output+name+"_busco/short_summary*.txt " + true_output+name+".busco\n")
karyonjobfile.write("rm -r busco_downloads\n")
if args.no_reduction == False:
karyonjobfile.write("\n")
karyonjobfile.write(config_dict['BUSCO'][0]+"busco " + "-i " + no_red_assembly + " -o " + name+"_no_reduc" + busco_options + "\n")
karyonjobfile.write("mv " + name + "_no_reduc " + true_output+name+"_no_reduc_busco\n")
#karyonjobfile.write("cp " + true_output+name+"_busco/short_summary.specific.*.txt " + true_output+name+"_no_reduc.busco\n")
karyonjobfile.write("rm -r busco_downloads\n")
karyonjobfile.close()
#5) Create job file that calls all the programs
if args.no_varcall == False:
var_call(prepared_libs, config_dict, true_output, name, args.favourite, home, str(ram_limit), str(n_nodes), reduced_assembly, no_red_assembly, args.no_reduction)
os.system ("bash "+true_output+name+"_karyon.job")
counter = int(args.max_scaf2plot)
if args.no_busco != True:
from shutil import copyfile
mybusco = False
for i in os.listdir(true_output+name+"_busco"):
if i.find("specific") > -1:
mybusco = i
break
if i.find('short_summary') > -1 and mybusco != False:
if i.find("specific") == -1:
mybusco = i
if mybusco != False:
copyfile(true_output+name+"_busco/"+mybusco, true_output+name+".busco")
mybusco = true_output+name+".busco"
if args.no_reduction != True:
noredubusco = False
for e in os.listdir(true_output+name+"_no_reduc_busco"):
if e.find("specific") > -1:
noredubusco = e
break
if e.find('short_summary') > -1 and noredubusco != False:
if e.find("specific") == -1:
noredubusco = e
if noredubusco != False:
copyfile(true_output+name+"_no_reduc_busco/"+noredubusco, true_output+name+"_no_reduc.busco")
noredubusco = true_output+name+"_no_reduc.busco"
def parse_no_varcall(no_varcall):
vcf, bam, mpileup = '', '', ''
for i in no_varcall:
if i[-4:] == ".bam":
bam = i
if os.path.isfile(i+".bam") == True:
bam = i+".bam"
if os.path.isfile(i+".sorted.bam") == True:
bam = i+".sorted.bam"
if i.find("pileup") > -1:
mpileup = i
if os.path.isfile(i+".mpileup") == True:
mpileup = i+".mpileup"
if os.path.isfile(i+".pileup") == True:
mpileup = i+".pileup"
if i[-4:] == ".vcf":
vcf = i
if os.path.isfile(i+".vcf") == True:
vcf = i+".vcf"
if os.path.isfile(i+"raw.vcf") == True:
vcf = i+"raw.vcf"
return vcf, bam, mpileup
os.mkdir(true_output+"Report/")
if args.no_varcall == False:
from karyonplots import katplot, allplots
from report import report, ploidy_veredict
katplot(reduced_assembly, champion[1], config_dict["KAT"][0], true_output+"Report/")
df = allplots(int(args.window_size),
true_output+name+".raw.vcf",
reduced_assembly,
true_output+name+".sorted.bam",
true_output+name+".mpileup",
os.path.abspath(champion[-1]),
config_dict['nQuire'][0],
config_dict["KAT"][0],
home + "tmp/"+job_ID+"/",
true_output+"Report/",
counter,
job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot)
else:
from karyonplots import katplot, allplots
katplot(reduced_assembly, champion[1], config_dict["KAT"][0], true_output+"Report/")
vcf, bam, mpileup = parse_no_varcall(args.no_varcall)
df = allplots(int(args.window_size),
vcf,
reduced_assembly,
bam,
mpileup,
os.path.abspath(champion[-1]),
config_dict['nQuire'][0],
config_dict["KAT"][0],
home + "tmp/"+job_ID+"/",
true_output,
counter,
                      job_ID, name, args.scafminsize, args.scafmaxsize, args.no_plot)
df2 = ploidy_veredict(df, true_output, name, args.window_size)
report(true_output, name, df2, args.no_reduction, no_red_assembly, args.window_size, mybusco, noredubusco)
df2.to_csv(true_output+"Report/report"+name+".csv", index=False)
###We clean the tmp directory###
if args.keep_tmp == True:
existence = open(home + "tmp/" + job_ID + '/_keep_existing_', 'w')
existence.close()
print ("Now I'm cleaning tmp...")
if args.keep_tmp == True:
print ("...but keeping what you told me...")
for e in os.listdir(home + "tmp/"):
for i in os.listdir(home + "tmp/"+e):
if '_keep_existing_' in os.listdir(home + "tmp/"+e): continue
else:
os.remove(home + "tmp/"+e+"/"+i)
if '_keep_existing_' in os.listdir(home + "tmp/"+e): continue
else: os.rmdir(home + "tmp/"+e)
if args.keep_tmp == True:
        print ("... tmp files have been kept")
else:
print ("... removed tmp files!")
if __name__ == '__main__':
t0 = datetime.now()
try:
main()
except KeyboardInterrupt:
sys.stderr.write("\n Ctrl-C pressed! \n")
dt = datetime.now()-t0
sys.stderr.write("#Time elapsed: %s\n" % dt)
| 2.171875 | 2 |
setup.py | Johannes-Larsson/codeswapbot | 0 | 12799908 | <gh_stars>0
import sqlite3
db = sqlite3.connect('data.db')
c=db.cursor()
c.execute('create table users (id integer primary key, name text, partner text, recieve_date date, partnered_date date)')
db.commit()
db.close()
| 2.859375 | 3 |
misc/update_version.py | andyjgf/libcbor | 283 | 12799909 | import sys, re
from datetime import date
version = sys.argv[1]
release_date = date.today().strftime('%Y-%m-%d')
major, minor, patch = version.split('.')
def replace(file_path, pattern, replacement):
updated = re.sub(pattern, replacement, open(file_path).read())
with open(file_path, 'w') as f:
f.write(updated)
# Update changelog
SEP = '---------------------'
NEXT = f'Next\n{SEP}'
changelog_header = f'{NEXT}\n\n{version} ({release_date})\n{SEP}'
replace('CHANGELOG.md', NEXT, changelog_header)
# Update Doxyfile
DOXY_VERSION = 'PROJECT_NUMBER = '
replace('Doxyfile', DOXY_VERSION + '.*', DOXY_VERSION + version)
# Update CMakeLists.txt
replace('CMakeLists.txt',
'''SET\\(CBOR_VERSION_MAJOR "0"\\)
SET\\(CBOR_VERSION_MINOR "7"\\)
SET\\(CBOR_VERSION_PATCH "0"\\)''',
f'''SET(CBOR_VERSION_MAJOR "{major}")
SET(CBOR_VERSION_MINOR "{minor}")
SET(CBOR_VERSION_PATCH "{patch}")''')
# Update Sphinx
replace('doc/source/conf.py',
"""version = '.*'
release = '.*'""",
f"""version = '{major}.{minor}'
release = '{major}.{minor}.{patch}'""")
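# Example invocation (illustrative): run from the libcbor repository root as
#   python misc/update_version.py 0.8.0
# which rewrites CHANGELOG.md, Doxyfile, CMakeLists.txt and doc/source/conf.py in place.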
| 2.515625 | 3 |
OpenGLCffi/GL/EXT/SGIX/reference_plane.py | cydenix/OpenGLCffi | 0 | 12799910 | from OpenGLCffi.GL import params
@params(api='gl', prms=['equation'])
def glReferencePlaneSGIX(equation):
pass
| 1.59375 | 2 |
src/models/operation.py | takedarts/DenseResNet | 0 | 12799911 | from .modules import DropBlock, SEModule, SKConv2d, BlurPool2d, SplitAttentionModule
import torch.nn as nn
import collections
class BasicOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=3, padding=1,
stride=stride, groups=groups, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, out_channels, kernel_size=3, padding=1,
stride=1, groups=1, bias=False)),
('norm2', normalization(out_channels)),
('drop2', None if not dropblock else DropBlock()),
] if m[1] is not None))
class BottleneckOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck * groups)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=stride, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, channels, kernel_size=3, padding=1,
stride=1, groups=groups, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class SelectedKernelOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck * groups)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=stride, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', SKConv2d(
channels, channels, kernel_size=3, padding=1,
stride=1, radix=radix, groups=groups)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class PreActBasicOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=3, padding=1,
stride=stride, groups=groups, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, out_channels, kernel_size=3, padding=1,
stride=1, groups=1, bias=False)),
] if m[1] is not None))
class SingleActBasicOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=3, padding=1,
stride=stride, groups=groups, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, out_channels, kernel_size=3, padding=1,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class SingleActBottleneckOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck * groups)
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, channels, kernel_size=3, padding=1,
stride=stride, groups=groups, bias=False)),
('norm3', normalization(channels)),
('drop3', None if not dropblock else DropBlock()),
('act3', activation(inplace=True)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm4', normalization(out_channels)),
('drop4', None if not dropblock else DropBlock()),
] if m[1] is not None))
class TweakedBottleneckOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, channels, kernel_size=3, padding=1,
stride=1, groups=groups, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
            ('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class TweakedSlectedKernelOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', SKConv2d(
channels, channels, kernel_size=3, padding=1,
stride=1, radix=radix, groups=groups)),
('drop2', None if not dropblock else DropBlock()),
('pool', None if stride == 1 else BlurPool2d(channels, stride=stride)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class MobileNetOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, kernel, stride, expansion,
normalization, activation, dropblock,
seoperation, sereduction, sesigmoid, **kwargs):
channels = int(in_channels * expansion)
modules = []
if in_channels != channels:
modules.extend([
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
])
modules.extend([
('conv2', nn.Conv2d(
channels, channels, kernel_size=kernel, padding=kernel // 2,
stride=stride, groups=channels, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('semodule', None if not seoperation else SEModule(
channels, reduction=sereduction, activation=nn.ReLU, sigmoid=sesigmoid)),
('act2', activation(inplace=True)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
])
super().__init__(collections.OrderedDict(m for m in modules if m[1] is not None))
class SplitAttentionOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, radix, groups, bottleneck,
normalization, activation, dropblock, **kwargs):
channels = round(out_channels / bottleneck)
super().__init__(collections.OrderedDict(m for m in [
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm1', normalization(channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, channels * radix, kernel_size=3, padding=1,
stride=1, groups=groups * radix, bias=False)),
('norm2', normalization(channels * radix)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('attention', SplitAttentionModule(
channels, radix=radix, groups=groups,
normalization=normalization, activation=activation)),
('downsample', None if stride == 1 else nn.AvgPool2d(
kernel_size=3, stride=stride, padding=1)),
('conv3', nn.Conv2d(
channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm3', normalization(out_channels)),
('drop3', None if not dropblock else DropBlock()),
] if m[1] is not None))
class DenseNetOperation(nn.Sequential):
def __init__(self, in_channels, out_channels, stride, growth, expansion,
normalization, activation, dropblock, **kwargs):
if stride != 1:
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('act1', activation(inplace=True)),
('conv1', nn.Conv2d(
in_channels, out_channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('pool1', nn.AvgPool2d(kernel_size=2, stride=stride)),
] if m[1] is not None))
else:
channels = growth * expansion
super().__init__(collections.OrderedDict(m for m in [
('norm1', normalization(in_channels)),
('drop1', None if not dropblock else DropBlock()),
('act1', activation(inplace=True)),
('conv1', nn.Conv2d(
in_channels, channels, kernel_size=1, padding=0,
stride=1, groups=1, bias=False)),
('norm2', normalization(channels)),
('drop2', None if not dropblock else DropBlock()),
('act2', activation(inplace=True)),
('conv2', nn.Conv2d(
channels, growth, kernel_size=3, padding=1,
stride=1, bias=False)),
] if m[1] is not None))
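if __name__ == '__main__':
    # --- Hedged usage sketch (illustration only; not part of the original module) ---
    # Shows how normalization/activation are injected as constructors; the channel,
    # stride and input sizes below are arbitrary example values. Note the module's
    # relative import means it must be run in its package context (e.g. python -m).
    import torch

    op = BasicOperation(
        in_channels=64, out_channels=128, stride=2, groups=1, bottleneck=1,
        normalization=nn.BatchNorm2d, activation=nn.ReLU, dropblock=False)
    out = op(torch.randn(2, 64, 32, 32))
    print(out.shape)  # expected: torch.Size([2, 128, 16, 16])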
| 2.4375 | 2 |
StkAutomation/IntegrationCertification/IntegrationCert.py | jgonzalesAGI/STKCodeExamples | 0 | 12799912 | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 09:33:16 2020
@author: jvergere
Ideas: Something similar to the Iridium Constellation:
66 Sats
781 km (7159 semimajor axis)
86.4 inclination
6 Orbit planes 30 degrees apart
11 in each plane
"""
import datetime as dt
import numpy as np
import os
#Need to cleanup this file before running each time,
#or refactor code to avoid writing to file in append mode
if os.path.exists("MaxOutageData.txt"):
os.remove("MaxOutageData.txt")
from comtypes.client import CreateObject # Will allow you to launch STK
#from comtypes.client import GetActiveObject #Will allow you to connect a running instance of STK
#Start the application, it will return a pointer to the Application Interface
app = CreateObject("STK12.Application")
#app = GetActiveObject("STK12.Application")
#app is a pointer to IAgUiApplication
#type info is available with python builtin type method
#type(app)
#More info is available via python built in dir method, which will list
#all the available properties and methods available
#dir(app)
#Additional useful information is available via the python builtin help
#help(app)
app.Visible = True
app.UserControl = True
root = app.Personality2 #root ->IAgStkObjectRoot
#These are not available to import until this point if this is the first time
#running STK via COM with python....it won't hurt to leave them there, but after running once they can be
#included at the top with all the other import statements
from comtypes.gen import STKUtil
from comtypes.gen import STKObjects
root.NewScenario("NewTestScenario")
scenario = root.CurrentScenario #scenario -> IAgStkObject
scenario2 = scenario.QueryInterface(STKObjects.IAgScenario) #scenario2 -> IAgScenario
scenario2.StartTime = "1 Jun 2016 16:00:00.000"
scenario2.StopTime = "2 Jun 2016 16:00:00.000"
root.Rewind()
#Insert Facilites from text file using connect. Each line of the text file is
#formatted:
#FacName,Longitude,Latitude
with open("Facilities.txt", "r") as faclist:
for line in faclist:
facData = line.strip().split(",")
insertNewFacCmd = "New / */Facility {}".format(facData[0])
root.ExecuteCommand(insertNewFacCmd)
setPositionCmd = "SetPosition */Facility/{} Geodetic {} {} Terrain".format(facData[0], facData[2], facData[1])
root.ExecuteCommand(setPositionCmd)
setColorCommand = "Graphics */Facility/{} SetColor blue".format(facData[0])
root.ExecuteCommand(setColorCommand)
#Create sensor constellation, used later to hold all the sensor objects
sensorConst = scenario.Children.New(STKObjects.eConstellation, "SensorConst")
sensorConst2 = sensorConst.QueryInterface(STKObjects.IAgConstellation)
#Build satellite constellation, attach sensors, assign sensor to constellation object
i = 1
for RAAN in range(0,180,45): # 4 orbit planes
j = 1
for trueAnomaly in range(0,360,45): # 8 sats per plane
#insert satellite
newSat = scenario.Children.New(STKObjects.eSatellite, "Sat{}{}".format(i,j))
newSat2 = newSat.QueryInterface(STKObjects.IAgSatellite)
#change some basic display attributes
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Color = 65535
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Line.Width = STKObjects.e1
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesBasic).Inherit = False
newSat2.Graphics.Attributes.QueryInterface(STKObjects.IAgVeGfxAttributesOrbit).IsGroundTrackVisible = False
#Buildup Initial State using TwoBody Propagator and Classical Orbital Elements
keplarian = newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.ConvertTo(STKUtil.eOrbitStateClassical).QueryInterface(STKObjects.IAgOrbitStateClassical)
        keplarian.SizeShapeType = STKObjects.eSizeShapeSemimajorAxis
keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).SemiMajorAxis = 7159
keplarian.SizeShape.QueryInterface(STKObjects.IAgClassicalSizeShapeSemimajorAxis).Eccentricity = 0
keplarian.Orientation.Inclination = 86.4
keplarian.Orientation.ArgOfPerigee = 0
keplarian.Orientation.AscNodeType = STKObjects.eAscNodeRAAN
keplarian.Orientation.AscNode.QueryInterface(STKObjects.IAgOrientationAscNodeRAAN).Value = RAAN
keplarian.LocationType = STKObjects.eLocationTrueAnomaly
keplarian.Location.QueryInterface(STKObjects.IAgClassicalLocationTrueAnomaly).Value = trueAnomaly + (45/2)*(i%2) #Stagger TrueAnomalies for every other orbital plane
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).InitialState.Representation.Assign(keplarian)
newSat2.Propagator.QueryInterface(STKObjects.IAgVePropagatorTwoBody).Propagate()
#Attach sensors to each satellite
sensor = newSat.Children.New(STKObjects.eSensor,"Sensor{}{}".format(i,j))
sensor2 = sensor.QueryInterface(STKObjects.IAgSensor)
sensor2.CommonTasks.SetPatternSimpleConic(62.5, 2)
#Add the sensor to the SensorConstellation
sensorConst2.Objects.Add("Satellite/Sat{0}{1}/Sensor/Sensor{0}{1}".format(i,j))
        #Adjust the translucency of the sensor projections
sensor2.VO.PercentTranslucency = 75
sensor2.Graphics.LineStyle = STKUtil.eDotted
j+=1
i+=1
#Create a Chain object for each Facility to the constellation.
facCount = scenario.Children.GetElements(STKObjects.eFacility).Count
for i in range(facCount):
#Create Chain
facName = scenario.Children.GetElements(STKObjects.eFacility).Item(i).InstanceName
chain = scenario.Children.New(STKObjects.eChain, "{}ToSensorConst".format(facName))
chain2 = chain.QueryInterface(STKObjects.IAgChain)
#Modify some display properties
chain2.Graphics.Animation.Color = 65280
chain2.Graphics.Animation.LineWidth = STKObjects.e1
chain2.Graphics.Animation.IsHighlightVisible = False
#Add objects to the chain
chain2.Objects.Add("Facility/{}".format(facName))
chain2.Objects.Add("Constellation/SensorConst")
#Get complete chain access data
compAcc = chain.DataProviders.Item("Complete Access").QueryInterface(STKObjects.IAgDataPrvInterval).Exec(scenario2.StartTime,scenario2.StopTime)
el = compAcc.DataSets.ElementNames
numRows = compAcc.DataSets.RowCount
maxOutage = []
#Save out the report to a text file
with open("{}CompleteChainAccess.txt".format(facName),"w") as dataFile:
dataFile.write("{},{},{},{}\n".format(el[0],el[1],el[2],el[3]))
for row in range(numRows):
rowData = compAcc.DataSets.GetRow(row)
dataFile.write("{},{},{},{}\n".format(rowData[0],rowData[1],rowData[2],rowData[3]))
#Get max outage time for each chain, print to console and save to file
with open("MaxOutageData.txt", "a") as outageFile:
if numRows == 1:
outageFile.write("{},NA,NA,NA\n".format(facName))
print("{}: No Outage".format(facName))
else:
#Get StartTimes and StopTimes as lists
startTimes = list(compAcc.DataSets.GetDataSetByName("Start Time").GetValues())
stopTimes = list(compAcc.DataSets.GetDataSetByName("Stop Time").GetValues())
            #convert from strings to datetimes
startDatetimes = np.array([dt.datetime.strptime(startTime[:-3], "%d %b %Y %H:%M:%S.%f") for startTime in startTimes])
stopDatetimes = np.array([dt.datetime.strptime(stopTime[:-3], "%d %b %Y %H:%M:%S.%f") for stopTime in stopTimes])
outages = startDatetimes[1:] - stopDatetimes[:-1]
maxOutage = np.amax(outages).total_seconds()
start = stopTimes[np.argmax(outages)]
stop = startTimes[np.argmax(outages)+1]
outageFile.write("{},{},{},{}\n".format(facName,maxOutage,start,stop))
print("{}: {} seconds from {} until {}".format(facName, maxOutage, start, stop))
root.Rewind()
root.Save() | 2.5625 | 3 |
example_project/some_modules/third_modules/a55.py | Yuriy-Leonov/cython_imports_limit_issue | 0 | 12799913 | class A55:
pass
| 1.054688 | 1 |
retrograph/training/preprocessors.py | ai-nikolai/Retrograph-1 | 14 | 12799914 | #####################################################
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
# Copyright 2019 <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################
####################################################
# IMPORT STATEMENTS
####################################################
# >>>>>> Native Imports <<<<<<<
import os
# >>>>>> Package Imports <<<<<<<
import tensorflow as tf
import csv
# >>>>>> Local Imports <<<<<<<
from retrograph.models import tokenization
####################################################
# CODE
####################################################
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.gfile.Open(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
lines.append(line)
return lines
class XnliProcessor(DataProcessor):
"""Processor for the XNLI data set."""
def __init__(self):
self.language = "zh"
def get_train_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(
os.path.join(data_dir, "multinli",
"multinli.train.%s.tsv" % self.language))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "train-%d" % (i)
text_a = tokenization.convert_to_unicode(line[0])
text_b = tokenization.convert_to_unicode(line[1])
label = tokenization.convert_to_unicode(line[2])
if label == tokenization.convert_to_unicode("contradictory"):
label = tokenization.convert_to_unicode("contradiction")
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_dev_examples(self, data_dir):
"""See base class."""
lines = self._read_tsv(os.path.join(data_dir, "xnli.dev.tsv"))
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "dev-%d" % (i)
language = tokenization.convert_to_unicode(line[0])
if language != tokenization.convert_to_unicode(self.language):
continue
text_a = tokenization.convert_to_unicode(line[6])
text_b = tokenization.convert_to_unicode(line[7])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
class MnliProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir, matched=True):
"""See base class."""
if matched:
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
"dev_matched")
else:
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev_mismatched.tsv")),
"dev_mismatched")
def get_test_examples(self, data_dir, matched=True):
"""See base class."""
if matched:
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_matched.tsv")), "test")
else:
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test_mismatched.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[8])
text_b = tokenization.convert_to_unicode(line[9])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class DiagnosticProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "diagnostic.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["contradiction", "entailment", "neutral"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
if set_type == "test":
label = "contradiction"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class WNLIProcessor(DataProcessor):
"""Processor for the MultiNLI data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")),
"dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[-1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class MrpcProcessor(DataProcessor):
"""Processor for the MRPC data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
if set_type == "test":
label = "0"
else:
label = tokenization.convert_to_unicode(line[0])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class ColaProcessor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
# Only the test set has a header
if set_type == "test" and i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[3])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
'''Added by Anne'''
class SST2Processor(DataProcessor):
"""Processor for the CoLA data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, i)
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
label = "0"
else:
text_a = tokenization.convert_to_unicode(line[0])
label = tokenization.convert_to_unicode(line[1])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=None, label=label))
return examples
class QQPProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
if set_type == "test":
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
label = "0"
else:
if len(line) != 6:
# there is a problematic line
print(line)
continue
text_a = tokenization.convert_to_unicode(line[3])
text_b = tokenization.convert_to_unicode(line[4])
label = tokenization.convert_to_unicode(line[5])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class QNLIProcessor(DataProcessor):
"""Processor for the QQP data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
if set_type == "test":
label = "entailment"
else:
label = tokenization.convert_to_unicode(line[3])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class STSBProcessor(DataProcessor):
"""Processor for the STS-B data set."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
if set_type == 'test':
text_a = tokenization.convert_to_unicode(line[-2])
text_b = tokenization.convert_to_unicode(line[-1])
label = 0.0
else:
text_a = tokenization.convert_to_unicode(line[-3])
text_b = tokenization.convert_to_unicode(line[-2])
label = float(line[-1])
examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class RTEProcessor(DataProcessor):
"""Processor for the RTE data set (GLUE version)."""
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, "test.tsv")), "test")
def get_labels(self):
"""See base class."""
return ["entailment", "not_entailment"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0:
continue
guid = "%s-%s" % (set_type, tokenization.convert_to_unicode(line[0]))
text_a = tokenization.convert_to_unicode(line[1])
text_b = tokenization.convert_to_unicode(line[2])
if set_type == "test":
label = "entailment"
else:
label = tokenization.convert_to_unicode(line[3])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class SICKEntailmentProcessor(DataProcessor):
"""Processor for the SICK data set (SentEval version)."""
def loadFile(self, fpath):
skipFirstLine = True
sick_data = {'X_A': [], 'X_B': [], 'y': []}
    with open(fpath, 'r', encoding='utf-8') as f:
for line in f:
if skipFirstLine:
skipFirstLine = False
else:
text = line.strip().split('\t')
sick_data['X_A'].append(text[1].split())
sick_data['X_B'].append(text[2].split())
sick_data['y'].append(text[4])
return sick_data
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_train.txt')), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_trial.txt')), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self.loadFile(os.path.join(data_dir, 'SICK_test_annotated.txt')), "test")
def get_labels(self):
"""See base class."""
return ['CONTRADICTION', 'NEUTRAL', 'ENTAILMENT']
  def _create_examples(self, data, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for i, (tokens_a, tokens_b, label) in enumerate(zip(data['X_A'], data['X_B'], data['y'])):
      guid = "%s-%s" % (set_type, str(i))
      text_a = tokenization.convert_to_unicode(" ".join(tokens_a))
      text_b = tokenization.convert_to_unicode(" ".join(tokens_b))
      label = tokenization.convert_to_unicode(label)
      examples.append(
          InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
    return examples
class TRECProcessor(DataProcessor):
"""Processor for the TREC data set (SentEval version)."""
def loadFile(self, fpath):
trec_data = {'X': [], 'y': []}
    with open(fpath, 'r', encoding='latin-1') as f:
for line in f:
target, sample = line.strip().split(':', 1)
sample = sample.split(' ', 1)[1].split()
trec_data['X'].append(sample)
trec_data['y'].append(target)
return trec_data
  def get_train_examples(self, data_dir):
    """See base class."""
    data = self.loadFile(os.path.join(data_dir, 'train_5500.label'))
    split_index = int(len(data['X']) * 0.7)
    train_data = {'X': data['X'][:split_index], 'y': data['y'][:split_index]}
    return self._create_examples(train_data, "train")

  def get_dev_examples(self, data_dir):
    """See base class."""
    data = self.loadFile(os.path.join(data_dir, 'train_5500.label'))
    split_index = int(len(data['X']) * 0.7)
    dev_data = {'X': data['X'][split_index:], 'y': data['y'][split_index:]}
    return self._create_examples(dev_data, "dev")
def get_test_examples(self, data_dir):
"""See base class."""
return self._create_examples(self.loadFile(os.path.join(data_dir, 'TREC_10.label')), "test")
def get_labels(self):
"""See base class."""
return ['ABBR', 'DESC', 'ENTY', 'HUM', 'LOC', 'NUM']
  def _create_examples(self, data, set_type):
    """Creates examples for the training and dev sets."""
    examples = []
    for i, (tokens, label) in enumerate(zip(data['X'], data['y'])):
      guid = "%s-%s" % (set_type, str(i))
      text_a = tokenization.convert_to_unicode(" ".join(tokens))
      label = tokenization.convert_to_unicode(label)
      examples.append(
          InputExample(guid=guid, text_a=text_a, label=label))
    return examples
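# Typical usage sketch (the data_dir below is a placeholder path, not one shipped
# with this code):
#
#   processor = MrpcProcessor()
#   train_examples = processor.get_train_examples("/path/to/glue/MRPC")
#   label_list = processor.get_labels()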
####################################################
# MAIN
####################################################
# EOF
| 1.367188 | 1 |
load_data.py | penguin2048/StockIt | 32 | 12799915 | """
handle preprocessing and loading of data.
"""
import html
import os.path
import pandas as pd
import re
from nltk import word_tokenize, pos_tag
from nltk.corpus import stopwords, wordnet
from nltk.stem.wordnet import WordNetLemmatizer
class LoadData:
@classmethod
def preprocess_stocktwits_data(cls, file_location, columns=['datetime', 'message']):
"""
        preprocess the data in file_location and save it as a csv file (appending
        '_preprocessed' before '.csv'). The preprocessing is done in the following ways:
        1) extract message and datetime columns.
        2) sort according to datetime in descending order (newest first)
        3) remove links, @ and $ references, extra whitespace, extra '.', digits, slashes,
           hyphens
        4) decode html entities
        5) convert everything to lower case
"""
if 'datetime' in columns:
dataFrame = pd.read_csv(file_location, usecols=columns, parse_dates=['datetime'], infer_datetime_format=True)
dataFrame.sort_values(by='datetime', ascending=False)
else:
dataFrame = pd.read_csv(file_location, usecols=columns)
dataFrame['message'] = dataFrame['message'].apply(lambda x: html.unescape(x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'(www\.|https?://).*?(\s|$)|@.*?(\s|$)|\$.*?(\s|$)|\d|\%|\\|/|-|_', ' ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\.+', '. ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\,+', ', ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\?+', '? ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: re.sub(r'\s+', ' ', x))
dataFrame['message'] = dataFrame['message'].apply(lambda x: x.lower())
dataFrame.to_csv(file_location[:-4]+'_preprocessed.csv', index=False)
@classmethod
def labelled_data_lexicon_analysis(cls):
"""
extract keywords from labelled stocktwits data for improved accuracy in scoring
for each labelled message do
1) tokenize the message
2) perform POS tagging
        3) if a sense is present in wordnet, lemmatize the word and remove stop words; otherwise ignore the word
remove intersections from the two lists before saving
"""
dataFrame = LoadData.get_labelled_data()
bullish_keywords = set()
bearish_keywords = set()
lemmatizer = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
for index, row in dataFrame.iterrows():
tokens = word_tokenize(row['message'])
pos = pos_tag(tokens)
selected_tags = set()
for i in range(len(pos)):
if len(wordnet.synsets(pos[i][0])):
if pos[i][1].startswith('J'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'a'))
elif pos[i][1].startswith('V'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'v'))
elif pos[i][1].startswith('N'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'n'))
elif pos[i][1].startswith('R'):
selected_tags.add(lemmatizer.lemmatize(pos[i][0], 'r'))
selected_tags -= stop_words
if row['sentiment'] == 'Bullish':
bullish_keywords = bullish_keywords.union(selected_tags)
elif row['sentiment'] == 'Bearish':
bearish_keywords = bearish_keywords.union(selected_tags)
updated_bullish_keywords = bullish_keywords - bearish_keywords
updated_bearish_keywords = bearish_keywords - bullish_keywords
with open('data-extractor/lexicon_bullish_words.txt', 'a') as file:
for word in updated_bullish_keywords:
file.write(word+"\n")
with open('data-extractor/lexicon_bearish_words.txt', 'a') as file:
for word in updated_bearish_keywords:
file.write(word+"\n")
@classmethod
def get_stocktwits_data(cls, symbol):
"""
        load the preprocessed stocktwits data of 'symbol' from data-extractor
        and return a pandas dataframe with columns [message(object), datetime(datetime64[ns])].
"""
file_location = 'data-extractor/stocktwits_'+symbol+'_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/stocktwits_'+symbol+'.csv')
dataFrame = pd.read_csv(file_location)
return dataFrame
@classmethod
def get_price_data(cls, symbol):
"""
loads the price data of 'symbol' from data-extractor
and returns a pandas dataframe with columns [Date(datetime64[ns]), Opening Price(float64), Closing Price(float64), Volume(float64)].
"""
file_location = 'data-extractor/stock_prices_'+symbol+'.csv'
dataFrame = pd.read_csv(file_location, usecols=['Date', 'Opening Price', 'Closing Price', 'Volume'], parse_dates=['Date'], infer_datetime_format=True)
return dataFrame
@classmethod
def get_labelled_data(cls, type='complete'):
"""
get_labelled_data loads the preprocessed labelled data of stocktwits from data-extractor
and returns a pandas dataframe with columns [sentiment(object), message(object)].
"""
if type == 'complete':
file_location = 'data-extractor/labelled_data_complete_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_complete.csv', columns=['sentiment', 'message'])
elif type == 'training':
file_location = 'data-extractor/labelled_data_training_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.get_training_data()
elif type == 'test':
file_location = 'data-extractor/labelled_data_test_preprocessed.csv'
if os.path.isfile(file_location) is False:
LoadData.preprocess_stocktwits_data('data-extractor/labelled_data_test.csv', columns=['sentiment', 'message'])
dataFrame = pd.read_csv(file_location)
return dataFrame
@classmethod
def get_custom_lexicon(cls):
"""
get custom lexicon of bearish and bullish words respectively
"""
file_location1 = 'data-extractor/lexicon_bearish_words.txt'
file_location2 = 'data-extractor/lexicon_bullish_words.txt'
if os.path.isfile(file_location1) is False or os.path.isfile(file_location2) is False:
LoadData.labelled_data_lexicon_analysis()
dataFrameBearish = pd.read_csv(file_location1, header=None, names=['word'])
dataFrameBullish = pd.read_csv(file_location2, header=None, names=['word'])
return dataFrameBearish, dataFrameBullish
@classmethod
def get_training_data(cls):
"""
get labelled training data with equal bearish and bullish messages
"""
try:
os.remove('data-extractor/labelled_data_training.csv')
except OSError:
pass
dataFrame = LoadData.get_labelled_data(type='complete')
dataFrameBearish = dataFrame[dataFrame['sentiment']=='Bearish']
dataFrameBullish = dataFrame[dataFrame['sentiment']=='Bullish']
dataFrameBearishTraining = dataFrameBearish
dataFrameBullishTraining = dataFrameBullish[:len(dataFrameBearish)]
dataFrameTraining = dataFrameBearishTraining.append(dataFrameBullishTraining, ignore_index=True).sample(frac=1).reset_index(drop=True)
dataFrameTraining.to_csv('data-extractor/labelled_data_training_preprocessed.csv', index=False)
@classmethod
def combine_price_and_sentiment(cls, sentimentFrame, priceFrame):
        """
        receive sentimentFrame as (date, sentiment, message) indexed by date and sentiment
        and priceFrame as (Date, Opening Price, Closing Price, Volume) and return a combined
        frame as (sentiment_calculated_bullish, sentiment_calculated_bearish,
        sentiment_actual_previous, tweet_volume_change, cash_volume, label)
        """
        from datetime import timedelta
dataFrame = pd.DataFrame()
for date, df in sentimentFrame.groupby(level=0, sort=False):
price_current = priceFrame[priceFrame['Date'] == date]
if price_current.empty or date-timedelta(days=1) not in sentimentFrame.index:
continue
tweet_minus1 = sentimentFrame.loc[date-timedelta(days=1)]
days = 1
price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)]
while price_plus1.empty:
days += 1
price_plus1 = priceFrame[priceFrame['Date'] == date+timedelta(days=days)]
days = 1
price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)]
while price_minus1.empty:
days += 1
price_minus1 = priceFrame[priceFrame['Date'] == date-timedelta(days=days)]
new_row = {}
new_row['date'] = date
new_row['sentiment_calculated_bullish'] = df.loc[(date, 'Bullish')]['message']
new_row['sentiment_calculated_bearish'] = df.loc[(date, 'Bearish')]['message']
new_row['sentiment_actual_previous'] = 1 if ((price_minus1.iloc[0]['Closing Price'] - price_minus1.iloc[0]['Opening Price']) >= 0) else -1
new_row['tweet_volume_change'] = df['message'].sum() - tweet_minus1['message'].sum()
new_row['cash_volume'] = price_current['Volume'].iloc[0]
new_row['label'] = 1 if ((price_plus1.iloc[0]['Closing Price'] - price_current.iloc[0]['Closing Price']) >= 0) else -1
print(new_row)
dataFrame = dataFrame.append(new_row, ignore_index=True)
return dataFrame
@classmethod
def aggregate_stock_price_data(cls):
"""
compile stocktwits data for stock prediction analysis in the following form
(date, sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous, tweet_volume_change, cash_volume, label)
we have choice to take previous n days sentiment_calculated and using label of next nth day
returns dataframes for AAPL, AMZN, GOOGL respectively
"""
if not (os.path.isfile('data-extractor/stocktwits_AAPL_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_AMZN_sharedata.csv') and os.path.isfile('data-extractor/stocktwits_GOOGL_sharedata.csv')):
from sklearn.externals import joblib
file_location = 'naive_bayes_classifier.pkl'
priceAAPL = LoadData.get_price_data('AAPL')
priceAMZN = LoadData.get_price_data('AMZN')
priceGOOGL = LoadData.get_price_data('GOOGL')
sentimented_file = 'data-extractor/stocktwits_AAPL_withsentiment.csv'
if os.path.isfile(sentimented_file) is False:
tweet_classifier = joblib.load(file_location)
dataAAPL = LoadData.get_stocktwits_data('AAPL')
dataAAPL['sentiment'] = dataAAPL['message'].apply(lambda x: tweet_classifier.predict([x])[0])
dataAAPL['datetime'] = dataAAPL['datetime'].apply(lambda x: x.date())
dataAAPL.rename(columns={'datetime':'date'}, inplace=True)
dataAAPL.to_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', index=False)
sentimented_file = 'data-extractor/stocktwits_AMZN_withsentiment.csv'
if os.path.isfile(sentimented_file) is False:
tweet_classifier = joblib.load(file_location)
dataAMZN = LoadData.get_stocktwits_data('AMZN')
dataAMZN['sentiment'] = dataAMZN['message'].apply(lambda x: tweet_classifier.predict([x])[0])
dataAMZN['datetime'] = dataAMZN['datetime'].apply(lambda x: x.date())
dataAMZN.rename(columns={'datetime':'date'}, inplace=True)
dataAMZN.to_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', index=False)
sentimented_file = 'data-extractor/stocktwits_GOOGL_withsentiment.csv'
if os.path.isfile(sentimented_file) is False:
tweet_classifier = joblib.load(file_location)
dataGOOGL = LoadData.get_stocktwits_data('GOOGL')
dataGOOGL['sentiment'] = dataGOOGL['message'].apply(lambda x: tweet_classifier.predict([x])[0])
dataGOOGL['datetime'] = dataGOOGL['datetime'].apply(lambda x: x.date())
dataGOOGL.rename(columns={'datetime':'date'}, inplace=True)
dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', index=False)
dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_withsentiment.csv', parse_dates=['date'], infer_datetime_format=True)
dataAAPL = dataAAPL.groupby(['date','sentiment'], sort=False).count()
dataAMZN = dataAMZN.groupby(['date','sentiment'], sort=False).count()
dataGOOGL = dataGOOGL.groupby(['date','sentiment'], sort=False).count()
dataAAPL = LoadData.combine_price_and_sentiment(dataAAPL, priceAAPL)
dataAMZN = LoadData.combine_price_and_sentiment(dataAMZN, priceAMZN)
dataGOOGL = LoadData.combine_price_and_sentiment(dataGOOGL, priceGOOGL)
dataAAPL.to_csv('data-extractor/stocktwits_AAPL_sharedata.csv', index=False)
dataAMZN.to_csv('data-extractor/stocktwits_AMZN_sharedata.csv', index=False)
dataGOOGL.to_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', index=False)
dataAAPL = pd.read_csv('data-extractor/stocktwits_AAPL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
dataAMZN = pd.read_csv('data-extractor/stocktwits_AMZN_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
dataGOOGL = pd.read_csv('data-extractor/stocktwits_GOOGL_sharedata.csv', parse_dates=['date'], infer_datetime_format=True)
return dataAAPL, dataAMZN, dataGOOGL
@classmethod
def get_stock_prediction_data(cls, symbol='ALL', type='training'):
"""
get the training and test data for stock prediction in format
(sentiment_calculated_bullish, sentiment_calculated_bearish, sentiment_actual_previous,
tweet_volume_change, cash_volume, label)
Standardize the data before using.
"""
file_location = 'data-extractor/stockdata_'+symbol+'_'+type+'.csv'
if not os.path.isfile(file_location):
import numpy as np
dataAAPL, dataAMZN, dataGOOGL = LoadData.aggregate_stock_price_data()
combined_data = dataAAPL.append([dataAMZN, dataGOOGL], ignore_index=True)
            combined_data.sort_values('date', inplace=True)
combined_data.drop(columns='date', inplace=True)
combined_training, combined_test = np.split(combined_data.sample(frac=1), [int(.9*len(combined_data))])
combined_training.to_csv('data-extractor/stockdata_ALL_training.csv', index=False)
combined_test.to_csv('data-extractor/stockdata_ALL_test.csv', index=False)
            dataAAPL.sort_values('date', inplace=True)
dataAAPL.drop(columns='date', inplace=True)
AAPL_training, AAPL_test = np.split(dataAAPL.sample(frac=1), [int(.9*len(dataAAPL))])
AAPL_training.to_csv('data-extractor/stockdata_AAPL_training.csv', index=False)
AAPL_test.to_csv('data-extractor/stockdata_AAPL_test.csv', index=False)
            dataAMZN.sort_values('date', inplace=True)
dataAMZN.drop(columns='date', inplace=True)
AMZN_training, AMZN_test = np.split(dataAMZN.sample(frac=1), [int(.9*len(dataAMZN))])
AMZN_training.to_csv('data-extractor/stockdata_AMZN_training.csv', index=False)
AMZN_test.to_csv('data-extractor/stockdata_AMZN_test.csv', index=False)
            dataGOOGL.sort_values('date', inplace=True)
dataGOOGL.drop(columns='date', inplace=True)
GOOGL_training, GOOGL_test = np.split(dataGOOGL.sample(frac=1), [int(.9*len(dataGOOGL))])
GOOGL_training.to_csv('data-extractor/stockdata_GOOGL_training.csv', index=False)
GOOGL_test.to_csv('data-extractor/stockdata_GOOGL_test.csv', index=False)
data = pd.read_csv(file_location)
return data
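# Usage sketch (illustrative; the StandardScaler step is one possible way to
# standardize the features and is not part of this module):
#
#   from sklearn.preprocessing import StandardScaler
#   data = LoadData.get_stock_prediction_data(symbol='AAPL', type='training')
#   X = StandardScaler().fit_transform(data.drop(columns='label'))
#   y = data['label']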
| 3.078125 | 3 |
tests/component/test_performance_log_dataframe.py | cswarth/whylogs | 603 | 12799916 | <filename>tests/component/test_performance_log_dataframe.py
import cProfile
import json
import os
import pstats
from logging import getLogger
from shutil import rmtree
from time import sleep
from typing import List
import pandas as pd
import pytest
from whylogs.app.config import SessionConfig, WriterConfig
from whylogs.app.session import session_from_config
script_dir = os.path.dirname(os.path.realpath(__file__))
TEST_LOGGER = getLogger(__name__)
def count_features(json_profile_filename):
if not os.path.isfile(json_profile_filename):
raise ValueError(f"{json_profile_filename} is not a json file but trying to open it to count features")
profile = get_json_profile(json_profile_filename)
if profile and profile.get("columns"):
return len(profile["columns"].keys())
return 0
def get_json_profile(json_profile_filename):
profile = {}
if os.path.exists(json_profile_filename) and os.stat(json_profile_filename).st_size > 0:
with open(json_profile_filename) as profile_file:
profile = json.load(profile_file)
return profile
def assert_all_elements_equal(data: List):
if not data or len(data) == 1:
return True
first = data[0]
for element in iter(data):
assert first[0] == element[0], f"Found differing feature counts: {first[0]} vs {element[0]} in files {first[1]} and {element[1]}"
@pytest.mark.load
def test_log_rotation_concurrency(tmpdir):
log_rotation_interval = "1s"
sleep_interval = 2
test_path = tmpdir.mkdir("log_rotation_concurrency_repro")
writer_config = WriterConfig("local", ["json"], test_path.realpath(), filename_template="dataset_summary-$dataset_timestamp")
# Load the full lending club 1000 csv, to get a chance at hitting the bug.
csv_path = os.path.join(script_dir, "lending_club_1000.csv")
full_df = pd.read_csv(csv_path)
# full_df has shape (1000, 151) so create a test df with 4x size by iteratively appending to self 2 times
for _ in range(2):
full_df = full_df.append(full_df)
TEST_LOGGER.info(f"test dataframe has shape {full_df.shape}")
# Create a whylogs logging session
session_config = SessionConfig("project", "pipeline", writers=[writer_config])
session = session_from_config(session_config)
TEST_LOGGER.info(f"Running rotate log test with {log_rotation_interval} flush intervals and {sleep_interval}s pause")
profiler = cProfile.Profile()
profiler.enable()
with session.logger(tags={"datasetId": "model-1"}, with_rotation_time=log_rotation_interval) as ylog:
ylog.log_dataframe(full_df) # Log a larger dataframe to increase chance of rotation before seeing all columns
sleep(sleep_interval)
ylog.log_dataframe(full_df.head(n=2)) # Log a smaller dataframe to get more features before rotation
sleep(sleep_interval)
profiler.disable()
stats = pstats.Stats(profiler).sort_stats("cumulative")
TEST_LOGGER.info(stats.print_stats(10))
output_files = []
for root, subdir, file_names in os.walk(test_path):
if not file_names:
continue
if subdir:
for directory in subdir:
for file in file_names:
full_file_path = os.path.join(root, directory, file)
output_files += [full_file_path]
else:
for file in file_names:
full_file_path = os.path.join(root, file)
output_files += [full_file_path]
assert len(output_files) > 0, "No output files were generated during stress test"
TEST_LOGGER.debug(f"Generated {len(output_files)} dataset summary files.")
feature_counts = []
for filename in output_files:
feature_count = count_features(filename)
if feature_count > 0:
feature_counts.append((count_features(filename), filename))
assert len(feature_counts) > 0, f"feature counts are all empty, we expect some empty files with aggressive log rotation but not all empty!"
TEST_LOGGER.info(f"Feature counts all same, first file with features was {feature_counts[0]}")
TEST_LOGGER.debug(f"There were {len(feature_counts)} files with features.")
assert_all_elements_equal(feature_counts)
rmtree(test_path, ignore_errors=True)
TEST_LOGGER.debug(f"End cleaning up test directory {test_path}")
| 2.359375 | 2 |
thing/models/skillplan.py | Gillingham/evething | 33 | 12799917 | # ------------------------------------------------------------------------------
# Copyright (c) 2010-2013, EVEthing team
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
# OF SUCH DAMAGE.
# ------------------------------------------------------------------------------
from django.contrib.auth.models import User
from django.db import models
class SkillPlan(models.Model):
PRIVATE_VISIBILITY = 1
PUBLIC_VISIBILITY = 2
GLOBAL_VISIBILITY = 3
MASTERY_VISIBILITY = 99
VISIBILITY_CHOICES = (
(PRIVATE_VISIBILITY, 'Private'),
(PUBLIC_VISIBILITY, 'Public'),
(GLOBAL_VISIBILITY, 'Global'),
)
user = models.ForeignKey(User, null=True, blank=True)
name = models.CharField(max_length=64)
visibility = models.IntegerField(default=1, choices=VISIBILITY_CHOICES)
class Meta:
app_label = 'thing'
ordering = ('name',)
def __unicode__(self):
if hasattr(self.user, 'username'):
return '%s - %s' % (self.user.username, self.name)
else:
return '%s' % self.name
| 1.460938 | 1 |
sorting-and-searching/shell-sort.py | rayruicai/coding-interview | 0 | 12799918 | import unittest
# worst-case time complexity O(n**2) for the n//2, n//4, ..., 1 gap sequence
# space complexity O(1)
def shell_sort(arr):
n = len(arr)
gap = n//2
while gap >= 1:
for start in range(gap):
gap_insertion_sort(arr, start, gap)
gap = gap//2
return arr
def gap_insertion_sort(arr, start, gap):
n = len(arr)
for i in range(start, n, gap):
j = i - gap
while (j >= start) and (arr[i] < arr[j]):
arr[i], arr[j] = arr[j], arr[i]
i = j
j -= gap
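# Worked example (illustrative): for an 11-element list the gap sequence is
# 5, 2, 1, that is, two coarse gapped insertion passes followed by a plain
# insertion sort.
#   shell_sort([3, 6, 9, 7, 8, 4, 2, 5, 1, 9, 6])
#   # -> [1, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9]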
class Test(unittest.TestCase):
def test_shell_sort(self):
arr = [3,6,9,7,8,4,2,5,1,9,6]
self.assertEqual(shell_sort(arr), [1,2,3,4,5,6,6,7,8,9,9]);
if __name__ == "__main__":
unittest.main()
| 4.03125 | 4 |
tests/test_parameter.py | jlant/gagepy | 0 | 12799919 | # -*- coding: utf-8 -*-
"""
test_parameter
~~~~~~~~~~~~~~~
Tests for `gagepy.parameter` class
:copyright: 2015 by <NAME>, see AUTHORS
:license: United States Geological Survey (USGS), see LICENSE file
"""
import pytest
import os
import numpy as np
from datetime import datetime
from gagepy.parameter import Parameter
def test_parameter_init(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
values = np.array([100, 110, 105, 107, 112]),
units = "cubic feet per second (Mean)",
code = "06_00060_00003")
assert list(parameter.dates) == list(dates_daily)
assert parameter.code == "06_00060_00003"
assert parameter.name == "Discharge"
assert parameter.units == "cubic feet per second (Mean)"
assert list(parameter.values) == list(np.array([100, 110, 105, 107, 112]))
def test_parameter_values_mean_max_min_without_nan(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, 4, 5]))
assert parameter.mean == 3.0
assert parameter.max == 5.0
assert parameter.min == 1.0
def test_parameter_values_mean_max_min_with_nan(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, np.nan, 12]))
assert parameter.mean == 4.5 # sum(values)/len(values) -> 18/4 = 4.5
assert parameter.max == 12.0
assert parameter.min == 1.0
def test_max_min_date(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, 4, 5]))
assert parameter.max_date == datetime(2015, 8, 5, 0, 0)
assert parameter.min_date == datetime(2015, 8, 1, 0, 0)
def test_max_min_date_with_nan(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, np.nan, 4, 5]))
assert parameter.max_date == datetime(2015, 8, 5, 0, 0)
assert parameter.min_date == datetime(2015, 8, 1, 0, 0)
def test_print_parameter_by_not_capturing_stdout(dates_daily):
parameter = Parameter(name = "Discharge",
dates = dates_daily,
units = "cubic feet per second (Mean)",
code = "06_00060_00003",
values = np.array([1, 2, 3, 4, 5]))
print(parameter)
| 2.671875 | 3 |
Ex5.py | zelfg/Exercicios_LP_1B | 0 | 12799920 | algo = bool(input("Digite alguma coisa: "))
print("O valor {} é int?".format(algo).isnumeric()) | 3.53125 | 4 |
3/3_4_2_full_permutation_2.py | DingJunyao/aha-algorithms-py | 2 | 12799921 | <gh_stars>1-10
"""
Full permutation, version 2
Generate all permutations of 1, 2, 3, 4
"""
k = [1, 2, 3, 4]
for a in k:
for b in k:
for c in k:
for d in k:
if a != b and a != c and a != d and b != c and b != d and c != d:
print("%s%s%s%s" % (a, b, c, d))
| 3.265625 | 3 |
code/haitiwater/apps/consumers/views.py | exavince/HaitiWater | 4 | 12799922 | from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.template import loader
from haitiwater.settings import PROJECT_VERSION, PROJECT_NAME
from ..utils.get_data import *
@login_required(login_url='/login/')
def index(request):
template = loader.get_template('consumers.html')
context = {
'project_version': PROJECT_VERSION,
'project_name': PROJECT_NAME,
'zone_name': get_zone(request),
'current_period': get_current_month_fr(),
'water_outlets': get_outlets(request),
'consumer_groups': get_amount_household(request),
'consumer_individuals': get_total_consumers(request),
'unpaid_bills': 42, # Todo, but for later as we can't mark a payment yet
}
return HttpResponse(template.render(context, request))
| 1.953125 | 2 |
pyrich/asset.py | choi-jiwoo/pyrich | 0 | 12799923 | from datetime import date
import pandas as pd
from pyrich.record import Record
class Asset(Record):
def __init__(self, table: str) -> None:
super().__init__(table)
def record_current_asset(self, current_asset: float) -> None:
table = 'current_asset'
query = (f'SELECT date FROM {table} '
f'WHERE id=(SELECT MAX(id) FROM {table});')
self.db.run_query(query)
date_format = '%Y-%m-%d'
today = date.today()
timestamp = today.strftime(date_format)
record = {
'date': timestamp,
'amount': current_asset,
}
try:
latest_date = self.db.cur.fetchone()[0]
except TypeError:
self.db.insert(table, record, msg=False)
else:
if today > latest_date:
self.db.insert(table, record, msg=False)
def __repr__(self) -> str:
return f"Asset(table='{self.table}')"
| 3 | 3 |
lib/assets/Lib/browser/timer.py | s6007589/cafe-grader-web | 25 | 12799924 | <gh_stars>10-100
from browser import window
def wrap(func):
# Transforms a function f into another function that prints a
# traceback in case of exception
def f(*args, **kw):
try:
return func(*args, **kw)
except Exception as exc:
msg = '{0.info}\n{0.__name__}: {0.args[0]}'.format(exc)
import sys
sys.stderr.write(msg)
return f
clear_interval = window.clearInterval
clear_timeout = window.clearTimeout
def set_interval(func,interval):
return window.setInterval(wrap(func),interval)
def set_timeout(func,interval):
return int(window.setTimeout(wrap(func),interval))
def request_animation_frame(func):
return int(window.requestAnimationFrame(func))
def cancel_animation_frame(int_id):
window.cancelAnimationFrame(int_id)
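# Usage sketch (runs inside a Brython page; the callback and the 1000 ms interval
# are illustrative values, not part of this module):
#
#   from browser import timer
#
#   def tick():
#       print("one second elapsed")
#
#   interval_id = timer.set_interval(tick, 1000)  # call tick() every second
#   timer.clear_interval(interval_id)             # stop it again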
def set_loop_timeout(x):
# set a variable used to stop loops that last more than x seconds
assert isinstance(x, int)
__BRYTHON__.loop_timeout = x | 2.8125 | 3 |
update_daemon.py | dgnorth/drift-serverdaemon | 0 | 12799925 | import os, sys, shutil
import zipfile, subprocess
from serverdaemon.logsetup import setup_logging, logger, log_event
import boto3
from boto3.s3.transfer import S3Transfer, TransferConfig
REGION = "eu-west-1"
BUCKET_NAME = "directive-tiers.dg-api.com"
UE4_BUILDS_FOLDER = "ue4-builds"
INSTALL_FOLDER = r"c:\drift-serverdaemon"
def get_my_version():
t = [0, 0, 0]
try:
with open("VERSION") as f:
version = f.read().strip()
# increment version each time the script is called
t = [int(p) for p in version.split(".")]
except:
logger.warning("Old version invalid")
return t
def kill_python_processes():
command = ["tasklist"]
popen = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout, stderr = popen.communicate()
lst = stdout.split("\n")
for l in lst:
ll = l.split()
if not len(ll):
continue
name = ll[0]
try:
pid = int(ll[1])
except:
continue
if pid == os.getpid():
continue
if "python.exe" in l:
try:
logger.info("Killing task '%s' with pid %s..." % (name, pid))
command = ["taskkill", "/PID", str(pid), "/f"]
subprocess.check_call(command, shell=True)
except Exception as e:
logger.error('Could not kill task. Error = %s' % e)
def check_download():
client = boto3.client('s3', REGION)
files = client.list_objects(Bucket=BUCKET_NAME, Prefix=UE4_BUILDS_FOLDER)['Contents']
max_version = get_my_version()
my_version = max_version
logger.info("My version is %s", ".".join(str(p) for p in max_version))
max_key = None
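    # Keys are expected to look like ue4-builds/drift-serverdaemon-<major>.<minor>.<patch>.zip
    # (e.g. drift-serverdaemon-1.2.3.zip; the numbers here are only illustrative).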
for s3_key in files:
filename = s3_key['Key']
if "drift-serverdaemon-" in filename:
lst = filename.split("-")[-1].split(".")
try:
file_version = [int(p) for p in lst[0:-1]]
except ValueError:
continue
            # compare versions lexicographically as [major, minor, patch] lists
            # so that e.g. 2.0.0 is correctly treated as newer than 1.9.9
            if file_version > max_version:
                max_version = file_version
                max_key = filename
if not max_key:
logger.info("No new version found. Bailing out.")
return None
log_event("upgrade_daemon", "Upgrading Serverdaemon from version %s to %s" % (my_version, max_version), severity="WARNING")
logger.info("found version %s, %s", max_version, max_key)
transfer = S3Transfer(client)
out_filename = "c:\\temp\\drift-serverdaemon.zip"
transfer.download_file(BUCKET_NAME, max_key, out_filename)
return out_filename
if __name__ == "__main__":
setup_logging("updatedaemon")
filename = check_download()
if not filename:
sys.exit(0)
zip_file = zipfile.ZipFile(filename, 'r')
for member in zip_file.namelist():
# copy file (taken from zipfile's extract)
filename = "/".join(member.split("/")[1:])
source = zip_file.open(member)
out_filename = os.path.join(INSTALL_FOLDER, filename)
try:
out_dirname = os.path.dirname(out_filename)
os.makedirs(out_dirname)
except:
pass
        target = open(out_filename, "wb")
with source, target:
shutil.copyfileobj(source, target)
zip_file.close()
kill_python_processes()
log_event("upgrade_daemon_complete", "Done Upgrading Serverdaemon. All python processes have been killed", severity="WARNING") | 1.984375 | 2 |
app/schemas/email_sub.py | javi-cortes/linkedon | 0 | 12799926 | from typing import Optional
from pydantic import BaseModel, EmailStr
class EmailSub(BaseModel):
    email: Optional[EmailStr] = None
# search patterns
salary_max: Optional[int] = 0
salary_min: Optional[int] = 0
class Config:
orm_mode = True
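# Usage sketch (the address and salary bounds are made-up values):
#
#   sub = EmailSub(email="user@example.com", salary_min=30000, salary_max=60000)
#   sub.dict()  # {'email': 'user@example.com', 'salary_max': 60000, 'salary_min': 30000}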
| 2.53125 | 3 |
goatools/ratio.py | ezequieljsosa/goatools | 0 | 12799927 | <reponame>ezequieljsosa/goatools
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
__copyright__ = "Copyright (C) 2010-2016, <NAME> al., All rights reserved."
__author__ = "various"
from collections import defaultdict, Counter
def count_terms(geneset, assoc, obo_dag):
"""count the number of terms in the study group
"""
term_cnt = Counter()
for gene in (g for g in geneset if g in assoc):
for x in assoc[gene]:
if x in obo_dag:
term_cnt[obo_dag[x].id] += 1
return term_cnt
def get_terms(desc, geneset, assoc, obo_dag, log):
"""Get the terms in the study group
"""
term2itemids = defaultdict(set)
genes = [g for g in geneset if g in assoc]
for gene in genes:
for x in assoc[gene]:
if x in obo_dag:
term2itemids[obo_dag[x].id].add(gene)
log.write("{N:>6,} out of {M:>6,} {DESC} items found in association\n".format(
DESC=desc, N=len(genes), M=len(geneset)))
return term2itemids
def is_ratio_different(min_ratio, study_go, study_n, pop_go, pop_n):
"""
check if the ratio go /n is different between the study group and
the population
"""
if min_ratio is None:
return True
s = float(study_go) / study_n
p = float(pop_go) / pop_n
if s > p:
return s / p > min_ratio
return p / s > min_ratio
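# Worked example: with min_ratio=2, a study ratio of 10/100 against a population
# ratio of 2/100 gives s/p = 5 > 2, so the ratios are considered different.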
# Copyright (C) 2010-2016, <NAME> al., All rights reserved.
| 2.625 | 3 |
old/cartpole_lib/cartpole_ppo.py | mmolnar0/sgillen_research | 0 | 12799928 | from baselines.common.cmd_util import make_mujoco_env
from baselines.common import tf_util as U
from baselines import logger
from baselines.ppo1 import pposgd_simple
from cartpole.cartpole_sim import cartpole_policy
def train(env_id, num_timesteps, seed=0):
U.make_session(num_cpu=1).__enter__()
def policy_fn(name, ob_space, ac_space):
return cartpole_policy.CartPolePolicy(name=name, ob_space=ob_space, ac_space=ac_space, hid_size=6,
num_hid_layers=2)
env = make_mujoco_env(env_id, seed)
pi = pposgd_simple.learn(env, policy_fn,
max_timesteps=num_timesteps,
timesteps_per_actorbatch=2048,
clip_param=0.2, entcoeff=0.0,
optim_epochs=10, optim_stepsize=3e-4, optim_batchsize=64,
gamma=0.99, lam=0.95, schedule='linear',
)
env.close()
return pi
if __name__ == '__main__':
logger.configure(dir = "./tensorboard_test", format_strs=["tensorboard"] )
pi = train('InvertedPendulum-v2', num_timesteps=5000, seed=0)
| 1.945313 | 2 |
text_game_map_maker/forms.py | eriknyquist/text_map_builder_gui | 0 | 12799929 | from collections import OrderedDict
class AutoFormSettings(object):
def __init__(self):
if not hasattr(self, "spec"):
raise RuntimeError("%s instance has no 'spec' attribute"
% self.__class__.__name__)
for attrname in self.spec.keys():
setattr(self, attrname, None)
class WallSettings(AutoFormSettings):
spec = OrderedDict([
("north", {"type": "bool", "tooltip": "Enable/disable wall to the north"}),
("south", {"type": "bool", "tooltip": "Enable/disable wall to the south"}),
("east", {"type": "bool", "tooltip": "Enable/disable wall to the east"}),
("west", {"type": "bool", "tooltip": "Enable/disable wall to the west"})
])
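# Minimal usage sketch (illustrative only; the GUI normally builds a Qt form from
# the spec dict and fills these attributes in):
#
#   settings = WallSettings()   # creates .north/.south/.east/.west, all set to None
#   settings.north = True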
class DoorSettings(AutoFormSettings):
spec = OrderedDict([
("direction", {"type": "choice", "choices": ["north", "south", "east", "west"],
"tooltip": "Set the direction to this door from currently"
" selected tile"}),
("prefix", {"type": "str", "tooltip": "Set the word that should precede "
"the name of this door, usually 'a' or 'an' (e.g. 'a' "
"wooden door, 'an' oak door)"}),
("name", {"type": "str", "tooltip": "name of this door, e.g. "
"'wooden door' or 'oak door'"}),
("tile_id", {"type": "str", "label": "tile ID", "tooltip": "unique "
"identifier for programmatic access to this door"})
])
class KeypadDoorSettings(AutoFormSettings):
spec = OrderedDict([
("direction", {"type": "choice", "choices": ["north", "south", "east", "west"],
"tooltip": "Set the direction to this door from currently"
" selected tile"}),
("prefix", {"type": "str", "tooltip": "Set the word that should precede "
"the name of this door, usually 'a' or 'an' (e.g. 'a' "
"wooden door, 'an' oak door)"}),
("name", {"type": "str", "tooltip": "name of this door, e.g. "
"'wooden door' or 'oak door'"}),
("tile_id", {"type": "str", "label": "tile ID", "tooltip": "unique "
"identifier for programmatic access to this door"}),
("code", {"type": "int", "label": "keypad code", "tooltip": "Integer "
"code required to unlock this door"}),
("prompt", {"type": "str", "label": "keypad prompt", "tooltip": "String "
"used to prompt player for keypad code entry"})
])
class TileSettings(AutoFormSettings):
spec = OrderedDict([
('tile_id', {'type': 'str', 'label': 'tile ID', "tooltip": "Unique "
"identifier for programmatic access to this tile"}),
('name', {'type': 'str', 'tooltip': "Short string used to describe this "
"tile to the player from afar, e.g. 'a scary room'"}),
('description', {'type':'long_str', 'tooltip': "String used to describe "
"the tile to player when they enter it. Note that this "
"string will always be prefixed with 'You are' during "
"gameplay"}),
('dark', {'type': 'bool', 'tooltip': "If enabled, player will need a "
"light source to see anything on this tile"}),
('first_visit_message', {'type': 'long_str', 'label': 'first visit message',
'tooltip': "String displayed only when player "
"enters this tile for the first time"}),
('first_visit_message_in_dark', {'type': 'bool', 'label': 'show first visit message if dark',
'tooltip': "Enable/disable showing the "
"first visit message if the current tile "
"is dark"}),
('smell_description', {'type': 'str', 'label': 'smell description',
'tooltip': "String displayed when player smells "
"the air on the current tile"}),
('ground_smell_description', {'type': 'str', 'label': 'ground smell description',
'tooltip': "String displayed when player "
"smells the ground on the current tile"}),
('ground_taste_description', {'type': 'str', 'label': 'ground taste description',
'tooltip': "String displayed when player "
"tastes the ground on the current tile"}),
        ('name_from_north', {'type': 'str', 'label': 'name from north',
                             'tooltip': 'String used to describe this tile when'
                             ' player is on the adjacent tile to the north'}),
('name_from_south', {'type': 'str', 'label': 'name from south',
'tooltip': 'String used to describe this tile when'
' player is on the adjacent tile to the south'}),
('name_from_east', {'type': 'str', 'label': 'name from east',
'tooltip': 'String used to describe this tile when'
' player is on the adjacent tile to the east'}),
('name_from_west', {'type': 'str', 'label': 'name from west',
'tooltip': 'String used to describe this tile when'
' player is on the adjacent tile to the west'})
])
| 2.96875 | 3 |
venv/Lib/site-packages/timingsutil/unittests/test_stopwatch.py | avim2809/CameraSiteBlocker | 0 | 12799930 | # encoding: utf-8
import time
import unittest
from timingsutil import Stopwatch
import logging_helper
logging = logging_helper.setup_logging()
class TestConfiguration(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_stopwatch(self):
stopwatch = Stopwatch()
for _ in range(3):
time.sleep(1)
self.assertEqual(round(stopwatch.lap()), 1)
self.assertEqual(round(stopwatch.stop()), 3)
if __name__ == u'__main__':
unittest.main()
| 2.84375 | 3 |
model/items/apx_data.py | mlabru/visil | 0 | 12799931 | # -*- coding: utf-8 -*-
"""
apx_data
maintains the information on the approach procedure dictionary
revision 0.2 2015/nov mlabru
pep8 style conventions
revision 0.1 2014/nov mlabru
initial release (Linux/Python)
"""
# < imports >--------------------------------------------------------------------------------------
# python library
import logging
import sys
# PyQt library
from PyQt5 import QtCore
# FIXME QtXml is no longer supported.
from PyQt5 import QtXml
# model
import model.items.apx_new as model
import model.items.parser_utils as parser
# control
import control.events.events_basic as events
# < class CApxData >-------------------------------------------------------------------------------
class CApxData(dict):
"""
    maintains the information on the approach procedure dictionary
<aproximacao nApx="1">
<descricao>FINAL H3</descricao>
<aerodromo>SBSP</aerodromo>
<pista>17R</pista>
<ils>N</ils>
<aproxperd>N</aproxperd>
<espera>2</espera>
<breakpoint nBrk="1"> ... </breakpoint>
</aproximacao>
"""
# ---------------------------------------------------------------------------------------------
def __init__(self, f_model, f_data=None):
"""
@param f_model: model manager
        @param f_data: approach procedure data
"""
# check input
assert f_model
        # initialize the superclass
        super(CApxData, self).__init__()
        # save the model manager
        self._model = f_model
        # save the event manager
        self._event = f_model.event
        # did we receive data ?
        if f_data is not None:
            # is it a list ?
            if isinstance(f_data, list):
                # create an approach procedure from the list data
                pass  # self.make_apx(f_data)
            # is it an approach procedure ?
            elif isinstance(f_data, CApxData):
                # copy the approach procedure
                pass  # self.copy_apx(f_data)
            # otherwise, it is the pathname of an approach procedure file
            else:
                # load the approach procedure dictionary from a file on disk
                self.load_file(f_data)
# ---------------------------------------------------------------------------------------------
def load_file(self, fs_apx_pn):
"""
        load the approach procedure data from a file on disk
        @param fs_apx_pn: pathname of the file on disk
"""
# check input
assert fs_apx_pn
        # load the approach procedure file
self.parse_apx_xml(fs_apx_pn + ".xml")
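    # Usage sketch (illustrative; the file name is an assumption): the resulting
    # dictionary maps each procedure number (nApx) to a CApxNEW instance parsed
    # from "<name>.xml".
    #
    #   l_apx_data = CApxData(l_model, "tabApx")   # loads and parses tabApx.xml
    #   l_apx = l_apx_data[1]                      # approach procedure with nApx == 1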
# ---------------------------------------------------------------------------------------------
def make_apx(self, fdct_root, fdct_data):
"""
        load the approach procedure data from a dictionary
        @param fdct_root: attributes parsed from the XML root element
        @param fdct_data: approach procedure data dictionary
        @return flag and message
"""
# check input
assert fdct_root is not None
assert fdct_data is not None
        # is this a newton approach procedures file ?
        if "aproximacoes" != fdct_root["tagName"]:
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E01: não é um arquivo de procedimentos de aproximação.")
            # create a quit event
            l_evt = events.CQuit()
            assert l_evt
            # dispatch the event
            self._event.post(l_evt)
            # if not, bail out...
            sys.exit(1)
        # is it a newton file ?
        if "NEWTON" != fdct_root["FORMAT"]:
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E02: não está em um formato aceito.")
            # create a quit event
            l_evt = events.CQuit()
            assert l_evt
            # dispatch the event
            self._event.post(l_evt)
            # if not, bail out...
            sys.exit(1)
        # is it the newton signature ?
        if "1961" != fdct_root["CODE"]:
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E03: não tem a assinatura correta.")
            # create a quit event
            l_evt = events.CQuit()
            assert l_evt
            # dispatch the event
            self._event.post(l_evt)
            # if not, bail out...
            sys.exit(1)
        # check whether there is an identification
        if "nApx" in fdct_data:
            # create the approach procedure
            l_apx = model.CApxNEW(self._model, fdct_data, fdct_root["VERSION"])
            assert l_apx
            # put the approach procedure in the dictionary
            self[fdct_data["nApx"]] = l_apx
        # otherwise, there is no identification
        else:
            # build a message
            ls_msg = "não tem identificação. Aproximação não incluída."
            # logger
            l_log = logging.getLogger("CApxData::make_apx")
            l_log.setLevel(logging.WARNING)
            l_log.warning("<E04: {}".format(ls_msg))
            # bail out with the message...
            return False, ls_msg
        # return Ok
        return True, None
# ---------------------------------------------------------------------------------------------
def parse_apx_xml(self, fs_apx_pn):
"""
        load the approach procedures file
        @param fs_apx_pn: pathname of the file on disk
"""
# check input
assert fs_apx_pn
        # create the QFile for the approach procedures XML file
        l_data_file = QtCore.QFile(fs_apx_pn)
        assert l_data_file is not None
        # open the approach procedures XML file
        l_data_file.open(QtCore.QIODevice.ReadOnly)
        # error opening the file ?
        if not l_data_file.isOpen():
            # logger
            l_log = logging.getLogger("CApxData::parse_apx_xml")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E01: erro na abertura de {}.".format(fs_apx_pn))
            # create a quit event
            l_evt = events.CQuit()
            assert l_evt
            # dispatch the event
            self._event.post(l_evt)
            # terminate the application
            sys.exit(1)
        # create the approach procedure XML document
        # FIXME QtXml is no longer supported.
        l_xdoc_apx = QtXml.QDomDocument("aproximacoes")
        assert l_xdoc_apx is not None
        # error loading the document ?
        if not l_xdoc_apx.setContent(l_data_file):
            # close the file
            l_data_file.close()
            # logger
            l_log = logging.getLogger("CApxData::parse_apx_xml")
            l_log.setLevel(logging.CRITICAL)
            l_log.critical("<E02: falha no parse de {}.".format(fs_apx_pn))
            # create a quit event
            l_evt = events.CQuit()
            assert l_evt
            # dispatch the event
            self._event.post(l_evt)
            # terminate the application
            sys.exit(1)
        # close the file
        l_data_file.close()
        # get the document root element
        l_elem_root = l_xdoc_apx.documentElement()
        assert l_elem_root is not None
        # parse the root element attributes
        ldct_root = parser.parse_root_element(l_elem_root)
        # build a list of the approach procedure elements
        l_node_list = l_elem_root.elementsByTagName("aproximacao")
        # for every node in the list...
        for li_ndx in range(l_node_list.length()):
            # initialize the data dictionary
            ldct_data = {}
            # initialize the breakpoint list
            ldct_data["breakpoints"] = []
            # get a node from the list
            l_element = l_node_list.at(li_ndx).toElement()
            assert l_element is not None
            # read identification if available
            if l_element.hasAttribute("nApx"):
                ldct_data["nApx"] = int(l_element.attribute("nApx"))
            # get the first node of the sub-tree
            l_node = l_element.firstChild()
            assert l_node is not None
            # walk the sub-tree
            while not l_node.isNull():
                # try to convert the node into an element
                l_element = l_node.toElement()
                assert l_element is not None
                # is the node an element ?
                if not l_element.isNull():
                    # parse the element
                    ldct_tmp = parser.parse_aproximacao(l_element)
                    # does it carry a breakpoint ?
                    if "breakpoint" in ldct_tmp:
                        # update the dictionary with the breakpoint
                        ldct_data["breakpoints"].append(ldct_tmp["breakpoint"])
                        # delete this element
                        del ldct_tmp["breakpoint"]
                    # update the data dictionary
                    ldct_data.update(ldct_tmp)
                # next node
                l_node = l_node.nextSibling()
                assert l_node is not None
            # load the approach procedure data from the dictionary
            self.make_apx(ldct_root, ldct_data)
# ---------------------------------------------------------------------------------------------
def save2disk(self, fs_apx_pn=None):
"""
        save the approach procedure data to a file on disk
        @param fs_apx_pn: pathname of the file to save to
        @return flag and message
        """
        # return code
        lv_ok = True
        # message
        ls_msg = "save Ok"
        # return flag and message
        return lv_ok, ls_msg
# < the end >--------------------------------------------------------------------------------------
| 1.914063 | 2 |
autograd_manipulation/sim.py | dawsonc/a_tale_of_two_gradients | 2 | 12799932 | <reponame>dawsonc/a_tale_of_two_gradients<filename>autograd_manipulation/sim.py
"""Automatically-differentiable manipulation simulation engine using JAX"""
import jax.numpy as jnp
import jax
@jax.jit
def box_finger_signed_distance(box_pose, finger_pose, box_size):
"""Compute the signed distance from the box to the finger
args:
box_pose: current (x, z, theta) state of the box
finger_pose: current (x, z) state of the finger
box_size: side length of box
returns:
float signed distance
"""
# Credit to this stackoverflow answer for the inspiration for this code:
# stackoverflow.com/questions/30545052/calculate-signed-distance-between-point-and-
# rectangle
# First transform the finger (x, z) into the box frame
p_WF = finger_pose
p_WB = box_pose[:2]
theta_B = box_pose[2]
p_BF_W = p_WF - p_WB
# Rotate p_BF_W by -theta about the z axis to get position in box frame
R_WB = jnp.array(
[[jnp.cos(theta_B), -jnp.sin(theta_B)], [jnp.sin(theta_B), jnp.cos(theta_B)]]
)
R_BW = R_WB.T
p_BF = R_BW @ p_BF_W
# Now get the signed distance
x_dist = jnp.maximum(-(p_BF[0] + box_size / 2.0), p_BF[0] - box_size / 2.0)
z_dist = jnp.maximum(-(p_BF[1] + box_size / 2.0), p_BF[1] - box_size / 2.0)
# phi = signed distance.
phi = jnp.minimum(0.0, jnp.maximum(x_dist, z_dist))
phi = phi + jnp.linalg.norm(
jnp.maximum(jnp.array([1e-3, 1e-3]), jnp.array([x_dist, z_dist]))
)
return phi
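# Quick sanity sketch (values are illustrative assumptions, not from the original):
# for an axis-aligned unit box at the origin, a finger at the box centre should give
# a negative (penetrating) distance and a finger well outside a positive one.
#
#   box_pose = jnp.array([0.0, 0.0, 0.0])                                # x, z, theta
#   box_finger_signed_distance(box_pose, jnp.array([0.0, 0.0]), 1.0)     # approx -0.5 (inside)
#   box_finger_signed_distance(box_pose, jnp.array([2.0, 0.0]), 1.0)     # approx +1.5 (outside)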
@jax.jit
def rotation_matrix(theta):
"""Return the 2D rotation matrix for angle theta"""
return jnp.array(
[[jnp.cos(theta), -jnp.sin(theta)], [jnp.sin(theta), jnp.cos(theta)]]
)
@jax.jit
def calc_finger_ground_force(finger_state, mu_d, c, psi_s, contact_k, contact_d):
"""Compute the contact force between a finger and the ground.
args:
finger_state: current (x, z, theta, vx, vz, omega) state of the box
mu_d: coefficient of friction between box and ground while slipping
c: coefficient of tangential velocity in determining sticking friction
psi_s: tangential velocity where slipping begins
contact_k: spring constant of contact
contact_d: damping constant of contact
returns:
contact force in x and z
"""
# Get the position and velocity of the finger in the world frame
p_WF = finger_state[:2]
v_WF = finger_state[2:]
# Get penetration into ground
phi_finger_ground = jnp.minimum(jnp.zeros(1), p_WF[1])
# Get the contact forces. Approximate ground force as a damped spring, as in
# the simplified friction model from eq 21 and 22 in
# https://arxiv.org/pdf/2109.05143.pdf, but with damping.
normal_velocity = v_WF[1]
normal_force = -contact_k * phi_finger_ground
normal_force = normal_force - contact_d * normal_velocity * (phi_finger_ground < 0)
tangential_velocity = v_WF[0]
sticking_mask = jnp.abs(tangential_velocity) <= psi_s
slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity)
mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d
tangent_force = -mu * normal_force
contact_force = jnp.array([tangent_force, normal_force]).reshape(2)
return contact_force
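# The friction law above, written out (a restatement of the code, with the damping
# term added to the cited model; psi_s = mu_d / c as set by the step functions below):
#   f_n = -k * phi - d * v_n * [phi < 0]              (normal force, phi <= 0 means penetration)
#   mu  = c * v_t             if |v_t| <= psi_s       (sticking regime)
#   mu  = mu_d * sign(v_t)    otherwise               (slipping regime)
#   f_t = -mu * f_n                                   (tangential friction force)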
@jax.jit
def calc_box_ground_wrench(box_state, box_size, mu_d, c, psi_s, contact_k, contact_d):
"""Compute the contact wrench between the box and the ground.
args:
box_state: current (x, z, theta, vx, vz, omega) state of the box
box_size: float indicating the side length of the box
mu_d: coefficient of friction between box and ground while slipping
c: coefficient of tangential velocity in determining sticking friction
psi_s: tangential velocity where slipping begins
contact_k: spring constant of contact
contact_d: damping constant of contact
returns:
contact wrench in x, z, and theta.
"""
# Start by finding any box corner points that intersect the ground at z = 0
half_size = box_size / 2.0
p_BC = jnp.array(
[
[-half_size, half_size], # top left
[half_size, half_size], # top right
[-half_size, -half_size], # bottom left
[half_size, -half_size], # bottom right
]
) # corner points in box frame
# Transform into world frame
R_WB = rotation_matrix(box_state[2])
p_BC_W = (R_WB @ p_BC.T).T
p_WC = p_BC_W + jnp.tile(box_state[:2], [4, 1])
# Also find the velocities of each corner point
r = jnp.sqrt(2) * half_size
v_BC = (
box_state[5]
* r
* jnp.array(
[
[-1, -1], # top left
[-1, 1], # top right
[1, -1], # bottom left
[1, 1], # bottom right
]
)
) # corner point velocities in box frame
# Transform to world frame
v_WC = (R_WB @ v_BC.T).T + jnp.tile(box_state[3:5], [4, 1])
# Find any that have negative z: min(0, signed distance)
phi_corner_ground = jnp.minimum(jnp.zeros(4), p_WC[:, 1])
# For each corner, sum up the forces and torques due to penetration with the ground
contact_wrench_on_box = jnp.zeros(3)
for i in range(4):
# Get the friction force. Approximate ground force as a damped spring, as in
# the simplified friction model from eq 21 and 22 in
# https://arxiv.org/pdf/2109.05143.pdf, but with damping.
normal_velocity = v_WC[i, 1]
normal_force = -contact_k * phi_corner_ground[i]
normal_force = normal_force - contact_d * normal_velocity * (
phi_corner_ground[i] < 0
)
tangential_velocity = v_WC[i, 0]
sticking_mask = jnp.abs(tangential_velocity) <= psi_s
slipping_mask = jnp.logical_not(sticking_mask) * jnp.sign(tangential_velocity)
mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d
tangent_force = -mu * normal_force
contact_force = jnp.array([tangent_force, normal_force])
# Add the friction force to the box
contact_wrench_on_box = contact_wrench_on_box.at[:2].add(contact_force)
# Also add the torque from this interaction
contact_wrench_on_box = contact_wrench_on_box.at[2].add(
jnp.cross(p_BC_W[i, :], contact_force)
)
return contact_wrench_on_box
@jax.jit
def calc_box_finger_wrench(
box_state, finger_state, box_size, mu_d, c, psi_s, contact_k, contact_d
):
"""Compute the contact wrench between the box and the ground.
args:
box_state: current (x, z, theta, vx, vz, omega) state of the box
finger_state: current (x, z, vx, vz) state of the finger
box_size: float indicating the side length of the box
mu_d: coefficient of friction between box and ground while slipping
c: coefficient of tangential velocity in determining sticking friction
psi_s: tangential velocity where slipping begins
contact_k: spring constant of contact
contact_d: damping constant of contact
returns:
Tuple of
- contact wrench on box in x, z, and theta.
- contact force on finger in x and z.
"""
# Contact point is just the finger point in the box frame
p_WF = finger_state[:2]
p_WB = box_state[:2]
p_BF_W = p_WF - p_WB
R_WB = rotation_matrix(box_state[2])
p_BF = R_WB.T @ p_BF_W
# Get velocity of the finger in box frame
v_WF = finger_state[2:]
v_WB = box_state[3:5]
v_BF_W = v_WF - v_WB
v_BF = R_WB.T @ v_BF_W
# Get velocity of contact point in box frame
v_Bcontact = box_state[5] * jnp.array([[0, -1], [1, 0]]) @ p_BF
# Get velocity of finger relative to contact pt in box frame
v_contactF_B = v_BF - v_Bcontact
# Get the normal vector of the contact in the box frame
right_or_up = p_BF[1] > -p_BF[0]
left_or_up = p_BF[1] > p_BF[0]
normal_right = jnp.logical_and(right_or_up, jnp.logical_not(left_or_up))
normal_up = jnp.logical_and(right_or_up, left_or_up)
normal_left = jnp.logical_and(jnp.logical_not(right_or_up), left_or_up)
normal_down = jnp.logical_and(
jnp.logical_not(right_or_up), jnp.logical_not(left_or_up)
)
normal = normal_right * jnp.array([1.0, 0.0])
normal += normal_left * jnp.array([-1.0, 0.0])
normal += normal_up * jnp.array([0.0, 1.0])
normal += normal_down * jnp.array([0.0, -1.0])
# Get the tangent vector, which is orthogonal to the normal vector
# and points in the same direction as the relative velocity
tangential_velocity = (
v_contactF_B - v_contactF_B.dot(normal) * normal
) # relative velocity in tangent direction
normal_velocity = v_contactF_B.dot(normal) # scalar, along the normal vector
tangent = tangential_velocity / (jnp.linalg.norm(tangential_velocity + 1e-3) + 1e-3)
# Get signed distance
phi_finger_box = box_finger_signed_distance(
box_state[:3], finger_state[:2], box_size
)
# Clip to only consider negative values
phi_finger_box = jnp.minimum(0, phi_finger_box)
# Use the same simplified friction model as used for ground contact
normal_force = -contact_k * phi_finger_box # scalar, in normal direction
normal_force = normal_force - contact_d * normal_velocity * (phi_finger_box < 0)
sticking_mask = jnp.linalg.norm(tangential_velocity + 1e-3) <= psi_s
slipping_mask = jnp.logical_not(sticking_mask)
mu = sticking_mask * c * tangential_velocity + slipping_mask * mu_d * tangent
tangent_force = -mu * normal_force # vector!
# Sum up the contact forces in the box frame
contact_force_B = normal_force * normal + tangent_force
# transform into the world frame
contact_force_W = R_WB @ contact_force_B
# Add the contact force to the box and finger
box_wrench = jnp.zeros(3)
box_wrench = box_wrench.at[:2].add(-contact_force_W)
box_wrench = box_wrench.at[2].add(jnp.cross(p_BF_W, -contact_force_W))
finger_forces = contact_force_W
return box_wrench, finger_forces
@jax.jit
def box_single_finger_step(
box_state,
finger_state,
finger_state_desired,
finger_control_stiffness,
):
"""Compute a single discrete-time update for box manipulation with one finger, using
the penalty method for contact modelling with a simplified Coulomb friction model
args:
box_state: current (x, z, theta, vx, vz, omega) state of the box
finger_state: current (x, z, vx, vz) state of the finger
finger_state_desired: desired (x_d, z_d) state of the finger
finger_control_stiffness: the parameter for the finger stiffness control
returns:
new_box_state, new_finger_state
"""
######################################
# define parameters of the simulation
######################################
# Box properties
box_mass_kg = 1.0
box_side_m = 0.5
box_inertia = 1 / 6 * box_mass_kg * box_side_m ** 2
# Finger properties
finger_mass_kg = 0.1
finger_control_damping = 2
# Contact properties
mu_d = 0.7
c = 2.0
psi_s = mu_d / c
contact_k = 1000
contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k) # critical damping
# General properties
g = 9.81
dt = 0.001 # seconds per step
######################################
# Get forces on each body
######################################
finger_forces = jnp.zeros(2)
box_forces = jnp.zeros(3)
# Gravitational force on each body
finger_forces = finger_forces.at[1].add(-g * finger_mass_kg)
box_forces = box_forces.at[1].add(-g * box_mass_kg)
# Control forces on finger
finger_pos_error = finger_state_desired - finger_state[:2]
finger_vel_error = -finger_state[2:]
finger_forces = finger_forces + finger_control_stiffness * finger_pos_error
finger_forces = finger_forces + finger_control_damping * finger_vel_error
# Contact forces from the ground.
box_forces += calc_box_ground_wrench(
box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
)
finger_forces += calc_finger_ground_force(
finger_state, mu_d, c, psi_s, contact_k, contact_d
)
# Contact forces between box and finger
finger_wrench_on_box, box_force_on_finger = calc_box_finger_wrench(
box_state, finger_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
)
box_forces += finger_wrench_on_box
finger_forces += box_force_on_finger
######################################
# Numerically integrate
######################################
# Build the derivatives matrix
box_state_dot = jnp.zeros(6)
finger_state_dot = jnp.zeros(4)
# Velocities
box_state_dot = box_state_dot.at[:3].add(box_state[3:])
finger_state_dot = finger_state_dot.at[:2].add(finger_state[2:])
# Forces
box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg)
finger_state_dot = finger_state_dot.at[2:].add(finger_forces / finger_mass_kg)
# Torques
box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia)
    # Integrate
new_box_state = box_state + dt * box_state_dot
new_finger_state = finger_state + dt * finger_state_dot
return new_box_state, new_finger_state
def box_single_finger_simulate(
box_state_initial,
finger_state_initial,
finger_state_desired_trace,
finger_control_stiffness,
N_steps,
):
"""Simulate the evolution of the box-finger system with one finger, starting at the
given initial states and applying the specified control inputs
args:
box_state_initial: initial (x, z, theta, vx, vz, omega) state of the box
finger_state_initial: initial (x, z, vx, vz) state of the finger
finger_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of the
finger over time
finger_control_stiffness: the parameter for the finger stiffness control
N_steps: int specifying the number of discrete time steps to simulate
returns:
box_state_trace, finger_state_trace
"""
# Create arrays to store simulation traces
box_state_trace = jnp.zeros((N_steps, 6))
finger_state_trace = jnp.zeros((N_steps, 4))
# Store the initial conditions
box_state_trace = box_state_trace.at[0, :].set(box_state_initial)
finger_state_trace = finger_state_trace.at[0, :].set(finger_state_initial)
# Simulate
for i in range(1, N_steps):
        # get current state
current_box_state = box_state_trace[i - 1, :]
current_finger_state = finger_state_trace[i - 1, :]
current_finger_state_desired = finger_state_desired_trace[i - 1]
# get next state
next_box_state, next_finger_state = box_single_finger_step(
current_box_state,
current_finger_state,
current_finger_state_desired,
finger_control_stiffness,
)
# Save
box_state_trace = box_state_trace.at[i, :].set(next_box_state)
finger_state_trace = finger_state_trace.at[i, :].set(next_finger_state)
# Return the simulated values
return box_state_trace, finger_state_trace
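# Usage sketch (all numbers are illustrative assumptions): simulate one second of a
# single-finger push towards the box with a constant finger set-point.
#
#   N = 1000                                                  # 1000 steps at dt = 0.001 s
#   box0 = jnp.array([0.0, 0.25, 0.0, 0.0, 0.0, 0.0])         # box resting on the ground
#   finger0 = jnp.array([-0.5, 0.25, 0.0, 0.0])               # finger to the left of the box
#   desired = jnp.tile(jnp.array([0.0, 0.25]), (N, 1))        # push the finger towards the box
#   box_trace, finger_trace = box_single_finger_simulate(box0, finger0, desired, 30.0, N)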
@jax.jit
def box_two_finger_step(
box_state,
finger1_state,
finger1_state_desired,
finger2_state,
finger2_state_desired,
finger_control_stiffness,
):
"""Compute a single discrete-time update for box manipulation with one finger, using
the penalty method for contact modelling with a simplified Coulomb friction model
args:
box_state: current (x, z, theta, vx, vz, omega) state of the box
finger1_state: current (x, z, vx, vz) state of the first finger
finger1_state_desired: desired (x_d, z_d) state of the first finger
finger2_state: current (x, z, vx, vz) state of the second finger
finger2_state_desired: desired (x_d, z_d) state of the second finger
finger_control_stiffness: the parameter for the finger stiffness control
returns:
new_box_state, new_finger_state
"""
######################################
# define parameters of the simulation
######################################
# Box properties
box_mass_kg = 1.0
box_side_m = 0.5
box_inertia = 1 / 6 * box_mass_kg * box_side_m ** 2
# Finger properties
finger_mass_kg = 0.1
finger_control_damping = 2
# Contact properties
mu_d = 0.7
c = 2.0
psi_s = mu_d / c
contact_k = 1000
contact_d = 2 * jnp.sqrt(box_mass_kg * contact_k) # critical damping
# General properties
g = 9.81
dt = 0.001 # seconds per step
######################################
# Get forces on each body
######################################
finger1_forces = jnp.zeros(2)
finger2_forces = jnp.zeros(2)
box_forces = jnp.zeros(3)
# Gravitational force on each body
finger1_forces = finger1_forces.at[1].add(-g * finger_mass_kg)
finger2_forces = finger2_forces.at[1].add(-g * finger_mass_kg)
box_forces = box_forces.at[1].add(-g * box_mass_kg)
# Control forces on fingers
finger1_pos_error = finger1_state_desired - finger1_state[:2]
finger1_vel_error = -finger1_state[2:]
finger1_forces = finger1_forces + finger_control_stiffness * finger1_pos_error
finger1_forces = finger1_forces + finger_control_damping * finger1_vel_error
finger2_pos_error = finger2_state_desired - finger2_state[:2]
finger2_vel_error = -finger2_state[2:]
finger2_forces = finger2_forces + finger_control_stiffness * finger2_pos_error
finger2_forces = finger2_forces + finger_control_damping * finger2_vel_error
# Contact forces from ground.
box_forces += calc_box_ground_wrench(
box_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
)
finger1_forces += calc_finger_ground_force(
finger1_state, mu_d, c, psi_s, contact_k, contact_d
)
finger2_forces += calc_finger_ground_force(
finger2_state, mu_d, c, psi_s, contact_k, contact_d
)
# Contact forces between box and fingers
finger1_wrench_on_box, box_force_on_finger1 = calc_box_finger_wrench(
box_state, finger1_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
)
box_forces += finger1_wrench_on_box
finger1_forces += box_force_on_finger1
finger2_wrench_on_box, box_force_on_finger2 = calc_box_finger_wrench(
box_state, finger2_state, box_side_m, mu_d, c, psi_s, contact_k, contact_d
)
box_forces += finger2_wrench_on_box
finger2_forces += box_force_on_finger2
######################################
# Numerically integrate
######################################
# Build the derivatives matrix
box_state_dot = jnp.zeros(6)
finger1_state_dot = jnp.zeros(4)
finger2_state_dot = jnp.zeros(4)
# Velocities
box_state_dot = box_state_dot.at[:3].add(box_state[3:])
finger1_state_dot = finger1_state_dot.at[:2].add(finger1_state[2:])
finger2_state_dot = finger2_state_dot.at[:2].add(finger2_state[2:])
# Forces
box_state_dot = box_state_dot.at[3:5].add(box_forces[:2] / box_mass_kg)
finger1_state_dot = finger1_state_dot.at[2:].add(finger1_forces / finger_mass_kg)
finger2_state_dot = finger2_state_dot.at[2:].add(finger2_forces / finger_mass_kg)
# Torques
box_state_dot = box_state_dot.at[5].add(box_forces[2] / box_inertia)
    # Integrate
new_box_state = box_state + dt * box_state_dot
new_finger1_state = finger1_state + dt * finger1_state_dot
new_finger2_state = finger2_state + dt * finger2_state_dot
return new_box_state, new_finger1_state, new_finger2_state
def box_two_finger_simulate(
box_state_initial,
finger1_state_initial,
finger1_state_desired_trace,
finger2_state_initial,
finger2_state_desired_trace,
finger_control_stiffness,
N_steps,
):
"""Simulate the evolution of the box-finger system with one finger, starting at the
given initial states and applying the specified control inputs
args:
box_state_initial: initial (x, z, theta, vx, vz, omega) state of the box
finger1_state_initial: initial (x, z, vx, vz) state of the finger
finger1_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of
the finger over time
finger2_state_initial: initial (x, z, vx, vz) state of the finger
finger2_state_desired_trace: N_steps x 2 array of desired (x_d, z_d) state of
the finger over time
finger_control_stiffness: the parameter for the finger stiffness control
N_steps: int specifying the number of discrete time steps to simulate
returns:
box_state_trace, finger_state_trace
"""
# Create arrays to store simulation traces
box_state_trace = jnp.zeros((N_steps, 6))
finger1_state_trace = jnp.zeros((N_steps, 4))
finger2_state_trace = jnp.zeros((N_steps, 4))
# Store the initial conditions
box_state_trace = box_state_trace.at[0, :].set(box_state_initial)
finger1_state_trace = finger1_state_trace.at[0, :].set(finger1_state_initial)
finger2_state_trace = finger2_state_trace.at[0, :].set(finger2_state_initial)
# Simulate
for i in range(1, N_steps):
        # get current state
current_box_state = box_state_trace[i - 1, :]
current_finger1_state = finger1_state_trace[i - 1, :]
current_finger1_state_desired = finger1_state_desired_trace[i - 1]
current_finger2_state = finger2_state_trace[i - 1, :]
current_finger2_state_desired = finger2_state_desired_trace[i - 1]
# get next state
next_box_state, next_finger1_state, next_finger2_state = box_two_finger_step(
current_box_state,
current_finger1_state,
current_finger1_state_desired,
current_finger2_state,
current_finger2_state_desired,
finger_control_stiffness,
)
# Save
box_state_trace = box_state_trace.at[i, :].set(next_box_state)
finger1_state_trace = finger1_state_trace.at[i, :].set(next_finger1_state)
finger2_state_trace = finger2_state_trace.at[i, :].set(next_finger2_state)
# Return the simulated values
return box_state_trace, finger1_state_trace, finger2_state_trace
| 2.53125 | 3 |
prodigy_backend/classes/migrations/0004_auto_20210118_0351.py | Savimaster/Prodigy | 4 | 12799933 | # Generated by Django 3.1.5 on 2021-01-18 03:51
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classes', '0003_auto_20210117_2058'),
]
operations = [
migrations.AlterField(
model_name='class',
name='cost',
field=models.DecimalField(decimal_places=2, max_digits=8),
),
migrations.AlterField(
model_name='review',
name='rating',
field=models.DecimalField(decimal_places=1, max_digits=2, validators=[django.core.validators.MaxValueValidator(5), django.core.validators.MinValueValidator(0)]),
),
]
| 1.59375 | 2 |
tests/bugs/core_6108_test.py | reevespaul/firebird-qa | 0 | 12799934 | #coding:utf-8
#
# id: bugs.core_6108
# title: Regression: FB3 throws "Datatypes are not comparable in expression" in procedure parameters
# decription:
# Confirmed bug on 4.0.0.1567; 3.0.5.33160.
# Works fine on 4.0.0.1573; 3.0.x is still affected
#
# tracker_id: CORE-6108
# min_versions: ['2.5']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = []
init_script_1 = """"""
db_1 = db_factory(sql_dialect=3, init=init_script_1)
# test_script_1
#---
#
# proc_ddl='''
# create or alter procedure test_proc ( a_dts timestamp) returns ( o_dts timestamp) as
# begin
# o_dts = a_dts;
# suspend;
# end
# '''
#
# db_conn.execute_immediate( proc_ddl )
# db_conn.commit()
#
# cur=db_conn.cursor()
#
# sttm="select o_dts from test_proc('2019-'|| COALESCE( ?, 1) ||'-01' )"
# cur.execute( sttm, ( 3, ) )
# for r in cur:
# print(r[0])
# cur.close()
#
#
#---
#act_1 = python_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
2019-03-01 00:00:00
"""
@pytest.mark.version('>=2.5')
@pytest.mark.xfail
def test_1(db_1):
pytest.fail("Test not IMPLEMENTED")
| 1.46875 | 1 |
test_sample_policies.py | donghun2018/adclick-simulator-v2 | 0 | 12799935 | <reponame>donghun2018/adclick-simulator-v2
"""
Sample bid policy testing script
for ORF418 Spring 2019 course
"""
import numpy as np
import pandas as pd
def simulator_setup_1day():
"""
This is a tool to set up a simulator and problem definition (state set, action set, and attribute set)
:return: simulator, state set, action set, attribute set
"""
from ssa_sim_v2.simulator.modules.auctions.auctions_base_module import AuctionsPoissonModule
from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_base_module import \
AuctionAttributesModule
from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_module import VickreyAuctionModule
from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_base_module import \
CompetitiveClickProbabilityTwoClassGeometricModule
from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_base_module import \
CompetitiveClicksBinomialModule
from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_base_module import ConversionRateFlatModule
from ssa_sim_v2.simulator.modules.conversions.conversions_base_module import ConversionsBinomialModule
from ssa_sim_v2.simulator.modules.revenue.revenue_base_module import RevenueGammaNoiseModule
from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_base_module import CompetitiveCPCVickreyModule
from ssa_sim_v2.simulator.modules.auctions.auctions_date_how_module import AuctionsDateHoWModule
from ssa_sim_v2.simulator.modules.auction_attributes.auction_attributes_date_how_module import \
AuctionAttributesDateHoWModule
from ssa_sim_v2.simulator.modules.vickrey_auction.vickrey_auction_date_how_module import \
VickreyAuctionDateHoWModule
from ssa_sim_v2.simulator.modules.competitive_click_probability.competitive_click_probability_date_how_module import \
CompetitiveClickProbabilityDateHoWModule
from ssa_sim_v2.simulator.modules.competitive_clicks.competitive_clicks_date_how_module import \
CompetitiveClicksDateHoWModule
from ssa_sim_v2.simulator.modules.conversion_rate.conversion_rate_date_how_module import \
ConversionRateDateHoWModule
from ssa_sim_v2.simulator.modules.conversions.conversions_date_how_module import ConversionsDateHoWModule
from ssa_sim_v2.simulator.modules.revenue.revenue_date_how_module import RevenueDateHoWModule
from ssa_sim_v2.simulator.modules.competitive_cpc.competitive_cpc_date_how_module import \
CompetitiveCpcDateHoWModule
from ssa_sim_v2.simulator.competitive_date_how_simulator import CompetitiveDateHowSimulator
from ssa_sim_v2.simulator.state import StateSet
from ssa_sim_v2.simulator.action import ActionSet
from ssa_sim_v2.simulator.attribute import AttrSet
seed = 1111
date_from = "2018-01-01"
date_to = "2018-01-02"
tmp_df = pd.DataFrame(np.array(range(24)), columns=["hour_of_day"])
tmp_df["key"] = 1
dates = pd.DataFrame(pd.date_range(date_from, date_to), columns=["date"])
dates_list = dates["date"].tolist()
dates["key"] = 1
dates = pd.merge(dates, tmp_df, on=["key"], how="left") # columns: ['date', 'hour_of_day']
dates["hour_of_week"] = pd.to_datetime(dates["date"]).dt.dayofweek * 24 + dates["hour_of_day"]
dates["date"] = dates["date"].dt.strftime("%Y-%m-%d")
dates = dates[["date", "hour_of_week"]]
# Initialize state set
state_set = StateSet(["date", "how"], ["discrete", "discrete"],
[dates_list, list(range(168))])
# Initialize attribute set
names = ['gender', 'age']
vals = {'gender': ['M', 'F', 'U'],
'age': ['0-19', '20-29', '30-39', '40-49', '50-59', '60-69', '70-*']}
attr_set = AttrSet(names, vals)
attr_combinations = attr_set.get_all_attr_tuples()
# Initialize action set
action_set = ActionSet(attr_set, max_bid=9.99, min_bid=0.01, max_mod=9.0, min_mod=0.1)
def initialize_priors(params, base_class):
attr_combinations = list(attr_set.get_all_attr_tuples())
priors = dates.copy()
priors.loc[:, "prior"] = pd.Series([dict.fromkeys(attr_combinations, params)] * len(priors))
base_classes = dates.copy()
base_classes.loc[:, "base_class"] = base_class
return priors, base_classes
# Initialize auctions priors
module_class = AuctionsPoissonModule
Params = module_class.Params
params = Params(auctions=100)
priors = dates.copy()
priors.loc[:, "prior"] = [{(): params}] * len(priors)
base_classes = dates.copy()
base_classes.loc[:, "base_class"] = module_class
auctions_priors = priors
auctions_base_classes = base_classes
# Initialize auction_attributes priors
module_class = AuctionAttributesModule
Params = module_class.Params
params = Params(p=1.0) # Probabilities are normalized
auction_attributes_priors, auction_attributes_base_classes \
= initialize_priors(params, module_class)
# Initialize vickrey_auction priors
module_class = VickreyAuctionModule
Params = module_class.Params
params = Params()
vickrey_auction_priors, vickrey_auction_base_classes \
= initialize_priors(params, module_class)
# Initialize competitive_click_probability priors
module_class = CompetitiveClickProbabilityTwoClassGeometricModule
Params = module_class.Params
params = Params(n_pos=8, p=0.5, q=0.5, r_11=0.6, r_12=0.4, r_2=0.5)
competitive_click_probability_priors, competitive_click_probability_base_classes \
= initialize_priors(params, module_class)
# Initialize competitive_clicks priors
module_class = CompetitiveClicksBinomialModule
Params = module_class.Params
params = Params(noise_level=0.0, noise_type="multiplicative")
competitive_clicks_priors, competitive_clicks_base_classes \
= initialize_priors(params, module_class)
# Initialize conversion_rate priors
module_class = ConversionRateFlatModule
Params = module_class.Params
params = Params(cvr=0.02, noise_level=0.0, noise_type="multiplicative")
conversion_rate_priors, conversion_rate_base_classes \
= initialize_priors(params, module_class)
# Initialize conversions priors
module_class = ConversionsBinomialModule
Params = module_class.Params
params = Params(noise_level=0.0, noise_type="multiplicative")
conversions_priors, conversions_base_classes \
= initialize_priors(params, module_class)
# Initialize revenue priors
module_class = RevenueGammaNoiseModule
Params = module_class.Params
params = Params(avg_rpv=300.0, noise_level=100.0)
revenue_priors, revenue_base_classes = initialize_priors(
params, module_class)
# Initialize competitive_cpc priors
module_class = CompetitiveCPCVickreyModule
Params = module_class.Params
params = Params(n_pos=8, fee=0.01)
competitive_cpc_priors, competitive_cpc_base_classes = \
initialize_priors(params, module_class)
# Module setup for the simulator
mods = \
{"auctions": AuctionsDateHoWModule(auctions_priors,
auctions_base_classes,
seed),
"auction_attributes": AuctionAttributesDateHoWModule(auction_attributes_priors,
auction_attributes_base_classes,
seed),
"vickrey_auction": VickreyAuctionDateHoWModule(vickrey_auction_priors,
vickrey_auction_base_classes,
seed),
"competitive_click_probability": CompetitiveClickProbabilityDateHoWModule(
competitive_click_probability_priors,
competitive_click_probability_base_classes,
seed),
"competitive_clicks": CompetitiveClicksDateHoWModule(competitive_clicks_priors,
competitive_clicks_base_classes,
seed),
"conversion_rate": ConversionRateDateHoWModule(conversion_rate_priors,
conversion_rate_base_classes,
seed),
"conversions": ConversionsDateHoWModule(conversions_priors,
conversions_base_classes,
seed),
"revenue": RevenueDateHoWModule(revenue_priors,
revenue_base_classes,
seed),
"competitive_cpc": CompetitiveCpcDateHoWModule(competitive_cpc_priors,
competitive_cpc_base_classes,
seed)
}
simulator = CompetitiveDateHowSimulator(state_set, action_set, attr_set,
mods, date_from, date_to, income_share=1.0)
return simulator, state_set, action_set, attr_set
if __name__ == "__main__":
"""
This script shows how the bidding policies will interact with the simulator
The codes are written out for easier understanding and convenient debugging for your policies
"""
# import policy classes from files
from policy2019 import Policy2019
from policy_thompson import PolicyThompsonSamplingSI
# handy function that initializes all for you
simulator, state_set, action_set, attr_set = simulator_setup_1day()
# build "policies" list that contains all bidding policies
policy1 = Policy2019(state_set, action_set, attr_set, seed=1234) # this policy is a bare-bone sample policy that bids randomly without learning
policy2 = PolicyThompsonSamplingSI(state_set, action_set, attr_set, seed=1234)
policy2.initialize({"stp": {"cvr_default": 0.02, "rpv_default": 300.0}}) # this policy is one of production level policies that needs this extra step
policies = []
policies.append(policy1)
policies.append(policy2)
policies.append(Policy2019(state_set, action_set, attr_set, seed=9292)) # adding another policy2019 with different seed on-the-fly
# Simulator will run 24 steps (t=0,1,...,23) (corresponding to 1 simulated day)
T = 24 # note that this particular setup limits T up to 48. T>48 will cause an error.
for t in range(T):
s = simulator.state
print("t={} of {}".format(t, T))
print(" state={}".format(simulator.state))
actions = []
for p in policies:
pol_action = p.act(s) # each policy responds with a bid
actions.append(pol_action)
print(" Actions={}".format(actions))
results = simulator.step(actions)
for ix, p in enumerate(policies):
p.learn(s, results[ix]) # each policy will learn with result
# note that policy in index ix gets result in index ix. The results can be different
| 1.929688 | 2 |
object_classification/batchbald_redux/batchbald.py | YilunZhou/optimal-active-learning | 10 | 12799936 | # AUTOGENERATED! DO NOT EDIT! File to edit: 01_batchbald.ipynb (unless otherwise specified).
__all__ = ['compute_conditional_entropy', 'compute_entropy', 'CandidateBatch', 'get_batchbald_batch', 'get_bald_batch']
# Cell
from dataclasses import dataclass
from typing import List
import torch
import math
from tqdm.auto import tqdm
from toma import toma
from batchbald_redux import joint_entropy
# Cell
def compute_conditional_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = probs_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Conditional Entropy", leave=False)
@toma.execute.chunked(probs_N_K_C, 1024)
def compute(probs_n_K_C, start: int, end: int):
nats_n_K_C = probs_n_K_C * torch.log(probs_n_K_C)
nats_n_K_C[probs_n_K_C ==0] = 0.
entropies_N[start:end].copy_(-torch.sum(nats_n_K_C, dim=(1, 2)) / K)
pbar.update(end - start)
pbar.close()
return entropies_N
def compute_entropy(probs_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = probs_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Entropy", leave=False)
@toma.execute.chunked(probs_N_K_C, 1024)
def compute(probs_n_K_C, start: int, end: int):
mean_probs_n_C = probs_n_K_C.mean(dim=1)
nats_n_C = mean_probs_n_C * torch.log(mean_probs_n_C)
nats_n_C[mean_probs_n_C ==0] = 0.
entropies_N[start:end].copy_(-torch.sum(nats_n_C, dim=1))
pbar.update(end - start)
pbar.close()
return entropies_N
# Internal Cell
# Not publishing these at the moment.
def compute_conditional_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = logits_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Conditional Entropy", leave=False)
@toma.execute.chunked(logits_N_K_C, 1024)
def compute(logits_n_K_C, start: int, end: int):
nats_n_K_C = logits_n_K_C * torch.exp(logits_n_K_C)
entropies_N[start:end].copy_(
-torch.sum(nats_n_K_C, dim=(1, 2)) / K)
pbar.update(end - start)
pbar.close()
return entropies_N
def compute_entropy_from_logits(logits_N_K_C: torch.Tensor) -> torch.Tensor:
N, K, C = logits_N_K_C.shape
entropies_N = torch.empty(N, dtype=torch.double)
pbar = tqdm(total=N, desc="Entropy", leave=False)
@toma.execute.chunked(logits_N_K_C, 1024)
def compute(logits_n_K_C, start: int, end: int):
mean_logits_n_C = torch.logsumexp(logits_n_K_C, dim=1) - math.log(K)
nats_n_C = mean_logits_n_C * torch.exp(mean_logits_n_C)
entropies_N[start:end].copy_(
-torch.sum(nats_n_C, dim=1))
pbar.update(end - start)
pbar.close()
return entropies_N
# Cell
@dataclass
class CandidateBatch:
scores: List[float]
indices: List[int]
def get_batchbald_batch(probs_N_K_C: torch.Tensor,
batch_size: int,
num_samples: int,
dtype=None,
device=None) -> CandidateBatch:
N, K, C = probs_N_K_C.shape
batch_size = min(batch_size, N)
candidate_indices = []
candidate_scores = []
if batch_size == 0:
return CandidateBatch(candidate_scores, candidate_indices)
conditional_entropies_N = compute_conditional_entropy(probs_N_K_C)
batch_joint_entropy = joint_entropy.DynamicJointEntropy(num_samples,
batch_size - 1,
K,
C,
dtype=dtype,
device=device)
# We always keep these on the CPU.
scores_N = torch.empty(N, dtype=torch.double, pin_memory=torch.cuda.is_available())
for i in tqdm(range(batch_size), desc="BatchBALD", leave=False):
if i > 0:
latest_index = candidate_indices[-1]
batch_joint_entropy.add_variables(
probs_N_K_C[latest_index:latest_index + 1])
shared_conditinal_entropies = conditional_entropies_N[
candidate_indices].sum()
batch_joint_entropy.compute_batch(probs_N_K_C,
output_entropies_B=scores_N)
scores_N -= conditional_entropies_N + shared_conditinal_entropies
scores_N[candidate_indices] = -float('inf')
candidate_score, candidate_index = scores_N.max(dim=0)
candidate_indices.append(candidate_index.item())
candidate_scores.append(candidate_score.item())
return CandidateBatch(candidate_scores, candidate_indices)
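# Minimal usage sketch (shapes and numbers are illustrative assumptions): probs_N_K_C
# holds per-point class probabilities for N pool points, K posterior samples and C
# classes; the greedy loop above then builds a batch whose joint score accounts for
# redundancy between the selected points.
#
#   probs = torch.softmax(torch.randn(100, 20, 10), dim=-1).double()
#   batch = get_batchbald_batch(probs, batch_size=5, num_samples=1000)
#   batch.indices, batch.scores    # 5 chosen pool indices and their scores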
# Cell
def get_bald_batch(probs_N_K_C: torch.Tensor,
batch_size: int,
dtype=None,
device=None) -> CandidateBatch:
N, K, C = probs_N_K_C.shape
batch_size = min(batch_size, N)
candidate_indices = []
candidate_scores = []
scores_N = -compute_conditional_entropy(probs_N_K_C)
scores_N += compute_entropy(probs_N_K_C)
    candidate_scores, candidate_indices = torch.topk(scores_N, batch_size)
    return CandidateBatch(candidate_scores.tolist(), candidate_indices.tolist()) | 2.328125 | 2
extra/unused/check_ntis.py | whyjz/CARST | 10 | 12799937 | #!/usr/bin/python
import re;
import subprocess;
cmd="\nfind . -name \"*nti21_cut.grd\"\n";
pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
ntis=pipe.read().split();
pipe.close();
for nti in ntis:
jday=nti[nti.find(".A")+6:nti.find(".A")+9];
vdir=nti[nti.find("/")+1:nti.rfind("/")];
image="data_more/"+nti[nti.rfind("/")+1:nti.find("_cut")]+".grd";
cmd="\ngrdinfo "+nti+"\n";
pipe=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE).stdout;
info=pipe.read().strip();
pipe.close();
zmax=info[re.search("z_max:\s*",info).end(0):re.search("z_max:\s*\S*\s*",info).end(0)].strip();
if zmax != "0":
print(jday+" "+zmax+" "+vdir+" "+image);
#if zmax != "0" and float(zmax) > -0.861:
#print(nti+" "+zmax);
exit();
"""
exit();
"""
| 2.21875 | 2 |
src/__init__.py | btrnt/butternut_backend | 0 | 12799938 | <reponame>btrnt/butternut_backend<filename>src/__init__.py
from .gltr import *
| 1.179688 | 1 |
main.py | aishmittal/Real-time-Face-Recognition-based-Surveillance-System | 10 | 12799939 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import os
import cv2
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
import datetime
import string
import random
import shutil
from time import gmtime, strftime, sleep
import sqlite3
# import imageUpload.py for uploading captured images to cloudinary
import imageUpload as imup
# import MSFaceAPI.py for msface api calls
import MSFaceAPI as msface
large_text_size = 22
medium_text_size = 14
small_text_size = 10
base_path = os.path.dirname(os.path.realpath(__file__))
dataset_path = os.path.join(base_path,'dataset')
unknown_user_path=os.path.join(base_path,'unknowns')
tmp_path = os.path.join(base_path,'tmp')
placeholder_image = os.path.join(base_path,'placeholder_600x400.svg')
db_path = os.path.join(base_path,'users.db')
cloudinary_dataset = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/dataset'
cloudinary_tmp = 'http://res.cloudinary.com/aish/image/upload/v1488457817/RTFRSS/tmp'
current_userid = 0
current_userfname = ''
detection_interval=10000
capture_interval=30
camera_port = 0
font1 = QFont('Helvetica', small_text_size)
font2 = QFont('Helvetica', medium_text_size)
font3 = QFont('Helvetica', large_text_size)
conn = sqlite3.connect(db_path)
cursor = conn.cursor()
TABLE_NAME="users"
cascPath = 'haarcascade_frontalface_default.xml'
faceCascade = cv2.CascadeClassifier(cascPath)
# function to generate a random id for image file name
def id_generator(size=20, chars=string.ascii_lowercase + string.digits + string.ascii_uppercase):
return ''.join(random.choice(chars) for _ in range(size))
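# e.g. id_generator() might return a 20-character name such as 'aB3kQ9ZxLmPqRsTuVwYd'
# (illustrative value); it is used below to build unique '<name>.jpg' file names for
# the cropped face images before upload.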
def make_dir(path):
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
class DynamicFrame(QWidget):
def __init__(self, parent, *args, **kwargs):
super(DynamicFrame, self).__init__()
self.initUI()
self.counter=0
self.capture_cnt=0
self.capture = cv2.VideoCapture(camera_port)
def initUI(self):
self.video_stream = QLabel()
self.video_stream.setScaledContents(True)
self.video_stream.setAlignment(Qt.AlignLeft)
self.video_stream.setFixedSize(600,450)
self.video_stream_label=QLabel('Live Video Stream')
self.video_stream_label.setAlignment(Qt.AlignCenter)
self.video_stream_label.setFont(font2)
self.face_image = QLabel()
self.face_image.setScaledContents(True)
self.face_image.setFixedSize(600,450)
self.face_image.setAlignment(Qt.AlignRight)
self.face_image.setPixmap(QPixmap(placeholder_image))
self.face_image_label=QLabel('Last Capture Results')
self.face_image_label.setAlignment(Qt.AlignCenter)
self.face_image_label.setFont(font2)
self.vbox1=QVBoxLayout()
self.vbox1.addWidget(self.video_stream)
self.vbox1.addWidget(self.video_stream_label)
self.vbox2=QVBoxLayout()
self.vbox2.addWidget(self.face_image)
self.vbox2.addWidget(self.face_image_label)
self.hbox=QHBoxLayout()
self.hbox.addLayout(self.vbox1)
self.hbox.addLayout(self.vbox2)
self.hbox.setAlignment(Qt.AlignCenter)
self.hbox.setSpacing(20)
self.hbox2=QHBoxLayout()
self.hbox2.setAlignment(Qt.AlignCenter)
self.message_label=QLabel('message')
self.message_label.setAlignment(Qt.AlignCenter)
self.message_label.setFont(font2)
self.hbox2.addWidget(self.message_label)
self.hbox2.setContentsMargins(20, 20, 20, 20)
self.hbox2.setSpacing(10)
self.label1 = QLabel('Real-Time Face Recognition based Surveillance')
self.label2 = QLabel('')
self.label1.setAlignment(Qt.AlignCenter)
self.label2.setAlignment(Qt.AlignCenter)
self.label1.setFont(font3)
self.label2.setFont(font3)
self.fbox = QFormLayout()
self.fbox.setAlignment(Qt.AlignCenter)
self.fbox.setContentsMargins(20, 20, 20, 20)
self.fbox.addRow(self.label1)
self.fbox.addRow(self.label2)
self.vbox = QVBoxLayout()
self.vbox.addLayout(self.fbox)
self.vbox.addLayout(self.hbox)
self.vbox.addLayout(self.hbox2)
self.vbox.setAlignment(Qt.AlignCenter)
self.setLayout(self.vbox)
self.update_check()
def stop_capture(self):
if self.capturing:
self.capturing = False
self.capture.release()
self.timer.stop()
cv2.destroyAllWindows()
def update_check(self):
self.video_timer = QTimer(self)
self.video_timer.timeout.connect(self.display_video_stream)
self.video_timer.start(capture_interval)
def display_video_stream(self):
ret,frame = self.capture.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(80, 80),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = cv2.flip(frame, 1)
image = QImage(frame, frame.shape[1], frame.shape[0],
frame.strides[0], QImage.Format_RGB888)
self.video_stream.setPixmap(QPixmap.fromImage(image))
self.message_label.setText('Next image capture in %d seconds' % int((detection_interval-self.counter*capture_interval)/1000))
if self.counter==int(detection_interval/capture_interval):
self.message_label.setText('Face identification started ...')
self.update_dynamic_frame()
self.counter=0
else:
self.counter=self.counter+1
def update_dynamic_frame(self):
global current_userid
global current_userfname
detected_personid = ''
welcome_names=''
ramp_frames = 10
print "Face identification started .........."
cv2.destroyAllWindows()
try:
for i in xrange(ramp_frames):
s, im = self.capture.read()
ret,frame = self.capture.read()
#self.message_label.setText('Image Captured')
self.capture_cnt+=1
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(80, 80),
flags=cv2.cv.CV_HAAR_SCALE_IMAGE
)
print "Total Faces in Image = %d " % len(faces)
#self.message_label.setText("Total Faces in Image = %d " % len(faces))
if len(faces) > 0:
detected_persons = []
persons = []
persons_cnt=0
detected_persons_cnt=0
for (x, y, w, h) in faces:
if w*h>500:
persons_cnt+=1
image_crop = frame[y:y+h,x:x+w]
#self.message_label.setText("Processing.. %d " % persons_cnt)
file_name = id_generator()+'.jpg'
file = os.path.join(tmp_path,file_name)
cloudinary_url=cloudinary_tmp + '/' + file_name
cv2.imwrite(file, image_crop)
imup.upload_image(file,file_name)
faceid=msface.face_detect(cloudinary_url)
print "Result for person %d " % persons_cnt
print "Image File = " + str(file)
print "faceId = " + str(faceid)
detected_personid = msface.face_identify(faceid)
if detected_personid:
print "detected_personid = " + str(detected_personid)
comm = "SELECT * FROM %s WHERE personid = '%s'" % (TABLE_NAME,detected_personid)
res = cursor.execute(comm)
res = cursor.fetchone()
if res:
userid = res[0]
uname = res[1]
fname = res[2]
lname = res[3]
print "Welcome %s !" % (fname+' '+lname)
detected_persons_cnt+=1
detected_persons.append(fname)
persons.append(fname)
now = datetime.datetime.now()
comm = "SELECT * FROM users_present WHERE userid = %d and date = '%s' " % (int(userid), now.strftime("%Y-%m-%d"))
#print comm
res2=cursor.execute(comm)
res2=cursor.fetchone()
if res2==None:
format_str = "INSERT INTO users_present (id, userid) VALUES (NULL,%d)" %(int(userid))
#print format_str
conn.execute(format_str)
conn.commit()
print "Attendance marked for user %s " % uname
                                else:
print "Attendance already marked for user %s " % uname
else:
time_str=strftime("%Y-%m-%d_%H:%M:%S", gmtime())
print "Unknown person found"
cv2.imwrite(os.path.join(unknown_user_path,'cam1_'+time_str+'.jpg'),image_crop)
persons.append('Unknown')
if detected_persons_cnt > 1:
for i in range(detected_persons_cnt-1):
welcome_names = welcome_names + detected_persons[i] + ', '
welcome_names=welcome_names[:-2]
welcome_names=welcome_names + ' & ' + detected_persons[detected_persons_cnt-1]
elif detected_persons_cnt>0:
welcome_names = detected_persons[0]
self.label2.setText('Hello '+ welcome_names)
else:
self.label2.setText('')
print "No person in image"
k=0
for (x, y, w, h) in faces:
if persons[k]!='Unknown':
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,255,0),1)
else:
cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.putText(frame, persons[k],(x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL , 1,(0,0,255),1)
k=k+1
#image=cv2.flip(frame, 1)
image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
face_image = QImage(image, image.shape[1], image.shape[0],
image.strides[0], QImage.Format_RGB888)
self.face_image.setPixmap(QPixmap.fromImage(face_image))
except Exception as e:
print "Errors occured !"
print e
class FullscreenWindow:
def __init__(self, parent, *args, **kwargs):
self.qt = QWidget()
self.qt.showFullScreen()
self.qt.pal=QPalette()
self.qt.pal.setColor(QPalette.Background,QColor(0,0,0))
self.qt.pal.setColor(QPalette.Foreground,QColor(255,255,255))
self.qt.setPalette(self.qt.pal)
self.bg_color=0
self.qt.hbox4 = QHBoxLayout()
self.qt.Dynamicframe = DynamicFrame(self.qt)
self.qt.hbox4.addWidget(self.qt.Dynamicframe)
self.qt.vbox = QVBoxLayout()
self.qt.vbox.addLayout(self.qt.hbox4)
self.qt.setLayout(self.qt.vbox)
if __name__ == '__main__':
make_dir(tmp_path)
make_dir(unknown_user_path)
for root, dirs, files in os.walk(tmp_path):
for f in files:
os.unlink(os.path.join(root, f))
for d in dirs:
shutil.rmtree(os.path.join(root, d))
a = QApplication(sys.argv)
w = FullscreenWindow(a)
sys.exit(a.exec_())
# command to terminate the running program
# killall -9 python | 1.898438 | 2 |
server/brain/data-cleaner.py | souravchanda001/ratings-predicting-chrome-extension | 6 | 12799940 | import re
import csv
import pandas as pd
f= open("data-errors.txt",'r',encoding="utf8")
fc = f.read()
fcbRegex = re.compile(r"line(\s\d+)")
clear = re.findall(fcbRegex,fc)
for i in clear:
print("lines",i)
arr=clear
print("array is",arr)
count = 1
reader = csv.reader(open('amazon_reviews_us_Watches_v1_00.tsv', 'r',encoding="utf8"), delimiter="\t")
writer = csv.writer(open('amazon_reviews_us_Watches_v1_00_clean.tsv', 'w',encoding="utf8"), delimiter="\t")
for row in reader:
if count in arr:
print("skipping ", count)
count += 1
continue
else:
print("writting ", count)
writer.writerow(row)
count += 1
| 3.046875 | 3 |
drmaa_futures/slave.py | ndevenish/drmaa_futures | 0 | 12799941 | # coding: utf-8
"""
Running a slave instance.
"""
import logging
import sys
import time
import traceback
import dill as pickle
import zmq
logger = logging.getLogger(__name__)
class ExceptionPicklingError(Exception):
"""Represent an error attempting to pickle the result of a task"""
class TaskSystemExit(Exception):
"""For when the task raised a SystemExit exception, trying to quit"""
def do_task(task_id, task_function):
"""Do a task, as specified in a pickle bundle.
:arg byte data: The pickle-data to load
:returns: Pickle data of the result, or an exception
"""
try:
logger.debug("Running task with ID {}".format(task_id))
# Run whatever task we've been given
result = task_function()
logger.debug("Completed task")
# An error pickling here counts as a job failure
return b"YAY " + pickle.dumps((task_id, result))
except KeyboardInterrupt:
# This is interactive so we want to let it float up - we'll handle the
# special case in the parent context
raise
except BaseException:
logger.debug("Exception processing task")
# Everything else: We want to pass back across the network
(_, exc_value, exc_trace) = sys.exc_info()
exc_trace = traceback.format_tb(exc_trace)
# We don't want to propagate a SystemExit to the other side
if isinstance(exc_value, SystemExit):
logger.debug("Intercepted task calling sys.exit")
exc_value = TaskSystemExit()
# Be careful - we might not be able to pickle the exception?? Go to lengths
# to make sure that we pass something sensible back
try:
pickle.dumps(exc_value)
except pickle.PicklingError:
exc_value = ExceptionPicklingError("{}: {}".format(
str(type(exc_value)), str(exc_value)))
return b"ONO " + pickle.dumps((task_id, exc_trace, exc_value))
def _do_handshake(socket, worker_id):
logger.debug("Sending hello")
socket.send(b"HELO IAM " + worker_id.encode("utf-8"))
logger.debug("Awaiting confirmation of hello recieved")
assert socket.recv() == b"HAY"
logger.debug("Got hello. Going into task loop")
def _handle_task(socket, data):
"""Handle a reply asking us to do a task"""
try:
(task_id, task_function) = pickle.loads(data)
logger.debug("Got task %s (%d bytes)", task_id, len(data))
return do_task(task_id, task_function)
except KeyboardInterrupt as exc:
# This is a special case; try to tell the master that we failed
# to quit, then continue to raise the error.
logger.info("Got interrupt while processing task")
socket.send(b"ONO " + pickle.dumps((task_id, "", exc)))
socket.recv()
raise
def run_slave(server_url, worker_id, timeout=30):
"""Run a slave instance and connect it to a specific master URL.
:param str server_url: The server string to use to connect
  :param str worker_id: The worker ID to use when communicating
:param timeout: The time (in seconds) to wait with no jobs before terminating
"""
logger.debug("Running slave {} connect to {}".format(worker_id, server_url))
context = zmq.Context()
socket = context.socket(zmq.REQ)
logger.debug("Connecting")
socket.connect(server_url)
socket.RCVTIMEO = int(1000 * timeout)
try:
_do_handshake(socket, worker_id)
except zmq.error.Again:
logger.debug("Timed out waiting for handshake.")
sys.exit(1)
else:
# If waiting for the whole timeout, then stop waiting
last_job = time.time()
while time.time() - last_job < timeout:
logger.debug("Asking for a task")
socket.send("IZ BORED {}".format(worker_id).encode("UTF-8"))
reply = socket.recv()
# We get a command returned
assert reply.startswith(b"PLZ")
if reply == b"PLZ WAIT":
logger.debug("No tasks available. Trying again in a few seconds.")
time.sleep(min(timeout / 2.0, 5))
elif reply == b"PLZ GOWAY":
logger.debug("Got quit signal. ending main loop.")
break
elif reply.startswith(b"PLZ DO"):
try:
result = _handle_task(socket, reply[7:])
except KeyboardInterrupt:
# Now, we know we want to quit - so send the message letting
# the master know. This is a little unclean, but it's only
# because we are here that we can guarantee that we weren't in
# the middle of a send/recv when the signal was sent
logger.debug("Sending quit message after keyboardinterrupt")
socket.send(b"IGIVEUP " + worker_id.encode("utf-8"))
socket.recv()
raise
logger.debug("Sending result of %d bytes", len(result))
socket.send(result)
# Await the ok
assert socket.recv() == b"THX"
last_job = time.time()
if time.time() - last_job >= timeout:
logger.debug("Waited too long for new tasks. Quitting.")
socket.send(b"IGIVEUP " + worker_id.encode("utf-8"))
socket.recv()
finally:
logger.debug("Closing socket")
socket.LINGER = 300
socket.close()
logger.debug("Closing context")
context.term()
logger.debug("Slave completed.")
# Messaging protocol:
# Sent Recieved Action
# ----------------------- ------------- ----------------------------------
# HELO IAM {id} HAY Negotiation success
# IZ BORED {id} PLZ GOWAY Exit
# PLZ WAIT Nothing to do; try again soon
# PLZ DO {task} Hand off task to runner
# YAY {result} THX Task succeeded with result data
# ONO {result} THX Task failed - with exception data
# IGIVEUP {id} BYE Quitting; given up with processing
| 2.609375 | 3 |
trachours/web_ui.py | t-kenji/trac-hours-plugin | 0 | 12799942 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 <NAME> <<EMAIL>>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
import re
import calendar
import csv
import time
from StringIO import StringIO
from datetime import datetime, timedelta
from pkg_resources import parse_version
from genshi.filters import Transformer
from genshi.filters.transform import StreamBuffer
from trac import __version__ as TRAC_VERSION
from trac.core import *
from trac.ticket import Ticket
from trac.ticket.model import Milestone
from trac.util.datefmt import format_date, parse_date, user_time
from trac.util.html import html as tag
from trac.util.translation import _
from trac.web.api import IRequestHandler, ITemplateStreamFilter
from trac.web.chrome import (
Chrome, ITemplateProvider, add_ctxtnav, add_link, add_stylesheet
)
from hours import TracHoursPlugin, _
from sqlhelper import get_all_dict
from utils import hours_format
class TracHoursRoadmapFilter(Component):
implements(ITemplateStreamFilter)
# ITemplateStreamFilter methods
def filter_stream(self, req, method, filename, stream, data):
"""
filter the stream for the roadmap (/roadmap)
and milestones /milestone/<milestone>
"""
if filename in ('roadmap.html', 'milestone_view.html') and \
'TICKET_VIEW_HOURS' in req.perm:
trac_hours = TracHoursPlugin(self.env)
hours = {}
milestones = data.get('milestones')
this_milestone = None
if milestones is None:
# /milestone view : only one milestone
milestones = [data['milestone']]
this_milestone = milestones[0].name
find_xpath = "//div[@class='milestone']/h1"
xpath = "//div[@class='milestone']/div[1]"
else:
# /roadmap view
find_xpath = "//*[@class='milestone']//h2/a"
xpath = "//*[@class='milestone']/div[1]"
for milestone in milestones:
hours[milestone.name] = dict(totalhours=0.,
estimatedhours=0., )
tickets = [tid for tid, in self.env.db_query("""
SELECT id FROM ticket WHERE milestone=%s
""", (milestone.name,))]
if tickets:
hours[milestone.name]['date'] = \
Ticket(self.env, tickets[0])['time']
for ticket in tickets:
ticket = Ticket(self.env, ticket)
# estimated hours for the ticket
try:
estimated_hours = float(ticket['estimatedhours'])
except (ValueError, TypeError):
estimated_hours = 0.
hours[milestone.name]['estimatedhours'] += estimated_hours
# total hours for the ticket (seconds -> hours)
total_hours = trac_hours.get_total_hours(
ticket.id) / 3600.0
hours[milestone.name]['totalhours'] += total_hours
# update date for oldest ticket
if ticket['time'] < hours[milestone.name]['date']:
hours[milestone.name]['date'] = ticket['time']
b = StreamBuffer()
stream |= Transformer(find_xpath).copy(b).end().select(xpath). \
append(
self.MilestoneMarkup(req, b, hours, req.href, this_milestone))
return stream
class MilestoneMarkup(object):
"""Iterator for Transformer markup injection"""
def __init__(self, req, buffer, hours, href, this_milestone):
self.req = req
self.buffer = buffer
self.hours = hours
self.href = href
self.this_milestone = this_milestone
def __iter__(self):
if self.this_milestone is not None: # for /milestone/xxx
milestone = self.this_milestone
else:
milestone = self.buffer.events[3][1]
if milestone not in self.hours.keys():
return iter([])
hours = self.hours[milestone]
estimated_hours = hours['estimatedhours']
total_hours = hours['totalhours']
if not (estimated_hours or total_hours):
return iter([])
items = []
if estimated_hours:
if parse_version(TRAC_VERSION) < parse_version('1.0'):
items.append(tag.dt(_("Estimated Hours:")))
items.append(tag.dd(str(estimated_hours)))
else:
items.append(tag.span(_("Estimated Hours: "),
str(estimated_hours),
class_="first interval"))
date = hours['date']
link = self.href("hours", milestone=milestone,
from_date=user_time(self.req, format_date, date))
if parse_version(TRAC_VERSION) < parse_version('1.0'):
items.append(tag.dt(tag.a(_("Total Hours:"), href=link)))
items.append(
tag.dd(tag.a(hours_format % total_hours, href=link)))
return iter(tag.dl(*items))
else:
items.append(tag.span(tag.a(_("Total Hours: "),
hours_format % total_hours,
href=link),
class_='interval'))
return iter(tag.p(*items, class_='legend'))
class TracUserHours(Component):
implements(ITemplateProvider, IRequestHandler)
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
from pkg_resources import resource_filename
return [resource_filename(__name__, 'templates')]
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/hours/user' or \
re.match(r'/hours/user/(?:tickets|dates)/(?:\w+)', req.path_info) is not None
def process_request(self, req):
req.perm.require('TICKET_VIEW_HOURS')
if req.path_info.rstrip('/') == '/hours/user':
return self.users(req)
m = re.match(r'/hours/user/(?P<field>\w+)/(?P<user>\w+)',
req.path_info)
field = m.group('field')
user = m.group('user')
if field == 'tickets':
return self.user_by_ticket(req, user)
elif field == 'dates':
return self.user_by_date(req, user)
# Internal methods
def date_data(self, req, data):
"""data for the date"""
now = datetime.now()
data['days'] = range(1, 32)
data['months'] = list(enumerate(calendar.month_name))
data['years'] = range(now.year, now.year - 10, -1)
if 'from_date' in req.args:
from_date_raw = user_time(req, parse_date, req.args['from_date'])
else:
from_date_raw = datetime(now.year, now.month, now.day)
from_date_raw = from_date_raw - timedelta(days=7)
if 'to_date' in req.args:
to_date_raw = user_time(req, parse_date, req.args['to_date'])
to_date_raw = to_date_raw + timedelta(hours=23, minutes=59, seconds=59)
else:
to_date_raw = now
data['from_date_raw'] = from_date_raw
data['from_date'] = user_time(req, format_date, from_date_raw)
data['to_date_raw'] = to_date_raw
data['to_date'] = user_time(req, format_date, to_date_raw)
data['prev_week'] = from_date_raw - timedelta(days=7)
args = dict(req.args)
args['from_date'] = user_time(req, format_date, data['prev_week'])
args['to_date'] = user_time(req, format_date, from_date_raw)
data['prev_url'] = req.href('/hours/user', **args)
def users(self, req):
"""hours for all users"""
data = {'hours_format': hours_format}
# date data
self.date_data(req, data)
# milestone data
milestone = req.args.get('milestone')
milestones = Milestone.select(self.env)
        data['milestones'] = milestones
        data['milestone'] = milestone
# get the hours
# trachours = TracHoursPlugin(self.env)
# tickets = trachours.tickets_with_hours()
hours = get_all_dict(self.env, """
SELECT * FROM ticket_time
WHERE time_started >= %s AND time_started < %s
""", *[int(time.mktime(data[i].timetuple()))
for i in ('from_date_raw', 'to_date_raw')])
details = req.args.get('details')
worker_hours = {}
if details != 'date':
for entry in hours:
worker = entry['worker']
if worker not in worker_hours:
worker_hours[worker] = 0
if milestone and milestone != \
Ticket(self.env, entry['ticket']).values.get('milestone'):
continue
worker_hours[worker] += entry['seconds_worked']
worker_hours = [(worker, seconds / 3600.)
for worker, seconds in sorted(worker_hours.items())]
else:
for entry in hours:
date = user_time(req, format_date, entry['time_started'])
worker = entry['worker']
key = (date, worker)
if key not in worker_hours:
worker_hours[key] = 0
if milestone and milestone != \
Ticket(self.env, entry['ticket']).values.get('milestone'):
continue
worker_hours[key] += entry['seconds_worked']
worker_hours = [(key[0], key[1], seconds / 3600.)
for key, seconds in sorted(worker_hours.items())]
data['details'] = details
data['worker_hours'] = worker_hours
data['total_hours'] = sum(hours[-1] for hours in worker_hours)
if req.args.get('format') == 'csv':
req.send(self.export_csv(req, data))
add_stylesheet(req, 'common/css/report.css')
if details == 'date':
add_ctxtnav(req, _('Hours summary'),
req.href.hours('user',
from_date=data['from_date'],
to_date=data['to_date']))
else:
add_ctxtnav(req, _('Hours by date'),
req.href.hours('user',
details='date',
from_date=data['from_date'],
to_date=data['to_date']))
add_link(req, 'alternate', req.href(req.path_info, format='csv'),
'CSV', 'text/csv', 'csv')
# add_link(req, 'prev', self.get_href(query, args, context.href),
# _('Prev Week'))
# add_link(req, 'next', self.get_href(query, args, context.href),
# _('Next Week'))
# prevnext_nav(req, _('Prev Week'), _('Next Week'))
Chrome(self.env).add_jquery_ui(req)
return 'hours_users.html', data, 'text/html'
def user_by_ticket(self, req, user):
"""hours page for a single user"""
data = {'hours_format': hours_format,
'worker': user}
self.date_data(req, data)
args = [user]
args += [int(time.mktime(data[i].timetuple()))
for i in ('from_date_raw', 'to_date_raw')]
hours = get_all_dict(self.env, """
SELECT * FROM ticket_time
WHERE worker=%s AND time_started >= %s AND time_started < %s
""", *args)
worker_hours = {}
for entry in hours:
ticket = entry['ticket']
if ticket not in worker_hours:
worker_hours[ticket] = 0
worker_hours[ticket] += entry['seconds_worked']
data['tickets'] = dict([(i, Ticket(self.env, i))
for i in worker_hours.keys()])
# sort by ticket number and convert to hours
worker_hours = [(ticket_id, seconds / 3600.)
for ticket_id, seconds in
sorted(worker_hours.items())]
data['worker_hours'] = worker_hours
data['total_hours'] = sum(hours[1] for hours in worker_hours)
if req.args.get('format') == 'csv':
buffer = StringIO()
writer = csv.writer(buffer)
title = _("Hours for {user}").format(user=user)
writer.writerow([title, req.abs_href()])
writer.writerow([])
writer.writerow(['From', 'To'])
writer.writerow([data['from_date'], data['to_date']])
writer.writerow([])
writer.writerow(['Ticket', 'Hours'])
for ticket, hours in worker_hours:
writer.writerow([ticket, hours])
req.send(buffer.getvalue(), 'text/csv')
add_stylesheet(req, 'common/css/report.css')
add_ctxtnav(req, _('Hours by Query'),
req.href.hours(from_date=data['from_date'],
to_date=data['to_date']))
add_ctxtnav(req, _('Hours by User'),
req.href.hours('user',
from_date=data['from_date'],
to_date=data['to_date']))
add_ctxtnav(req, _('Hours by date'),
req.href.hours('user/dates/{}'.format(user),
from_date=data['from_date'],
to_date=data['to_date']))
add_link(req, 'alternate', req.href(req.path_info, format='csv'),
'CSV', 'text/csv', 'csv')
Chrome(self.env).add_jquery_ui(req)
return 'hours_user_by_ticket.html', data, 'text/html'
def user_by_date(self, req, user):
"""hours page for a single user"""
data = {'hours_format': hours_format,
'worker': user}
self.date_data(req, data)
args = [user]
args += [int(time.mktime(data[i].timetuple()))
for i in ('from_date_raw', 'to_date_raw')]
hours = get_all_dict(self.env, """
SELECT * FROM ticket_time
WHERE worker=%s AND time_started >= %s AND time_started < %s
""", *args)
worker_hours = {}
for entry in hours:
date = user_time(req, format_date, entry['time_started'])
ticket = entry['ticket']
if date not in worker_hours:
worker_hours[date] = {
'seconds': 0,
'tickets': [],
}
worker_hours[date]['seconds'] += entry['seconds_worked']
if ticket not in worker_hours[date]['tickets']:
worker_hours[date]['tickets'].append(ticket)
data['tickets'] = dict([(entry['ticket'], Ticket(self.env, entry['ticket']))
for entry in hours])
        # sort by date and convert seconds to hours
worker_hours = [(date, details['tickets'], details['seconds'] / 3600.)
for date, details in
sorted(worker_hours.items())]
data['worker_hours'] = worker_hours
data['total_hours'] = sum(hours[2] for hours in worker_hours)
if req.args.get('format') == 'csv':
buffer = StringIO()
writer = csv.writer(buffer)
title = _("Hours for {user}").format(user=user)
writer.writerow([title, req.abs_href()])
writer.writerow([])
writer.writerow(['From', 'To'])
writer.writerow([data['from_date'], data['to_date']])
writer.writerow([])
writer.writerow(['Ticket', 'Hours'])
for date, tickets, hours in worker_hours:
ids = ['#{}'.format(id) for id in tickets]
writer.writerow([date, ','.join(ids), hours])
req.send(buffer.getvalue(), 'text/csv')
add_stylesheet(req, 'common/css/report.css')
add_ctxtnav(req, _('Hours by Query'),
req.href.hours(from_date=data['from_date'],
to_date=data['to_date']))
add_ctxtnav(req, _('Hours by User'),
req.href.hours('user',
from_date=data['from_date'],
to_date=data['to_date']))
add_ctxtnav(req, _('Hours by ticket'),
req.href.hours('user/tickets/{}'.format(user),
from_date=data['from_date'],
to_date=data['to_date']))
add_link(req, 'alternate', req.href(req.path_info,
format='csv',
from_date=data['from_date'],
to_date=data['to_date']),
'CSV', 'text/csv', 'csv')
Chrome(self.env).add_jquery_ui(req)
return 'hours_user_by_date.html', data, 'text/html'
def export_csv(self, req, data, sep=',', mimetype='text/csv'):
content = StringIO()
content.write('\xef\xbb\xbf') # BOM
writer = csv.writer(content, delimiter=sep, quoting=csv.QUOTE_MINIMAL)
title = _("Hours for {project}").format(project=self.env.project_name)
writer.writerow([title, req.abs_href()])
writer.writerow([])
writer.writerow(['From', 'To'])
writer.writerow([data['from_date'], data['to_date']])
if data['milestone']:
writer.writerow(['Milestone', data['milestone']])
writer.writerow([])
writer.writerow(['Worker', 'Hours'])
for worker, hours in data['worker_hours']:
writer.writerow([worker, hours])
return content.getvalue(), '%s;text/csv' % mimetype
| 2.140625 | 2 |
unittest_reinvent/running_modes/transfer_learning_tests/test_link_invent_transfer_learning.py | lilleswing/Reinvent-1 | 183 | 12799943 | <gh_stars>100-1000
import shutil
import unittest
import os
from running_modes.configurations import TransferLearningLoggerConfig, GeneralConfigurationEnvelope
from running_modes.configurations.transfer_learning.link_invent_learning_rate_configuration import \
LinkInventLearningRateConfiguration
from running_modes.configurations.transfer_learning.link_invent_transfer_learning_configuration import \
LinkInventTransferLearningConfiguration
from running_modes.constructors.transfer_learning_mode_constructor import TransferLearningModeConstructor
from running_modes.utils import set_default_device_cuda
from running_modes.enums.logging_mode_enum import LoggingModeEnum
from running_modes.enums.running_mode_enum import RunningModeEnum
from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum
from unittest_reinvent.fixtures.paths import MAIN_TEST_PATH, SMILES_SET_LINK_INVENT_PATH, LINK_INVENT_PRIOR_PATH
from unittest_reinvent.fixtures.utils import count_empty_files
class TestLinkInventTransferLearning(unittest.TestCase):
def setUp(self):
set_default_device_cuda()
lm_enum = LoggingModeEnum()
rm_enum = RunningModeEnum()
mt_enum = ModelTypeEnum()
self.workfolder = os.path.join(MAIN_TEST_PATH, mt_enum.LINK_INVENT + rm_enum.TRANSFER_LEARNING)
if not os.path.isdir(self.workfolder):
os.makedirs(self.workfolder)
self.log_dir = os.path.join(self.workfolder, "test_log")
log_config = TransferLearningLoggerConfig(logging_path=self.log_dir, recipient=lm_enum.LOCAL,
job_name="test_job")
self.lr_config = LinkInventLearningRateConfiguration()
self.parameters = LinkInventTransferLearningConfiguration(empty_model=LINK_INVENT_PRIOR_PATH,
output_path=self.workfolder,
input_smiles_path=SMILES_SET_LINK_INVENT_PATH,
validation_smiles_path=None,
num_epochs=2,
sample_size=10,
learning_rate=self.lr_config)
self.general_config = GeneralConfigurationEnvelope(model_type=mt_enum.LINK_INVENT, logging=vars(log_config),
run_type=rm_enum.TRANSFER_LEARNING, version="3.0",
parameters=vars(self.parameters))
self.runner = TransferLearningModeConstructor(self.general_config)
def tearDown(self):
if os.path.isdir(self.workfolder):
shutil.rmtree(self.workfolder)
def _model_saved_and_logs_exist(self):
self.assertTrue(os.path.isfile(os.path.join(self.workfolder, self.parameters.model_file_name)))
self.assertTrue(os.path.isdir(self.log_dir))
self.assertEqual(count_empty_files(self.log_dir), 0)
def test_no_validation(self):
self.parameters.validation_smiles_path = None
self.runner.run()
self._model_saved_and_logs_exist()
def test_with_validation(self):
self.parameters.validation_smiles_path = SMILES_SET_LINK_INVENT_PATH
self.runner.run()
self._model_saved_and_logs_exist()
| 1.835938 | 2 |
datasets/datasets.py | pengpeg/PFAN_MX | 0 | 12799944 | # -*- coding: utf-8 -*-
# @Time : 2020/2/12 15:47
# @Author : Chen
# @File : datasets.py
# @Software: PyCharm
import os, warnings
from mxnet.gluon.data import dataset, sampler
from mxnet import image
import numpy as np
class IdxSampler(sampler.Sampler):
"""Samples elements from [0, length) randomly without replacement.
Parameters
----------
length : int
Length of the sequence.
"""
def __init__(self, indices_selected):
if isinstance(indices_selected, list):
indices_selected = np.array(indices_selected)
self._indices_selected = indices_selected
self._length = indices_selected.shape[0]
def __iter__(self):
indices = self._indices_selected
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self._length
class ImageFolderDataset(dataset.Dataset):
"""A dataset for loading image files stored in a folder structure.
like::
root/car/0001.jpg
root/car/xxxa.jpg
root/car/yyyb.jpg
root/bus/123.jpg
root/bus/023.jpg
root/bus/wwww.jpg
Parameters
----------
root : str
Path to root directory.
flag : {0, 1}, default 1
If 0, always convert loaded images to greyscale (1 channel).
If 1, always convert loaded images to colored (3 channels).
transform : callable, default None
A function that takes data and label and transforms them::
transform = lambda data, label: (data.astype(np.float32)/255, label)
Attributes
----------
synsets : list
List of class names. `synsets[i]` is the name for the integer label `i`
items : list of tuples
List of all images in (filename, label) pairs.
"""
def __init__(self, root, flag=1, transform=None, pseudo_labels=None):
self._root = os.path.expanduser(root)
self._flag = flag
self._transform = transform
self._exts = ['.jpg', '.jpeg', '.png']
self._list_images(self._root)
self._pseudo_labels = pseudo_labels
def _list_images(self, root):
self.synsets = []
self.items = []
for folder in sorted(os.listdir(root)):
path = os.path.join(root, folder)
if not os.path.isdir(path):
warnings.warn('Ignoring %s, which is not a directory.'%path, stacklevel=3)
continue
label = len(self.synsets)
self.synsets.append(folder)
for filename in sorted(os.listdir(path)):
filename = os.path.join(path, filename)
ext = os.path.splitext(filename)[1]
if ext.lower() not in self._exts:
warnings.warn('Ignoring %s of type %s. Only support %s'%(
filename, ext, ', '.join(self._exts)))
continue
self.items.append((filename, label))
def __getitem__(self, idx):
img = image.imread(self.items[idx][0], self._flag)
label = self.items[idx][1]
if self._transform is not None:
return self._transform(img, label)
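        # The sample index is returned as well so that externally computed
        # pseudo-labels can be matched back to individual samples.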
if self._pseudo_labels is not None:
pseudo_label = self._pseudo_labels[idx]
return img, label, idx, pseudo_label
return img, label, idx
def __len__(self):
return len(self.items)
| 2.671875 | 3 |
dev/atlas/create_atlas/create_masks_csf_and_gm.py | valosekj/spinalcordtoolbox | 1 | 12799945 | <filename>dev/atlas/create_atlas/create_masks_csf_and_gm.py<gh_stars>1-10
#!/usr/bin/env python
# create masks of CSF and gray matter
# Author: <EMAIL>
# Created: 2014-12-06
# TODO: get GM
# TODO: add tract corresponding to the undefined values in WM atlas
import sys, io, os, glob
import numpy as np
import nibabel as nib
path_sct = os.environ.get("SCT_DIR", os.path.dirname(os.path.dirname(__file__)))
# append path that contains scripts, to be able to load modules
sys.path.append(os.path.join(path_sct, 'scripts'))
import sct_utils as sct
# parameters
tracts_to_sum_index = 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
folder_atlas = os.path.join("WMtracts_outputs", "final_results")
file_csf = "WMtract__csf.nii.gz"
file_gm = "WMtract__gm.nii.gz"
file_label = 'info_label.txt'
def main():
# Extract the tracts from the atlas' folder
tracts = get_tracts(folder_atlas)
nb_tracts = len(tracts)
# Get the sum of the tracts
tracts_sum = add_tracts(tracts, tracts_to_sum_index)
# Save sum of the tracts to niftii
save_3D_nparray_nifti(tracts_sum, 'tmp.WM_all.nii.gz', os.path.join(folder_atlas, "WMtract__00.nii.gz"))
# binarize it
sct.run('fslmaths tmp.WM_all.nii.gz -thr 0.5 -bin tmp.WM_all_bin.nii.gz')
# dilate it
sct.run('fslmaths tmp.WM_all_bin.nii.gz -kernel boxv 5x5x1 -dilM tmp.WM_all_bin_dil.nii.gz')
# subtract WM mask to obtain CSF mask
    sct.run('fslmaths tmp.WM_all_bin_dil -sub tmp.WM_all ' + os.path.join(folder_atlas, file_csf))
# add line in info_label.txt
text_label = '\n'+str(nb_tracts)+', CSF, '+file_csf
    io.open(os.path.join(folder_atlas, file_label), 'a+b').write(text_label)
def get_tracts(tracts_folder):
"""Loads tracts in an atlas folder and converts them from .nii.gz format to numpy ndarray
Save path of each tracts
Only the tract must be in tracts_format in the folder"""
fname_tract = glob.glob(os.path.join(tracts_folder, "*.nii.gz"))
#Initialise tracts variable as object because there are 4 dimensions
tracts = np.empty([len(fname_tract), 1], dtype=object)
#Load each partial volumes of each tracts
for label in range(0, len(fname_tract)):
tracts[label, 0] = nib.load(fname_tract[label]).get_data()
#Reshape tracts if it is the 2D image instead of 3D
for label in range(0, len(fname_tract)):
if (tracts[label,0]).ndim == 2:
tracts[label,0] = tracts[label,0].reshape(int(np.size(tracts[label,0],0)), int(np.size(tracts[label,0],1)),1)
return tracts
def save_3D_nparray_nifti(np_matrix_3d, output_image, fname_atlas):
# Save 3d numpy matrix to niftii image
# np_matrix_3d is a 3D numpy ndarray
# output_image is the name of the niftii image created, ex: '3D_matrix.nii.gz'
img = nib.Nifti1Image(np_matrix_3d, np.eye(4))
affine = img.get_affine()
np_matrix_3d_nii = nib.Nifti1Image(np_matrix_3d,affine)
nib.save(np_matrix_3d_nii, output_image)
# copy geometric information
sct.run('fslcpgeom '+fname_atlas+' '+output_image, verbose=0)
def add_tracts(tracts, tracts_to_sum_index):
tracts_sum = np.empty((tracts[0, 0]).shape)
for i in tracts_to_sum_index:
tracts_sum = tracts_sum + tracts[i, 0]
return tracts_sum
if __name__ == "__main__":
main()
| 2.265625 | 2 |
train.py | rodolfo-mendes/diabetes-prediction-api | 0 | 12799946 | '''
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <https://unlicense.org>
'''
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from joblib import dump
def train_model():
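    # Load the training data; 'Outcome' is the binary target column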
data = pd.read_csv('data/diabetes.csv')
X_train = data.drop(columns='Outcome')
y_train = data['Outcome']
rfc = RandomForestClassifier()
rfc.fit(X_train, y_train)
dump(rfc, 'model/diabetes_predictor.joblib')
def main():
train_model()
if __name__ == '__main__':
main()
| 1.585938 | 2 |
2017/day_03/day_03.py | viddrobnic/adventofcode | 0 | 12799947 | <gh_stars>0
from collections import defaultdict
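# Walk the memory spiral one step at a time: after finishing a side of the
# current length, turn to the next direction, and grow the side length by one
# after every two turns. Part one: Manhattan distance of the square that holds
# `number`. Part two: first adjacent-sum value written that exceeds `number`.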
number = int(input())
values = defaultdict(int)
values[(0, 0)] = 1
x, y = (0, 0)
direction = 0
directions = [(1, 0), (0, 1), (-1, 0), (0, -1), (1, 1), (-1, -1), (1, -1), (-1, 1)]
data = 1
second_result = 0
length = 1
step = 0
rotations = 0
first, second = False, False
while not (first and second):
# Add a step
d = directions[direction]
x += d[0]
y += d[1]
step += 1
# If number of steps equals length of the current trajectory, then rotate
if step >= length:
direction = (direction + 1) % 4
step = 0
rotations += 1
# Every two rotations length of the side is 1 longer
if rotations == 2:
length += 1
rotations = 0
if not first:
data += 1
if data == number:
first = True
    # Sum of all adjacent squares
    adjacent_sum = 0
    for d in directions:
        x_1 = x + d[0]
        y_1 = y + d[1]
        adjacent_sum += values[(x_1, y_1)]
    values[(x, y)] = adjacent_sum
    if not second and adjacent_sum > number:
        second_result = adjacent_sum
second = True
print('Part One: {}\nPart Two: {}'.format(abs(x) + abs(y), second_result))
| 3.25 | 3 |
script.py | Lets7512/Saved_WiFi_Passwords_Scraper_On_Windows | 0 | 12799948 | <reponame>Lets7512/Saved_WiFi_Passwords_Scraper_On_Windows
import os,platform,subprocess
from sys import exit
def main():
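    # Configure STARTUPINFO so the spawned netsh processes do not flash a console window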
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
txt=subprocess.check_output('netsh wlan show profiles', shell=False, startupinfo=startupinfo).decode()
txt=txt.splitlines()
users=[]
for i in txt[9:]:
try:
users.append(i[27:])
except:
pass
with open("keys.txt",'a') as w:
w.write("-------------------------\n")
for e in users:
if e in [""," "]:
continue
e=("\""+e+"\"")
pro=subprocess.check_output("netsh wlan show profile {} key=clear".format(e), shell=False, startupinfo=startupinfo).decode('windows-1252')
pro=pro.splitlines()
passwrd=""
for i in pro:
if ' Key Content : ' in i:
passwrd=i
break
passwrd=passwrd[29:]
to_w=e+' : '+passwrd+"\n"
w.write(to_w)
if __name__ == "__main__":
main()
exit() | 2.609375 | 3 |
setup.py | zain/pyr | 1 | 12799949 | #!/usr/bin/env python
from setuptools import setup
setup(
name='pyr',
version='0.4.1',
description='A nicer REPL for Python.',
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/zain/pyr',
packages=['pyr'],
install_requires=['pygments'],
scripts=['bin/pyr'],
)
| 0.964844 | 1 |
charis/primitives/locate_psflets.py | thaynecurrie/charis-dep | 0 | 12799950 | #!/usr/bin/env python
import copy
import glob
import logging
import os
import re
import numpy as np
from astropy.io import fits
from scipy import interpolate, ndimage, optimize, signal
try:
from charis.image import Image
except:
from image import Image
log = logging.getLogger('main')
class PSFLets:
"""
Helper class to deal with the PSFLets on the detector. Does most of the heavy lifting
during the wavelength calibration step.
"""
def __init__(self, load=False, infile=None, infiledir='.'):
'''
Initialize the class
Parameters
----------
load: Boolean
Whether to load an already-existing wavelength calibration file
infile: String
If load is True, this is the name of the file
infiledir: String
If load is True, this is the directory in which the file resides
'''
self.xindx = None
self.yindx = None
self.lam_indx = None
self.nlam = None
self.nlam_max = None
self.interp_arr = None
self.order = None
if load:
self.loadpixsol(infile, infiledir)
def loadpixsol(self, infile=None, infiledir='./calibrations'):
'''
Loads existing wavelength calibration file
Parameters
----------
infile: String
Name of the file
infiledir: String
Directory in which the file resides
'''
if infile is None:
infile = re.sub('//', '/', infiledir + '/PSFloc.fits')
hdulist = fits.open(infile)
try:
self.xindx = hdulist[0].data
self.yindx = hdulist[1].data
self.lam_indx = hdulist[2].data
self.nlam = hdulist[3].data.astype(int)
except:
raise RuntimeError("File " + infile +
" does not appear to contain a CHARIS wavelength solution in the appropriate format.")
self.nlam_max = np.amax(self.nlam)
def savepixsol(self, outdir="calibrations/"):
'''
Saves wavelength calibration file
Parameters
----------
outdir: String
Directory in which to put the file. The file is name PSFloc.fits and is a
multi-extension FITS file, each extension corresponding to:
0. the list of wavelengths at which the calibration is done
1. a 2D ndarray with the X position of all lenslets
2. a 2D ndarray with the Y position of all lenslets
3. a 2D ndarray with the number of valid wavelengths for a given lenslet (some wavelengths fall outside of the detector area)
'''
if not os.path.isdir(outdir):
raise IOError("Attempting to save pixel solution to directory " + outdir + ". Directory does not exist.")
outfile = re.sub('//', '/', outdir + '/PSFloc.fits')
out = fits.HDUList(fits.PrimaryHDU(self.xindx))
out.append(fits.PrimaryHDU(self.yindx))
out.append(fits.PrimaryHDU(self.lam_indx))
out.append(fits.PrimaryHDU(self.nlam.astype(int)))
try:
out.writeto(outfile, overwrite=True)
except:
raise
def geninterparray(self, lam, allcoef, order=3):
'''
Set up array to solve for best-fit polynomial fits to the
coefficients of the wavelength solution. These will be used
to smooth/interpolate the wavelength solution, and
ultimately to compute its inverse.
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of lists floats
Polynomial coefficients of wavelength solution
order: int
Order of polynomial wavelength solution
Notes
-----
Populates the attribute interp_arr in PSFLet class
'''
self.interp_arr = np.zeros((order + 1, allcoef.shape[1]))
self.order = order
xarr = np.ones((lam.shape[0], order + 1))
for i in range(1, order + 1):
xarr[:, i] = np.log(lam)**i
for i in range(self.interp_arr.shape[1]):
coef = np.linalg.lstsq(xarr, allcoef[:, i])[0]
self.interp_arr[:, i] = coef
def return_locations_short(self, coef, xindx, yindx):
'''
Returns the x,y detector location of a given lenslet for a given polynomial fit
Parameters
----------
coef: lists floats
Polynomial coefficients of fit for a single wavelength
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
Returns
-------
interp_x: float
X coordinate on the detector
interp_y: float
Y coordinate on the detector
'''
coeforder = int(np.sqrt(coef.shape[0])) - 1
interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
return interp_x, interp_y
def return_res(self, lam, allcoef, xindx, yindx,
order=3, lam1=None, lam2=None):
'''
Returns the spectral resolution and interpolated wavelength array
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of lists floats
Polynomial coefficients of wavelength solution
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
order: int
Order of polynomial wavelength solution
lam1: float
Shortest wavelength in nm
lam2: float
Longest wavelength in nm
Returns
-------
interp_lam: array
Array of wavelengths
R: float
Effective spectral resolution
'''
if lam1 is None:
lam1 = np.amin(lam) / 1.04
if lam2 is None:
lam2 = np.amax(lam) * 1.03
interporder = order
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
n_spline = 100
interp_lam = np.linspace(lam1, lam2, n_spline)
dy = []
dx = []
for i in range(n_spline):
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(1, interporder + 1):
coef += k * self.interp_arr[k] * np.log(interp_lam[i])**(k - 1)
_dx, _dy = _transform(xindx, yindx, coeforder, coef)
dx += [_dx]
dy += [_dy]
R = np.sqrt(np.asarray(dy)**2 + np.asarray(dx)**2)
return interp_lam, R
def monochrome_coef(self, lam, alllam=None, allcoef=None, order=3):
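        """Return the polynomial coefficients of the wavelength solution interpolated at a single wavelength lam (nm)."""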
if self.interp_arr is None:
if alllam is None or allcoef is None:
raise ValueError("Interpolation array has not been computed. Must call monochrome_coef with arrays.")
self.geninterparray(alllam, allcoef, order=order)
coef = np.zeros(self.interp_arr[0].shape)
for k in range(self.order + 1):
coef += self.interp_arr[k] * np.log(lam)**k
return coef
def return_locations(self, lam, allcoef, xindx, yindx, order=3):
'''
Calculates the detector coordinates of lenslet located at `xindx`, `yindx`
for desired wavelength `lam`
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of floats
Polynomial coefficients of wavelength solution
xindx: int
X index of lenslet in lenslet array
yindx: int
Y index of lenslet in lenslet array
order: int
Order of polynomial wavelength solution
Returns
-------
interp_x: float
X coordinate on the detector
interp_y: float
Y coordinate on the detector
'''
if len(allcoef.shape) == 1:
coeforder = int(np.sqrt(allcoef.shape[0])) - 1
interp_x, interp_y = _transform(xindx, yindx, coeforder, allcoef)
return interp_x, interp_y
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
raise ValueError("Number of coefficients incorrect for polynomial order.")
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(self.order + 1):
coef += self.interp_arr[k] * np.log(lam)**k
interp_x, interp_y = _transform(xindx, yindx, coeforder, coef)
return interp_x, interp_y
def genpixsol(self, lam, allcoef, order=3, lam1=None, lam2=None):
"""
Calculates the wavelength at the center of each pixel within a microspectrum
Parameters
----------
lam: float
Wavelength in nm
allcoef: list of floats
List describing the polynomial coefficients that best fit the lenslets,
for all wavelengths
order: int
Order of the polynomical fit
lam1: float
Lowest wavelength in nm
lam2: float
Highest wavelength in nm
Notes
-----
This functions fills in most of the fields of the PSFLet class: the array
of xindx, yindx, nlam, lam_indx and nlam_max
"""
###################################################################
# Read in wavelengths of spots, coefficients of wavelength
# solution. Obtain extrapolated limits of wavlength solution
# to 4% below and 3% above limits of the coefficient file by
# default.
###################################################################
if lam1 is None:
lam1 = np.amin(lam) / 1.04
if lam2 is None:
lam2 = np.amax(lam) * 1.03
interporder = order
if self.interp_arr is None:
self.geninterparray(lam, allcoef, order=order)
coeforder = int(np.sqrt(allcoef.shape[1])) - 1
if not (coeforder + 1) * (coeforder + 2) == allcoef.shape[1]:
raise ValueError("Number of coefficients incorrect for polynomial order.")
xindx = np.arange(-100, 101)
xindx, yindx = np.meshgrid(xindx, xindx)
n_spline = 100
interp_x = np.zeros(tuple([n_spline] + list(xindx.shape)))
interp_y = np.zeros(interp_x.shape)
interp_lam = np.linspace(lam1, lam2, n_spline)
for i in range(n_spline):
coef = np.zeros((coeforder + 1) * (coeforder + 2))
for k in range(interporder + 1):
coef += self.interp_arr[k] * np.log(interp_lam[i])**k
interp_x[i], interp_y[i] = _transform(xindx, yindx, coeforder, coef)
x = np.zeros(tuple(list(xindx.shape) + [1000]))
y = np.zeros(x.shape)
        nlam = np.zeros(xindx.shape, int)
lam_out = np.zeros(y.shape)
good = np.zeros(xindx.shape)
for ix in range(xindx.shape[0]):
for iy in range(xindx.shape[1]):
pix_x = interp_x[:, ix, iy]
pix_y = interp_y[:, ix, iy]
if np.all(pix_x < 0) or np.all(pix_x > 2048) or np.all(pix_y < 0) or np.all(pix_y > 2048):
continue
if pix_y[-1] < pix_y[0]:
try:
tck_y = interpolate.splrep(pix_y[::-1], interp_lam[::-1], k=1, s=0)
except:
print(pix_x, pix_y)
raise
else:
tck_y = interpolate.splrep(pix_y, interp_lam, k=1, s=0)
y1, y2 = [int(np.amin(pix_y)) + 1, int(np.amax(pix_y))]
tck_x = interpolate.splrep(interp_lam, pix_x, k=1, s=0)
nlam[ix, iy] = y2 - y1 + 1
y[ix, iy, :nlam[ix, iy]] = np.arange(y1, y2 + 1)
lam_out[ix, iy, :nlam[ix, iy]] = interpolate.splev(y[ix, iy, :nlam[ix, iy]], tck_y)
x[ix, iy, :nlam[ix, iy]] = interpolate.splev(lam_out[ix, iy, :nlam[ix, iy]], tck_x)
for nlam_max in range(x.shape[-1]):
if np.all(y[:, :, nlam_max] == 0):
break
self.xindx = x[:, :, :nlam_max]
self.yindx = y[:, :, :nlam_max]
self.nlam = nlam
self.lam_indx = lam_out[:, :, :nlam_max]
self.nlam_max = np.amax(nlam)
def _initcoef(order, scale=15.02, phi=np.arctan2(1.926, -1), x0=0, y0=0):
"""
Private function _initcoef in locate_psflets
Create a set of coefficients including a rotation matrix plus zeros.
Parameters
----------
order: int
The polynomial order of the grid distortion
scale: float
The linear separation in pixels of the PSFlets. Default 15.02.
phi: float
The pitch angle of the lenslets. Default atan(1.926)
x0: float
x offset to apply to the central pixel. Default 0
y0: float
y offset to apply to the central pixel. Default 0
Returns
-------
coef: list of floats
A list of length (order+1)*(order+2) to be optimized.
Notes
-----
The list of coefficients has space for a polynomial fit of the
input order (i.e., for order 3, up to terms like x**3 and x**2*y,
but not x**3*y). It is all zeros in the output apart from the
rotation matrix given by scale and phi.
"""
try:
if not order == int(order):
raise ValueError("Polynomial order must be integer")
else:
if order < 1 or order > 5:
raise ValueError("Polynomial order must be >0, <=5")
except:
raise ValueError("Polynomial order must be integer")
n = (order + 1) * (order + 2)
coef = np.zeros((n))
coef[0] = x0
coef[1] = scale * np.cos(phi)
coef[order + 1] = -scale * np.sin(phi)
    coef[n // 2] = y0
    coef[n // 2 + 1] = scale * np.sin(phi)
    coef[n // 2 + order + 1] = scale * np.cos(phi)
return list(coef)
def _pullorder(coef, order=1):
coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
coef_short = []
i = 0
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= order:
coef_short += [coef[i]]
i += 1
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= order:
coef_short += [coef[i]]
i += 1
return coef_short
def _insertorder(coefshort, coef):
coeforder = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
shortorder = int(np.sqrt(len(coefshort) + 0.25) - 1.5 + 1e-12)
i = 0
j = 0
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= shortorder:
coef[i] = coefshort[j]
j += 1
i += 1
for ix in range(coeforder + 1):
for iy in range(coeforder - ix + 1):
if ix + iy <= shortorder:
coef[i] = coefshort[j]
j += 1
i += 1
return coef
def _transform(x, y, order, coef, highordercoef=None):
"""
Private function _transform in locate_psflets
Apply the coefficients given to transform the coordinates using
a polynomial.
Parameters
----------
x: ndarray
Rectilinear grid
y: ndarray of floats
Rectilinear grid
order: int
Order of the polynomial fit
coef: list of floats
List of the coefficients. Must match the length required by
order = (order+1)*(order+2)
highordercoef: Boolean
Returns
-------
_x: ndarray
Transformed coordinates
_y: ndarray
Transformed coordinates
"""
try:
if not len(coef) == (order + 1) * (order + 2):
pass # raise ValueError("Number of coefficients incorrect for polynomial order.")
except:
raise AttributeError("order must be integer, coef should be a list.")
try:
if not order == int(order):
raise ValueError("Polynomial order must be integer")
else:
if order < 1 or order > 5:
raise ValueError("Polynomial order must be >0, <=5")
except:
raise ValueError("Polynomial order must be integer")
# n**2 + 3*n + 2 = (n + 1.5)**2 - 0.25
# = (1/4)*((2*n + 3)**2 - 1) = len(coef)
order1 = int(np.sqrt(len(coef) + 0.25) - 1.5 + 1e-12)
_x = np.zeros(np.asarray(x).shape)
_y = np.zeros(np.asarray(y).shape)
i = 0
for ix in range(order1 + 1):
for iy in range(order1 - ix + 1):
_x += coef[i] * x**ix * y**iy
i += 1
for ix in range(order1 + 1):
for iy in range(order1 - ix + 1):
_y += coef[i] * x**ix * y**iy
i += 1
if highordercoef is None:
return [_x, _y]
else:
order2 = int(np.sqrt(len(highordercoef) + 0.25) - 1.5 + 1e-12)
i = 0
for ix in range(order2 + 1):
for iy in range(order1 - ix + 1):
if ix + iy <= order1:
continue
_x += coef[i] * x**ix * y**iy
i += 1
for ix in range(order2 + 1):
for iy in range(order1 - ix + 1):
if ix + iy <= order1:
continue
_y += coef[i] * x**ix * y**iy
i += 1
return [_x, _y]
def _corrval(coef, x, y, filtered, order, trimfrac=0.1, highordercoef=None):
"""
Private function _corrval in locate_psflets
Return the negative of the sum of the middle XX% of the PSFlet
spot fluxes (disregarding those with the most and the least flux
to limit the impact of outliers). Analogous to the trimmed mean.
Parameters
----------
coef: list of floats
coefficients for polynomial transformation
x: ndarray
coordinates of lenslets
y: ndarray
coordinates of lenslets
filtered: ndarray
image convolved with gaussian PSFlet
order: int
order of the polynomial fit
trimfrac: float
fraction of outliers (high & low combined) to trim
Default 0.1 (5% trimmed on the high end, 5% on the low end)
highordercoef: boolean
Returns
-------
score: float
Negative sum of PSFlet fluxes, to be minimized
"""
#################################################################
# Use np.nan for lenslet coordinates outside the CHARIS FOV,
# discard these from the calculation before trimming.
#################################################################
_x, _y = _transform(x, y, order, coef, highordercoef)
vals = ndimage.map_coordinates(filtered, [_y, _x], mode='constant',
cval=np.nan, prefilter=False)
vals_ok = vals[np.where(np.isfinite(vals))]
iclip = int(vals_ok.shape[0] * trimfrac / 2)
vals_sorted = np.sort(vals_ok)
score = -1 * np.sum(vals_sorted[iclip:-iclip])
return score
def locatePSFlets(inImage, polyorder=2, sig=0.7, coef=None, trimfrac=0.1,
phi=np.arctan2(1.926, -1), scale=15.02, fitorder=None):
"""
function locatePSFlets takes an Image class, assumed to be a
monochromatic grid of spots with read noise and shot noise, and
returns the esimated positions of the spot centroids. This is
designed to constrain the domain of the PSF-let fitting later in
the pipeline.
Parameters
----------
imImage: Image class
Assumed to be a monochromatic grid of spots
polyorder: float
order of the polynomial coordinate transformation. Default 2.
sig: float
standard deviation of convolving Gaussian used
for estimating the grid of centroids. Should be close
to the true value for the PSF-let spots. Default 0.7.
coef: list
initial guess of the coefficients of polynomial
coordinate transformation
trimfrac: float
fraction of lenslet outliers (high & low
combined) to trim in the minimization. Default 0.1
(5% trimmed on the high end, 5% on the low end)
Returns
-------
x: 2D ndarray
Estimated spot centroids in x.
y: 2D ndarray
Estimated spot centroids in y.
good:2D boolean ndarray
True for lenslets with spots inside the detector footprint
coef: list of floats
List of best-fit polynomial coefficients
Notes
-----
the coefficients, if not supplied, are initially set to the
known pitch angle and scale. A loop then does a quick check to find
reasonable offsets in x and y. With all of the first-order polynomial
coefficients set, the optimizer refines these and the higher-order
coefficients. This routine seems to be relatively robust down to
per-lenslet signal-to-noise ratios of order unity (or even a little
less).
Important note: as of now (09/2015), the number of lenslets to grid
is hard-coded as 1/10 the dimensionality of the final array. This is
sufficient to cover the detector for the fiducial lenslet spacing.
"""
#############################################################
# Convolve with a Gaussian, centroid the filtered image.
#############################################################
x = np.arange(-1 * int(3 * sig + 1), int(3 * sig + 1) + 1)
x, y = np.meshgrid(x, x)
gaussian = np.exp(-(x**2 + y**2) / (2 * sig**2))
if inImage.ivar is None:
unfiltered = signal.convolve2d(inImage.data, gaussian, mode='same')
else:
unfiltered = signal.convolve2d(inImage.data * inImage.ivar, gaussian, mode='same')
unfiltered /= signal.convolve2d(inImage.ivar, gaussian, mode='same') + 1e-10
filtered = ndimage.interpolation.spline_filter(unfiltered)
#############################################################
# x, y: Grid of lenslet IDs, Lenslet (0, 0) is the center.
#############################################################
gridfrac = 20
ydim, xdim = inImage.data.shape
x = np.arange(-(ydim // gridfrac), ydim // gridfrac + 1)
x, y = np.meshgrid(x, x)
#############################################################
# Set up polynomial coefficients, convert from lenslet
# coordinates to coordinates on the detector array.
# Then optimize the coefficients.
# We want to start with a decent guess, so we use a grid of
# offsets. Seems to be robust down to SNR/PSFlet ~ 1
# Create slice indices for subimages to perform the intial
# fits on. The new dimensionality in both x and y is 2*subsize
#############################################################
if coef is None:
ix_arr = np.arange(0, 14, 0.5)
iy_arr = np.arange(0, 25, 0.5)
log.info("Initializing PSFlet location transformation coefficients")
init = True
else:
ix_arr = np.arange(-3.0, 3.05, 0.2)
iy_arr = np.arange(-3.0, 3.05, 0.2)
coef_save = list(coef[:])
log.info("Initializing transformation coefficients with previous values")
init = False
bestval = 0
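    # Do the coarse offset search on a central subregion of the detector
    # (3/8 of the frame trimmed from each side) to keep it fast and robust.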
subshape = xdim * 3 // 8
_s = x.shape[0] * 3 // 8
subfiltered = ndimage.interpolation.spline_filter(unfiltered[subshape:-subshape, subshape:-subshape])
for ix in ix_arr:
for iy in iy_arr:
if init:
coef = _initcoef(polyorder, x0=ix + xdim / 2. - subshape,
y0=iy + ydim / 2. - subshape, scale=scale, phi=phi)
else:
coef = copy.deepcopy(coef_save)
coef[0] += ix - subshape
                coef[(polyorder + 1) * (polyorder + 2) // 2] += iy - subshape
newval = _corrval(coef, x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s],
subfiltered, polyorder, trimfrac)
if newval < bestval:
bestval = newval
coef_opt = copy.deepcopy(coef)
if init:
log.info("Performing initial optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
res = optimize.minimize(_corrval, coef_opt, args=(
x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac), method='Powell')
coef_opt = res.x
else:
log.info("Performing initial optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
coef_lin = _pullorder(coef_opt, 1)
res = optimize.minimize(_corrval, coef_lin, args=(
x[_s:-_s, _s:-_s], y[_s:-_s, _s:-_s], subfiltered, polyorder, trimfrac, coef_opt), method='Powell', options={'xtol': 1e-6, 'ftol': 1e-6})
coef_lin = res.x
coef_opt = _insertorder(coef_lin, coef_opt)
coef_opt[0] += subshape
    coef_opt[(polyorder + 1) * (polyorder + 2) // 2] += subshape
#############################################################
# If we have coefficients from last time, we assume that we
# are now at a slightly higher wavelength, so try out offsets
# that are slightly to the right to get a good initial guess.
#############################################################
log.info("Performing final optimization of PSFlet location transformation coefficients for frame " + inImage.filename)
if not init and fitorder is not None:
coef_lin = _pullorder(coef_opt, fitorder)
res = optimize.minimize(_corrval, coef_lin, args=(x, y, filtered, polyorder, trimfrac,
coef_opt), method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5})
coef_lin = res.x
coef_opt = _insertorder(coef_lin, coef_opt)
else:
res = optimize.minimize(_corrval, coef_opt, args=(x, y, filtered, polyorder, trimfrac),
method='Powell', options={'xtol': 1e-5, 'ftol': 1e-5})
coef_opt = res.x
if not res.success:
log.info("Optimizing PSFlet location transformation coefficients may have failed for frame " + inImage.filename)
_x, _y = _transform(x, y, polyorder, coef_opt)
#############################################################
# Boolean: do the lenslet PSFlets lie within the detector?
#############################################################
good = (_x > 5) * (_x < xdim - 5) * (_y > 5) * (_y < ydim - 5)
return [_x, _y, good, coef_opt]
| 2.203125 | 2 |