import os
from .login import *
from .action import *
from .box import *
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1' | python |
"""Created by Alysha Kester-Terry 3/12/2021 for GoodRx
This file is to set up the driver for a specific site.
We want to make this scalable in case there could be multiple environments or web UI URLs we may want to hit.
"""
import logging
def get_app_url(App, environment='test'):
"""To define the search engine URL by type given"""
#TODO define your different environments and how you'd want them to switch
if 'test' in environment:
env = 'test'
else:
env = 'prod'
switcher = {
#TODO you can add the environment into the URLs as vars here if needed
App.google: 'https://google.com',
App.bing: 'https://bing.com',
App.yandex: 'http://yandex.com'
}
env_url = switcher.get(App, 'Invalid app option, or not yet implemented')
logging.debug(msg='The environment url is: {}'.format(env_url))
return env_url
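# A minimal sketch of the `App` argument this helper assumes (it is not defined in
# this module): an enum-like object whose members name the supported engines, so
# the switcher keys above resolve against it. For example:
#
#     from enum import Enum
#
#     class App(Enum):
#         google = 'google'
#         bing = 'bing'
#         yandex = 'yandex'
#
# Callers would then pass a member such as App.google to get_app_url().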
def navigate_to_search_engine(driver, app, environment='test'):
"""To navigate to the appropriate URL
:param app: Web app to hit
:param driver: The webdriver
:param environment: Test, UAT, Dev etc. Comes from run args
"""
url = get_app_url(app, environment)
driver.get(url)
link = driver.current_url
logging.debug(msg='The current url is: {}'.format(link)) | python |
# Copyright 2019 Contributors to Hyperledger Sawtooth
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
from uuid import uuid4
import hashlib
from sanic import Blueprint
from sanic.response import json
from rbac.common import rbac
from rbac.common.crypto.keys import Key
from rbac.common.crypto.secrets import encrypt_private_key
from rbac.server.api.errors import ApiNotImplemented
from rbac.server.api.auth import authorized
from rbac.server.api import utils
from rbac.server.api.proposals import compile_proposal_resource
from rbac.server.db import auth_query
from rbac.server.db import proposals_query
from rbac.server.db import roles_query
from rbac.server.db import users_query
from rbac.common.logs import get_logger
from rbac.common.crypto.secrets import generate_api_key
from rbac.server.db import db_utils
LOGGER = get_logger(__name__)
USERS_BP = Blueprint("users")
@USERS_BP.get("api/users")
@authorized()
async def fetch_all_users(request):
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
head_block = await utils.get_request_block(request)
LOGGER.info(head_block)
start, limit = utils.get_request_paging_info(request)
user_resources = await users_query.fetch_all_user_resources(
conn, head_block.get("num"), start, limit
)
conn.close()
return await utils.create_response(
conn, request.url, user_resources, head_block, start=start, limit=limit
)
@USERS_BP.post("api/users")
async def create_new_user(request):
required_fields = ["name", "username", "password", "email"]
utils.validate_fields(required_fields, request.json)
# Generate keys
txn_key = Key()
txn_user_id = rbac.user.unique_id()
encrypted_private_key = encrypt_private_key(
request.app.config.AES_KEY, txn_key.public_key, txn_key.private_key_bytes
)
# Build create user transaction
batch_list = rbac.user.batch_list(
signer_keypair=txn_key,
signer_user_id=txn_user_id,
user_id=txn_user_id,
name=request.json.get("name"),
username=request.json.get("username"),
email=request.json.get("email"),
metadata=request.json.get("metadata"),
manager=request.json.get("manager"),
key=txn_key.public_key,
)
# Submit transaction and wait for completion
await utils.send(
request.app.config.VAL_CONN, batch_list, request.app.config.TIMEOUT
)
# Save new user in auth table
hashed_password = hashlib.sha256(
request.json.get("password").encode("utf-8")
).hexdigest()
auth_entry = {
"user_id": txn_user_id,
"hashed_password": hashed_password,
"encrypted_private_key": encrypted_private_key,
"username": request.json.get("username"),
"email": request.json.get("email"),
}
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
await auth_query.create_auth_entry(conn, auth_entry)
conn.close()
# Send back success response
return create_user_response(request, txn_user_id)
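# Illustrative request body for POST api/users, based on required_fields above;
# "manager" and "metadata" are the optional extras read further down. Values are
# placeholders, not real data:
#
#     {
#         "name": "Jane Doe",
#         "username": "jdoe",
#         "password": "not-a-real-password",
#         "email": "jane@example.com"
#     }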
@USERS_BP.get("api/users/<user_id>")
@authorized()
async def get_user(request, user_id):
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
head_block = await utils.get_request_block(request)
# this takes 4 seconds
user_resource = await users_query.fetch_user_resource(
conn, user_id, head_block.get("num")
)
conn.close()
return await utils.create_response(conn, request.url, user_resource, head_block)
@USERS_BP.get("api/user/<user_id>/summary")
@authorized()
async def get_user_summary(request, user_id):
"""This endpoint is for returning summary data for a user, just it's user_id,name, email."""
head_block = await utils.get_request_block(request)
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
user_resource = await users_query.fetch_user_resource_summary(
conn, user_id, head_block.get("num")
)
conn.close()
return await utils.create_response(conn, request.url, user_resource, head_block)
@USERS_BP.get("api/users/<user_id>/summary")
@authorized()
async def get_users_summary(request, user_id):
"""This endpoint is for returning summary data for a user, just it's user_id,name, email."""
head_block = await utils.get_request_block(request)
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
user_resource = await users_query.fetch_user_resource_summary(
conn, user_id, head_block.get("num")
)
conn.close()
return await utils.create_response(conn, request.url, user_resource, head_block)
@USERS_BP.get("api/users/<user_id>/relationships")
@authorized()
async def get_user_relationships(request, user_id):
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
head_block = await utils.get_request_block(request)
user_resource = await users_query.fetch_user_relationships(
conn, user_id, head_block.get("num")
)
conn.close()
return await utils.create_response(conn, request.url, user_resource, head_block)
@USERS_BP.patch("api/users/<user_id>")
@authorized()
async def update_user(request, user_id):
raise ApiNotImplemented()
@USERS_BP.put("api/users/<user_id>/manager")
@authorized()
async def update_manager(request, user_id):
required_fields = ["id"]
utils.validate_fields(required_fields, request.json)
txn_key, txn_user_id = await utils.get_transactor_key(request)
proposal_id = str(uuid4())
batch_list = rbac.user.manager.propose.batch_list(
signer_keypair=txn_key,
signer_user_id=txn_user_id,
proposal_id=proposal_id,
user_id=user_id,
new_manager_id=request.json.get("id"),
reason=request.json.get("reason"),
metadata=request.json.get("metadata"),
)
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
await utils.send(conn, batch_list, request.app.config.TIMEOUT)
conn.close()
return json({"proposal_id": proposal_id})
@USERS_BP.get("api/users/<user_id>/proposals/open")
@authorized()
async def fetch_open_proposals(request, user_id):
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
head_block = await utils.get_request_block(request)
start, limit = utils.get_request_paging_info(request)
proposals = await proposals_query.fetch_all_proposal_resources(
conn, head_block.get("num"), start, limit
)
proposal_resources = []
for proposal in proposals:
proposal_resource = await compile_proposal_resource(
conn, proposal, head_block.get("num")
)
proposal_resources.append(proposal_resource)
open_proposals = []
for proposal_resource in proposal_resources:
if (
proposal_resource["status"] == "OPEN"
and user_id in proposal_resource["approvers"]
):
open_proposals.append(proposal_resource)
conn.close()
return await utils.create_response(
conn, request.url, open_proposals, head_block, start=start, limit=limit
)
@USERS_BP.get("api/users/<user_id>/proposals/confirmed")
@authorized()
async def fetch_confirmed_proposals(request, user_id):
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
head_block = await utils.get_request_block(request)
start, limit = utils.get_request_paging_info(request)
proposals = await proposals_query.fetch_all_proposal_resources(
conn, head_block.get("num"), start, limit
)
proposal_resources = []
for proposal in proposals:
proposal_resource = await compile_proposal_resource(
conn, proposal, head_block.get("num")
)
proposal_resources.append(proposal_resource)
confirmed_proposals = []
for proposal_resource in proposal_resources:
if (
proposal_resource["status"] == "CONFIRMED"
and user_id in proposal_resource["approvers"]
):
confirmed_proposals.append(proposal_resource)
conn.close()
return await utils.create_response(
conn, request.url, confirmed_proposals, head_block, start=start, limit=limit
)
@USERS_BP.get("api/users/<user_id>/proposals/rejected")
@authorized()
async def fetch_rejected_proposals(request, user_id):
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
head_block = await utils.get_request_block(request)
start, limit = utils.get_request_paging_info(request)
proposals = await proposals_query.fetch_all_proposal_resources(
conn, head_block.get("num"), start, limit
)
proposal_resources = []
for proposal in proposals:
proposal_resource = await compile_proposal_resource(
conn, proposal, head_block.get("num")
)
proposal_resources.append(proposal_resource)
rejected_proposals = []
for proposal_resource in proposal_resources:
if (
proposal_resource["status"] == "REJECTED"
and user_id in proposal_resource["approvers"]
):
rejected_proposals.append(proposal_resource)
conn.close()
return await utils.create_response(
conn, request.url, rejected_proposals, head_block, start=start, limit=limit
)
@USERS_BP.patch("api/users/<user_id>/roles/expired")
@authorized()
async def update_expired_roles(request, user_id):
"""Manually expire user role membership"""
head_block = await utils.get_request_block(request)
required_fields = ["id"]
utils.validate_fields(required_fields, request.json)
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
await roles_query.expire_role_member(
conn, request.json.get("id"), user_id, head_block.get("num")
)
conn.close()
return json({"role_id": request.json.get("id")})
@USERS_BP.get("api/users/<user_id>/roles/recommended")
@authorized()
async def fetch_recommended_roles(request, user_id):
conn = await db_utils.create_connection(
request.app.config.DB_HOST,
request.app.config.DB_PORT,
request.app.config.DB_NAME,
)
head_block = await utils.get_request_block(request)
start, limit = utils.get_request_paging_info(request)
recommended_resources = await roles_query.fetch_all_role_resources(
conn, head_block.get("num"), 0, 10
)
conn.close()
return await utils.create_response(
conn, request.url, recommended_resources, head_block, start=start, limit=limit
)
def create_user_response(request, user_id):
token = generate_api_key(request.app.config.SECRET_KEY, user_id)
user_resource = {
"id": user_id,
"name": request.json.get("name"),
"username": request.json.get("username"),
"email": request.json.get("email"),
"ownerOf": [],
"administratorOf": [],
"memberOf": [],
"proposals": [],
}
if request.json.get("manager"):
user_resource["manager"] = request.json.get("manager")
if request.json.get("metadata"):
user_resource["metadata"] = request.json.get("metadata")
return utils.create_authorization_response(
token, {"message": "Authorization successful", "user": user_resource}
)
| python |
##############################################
# sudo apt-get install -y python3-picamera
# sudo -H pip3 install imutils --upgrade
##############################################
import multiprocessing as mp
import sys
from time import sleep
import argparse
import cv2
import numpy as np
import time
try:
from armv7l.openvino.inference_engine import IENetwork, IEPlugin
except:
from openvino.inference_engine import IENetwork, IEPlugin
import heapq
import threading
try:
from imutils.video.pivideostream import PiVideoStream
from imutils.video.filevideostream import FileVideoStream
import imutils
except:
pass
lastresults = None
threads = []
processes = []
frameBuffer = None
results = None
fps = ""
detectfps = ""
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
cam = None
vs = None
window_name = ""
elapsedtime = 0.0
g_plugin = None
g_inferred_request = None
g_heap_request = None
g_inferred_cnt = 0
g_number_of_allocated_ncs = 0
LABELS = ["neutral", "happy", "sad", "surprise", "anger"]
COLORS = np.random.uniform(0, 255, size=(len(LABELS), 3))
def camThread(LABELS, resultsEm, frameBuffer, camera_width, camera_height, vidfps, number_of_camera, mode_of_camera):
global fps
global detectfps
global lastresults
global framecount
global detectframecount
global time1
global time2
global cam
global vs
global window_name
if mode_of_camera == 0:
cam = cv2.VideoCapture(number_of_camera)
if cam.isOpened() != True:
print("USB Camera Open Error!!!")
sys.exit(0)
cam.set(cv2.CAP_PROP_FPS, vidfps)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, camera_width)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, camera_height)
window_name = "USB Camera"
else:
vs = PiVideoStream((camera_width, camera_height), vidfps).start()
sleep(3)
window_name = "PiCamera"
cv2.namedWindow(window_name, cv2.WINDOW_AUTOSIZE)
while True:
t1 = time.perf_counter()
# USB Camera Stream or PiCamera Stream Read
color_image = None
if mode_of_camera == 0:
s, color_image = cam.read()
if not s:
continue
else:
color_image = vs.read()
if frameBuffer.full():
frameBuffer.get()
frames = color_image
height = color_image.shape[0]
width = color_image.shape[1]
frameBuffer.put(color_image.copy())
res = None
if not resultsEm.empty():
res = resultsEm.get(False)
# print("[LOG] ".format(type(res)))
# print(res)
detectframecount += 1
imdraw = overlay_on_image(frames, res)
lastresults = res
else:
imdraw = overlay_on_image(frames, lastresults)
cv2.imshow(window_name, cv2.resize(imdraw, (width, height)))
if cv2.waitKey(1) & 0xFF == ord('q'):
sys.exit(0)
## Print FPS
framecount += 1
if framecount >= 25:
fps = "(Playback) {:.1f} FPS".format(time1 / 25)
detectfps = "(Detection) {:.1f} FPS".format(detectframecount / time2)
framecount = 0
detectframecount = 0
time1 = 0
time2 = 0
t2 = time.perf_counter()
elapsedTime = t2 - t1
time1 += 1 / elapsedTime
time2 += elapsedTime
# l = Search list
# x = Search target value
def searchlist(l, x, notfoundvalue=-1):
if x in l:
return l.index(x)
else:
return notfoundvalue
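# For example, searchlist([1, 0, 1], 0) returns 1 (the first free request slot),
# while searchlist([1, 1, 1], 0) returns the not-found value -1.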
def async_infer(ncsworkerFd, ncsworkerEm):
while True:
ncsworkerFd.predict_async()
ncsworkerEm.predict_async()
class BaseNcsWorker():
def __init__(self, devid, model_path, number_of_ncs):
global g_plugin
global g_inferred_request
global g_heap_request
global g_inferred_cnt
global g_number_of_allocated_ncs
self.devid = devid
if number_of_ncs == 0:
self.num_requests = 4
elif number_of_ncs == 1:
self.num_requests = 4
elif number_of_ncs == 2:
self.num_requests = 2
elif number_of_ncs >= 3:
self.num_requests = 1
print("g_number_of_allocated_ncs =", g_number_of_allocated_ncs, "number_of_ncs =", number_of_ncs)
if g_number_of_allocated_ncs < 1:
self.plugin = IEPlugin(device="MYRIAD")
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
g_plugin = self.plugin
g_inferred_request = self.inferred_request
g_heap_request = self.heap_request
g_inferred_cnt = self.inferred_cnt
g_number_of_allocated_ncs += 1
else:
self.plugin = g_plugin
self.inferred_request = g_inferred_request
self.heap_request = g_heap_request
self.inferred_cnt = g_inferred_cnt
self.model_xml = model_path + ".xml"
self.model_bin = model_path + ".bin"
self.net = IENetwork(model=self.model_xml, weights=self.model_bin)
self.input_blob = next(iter(self.net.inputs))
self.exec_net = self.plugin.load(network=self.net, num_requests=self.num_requests)
class NcsWorkerFd(BaseNcsWorker):
def __init__(self, devid, frameBuffer, resultsFd, model_path, number_of_ncs):
super().__init__(devid, model_path, number_of_ncs)
self.frameBuffer = frameBuffer
self.resultsFd = resultsFd
def image_preprocessing(self, color_image):
prepimg = cv2.resize(color_image, (300, 300))
prepimg = prepimg[np.newaxis, :, :, :] # Batch size axis add
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
def predict_async(self):
try:
if self.frameBuffer.empty():
return
color_image = self.frameBuffer.get()
prepimg = self.image_preprocessing(color_image)
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1:
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
self.exec_net.requests[reqnum].wait(-1)
out = self.exec_net.requests[reqnum].outputs["detection_out"].flatten()
detection_list = []
face_image_list = []
for detection in out.reshape(-1, 7):
confidence = float(detection[2])
if confidence > 0.3:
detection[3] = int(detection[3] * color_image.shape[1])
detection[4] = int(detection[4] * color_image.shape[0])
detection[5] = int(detection[5] * color_image.shape[1])
detection[6] = int(detection[6] * color_image.shape[0])
if (detection[6] - detection[4]) > 0 and (detection[5] - detection[3]) > 0:
detection_list.extend(detection)
face_image_list.extend([color_image[int(detection[4]):int(detection[6]),
int(detection[3]):int(detection[5]), :]])
if len(detection_list) > 0:
self.resultsFd.put([detection_list, face_image_list])
self.inferred_request[reqnum] = 0
except:
import traceback
traceback.print_exc()
class NcsWorkerEm(BaseNcsWorker):
def __init__(self, devid, resultsFd, resultsEm, model_path, number_of_ncs):
super().__init__(devid, model_path, number_of_ncs)
self.resultsFd = resultsFd
self.resultsEm = resultsEm
def image_preprocessing(self, color_image):
try:
prepimg = cv2.resize(color_image, (64, 64))
except:
prepimg = np.full((64, 64, 3), 128)
prepimg = prepimg[np.newaxis, :, :, :] # Batch size axis add
prepimg = prepimg.transpose((0, 3, 1, 2)) # NHWC to NCHW
return prepimg
def predict_async(self):
try:
if self.resultsFd.empty():
return
resultFd = self.resultsFd.get()
detection_list = resultFd[0]
face_image_list = resultFd[1]
emotion_list = []
max_face_image_list_cnt = len(face_image_list)
image_idx = 0
end_cnt_processing = 0
heapflg = False
cnt = 0
dev = 0
if max_face_image_list_cnt <= 0:
detection_list.extend([""])
self.resultsEm.put([detection_list])
return
while True:
reqnum = searchlist(self.inferred_request, 0)
if reqnum > -1 and image_idx <= (max_face_image_list_cnt - 1) and len(face_image_list[image_idx]) > 0:
if len(face_image_list[image_idx]) == 0:
image_idx += 1
continue
else:
prepimg = self.image_preprocessing(face_image_list[image_idx])
image_idx += 1
self.exec_net.start_async(request_id=reqnum, inputs={self.input_blob: prepimg})
self.inferred_request[reqnum] = 1
self.inferred_cnt += 1
if self.inferred_cnt == sys.maxsize:
self.inferred_request = [0] * self.num_requests
self.heap_request = []
self.inferred_cnt = 0
heapq.heappush(self.heap_request, (self.inferred_cnt, reqnum))
heapflg = True
if heapflg:
cnt, dev = heapq.heappop(self.heap_request)
heapflg = False
if self.exec_net.requests[dev].wait(0) == 0:
self.exec_net.requests[dev].wait(-1)
out = self.exec_net.requests[dev].outputs["prob_emotion"].flatten()
emotion = LABELS[int(np.argmax(out))]
detection_list.extend([emotion])
self.resultsEm.put([detection_list])
self.inferred_request[dev] = 0
end_cnt_processing += 1
if end_cnt_processing >= max_face_image_list_cnt:
break
else:
heapq.heappush(self.heap_request, (cnt, dev))
heapflg = True
except:
import traceback
traceback.print_exc()
def inferencer(resultsFd, resultsEm, frameBuffer, number_of_ncs, fd_model_path, em_model_path):
# Init infer threads
threads = []
for devid in range(number_of_ncs):
# Face Detection, Emotion Recognition start
thworker = threading.Thread(target=async_infer,
args=(NcsWorkerFd(devid, frameBuffer, resultsFd, fd_model_path, number_of_ncs),
NcsWorkerEm(devid, resultsFd, resultsEm, em_model_path, 0),))
thworker.start()
threads.append(thworker)
print("Thread-" + str(devid))
for th in threads:
th.join()
def overlay_on_image(frames, object_infos):
try:
color_image = frames
if isinstance(object_infos, type(None)):
return color_image
# Show images
height = color_image.shape[0]
width = color_image.shape[1]
img_cp = color_image.copy()
for object_info in object_infos:
if object_info[2] == 0.0:
break
if (not np.isfinite(object_info[0]) or
not np.isfinite(object_info[1]) or
not np.isfinite(object_info[2]) or
not np.isfinite(object_info[3]) or
not np.isfinite(object_info[4]) or
not np.isfinite(object_info[5]) or
not np.isfinite(object_info[6])):
continue
min_score_percent = 60
percentage = int(object_info[2] * 100)
if (percentage <= min_score_percent):
continue
box_left = int(object_info[3])
box_top = int(object_info[4])
box_bottom = int(object_info[6])
emotion = str(object_info[7])
label_text = emotion + " (" + str(percentage) + "%)"
label_text_color = (255, 255, 255)
# info fps
cv2.putText(img_cp, fps, (width - 170, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38, 0, 255), 1, cv2.LINE_AA)
cv2.putText(img_cp, detectfps, (width - 170, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (38, 0, 255), 1,
cv2.LINE_AA)
# background of expression list
overlay = img_cp.copy()
opacity = 0.4
cv2.rectangle(img_cp, (box_left + box_bottom + 10 - 250, box_top - 25),
(box_left + box_bottom - 50, box_top + 25),
(64, 64, 64), cv2.FILLED)
cv2.addWeighted(overlay, opacity, img_cp, 1 - opacity, 0, img_cp)
# connect face and expressions
cv2.line(img_cp, (int((box_left + box_left + box_bottom - 250) / 2), box_top + 15),
(box_left + box_bottom - 250, box_top - 20),
(255, 255, 255), 1)
cv2.line(img_cp, (box_left + box_bottom - 250, box_top - 20),
(box_left + box_bottom + 10 - 250, box_top - 20),
(255, 255, 255), 1)
cv2.putText(img_cp, label_text, (int(box_left + box_bottom + 15 - 250), int(box_top - 12 + 20)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, label_text_color,
1)
return img_cp
except:
import traceback
traceback.print_exc()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-cm', '--modeofcamera', dest='mode_of_camera', type=int, default=0,
help='Camera Mode. 0:=USB Camera, 1:=PiCamera (Default=0)')
parser.add_argument('-cn', '--numberofcamera', dest='number_of_camera', type=int, default=0,
help='USB camera number. (Default=0)')
parser.add_argument('-wd', '--width', dest='camera_width', type=int, default=640,
help='Width of the frames in the video stream. (Default=640)')
parser.add_argument('-ht', '--height', dest='camera_height', type=int, default=480,
help='Height of the frames in the video stream. (Default=480)')
parser.add_argument('-numncs', '--numberofncs', dest='number_of_ncs', type=int, default=1,
help='Number of NCS. (Default=1)')
parser.add_argument('-vidfps', '--fpsofvideo', dest='fps_of_video', type=int, default=30,
help='FPS of Video. (Default=30)')
parser.add_argument('-fdmp', '--facedetectionmodelpath', dest='fd_model_path',
default='./model/face-detection-retail-0004',
help='Face Detection model path. (xml and bin. Except extension.)')
parser.add_argument('-emmp', '--emotionrecognitionmodelpath', dest='em_model_path',
default='./model/emotions-recognition-retail-0003',
help='Emotion Recognition model path. (xml and bin. Except extension.)')
args = parser.parse_args()
mode_of_camera = args.mode_of_camera
number_of_camera = args.number_of_camera
camera_width = args.camera_width
camera_height = args.camera_height
number_of_ncs = args.number_of_ncs
vidfps = args.fps_of_video
fd_model_path = args.fd_model_path
em_model_path = args.em_model_path
try:
mp.set_start_method('forkserver')
frameBuffer = mp.Queue(10)
resultsFd = mp.Queue() # Face Detection Queue
resultsEm = mp.Queue() # Emotion Recognition Queue
# Start streaming
p = mp.Process(target=camThread,
args=(LABELS, resultsEm, frameBuffer, camera_width, camera_height, vidfps, number_of_camera,
mode_of_camera),
daemon=True)
p.start()
processes.append(p)
# Start detection MultiStick
# Activation of inferencer
p = mp.Process(target=inferencer,
args=(resultsFd, resultsEm, frameBuffer, number_of_ncs, fd_model_path, em_model_path),
daemon=True)
p.start()
processes.append(p)
while True:
sleep(1)
except:
import traceback
traceback.print_exc()
finally:
for p in range(len(processes)):
processes[p].terminate()
print("\n\nFinished\n\n")
| python |
from argparse import ArgumentParser
from os import rename, walk
from os.path import join, splitext
PLACEHOLDER_VARIABLE = 'base-angular-app'
PLACEHOLDER_TITLE = 'Base Angular App'
PLACEHOLDER_OWNER = 'BaseAngularAppAuthors'
EXCLUDED_DIRECTORIES = ['.git', '.idea', 'node_modules']
EXCLUDED_FILES = ['replacer.py']
EXCLUDED_EXTENSIONS = ['.pyc']
def replace(file_path, site_variable, site_title, owner):
modified = False
with open(file_path, 'rb') as file_handle:
contents = file_handle.read()
if bytearray(PLACEHOLDER_VARIABLE, 'utf-8') in contents:
contents = contents.replace(bytearray(PLACEHOLDER_VARIABLE, 'utf-8'), bytearray(site_variable, 'utf-8'))
modified = True
if bytearray(PLACEHOLDER_OWNER, 'utf-8') in contents:
contents = contents.replace(bytearray(PLACEHOLDER_OWNER, 'utf-8'), bytearray(owner, 'utf-8'))
modified = True
if bytearray(PLACEHOLDER_TITLE, 'utf-8') in contents:
contents = contents.replace(bytearray(PLACEHOLDER_TITLE, 'utf-8'), bytearray(site_title, 'utf-8'))
modified = True
if modified:
with open(file_path, 'wb') as file_handle:
file_handle.write(contents)
print('Updated {0}'.format(file_path))
else:
print('No changes to {0}'.format(file_path))
def replace_in_files(site_variable, site_title, owner):
for root, dirs, files in walk('.'):
# First, make sure we don't touch anything in excluded directories
for excluded in EXCLUDED_DIRECTORIES:
if excluded in dirs:
dirs.remove(excluded)
print('Skipping {0}'.format(join(root, excluded)))
for name in files:
# Make sure we don't want to skip this file because of its name or extension
if name in EXCLUDED_FILES:
print('Skipping {0}'.format(join(root, name)))
continue
if splitext(name)[1] in EXCLUDED_EXTENSIONS:
print('Skipping {0}'.format(join(root, name)))
continue
full_path = join(root, name)
# Find and replace anything in the contents of the file
replace(full_path, site_variable, site_title, owner)
if __name__ == "__main__":
print('Enter the name of the site in a form suitable for a variable. This should consist of only lowercase characters and dashes (e.g., my-angular-app)')
site_variable = input('Site Variable: ')
print('\nEnter the name of the site in your preferred human-readable form. This can contain mixed case, spaces, symbols, etc. (e.g., My Angular App)')
site_title = input('Site Title: ')
print('\nEnter the name of the owner of this site. This name will appear in the copyright information for this site')
owner = input('Owner: ')
replace_in_files(site_variable, site_title, owner)
| python |
import os
import datetime
import json
from officy import JsonFile, Dir, File, Stime
from rumpy import RumClient
father_dir = os.path.dirname(os.path.dirname(__file__))
seedsfile = os.path.join(father_dir, "data", "seeds.json")
infofile = os.path.join(father_dir, "data", "groupsinfo.json")
FLAG_JOINGROUPS = True
PORT = 58356
if FLAG_JOINGROUPS:
bot = RumClient(port=PORT)
def search_groups(blocks_num=50, last_update_days=-30):
groupsinfo = JsonFile(infofile).read()
last_update = f"{Stime.days_later(datetime.date.today(),last_update_days)}"
gids = []
for group_id in groupsinfo:
if groupsinfo[group_id]["highest_height"] >= blocks_num:
if groupsinfo[group_id]["last_update"] >= last_update:
gids.append(group_id)
return gids
def _check_name(name):
names = ["测试", "test", "mytest", "去中心"]
for i in names:
if i in name:
return False
return True
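# For example, _check_name("my test group") returns False because it contains
# "test"; a name with none of the keywords above returns True.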
def init_mdfile(gids):
seeds = JsonFile(seedsfile).read()
groupsinfo = JsonFile(infofile).read()
lines = []
for gid in gids:
seed = seeds.get(gid)
if not seed:
continue
name = seed["group_name"]
if not _check_name(name):
continue
if groupsinfo[gid]["abandoned"]:
continue
# join the groups
if FLAG_JOINGROUPS:
bot.group.join(seed)
lines.extend(
[
f'### {seed["group_name"]}\n\n',
f'{seed["app_key"]} | 区块高度: {groupsinfo[gid]["highest_height"]}\n\n',
f'{Stime.ts2datetime(seed["genesis_block"]["TimeStamp"]).date()} 创建 | {groupsinfo[gid]["last_update"][:10]} 更新\n\n',
"```seed\n",
json.dumps(seed, ensure_ascii=False),
"\n```\n\n",
]
)
File("seeds_toshare.md").writelines(lines)
otherfile = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(__file__))),
"rum-docs",
"docs",
"rum-app",
"README.md",
)
print(otherfile)
data = File(otherfile).read()
flag = "\n## 更多种子\n"
lines = [data.split(flag)[0], flag, "\n"] + lines
File(otherfile).writelines(lines)
if __name__ == "__main__":
groupseeds = search_groups(blocks_num=20, last_update_days=-30)
init_mdfile(groupseeds)
| python |
from gquant.dataframe_flow import Node
from gquant.dataframe_flow._port_type_node import _PortTypesMixin
from gquant.dataframe_flow.portsSpecSchema import ConfSchema
class DropNode(Node, _PortTypesMixin):
def init(self):
_PortTypesMixin.init(self)
cols_required = {}
self.required = {
self.INPUT_PORT_NAME: cols_required
}
def columns_setup(self):
if 'columns' in self.conf:
dropped = {}
for k in self.conf['columns']:
dropped[k] = None
return _PortTypesMixin.deletion_columns_setup(self,
dropped)
else:
return _PortTypesMixin.columns_setup(self)
def ports_setup(self):
return _PortTypesMixin.ports_setup(self)
def conf_schema(self):
json = {
"title": "Drop Column configure",
"type": "object",
"description": """Drop a few columns from the dataframe""",
"properties": {
"columns": {
"type": "array",
"items": {
"type": "string"
},
"description": """array of columns to be droped"""
}
},
"required": ["columns"],
}
ui = {
"columns": {
"items": {
"ui:widget": "text"
}
},
}
input_columns = self.get_input_columns()
if self.INPUT_PORT_NAME in input_columns:
col_from_inport = input_columns[self.INPUT_PORT_NAME]
enums = [col for col in col_from_inport.keys()]
json['properties']['columns']['items']['enum'] = enums
ui = {}
return ConfSchema(json=json, ui=ui)
else:
ui = {
"column": {"ui:widget": "text"}
}
return ConfSchema(json=json, ui=ui)
def process(self, inputs):
"""
Drop a few columns from the dataframe that are defined in the `columns`
in the nodes' conf
Arguments
-------
inputs: list
list of input dataframes.
Returns
-------
dataframe
"""
input_df = inputs[self.INPUT_PORT_NAME]
column_names = self.conf['columns']
return {self.OUTPUT_PORT_NAME: input_df.drop(column_names, axis=1)}
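# Illustrative conf for this node (column names are placeholders): with
# conf = {"columns": ["col_a", "col_b"]}, those two columns are dropped from the
# input dataframe before it is emitted on the output port.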
| python |
# todo: strict version
class Accessor:
def split_key(self, k, *, sep="/"):
return [normalize_json_pointer(x) for x in k.lstrip(sep).split(sep)]
def split_key_pair(self, k, *, sep="@"):
if sep not in k:
return self.split_key(k), []
else:
access_keys, build_keys = k.split(sep, 1)
return self.split_key(access_keys), self.split_key(build_keys)
def access(self, access_keys, d, default=None):
for i, k in enumerate(access_keys):
if k == "*":
if len(access_keys) - 1 == i:
continue # on last, no effect
else:
next_key = access_keys[i + 1]
rest_keys = access_keys[i + 1 :]
if next_key.endswith("[]"):
next_key = next_key.rstrip("[]")
for gk, v in d.items():
if hasattr(v, "__contains__") and next_key in v:
return self.access(rest_keys, d[gk])
return default
elif k == "*[]":
if len(access_keys) - 1 == i:
continue # on last, no effect
else:
next_key = access_keys[i + 1]
rest_keys = access_keys[i + 1 :]
candidates = []
for gk, v in d.items():
if hasattr(v, "__contains__") and next_key in v:
candidates.append(v)
if candidates:
return [self.access(rest_keys, v) for v in candidates]
return default
elif k.endswith("[]"):
k = k.rstrip("[]")
rest_keys = access_keys[i + 1 :]
return [self.access(rest_keys, e) for e in d[k]]
elif k.isdecimal():
try:
d = d[int(k)]
except IndexError:
return default
else:
try:
d = d[k]
except KeyError:
return default
return d
def normalize_json_pointer(ref):
if "~" not in ref:
return ref
return ref.replace("~1", "/").replace("~0", "~")
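# Illustrative usage of Accessor (a sketch, not part of the original module):
#
#     accessor = Accessor()
#     keys = accessor.split_key("/users/0/name")            # ["users", "0", "name"]
#     accessor.access(keys, {"users": [{"name": "foo"}]})   # "foo"
#     accessor.access(keys, {}, default="n/a")              # "n/a" (missing key)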
| python |
#!/usr/bin/env python3
from os import listdir
from os.path import isdir, isfile, join
def get_focus(day):
day = int(day.split(" ")[1].split(":")[0])
if day == 1:
return "Chest"
elif day == 2:
return "Quads"
elif day == 3:
return "Back"
elif day == 4:
return "Glute & Ham"
elif day == 5:
return "Shoulders & Arms"
else:
return "Nothing..."
def parse_workout(workout_file):
workouts = []
with open(workout_file, "r") as file:
workout = []
day = None
for line in file.readlines():
if line.startswith("Week") or line.startswith("Exercise"):
continue
if line.startswith("Day"):
if day:
workouts.append(workout)
workout = []
day = line.rstrip()
continue
l = line.rstrip().split("\t")
l.append(day)
workout.append(l)
workouts.append(workout)
ex = []
for workout in workouts:
exercises = []
for exercise in workout:
name = exercise[0]
_set = exercise[1]
kg = exercise[2]
reps = exercise[3]
day = exercise[4]
focus = get_focus(day)
e = '{"name":"%s", "kg":"%s", "sets":"%s"}' % (name, kg, _set)
exercises.append(e)
title = '{"focus":"%s", "exercises":[%s], "reps":"%s"}' % (focus, ",".join(exercises), reps)
ex.append(title)
x = '{"workouts": ['
x += ",".join(ex)
x += ']}'
return x
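# The returned string is JSON of the form (values are illustrative):
# {"workouts": [{"focus": "Chest",
#                "exercises": [{"name": "Bench", "kg": "80", "sets": "3"}],
#                "reps": "5"}]}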
def write_to_file(file_name, content):
f = open(file_name, "w")
f.write(content)
f.close()
def main():
cycles = [d for d in listdir(".") if isdir(join(".", d)) and d.startswith("cycle")]
for cycle in cycles:
print("parsing %s" % cycle)
workouts = ["%s/%s" % (cycle, f) for f in listdir(cycle) if isfile(join(cycle, f))]
for workout in workouts:
print(" workout : %s" % workout)
parsed_workout = parse_workout(workout)
file_name = "%s/json/%s" % (cycle, workout.split("/")[1].replace(".txt", ".json"))
write_to_file(file_name, parsed_workout)
print("done")
if __name__ == "__main__":
main()
| python |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy import interpolate
import torch
import tqdm
from neural_clbf.controllers import NeuralObsBFController, ObsMPCController
from neural_clbf.experiments import (
RolloutSuccessRateExperiment,
ExperimentSuite,
ObsBFVerificationExperiment,
)
import neural_clbf.evaluation.turtle2d.scenes as scene_utils
@torch.no_grad()
def eval_and_plot_turtlebot_room():
# Load the checkpoint file. This should include the experiment suite used during
# training.
log_dir = "saved_models/perception/turtlebot2d/commit_26f34ff/"
neural_controller = NeuralObsBFController.load_from_checkpoint(log_dir + "v0.ckpt")
# Get the experiments
contour_experiment = neural_controller.experiment_suite.experiments[0]
rollout_experiment = neural_controller.experiment_suite.experiments[1]
# Modify contour parameters
contour_experiment.n_grid = 30
contour_experiment.domain = [(-4.0, 0.0), (-2.0, 2.0)]
# Modify rollout parameters
rollout_experiment.t_sim = 10
rollout_experiment.start_x = torch.tensor(
[
# Start from same room as goal (OK, 10s)
[-2.5, -2.5, np.pi / 2],
[-4.0, 0.0, 0.0],
# # Start from table room (OK, 20s)
# [-13.5, 1.0, 0.0],
# [-11.83, -4.8, 0.0],
# # Start from chair room (OK, 80)
# [-13.5, -13.5, 0.0],
# [-7.0, -8.0, 0.0],
# Start from chair room (testing)
# [-1.0, -13.5, 0.0], # (OK, 80)
# [-3.0, -12, 0.0], # (OK, 200)
# [-3.8, -11, 0.0], # (OK, 100)
]
)
neural_controller.lookahead_grid_n = 8
neural_controller.controller_period = 0.1
neural_controller.dynamics_model.dt = 0.01
neural_controller.lookahead_dual_penalty = 1e3
# neural_controller.debug_mode_exploratory = True
# neural_controller.debug_mode_goal_seeking = True
# Modify scene
scene = scene_utils.room_4()
neural_controller.dynamics_model.scene = scene
# Run the experiments and plot
rollout_experiment.run_and_plot(neural_controller, display_plots=True)
# contour_experiment.run_and_plot(neural_controller, display_plots=True)
def eval_and_plot_turtlebot_bugtrap():
# Load the checkpoint file. This should include the experiment suite used during
# training.
log_dir = "saved_models/perception/turtlebot2d/commit_04c9147/"
neural_controller = NeuralObsBFController.load_from_checkpoint(log_dir + "v0.ckpt")
# Get the experiments
cbf_contour_experiment = neural_controller.experiment_suite.experiments[0]
clf_contour_experiment = neural_controller.experiment_suite.experiments[1]
rollout_experiment = neural_controller.experiment_suite.experiments[2]
# Modify contour parameters
cbf_contour_experiment.n_grid = 30
cbf_contour_experiment.domain = [(-4.0, 0.0), (-2.0, 2.0)]
clf_contour_experiment.n_grid = 30
clf_contour_experiment.domain = [(-4.0, 0.0), (-2.0, 2.0)]
# Modify rollout parameters
rollout_experiment.t_sim = 4
rollout_experiment.start_x = torch.tensor(
[
[-3.0, -0.1, 0.0],
]
)
neural_controller.lookahead_grid_n = 8
neural_controller.controller_period = 0.1
neural_controller.dynamics_model.dt = 0.01
neural_controller.lookahead_dual_penalty = 1e3
# neural_controller.debug_mode_exploratory = True
# neural_controller.debug_mode_goal_seeking = True
# Modify scene
scene = scene_utils.bugtrap()
neural_controller.dynamics_model.scene = scene
# Run the experiments and plot
rollout_experiment.run_and_plot(neural_controller, display_plots=True)
# cbf_contour_experiment.run_and_plot(neural_controller, display_plots=True)
def eval_and_plot_turtlebot_training():
# Load the checkpoint file. This should include the experiment suite used during
# training.
log_dir = "saved_models/perception/turtlebot2d/commit_8439378/"
neural_controller = NeuralObsBFController.load_from_checkpoint(
log_dir + "v0_ep72.ckpt"
)
# Get the experiment
rollout_experiment = neural_controller.experiment_suite.experiments[-1]
# Modify rollout parameters
rollout_experiment.t_sim = 4
neural_controller.lookahead_grid_n = 8
neural_controller.controller_period = 0.1
neural_controller.dynamics_model.dt = 0.01
neural_controller.lookahead_dual_penalty = 1e3
# neural_controller.debug_mode_exploratory = True
# neural_controller.debug_mode_goal_seeking = True
# Run the experiments and plot
rollout_experiment.run_and_plot(neural_controller, display_plots=True)
# Also run with an MPC controller
mpc_controller = ObsMPCController(
neural_controller.dynamics_model,
neural_controller.controller_period,
neural_controller.experiment_suite,
neural_controller.validation_dynamics_model,
)
rollout_experiment.run_and_plot(mpc_controller, display_plots=True)
def eval_turtlebot_neural_cbf_mpc_success_rates():
# Load the checkpoint file. This should include the experiment suite used during
# training.
log_dir = "saved_models/perception/turtlebot2d/commit_8439378/"
neural_controller = NeuralObsBFController.load_from_checkpoint(
log_dir + "v0_ep72.ckpt"
)
# Make the experiment
rollout_experiment = RolloutSuccessRateExperiment(
"success_rate",
"Neural oCBF/oCLF (ours)",
n_sims=500,
t_sim=10.0,
)
experiment_suite = ExperimentSuite([rollout_experiment])
# # Run the experiments and save the results
# experiment_suite.run_all_and_save_to_csv(
# neural_controller, log_dir + "experiments_neural_ocbf"
# )
# Also run with an MPC controller
mpc_controller = ObsMPCController(
neural_controller.dynamics_model,
neural_controller.controller_period,
neural_controller.experiment_suite,
neural_controller.validation_dynamics_model,
)
rollout_experiment.algorithm_name = "MPC"
experiment_suite.run_all_and_save_to_csv(
mpc_controller, log_dir + "experiments_mpc_contingent"
)
# # Also run with a state-based controller
# log_dir = "saved_models/perception/turtlebot2d_state/commit_f63b307/"
# neural_state_controller = NeuralObsBFController.load_from_checkpoint(
# log_dir + "v0.ckpt"
# )
# experiment_suite.run_all_and_save_to_csv(
# neural_state_controller, log_dir + "experiments_neural_scbf"
# )
def eval_and_plot_turtlebot_select_scene():
# Load the checkpoint file. This should include the experiment suite used during
# training.
log_dir = "saved_models/perception/turtlebot2d/commit_8439378/"
neural_controller = NeuralObsBFController.load_from_checkpoint(
log_dir + "v0_ep72.ckpt"
)
# Get the experiment
rollout_experiment = neural_controller.experiment_suite.experiments[-1]
# Modify rollout parameters
rollout_experiment.t_sim = 10
rollout_experiment.start_x = torch.tensor(
[
[-4.0, 4.0, 0.0],
]
)
# experiment_suite = ExperimentSuite([rollout_experiment])
# Load the selected scene
neural_controller.dynamics_model.scene = scene_utils.saved_random_scene()
# Run the experiments and plot
rollout_experiment.run_and_plot(neural_controller, display_plots=True)
# experiment_suite.run_all_and_save_to_csv(
# neural_controller, log_dir + "experiments_neural_ocbf"
# )
# Also run with an MPC controller
mpc_controller = ObsMPCController(
neural_controller.dynamics_model,
neural_controller.controller_period,
neural_controller.experiment_suite,
neural_controller.validation_dynamics_model,
)
rollout_experiment.run_and_plot(mpc_controller, display_plots=True)
# experiment_suite.run_all_and_save_to_csv(
# mpc_controller, log_dir + "experiments_mpc_contingent"
# )
# # Also run with a state-based controller
# log_dir = "saved_models/perception/turtlebot2d_state/commit_f63b307/"
# neural_state_controller = NeuralObsBFController.load_from_checkpoint(
# log_dir + "v0.ckpt"
# )
# neural_state_controller.dynamics_model.scene = scene_utils.saved_random_scene()
# experiment_suite.run_all_and_save_to_csv(
# neural_state_controller, log_dir + "experiments_neural_scbf"
# )
def plot_select_scene():
# Load data
log_dir = "saved_models/perception/turtlebot2d/commit_8439378/"
state_log_dir = "saved_models/perception/turtlebot2d_state/commit_f63b307/"
ocbf_df = pd.read_csv(
log_dir + "experiments_neural_ocbf/2021-09-01_17_57_56/Rollout.csv"
)
scbf_df = pd.read_csv(
state_log_dir + "experiments_neural_scbf/2021-09-01_17_58_44/Rollout.csv"
)
mpc_df = pd.read_csv(
log_dir + "experiments_mpc_contingent/2021-11-12_14_46_18/Rollout.csv"
)
ppo_df = pd.read_csv(log_dir + "experiments_ppo/2021-09-01_21_32_00/trace.csv")
# Add the start point and smooth the ppo trace
start = pd.DataFrame([{"$x$": -4.0, "$y$": 4.0, "$t$": 0.0}])
ppo_df = pd.concat([start, ppo_df])
# Set the color scheme
sns.set_theme(context="talk", style="white")
sns.set_style({"font.family": "serif"})
# Create the axes
fig, ax = plt.subplots()
# Plot the environment
scene_utils.saved_random_scene().plot(ax)
ax.plot(
[], [], color=sns.color_palette()[0], label="Observation-based CBF/CLF (ours)"
)
ax.plot([], [], color=sns.color_palette()[1], label="State-based CBF/CLF")
ax.plot([], [], color=sns.color_palette()[2], label="MPC")
ax.plot([], [], color=sns.color_palette()[3], label="PPO")
# Plot oCBF
ax.plot(
ocbf_df["$x$"].to_numpy(),
ocbf_df["$y$"].to_numpy(),
linestyle="-",
linewidth=5,
color=sns.color_palette()[0],
)
# Plot sCBF
ax.plot(
scbf_df["$x$"].to_numpy(),
scbf_df["$y$"].to_numpy(),
linestyle="-",
color=sns.color_palette()[1],
)
# Plot MPC
ax.plot(
mpc_df["$x$"].to_numpy(),
mpc_df["$y$"].to_numpy(),
linestyle="-",
color=sns.color_palette()[2],
)
# Plot PPO smoothed
ppo_t = ppo_df["$t$"].to_numpy()
mpc_t = mpc_df["t"].to_numpy()
ppo_x = ppo_df["$x$"].to_numpy()
ppo_y = ppo_df["$y$"].to_numpy()
x_smooth = interpolate.interp1d(ppo_t, ppo_x, kind="cubic")
y_smooth = interpolate.interp1d(ppo_t, ppo_y, kind="cubic")
ax.plot(
x_smooth(mpc_t),
y_smooth(mpc_t),
linestyle=":",
color=sns.color_palette()[3],
)
ax.legend(loc="lower left")
ax.set_ylim([-2, 5.5])
ax.set_xlim([-5.5, 3])
ax.set_aspect("equal")
plt.tight_layout()
plt.show()
def validate_neural_cbf():
# Load the checkpoint file. This should include the experiment suite used during
# training.
log_dir = "saved_models/perception/turtlebot2d/commit_8439378/"
neural_controller = NeuralObsBFController.load_from_checkpoint(
log_dir + "v0_ep72.ckpt"
)
# Make the verification experiment
verification_experiment = ObsBFVerificationExperiment("verification", 1000)
# Increase the dual penalty so any violations of the CBF condition are clear
neural_controller.lookahead_dual_penalty = 1e8
# Run the experiments and save the results. Gotta do this multiple times
# to accommodate memory
num_infeasible = 0
prog_bar_range = tqdm.trange(100, desc="Validating BF", leave=True)
for i in prog_bar_range:
df = verification_experiment.run(neural_controller)
num_infeasible += df["# infeasible"][0]
print(f"Total samples {100 * 1000}, # infeasible: {num_infeasible}")
if __name__ == "__main__":
# eval_and_plot_turtlebot_room()
# eval_and_plot_turtlebot_bugtrap()
# eval_and_plot_turtlebot_training()
# eval_turtlebot_neural_cbf_mpc_success_rates()
# eval_and_plot_turtlebot_select_scene()
plot_select_scene()
# validate_neural_cbf()
| python |
import pandas as pd
from pdia.extendedInfoParser.parseExtendedInfo import errorCode
def parseCalculatorEvents(eInfo):
"""Parse a calculator event string, return parsed object or None
"""
assert (isinstance(eInfo, pd.Series))
try:
res = eInfo.apply(lambda x: {"Calculator": x})
except:
# print "\nWarning: parseCalculatorEvents(): some rows of ExtendedInfo is not a string"
# return parseDefault(eInfo)
res = eInfo.apply(lambda x: errorCode)
return res
def parseCalculatorBuffer(eInfo):
"""Parse a calculator buffer string, return parsed object or None
"""
assert (isinstance(eInfo, pd.Series))
try:
res = eInfo.apply(lambda x: {"CalculatorBuffer": x})
except:
# print "\nWarning: parseCalculatorBuffer(): some rows of ExtendedInfo is not a string"
# return parseDefault(eInfo)
res = eInfo.apply(lambda x: errorCode)
return res | python |
import csv
import datetime as dt
import hashlib
import io
import re
from decimal import Decimal
from django.utils.dateparse import parse_date
from django.utils.encoding import force_str
from django.utils.text import slugify
def parse_zkb_csv(data):
f = io.StringIO()
f.write(force_str(data, encoding="utf-8", errors="ignore"))
f.seek(0)
dialect = csv.Sniffer().sniff(f.read(4096))
f.seek(0)
reader = csv.reader(f, dialect)
next(reader) # Skip first line
entries = []
while True:
try:
row = next(reader)
except StopIteration:
break
if not row:
continue
try:
day = dt.datetime.strptime(row[8], "%d.%m.%Y").date()
amount = row[7] and Decimal(row[7])
reference = row[4]
except (AttributeError, IndexError, ValueError):
continue
if day and amount:
details = next(reader)
entries.append(
{
"reference_number": reference,
"value_date": day,
"total": amount,
"payment_notice": "; ".join(
filter(None, (details[1], details[10], row[4]))
),
}
)
return entries
def postfinance_preprocess_notice(payment_notice):
"""Remove spaces from potential invoice numbers"""
return re.sub(
r"\b([0-9]{4}\s*-\s*[0-9]{4}\s*-\s*[0-9]{4})\b",
lambda match: re.sub(r"\s+", "", match.group(0)),
payment_notice,
)
def postfinance_reference_number(payment_notice, day):
"""Either pull out the bank reference or create a hash from the notice"""
match = re.search(r"\b([0-9]{6}[A-Z]{2}[0-9A-Z]{6,10})$", payment_notice)
return "pf-{}".format(
match.group(1)
if match
else hashlib.md5(
slugify(payment_notice + day.isoformat()).encode("utf-8")
).hexdigest()
)
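# For example (illustrative values only):
#     postfinance_preprocess_notice("Invoice 1234 - 5678 - 9012")
#     # -> "Invoice 1234-5678-9012"
#     postfinance_reference_number("123456AB123456", dt.date(2021, 1, 1))
#     # -> "pf-123456AB123456"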
def parse_postfinance_csv(data):
f = io.StringIO()
f.write(force_str(data, encoding="latin-1", errors="ignore"))
f.seek(0)
dialect = csv.Sniffer().sniff(f.read(4096))
f.seek(0)
reader = csv.reader(f, dialect)
next(reader) # Skip first line
entries = []
for row in reader:
if not row:
continue
try:
day = parse_date(row[4])
except (IndexError, ValueError):
continue
if day is None or not row[2]: # Only credit
continue
payment_notice = postfinance_preprocess_notice(row[1])
entries.append(
{
"reference_number": postfinance_reference_number(payment_notice, day),
"value_date": day,
"total": Decimal(row[2]),
"payment_notice": payment_notice,
}
)
return entries
| python |
import time
import pygame
from pygame.locals import K_ESCAPE, K_SPACE, QUIT, USEREVENT
COUNTDOWN_DELAY = 4
def timerFunc(countdown, background):
print("Timer CallBack", time.time())
print(countdown)
print("--")
# Display some text
font = pygame.font.Font(None, 36)
text = font.render(str(countdown), 1, (10, 10, 10))
textpos = text.get_rect()
textpos.centerx = background.get_rect().centerx
textpos.centery = background.get_rect().centery
background.blit(text, textpos)
if countdown == 0:
print("SHOOT")
def top_text(background):
# Display some text
font = pygame.font.Font(None, 36)
text = font.render("space to shoot / esc to quit", 1, (10, 10, 10))
textpos = text.get_rect()
textpos.centerx = background.get_rect().centerx
background.blit(text, textpos)
def main():
pygame.init()
countdown = COUNTDOWN_DELAY
stop_photobooth = False
screen = pygame.display.set_mode((400, 300))
pygame.display.set_caption('Photobooth')
# Fill background
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((30, 250, 120))
top_text(background)
# Blit everything to the screen
screen.blit(background, (0, 0))
pygame.display.flip()
while not stop_photobooth:
background.fill((30, 250, 120))
top_text(background)
for event in pygame.event.get():
# any other key event input
if event.type == QUIT:
stop_photobooth = True
if event.type == USEREVENT+1:
if countdown == -1:
pygame.time.set_timer(USEREVENT+1, 0)
countdown = COUNTDOWN_DELAY
else:
timerFunc(countdown, background)  # calling the function whenever we get a timer event.
countdown -= 1
# get key current state
keys = pygame.key.get_pressed()
if keys[K_SPACE]:
pygame.time.set_timer(USEREVENT+1, 1000)
elif keys[K_ESCAPE]:
print("quit")
stop_photobooth = True
screen.blit(background, (0, 0))
pygame.display.flip()
if __name__ == "__main__":
main()
| python |
#!/usr/bin/env python
# coding=utf-8
import re
import time
import string
from urlparse import urlparse
from comm.request import Req
from conf.settings import DICT_PATH
from core.data import result
from core.data import fuzz_urls
from Queue import Empty
class FuzzFileScan(Req):
def __init__(self, site, timeout, delay, threads):
super(FuzzFileScan, self).__init__(site, timeout, delay, threads)
self.fuzzed_urls = []
self.test_urls = []
def load_suffix_dict(self):
with open(DICT_PATH+'/fuzz.txt', 'r') as f:
return f.readlines()
def filter_links(self, url):
"""
Static file types are not tested
"""
pattern = re.compile(r'/.*\.(?!html|htm|js|css|jpg|png|jpeg|gif|svg|pdf|avi|mp4|mp3)')
ret = re.match(pattern, url)
return ret
def gen_dict(self, url):
o = urlparse(url)
ret = []
if self.filter_links(o[2]):
for suffix in self.load_suffix_dict():
to_fuzz_url = o[0] + '://' + o[1] + o[2] + string.strip(suffix)
ret.append(to_fuzz_url)
return ret
return []
def fuzz(self, urls):
for url in urls:
if self.get_is_vul(url):
self.fuzzed_urls.append(url)
def start(self):
print '[%s] Start Fuzz File Scan ...' % time.strftime('%H:%M:%S')
while True:
try:
url = fuzz_urls.get(True, 1)
to_fuzz_url_list = self.gen_dict(url)
except Empty, e:
if self.pool.undone_tasks():
continue
else:
break
self.pool.spawn(self.fuzz, to_fuzz_url_list)
print '[%s] Stop Fuzz File Scan!' % time.strftime('%H:%M:%S')
print '[%s] %s Found' % (time.strftime('%H:%M:%S'), len(self.fuzzed_urls))
result.fuzz = self.fuzzed_urls | python |
from gql.schema import make_schema_from_path
from pathlib import Path
def test_make_schema_from_path():
schema = make_schema_from_path(str(Path(__file__).parent / 'schema'))
assert set(schema.query_type.fields.keys()) == {'me', 'addresses'}
assert set(schema.mutation_type.fields.keys()) == {'createAddress'}
| python |
# -*- coding: utf8 -*-
import requests, json, time, os
requests.packages.urllib3.disable_warnings()
cookie = os.environ.get("cookie_smzdm")
def main(*arg):
try:
msg = ""
SCKEY = os.environ.get('SCKEY')
s = requests.Session()
s.headers.update({'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36'})
t = round(int(time.time() * 1000))
url = f'https://zhiyou.smzdm.com/user/checkin/jsonp_checkin?_={t}'
headers = {
"cookie" : cookie,
'Referer': 'https://www.smzdm.com/'
}
r = s.get(url, headers=headers, verify=False)
print(r.text.encode('latin-1').decode('unicode_escape'))
if r.json()["error_code"] != 0 and SCKEY:
scurl = f"https://sc.ftqq.com/{SCKEY}.send"
data = {
"text" : "smzdm Cookie过期",
"desp" : r.text
}
requests.post(scurl, data=data)
print("smzdm cookie失效")
msg += "smzdm cookie失效"
else:
msg += "smzdm签到成功"
except Exception as e:
print('repr(e):', repr(e))
msg += '运行出错,repr(e):'+repr(e)
return msg + "\n"
def smzdm_pc(*arg):
msg = ""
global cookie
clist = cookie.split("\n")
i = 0
while i < len(clist):
msg += f"第 {i+1} 个账号开始执行任务\n"
cookie = clist[i]
msg += main(cookie)
i += 1
return msg
if __name__ == "__main__":
if cookie:
print("----------什么值得买开始尝试签到----------")
smzdm_pc()
print("----------什么值得买签到执行完毕----------")
| python |
from setuptools import setup
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="streambook",
author="Alexander Rush",
author_email="[email protected]",
version="0.1.2",
packages=["streambook"],
long_description=long_description,
long_description_content_type="text/markdown",
package_data={"streambook": []},
setup_requires=["pytest-runner"],
install_requires=["streamlit", "jupytext", "watchdog", "in_place", "mistune", "typer"],
tests_require=["pytest"],
python_requires=">=3.6",
entry_points={
"console_scripts": [
"streambook = streambook.cli:app",
],
},
)
| python |
class WebserialError(Exception):
pass
class EqualChapterError(WebserialError):
pass
class NoChaptersFoundError(WebserialError):
pass
class LocalAheadOfRemoteError(WebserialError):
pass
| python |
"""
Lambdas
Also known as lambda expressions or lambdas: anonymous functions.
# Python function
def sum(a, b):
return a + b
def function(x):
return 3 * x + 1
print(function(4)) # 13
print(function(7)) # 22
# Expression lambda
lambda x: 3 * x + 1
# How can I use expression lambda?
calculation = lambda x: 3 * x + 1
print(calculation(4)) # 13
print(calculation(7)) # 22
# We can use lambda expressions with multiple inputs
complete_name = lambda name, surname: name.strip().title() + ' ' + surname.strip().title()
print(complete_name( 'angelina', 'JOLIE')) # Angelina Jolie
print(complete_name( ' YUMI ', ' OUCHI ')) # Yumi Ouchi
love = lambda: 'How not love python'
one = lambda x: 3*x+1
two = lambda x, y: (x * y) ** 0.5
three = lambda x, y, z: x + y + z
print(love()) # How not love python
print(one(1)) # 4
print(two(2, 3)) # 2.449489742783178
print(three(2, 3, 4)) # 9
# Type error with more parameters
people = ['Lais Balbe', 'Danilo Crazy', 'Anielle Matos', 'Rafael Duda', 'Yumi Ouchi', 'Ada Victoria']
print(people) # ['Lais Balbe', 'Danilo Crazy', 'Anielle Matos', 'Rafael Duda', 'Yumi Ouchi', 'Ada Victoria']
people.sort(key=lambda surname: surname.split(' ')[-1].lower())
print(people) # ['Lais Balbe', 'Danilo Crazy', 'Rafael Duda', 'Anielle Matos', 'Yumi Ouchi', 'Ada Victoria']
"""
# Quad function
def quad_function(a, b, c):
""" Return a * x ** 2 + b * x + c"""
return lambda x: a * x ** 2 + b * x + c
quadrad = quad_function(2, 3, -5)
print(quadrad(0)) # -5
print(quadrad(1)) # 0
print(quadrad(2)) # 9
print(quad_function(1, 2, 3)(2)) # 11
| python |
from userbot.utils import admin_cmd
from telethon.tl.functions.users import GetFullUserRequest
import asyncio
@borg.on(admin_cmd(pattern="pmto ?(.*)"))
async def pmto(event):
if event.reply_to_msg_id:
reply_message = await event.get_reply_message()
chat_id = await event.client(GetFullUserRequest(reply_message.from_id))
msg = event.pattern_match.group(1)
try:
await borg.send_message(chat_id, msg)
await event.edit("Message sent!")
await asyncio.sleep(3)
await event.delete()
except BaseException:
await event.edit("Something went wrong.")
else:
a = event.pattern_match.group(1)
b = a.split(" ")
chat_id = b[0]
try:
chat_id = int(chat_id)
except BaseException:
pass
msg = ""
for i in b[1:]:
msg += i + " "
if msg == "":
return
try:
await borg.send_message(chat_id, msg)
await event.edit("Message sent!")
await asyncio.sleep(3)
await event.delete()
except BaseException:
await event.edit("Something went wrong.")
| python |
import asyncio
import sys
import os
project_root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..'))
sys.path.insert(0, project_root)
from ahk import AHK, AsyncAHK
from unittest import TestCase, IsolatedAsyncioTestCase
from PIL import Image
from itertools import product
import time
class TestScreen(IsolatedAsyncioTestCase):
def setUp(self):
"""
Record all open windows
:return:
"""
self.ahk = AsyncAHK()
self.before_windows = asyncio.run(self.ahk.windows())
im = Image.new('RGB', (20, 20))
for coord in product(range(20), range(20)):
im.putpixel(coord, (255, 0, 0))
self.im = im
im.show()
time.sleep(2)
async def asyncTearDown(self):
for win in await self.ahk.windows():
if win not in self.before_windows:
await win.close()
break
async def test_pixel_search(self):
result = await self.ahk.pixel_search(0xFF0000)
self.assertIsNotNone(result)
async def test_image_search(self):
self.im.save('testimage.png')
position = await self.ahk.image_search('testimage.png')
self.assertIsNotNone(position)
async def test_pixel_get_color(self):
x, y = await self.ahk.pixel_search(0xFF0000)
result = await self.ahk.pixel_get_color(x, y)
self.assertIsNotNone(result)
self.assertEqual(int(result, 16), 0xFF0000)
| python |
from django.contrib import admin
from certificates.models import ProductionSiteCertificate
from certificates.models import DoubleCountingRegistration, DoubleCountingRegistrationInputOutput
class ProductionSiteCertificateAdmin(admin.ModelAdmin):
list_display = ('production_site', 'get_certificate_type', 'certificate')
search_fields = ('production_site__name', )
def get_certificate_type(self, obj):
return obj.certificate.certificate_type
get_certificate_type.short_description = 'Type'
admin.site.register(ProductionSiteCertificate, ProductionSiteCertificateAdmin)
@admin.register(DoubleCountingRegistration)
class DoubleCountingRegistrationAdmin(admin.ModelAdmin):
list_display = ('certificate_id', 'certificate_holder', 'valid_from', 'valid_until')
search_fields = ('certificate_id', 'certificate_holder',)
@admin.register(DoubleCountingRegistrationInputOutput)
class DoubleCountingRegistrationInputOutputAdmin(admin.ModelAdmin):
list_display = ('get_certid', 'get_holder', 'biofuel', 'feedstock')
search_fields = ('certificate__certificate_id', 'certificate__certificate_holder', )
def get_certid(self, obj):
return obj.certificate.certificate_id
get_certid.short_description = 'ID'
def get_holder(self, obj):
return obj.certificate.certificate_holder
get_holder.short_description = 'Holder'
| python |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: heartbeat.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='heartbeat.proto',
package='',
syntax='proto2',
serialized_options=_b('Z#clusterfuzz/protos/untrusted_runner'),
serialized_pb=_b('\n\x0fheartbeat.proto\"\x12\n\x10HeartbeatRequest\"\x13\n\x11HeartbeatResponse2:\n\tHeartbeat\x12-\n\x04\x42\x65\x61t\x12\x11.HeartbeatRequest\x1a\x12.HeartbeatResponseB%Z#clusterfuzz/protos/untrusted_runner')
)
_HEARTBEATREQUEST = _descriptor.Descriptor(
name='HeartbeatRequest',
full_name='HeartbeatRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=37,
)
_HEARTBEATRESPONSE = _descriptor.Descriptor(
name='HeartbeatResponse',
full_name='HeartbeatResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=39,
serialized_end=58,
)
DESCRIPTOR.message_types_by_name['HeartbeatRequest'] = _HEARTBEATREQUEST
DESCRIPTOR.message_types_by_name['HeartbeatResponse'] = _HEARTBEATRESPONSE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
HeartbeatRequest = _reflection.GeneratedProtocolMessageType('HeartbeatRequest', (_message.Message,), dict(
DESCRIPTOR = _HEARTBEATREQUEST,
__module__ = 'heartbeat_pb2'
# @@protoc_insertion_point(class_scope:HeartbeatRequest)
))
_sym_db.RegisterMessage(HeartbeatRequest)
HeartbeatResponse = _reflection.GeneratedProtocolMessageType('HeartbeatResponse', (_message.Message,), dict(
DESCRIPTOR = _HEARTBEATRESPONSE,
__module__ = 'heartbeat_pb2'
# @@protoc_insertion_point(class_scope:HeartbeatResponse)
))
_sym_db.RegisterMessage(HeartbeatResponse)
DESCRIPTOR._options = None
_HEARTBEAT = _descriptor.ServiceDescriptor(
name='Heartbeat',
full_name='Heartbeat',
file=DESCRIPTOR,
index=0,
serialized_options=None,
serialized_start=60,
serialized_end=118,
methods=[
_descriptor.MethodDescriptor(
name='Beat',
full_name='Heartbeat.Beat',
index=0,
containing_service=None,
input_type=_HEARTBEATREQUEST,
output_type=_HEARTBEATRESPONSE,
serialized_options=None,
),
])
_sym_db.RegisterServiceDescriptor(_HEARTBEAT)
DESCRIPTOR.services_by_name['Heartbeat'] = _HEARTBEAT
# @@protoc_insertion_point(module_scope)
| python |
"""tipo torneo
Revision ID: 017
Revises: 016
Create Date: 2014-05-27 22:50:52.173711
"""
# revision identifiers, used by Alembic.
revision = '017'
down_revision = '016'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_table('tipo_torneo',
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('nombre', sa.String(256), nullable=False),
sa.Column('tipo', sa.String(256), nullable=False),
sa.Column('es_escuela', sa.Boolean, nullable=False),
sa.Column('numero_de_rondas', sa.Integer, nullable=False),
sa.Column('numero_de_series', sa.Integer, nullable=False),
sa.Column('numero_de_flechas_por_serie', sa.Integer, nullable=False),
sa.Column('distancia_ronda_1', sa.Integer, nullable=False),
sa.Column('series_de_practica_ronda_1', sa.Integer, nullable=False),
sa.Column('distancia_ronda_2', sa.Integer, nullable=False),
sa.Column('series_de_practica_ronda_2', sa.Integer, nullable=False),
        # from round 3 onwards these columns can be null, because depending on
        # the tournament type there may not be a third round
sa.Column('distancia_ronda_3', sa.Integer),
sa.Column('series_de_practica_ronda_3', sa.Integer),
sa.Column('distancia_ronda_4', sa.Integer),
sa.Column('series_de_practica_ronda_4', sa.Integer),
)
data = [
('18m - 80', 'Indoor', True, 2, 10, 3, 18, 2, 18, 0, None, None, None, None),
('18m - 60', 'Indoor', True, 2, 10, 3, 18, 2, 18, 0, None, None, None, None),
('18m - 40', 'Indoor', True, 2, 10, 3, 18, 2, 18, 0, None, None, None, None),
('18m - Triple Spot', 'Indoor', False, 2, 10, 3, 18, 2, 18, 0, None, None, None, None),
('20-20', 'Outdoor 70-70', True, 2, 6, 6, 20, 2, 20, 0, None, None, None, None),
('30-30', 'Outdoor 70-70', True, 2, 6, 6, 30, 2, 30, 0, None, None, None, None),
('50-50', 'Outdoor 70-70', True, 2, 6, 6, 50, 2, 50, 0, None, None, None, None),
('60-60', 'Outdoor 70-70', False, 2, 6, 6, 60, 2, 60, 0, None, None, None, None),
('70-70', 'Outdoor 70-70', False, 2, 6, 6, 70, 2, 70, 0, None, None, None, None),
('20-20-20-20', 'Outdoor 1440', True, 4, 6, 6, 20, 2, 20, 2, 20, 2, 20, 2),
('30-30-20-20', 'Outdoor 1440', True, 4, 6, 6, 30, 2, 30, 2, 20, 2, 20, 2),
('50-50-30-30', 'Outdoor 1440', True, 4, 6, 6, 50, 2, 50, 2, 30, 2, 30, 2),
('60-50-40-30', 'Outdoor 1440', True, 4, 6, 6, 60, 2, 50, 2, 30, 2, 30, 2),
('70-60-50-30', 'Outdoor 1440', True, 4, 6, 6, 70, 2, 60, 2, 50, 2, 30, 2),
('70-60-50-30 (Cadete Varones)', 'Outdoor 1440', False, 4, 6, 6, 70, 2, 60, 2, 50, 2, 30, 2),
('60-50-40-30 (Cadete Mujeres)', 'Outdoor 1440', False, 4, 6, 6, 60, 2, 50, 2, 40, 2, 30, 2),
('70-60-50-30 (Juvenil Mujeres)', 'Outdoor 1440', False, 4, 6, 6, 70, 2, 60, 2, 50, 2, 30, 2),
('90-70-50-30 (Juvenil Varones)', 'Outdoor 1440', False, 4, 6, 6, 90, 2, 70, 2, 50, 2, 30, 2),
('70-60-50-30 (Senior Mujeres)', 'Outdoor 1440', False, 4, 6, 6, 70, 2, 60, 2, 50, 2, 30, 2),
('90-70-50-30 (Senior Varones)', 'Outdoor 1440', False, 4, 6, 6, 90, 2, 70, 2, 50, 2, 30, 2),
('70-60-50-30 (Master Varones)', 'Outdoor 1440', False, 4, 6, 6, 70, 2, 60, 2, 50, 2, 30, 2),
('60-50-40-30 (Master Mujeres)', 'Outdoor 1440', False, 4, 6, 6, 60, 2, 50, 2, 40, 2, 30, 2),
]
for index, valores in enumerate(data):
sql = 'INSERT INTO tipo_torneo (id, '\
'nombre, '\
'tipo, '\
'es_escuela, '\
'numero_de_rondas, '\
'numero_de_series, '\
'numero_de_flechas_por_serie, '\
'distancia_ronda_1, '\
'series_de_practica_ronda_1, '\
'distancia_ronda_2, '\
'series_de_practica_ronda_2 '
if valores[-1]:
            # then it has the information for rounds 3 and 4
sql += ', '\
'distancia_ronda_3, '\
'series_de_practica_ronda_3, '\
'distancia_ronda_4, '\
'series_de_practica_ronda_4'
sql += ') VALUES ('\
"%s, "\
"'%s', "\
"'%s', "\
"%s, "\
"%s, "\
"%s, "\
"%s, "\
"%s, "\
"%s, "\
"%s, "\
"%s "
if valores[-1]:
sql += ', '\
'%s, '\
'%s, '\
'%s, '\
'%s'
sql += ')'
if not valores[-1]:
valores = valores[:-4]
sql_values = (index + 1, ) + valores
insert_sql = sql % sql_values
op.execute(insert_sql)
def downgrade():
op.drop_table('tipo_torneo')
| python |
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
    def detectCycle(self, head: ListNode) -> ListNode:
        # Floyd's tortoise-and-hare: first detect whether a cycle exists,
        # then locate the node where the cycle begins.
        if head is None or head.next is None:
            return None
        slow = head
        fast = head
        # Phase 1: fast moves two steps per iteration, slow moves one;
        # they can only meet if the list contains a cycle.
        while fast.next and fast.next.next:
            fast = fast.next.next
            slow = slow.next
            if fast == slow:
                break
        if fast.next is None or fast.next.next is None:
            return None
        # Phase 2: restart slow from the head; advancing both pointers one
        # step at a time makes them meet exactly at the cycle's entry node.
        slow = head
        while slow != fast:
            slow = slow.next
            fast = fast.next
        return slow
| python |
import logging
import subprocess
LOG = logging.getLogger(__name__)
def run(*cmd, **kwargs):
"""Log and run a command.
Optional kwargs:
cwd: current working directory (string)
capture: capture stdout and return it (bool)
capture_stderr: redirect stderr to stdout and return it (bool)
env: environment variables (dict)
"""
cwd = kwargs.get('cwd')
capture = kwargs.get('capture')
capture_stderr = kwargs.get('capture_stderr')
env = kwargs.get('env')
LOG.info('%s', ' '.join(cmd))
    if capture or capture_stderr:
        stderr = subprocess.STDOUT if capture_stderr else None
        # Pass env here too so the documented kwarg applies to both branches.
        return subprocess.check_output(cmd, stderr=stderr, cwd=cwd, env=env)
subprocess.check_call(cmd, cwd=cwd, env=env)
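# Hedged usage sketch (assumes a POSIX system with `echo` and `ls` on the PATH);
# it only illustrates the two call styles: streaming output vs. capturing it.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    run('echo', 'hello')                     # logs the command and streams its output
    listing = run('ls', '-l', capture=True)  # returns stdout as bytes
    print(listing.decode())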
| python |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mask_selection2.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
import config
import numpy as np
class Ui_MaskWindow(object):
def setupUi(self, MaskWindow):
MaskWindow.setObjectName("MaskWindow")
MaskWindow.resize(331, 225)
self.centralwidget = QtWidgets.QWidget(MaskWindow)
self.centralwidget.setObjectName("centralwidget")
self.label_3 = QtWidgets.QLabel(self.centralwidget)
self.label_3.setGeometry(QtCore.QRect(20, 80, 161, 16))
self.label_3.setObjectName("label_3")
self.label = QtWidgets.QLabel(self.centralwidget)
self.label.setGeometry(QtCore.QRect(20, 120, 301, 16))
self.label.setObjectName("label")
self.label_4 = QtWidgets.QLabel(self.centralwidget)
self.label_4.setGeometry(QtCore.QRect(20, 140, 181, 16))
self.label_4.setObjectName("label_4")
self.label_6 = QtWidgets.QLabel(self.centralwidget)
self.label_6.setGeometry(QtCore.QRect(20, 180, 141, 16))
self.label_6.setObjectName("label_6")
self.pushButton = QtWidgets.QPushButton(self.centralwidget)
self.pushButton.setGeometry(QtCore.QRect(120, 10, 91, 31))
self.pushButton.setObjectName("pushButton")
self.label_2 = QtWidgets.QLabel(self.centralwidget)
self.label_2.setGeometry(QtCore.QRect(20, 100, 271, 16))
self.label_2.setObjectName("label_2")
self.label_5 = QtWidgets.QLabel(self.centralwidget)
self.label_5.setGeometry(QtCore.QRect(20, 160, 241, 16))
self.label_5.setObjectName("label_5")
self.textEdit = QtWidgets.QTextEdit(self.centralwidget)
self.textEdit.setGeometry(QtCore.QRect(20, 50, 291, 21))
self.textEdit.setObjectName("textEdit")
MaskWindow.setCentralWidget(self.centralwidget)
self.statusbar = QtWidgets.QStatusBar(MaskWindow)
self.statusbar.setObjectName("statusbar")
MaskWindow.setStatusBar(self.statusbar)
self.pushButton.clicked.connect(self.change_mask_function)
self.retranslateUi(MaskWindow)
QtCore.QMetaObject.connectSlotsByName(MaskWindow)
def change_mask_function(self):
config.x=self.textEdit.toPlainText()
        config.y=True  # internal parameter marking whether the mask is given as a function or as a txt file
print('Function defined')
def retranslateUi(self, MaskWindow):
_translate = QtCore.QCoreApplication.translate
MaskWindow.setWindowTitle(_translate("MaskWindow", "Define phase mask"))
self.label_3.setText(_translate("MaskWindow", "Available parameters:"))
self.label.setText(_translate("MaskWindow", "phi (azimutal coordinate, from 0 to 2pi)"))
self.label_4.setText(_translate("MaskWindow", "w0 (gaussian beam radius, mm)"))
self.label_6.setText(_translate("MaskWindow", "k (wavenumber, 1/mm)"))
self.pushButton.setText(_translate("MaskWindow", "Define mask"))
self.label_2.setText(_translate("MaskWindow", " rho (radial coordinate, from 0 to phase mask radius)"))
self.label_5.setText(_translate("MaskWindow", "f (focal distance of objective lens, mm)"))
self.textEdit.setHtml(_translate("MaskWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:8.25pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\">np.exp(1j*phi)</p></body></html>"))
try:
self.textEdit.setText(config.x)#if a phase mask has already been given
except:
pass
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MaskWindow = QtWidgets.QMainWindow()
ui = Ui_MaskWindow()
ui.setupUi(MaskWindow)
MaskWindow.show()
sys.exit(app.exec_())
| python |
import argparse, os
import torch
from sampling_free.config import cfg
from sampling_free.data import make_data_loader
from sampling_free.engine import do_inference
from sampling_free.modeling import build_model
from sampling_free.utils import Checkpointer, setup_logger, mkdir
def eval_checkpoint(cfg, model, output_dir, num_gpus):
iou_types = ("bbox",)
if cfg.MODEL.MASK_ON:
iou_types = iou_types + ("segm",)
output_folders = [None] * len(cfg.DATASETS.TEST)
dataset_names = cfg.DATASETS.TEST
for idx, dataset_name in enumerate(dataset_names):
output_folder = os.path.join(output_dir, dataset_name)
mkdir(output_folder)
output_folders[idx] = output_folder
data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=num_gpus>1)
for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
model.eval()
results = do_inference(
model,
data_loader_val,
iou_types=iou_types,
output_folder=output_folder,
)
def main():
parser = argparse.ArgumentParser(description="sampling-free")
parser.add_argument(
"--config-file",
default="",
metavar="FILE",
help="path to config file",
)
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
args.num_gpus = int(os.environ["WORLD_SIZE"])
args.device_id = int(os.environ["LOCAL_RANK"])
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled = True
if args.num_gpus > 1:
torch.cuda.set_device(args.device_id)
torch.distributed.init_process_group(backend="nccl")
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
output_dir = args.config_file.replace("config.yaml", "inference")
if output_dir:
mkdir(output_dir)
logger = setup_logger("sampling-free", output_dir, args.device_id)
logger.info("Using {} GPUs".format(args.num_gpus))
logger.info(args)
logger.info("Collecting env info (might take some time)")
from torch.utils.collect_env import get_pretty_env_info
logger.info("\n" + get_pretty_env_info())
logger.info("Loaded configuration file {}".format(args.config_file))
with open(args.config_file, "r") as cf:
config_str = "\n" + cf.read()
logger.info(config_str)
logger.info("Running with config:\n{}".format(cfg))
model = build_model(cfg).cuda(args.device_id)
if args.num_gpus > 1:
logger.info("Use PyTorch DDP inference")
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.device_id]
)
_ = Checkpointer(cfg, model)
eval_checkpoint(cfg, model, output_dir, args.num_gpus)
if __name__ == "__main__":
main()
| python |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 24 19:08:39 2019
@author: Tim Hohmann et al. - "Evaluation of machine learning models for
automatic detection of DNA double strand breaks after irradiation using a gH2AX
foci assay", PLOS One, 2020
"""
# main file for training machine learning models using previously labeled data
###############################################################################
# Parameters and file path that have to be set manually:
# Parameters:
# min area of nucleus
min_area = 4000
# color channel of nucleus. 0 = red, 1 = green, 2 = blue. for grayscale images
# this value is ignored.
nuc_chan = 2
# color channel of foci. 0 = red, 1 = green, 2 = blue. for grayscale images
# this value is ignored.
foci_chan = 1
# color channel of marked image. 0 = red, 1 = green, 2 = blue. corresponds to
# the color of the markings in the manually labeled foci images
mark_chan = 0
# adjust image size - might be useful to save calculation time. needs to be
# identical for foci and nucleus images
# image rescale factor:
rescale_factor = 1.0
# take only those PCA components cumulatively explaining var_max of the variance
# 0<var_max<=1.
var_max = 0.95
# randomly sample a proportion of the training data from each image (0<sampling<=1).
# speeds up training process if smaller than 1
sampling = 1
# used filter sizes
filt_range = [2,3,4,5,8,10,15,20,25,30,35]
# scaling range for frangi filter
sc_range = list(range(2,11))
#frequency range for gabor filter
freq = [0.08,0.10,0.13,0.16,0.2]
# Name used for saving the trained model and related images:
model_name = "MLP"
# directory containing the foci images:
im_path_foci = "D:\\Sample Images\\foci"
# directory containing the manually labeled foci images:
im_path_foci_marked = "D:\\Sample Images\\foci_marked"
# directory containing the nucleus images:
im_path_dapi = "D:\\Sample Images\\dapi"
###############################################################################
###############################################################################
###############################################################################
# turn off warnings; this is especially annoying with sklearn stuff
def warn(*args, **kwargs):
pass
import warnings
warnings.warn = warn
# Get packages:
# For directory, file handling etc.
import os
import sys
# import pandas:
import pandas as pd
# import numpy
import numpy as np
# For image analysis:
from skimage.io import imread, imshow
from skimage.io import imsave
from skimage.transform import rescale
# import packages related to (pre-) processing of data and model results:
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import GridSearchCV
# import model packages:
from sklearn.naive_bayes import ComplementNB
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier, BaggingClassifier, VotingClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import precision_score, recall_score
sys.path.append(os.getcwd())
main_file_path = os.getcwd()
# self written functions:
from FociDetectImgAnalysis import get_nuclei, GoToFiles
from GetFociData import get_labeled_data
from GetBestParams import image_pipeline
###############################################################################
# Start analysis of dapi images
# go to nucleus folder:
os.chdir(im_path_dapi)
print("Analyzing nucleus images ...")
# go to the directory containing the foci images:
#im_path = os.getcwd()+"\Sample Images\foci"
#os.chdir(im_path)
# start reading in image files:
stats = []
# get file names
save_path = "Single Nuclei"
files = GoToFiles(im_path_dapi,save_path)
for file_num in range(len(files)):
file_name, extension = os.path.splitext(files[file_num])
# print(file_name + " " + extension)
if extension in [".png",".tif",".jpg",".bmp"]:
# read image:
image = imread(files[file_num])
image = rescale(image, rescale_factor, order=1,preserve_range = True)
image = np.uint8(image)
#imshow(image)
# get region props of the blue channel:
if(len(image.shape)<3):
stats.append(get_nuclei(image[:,:],file_name))
else:
stats.append(get_nuclei(image[:,:,nuc_chan],file_name))
# Get x and y data for model training and the coordinate for each image:
# y_data is boolean with True were pixel was marked as foci and false otherwise
x_data, y_data, coords = get_labeled_data(im_path_foci,stats,im_path_foci_marked,filt_range,freq,sc_range,rescale_factor,foci_chan, mark_chan)
# When done with everything go back to the main folder:
os.chdir(main_file_path)
###############################################################################
# This part is for model training, assuming "get_labeled_data" was run successfully
# get training data:
x_train_transformed, y_train, idx, s1, s2, p, mnmx = image_pipeline(x_data,y_data,removed_im = [],var_max = var_max, sampling = sampling)
# Choose the model to train:
# neural network:
model = MLPClassifier(alpha=0.1, batch_size = 2000, learning_rate = "adaptive", learning_rate_init = 0.1, max_iter = 300, tol = 10**-4, early_stopping = True)
parameters = {'batch_size':[100,500,1000,2000,3000,4000,5000],'alpha':[10**-4,10**-3,10**-2,10**-1,1]}
# random forest:
# clf = RandomForestClassifier(criterion = "entropy",min_weight_fraction_leaf = 0.005, n_estimators = 15,max_depth = 50, min_samples_leaf = 10,min_samples_split = 100, n_jobs = -1)
# model = AdaBoostClassifier(base_estimator = clf, n_estimators=5)
# parameters = {'base_estimator__min_weight_fraction_leaf':[0.0001,0.001,0.005],'base_estimator__n_estimators':[5,10,15,20],'base_estimator__min_samples_leaf':[10,20,100]}
# complement naive bayes:
# clf = ComplementNB(alpha = 0.0, norm = True)
# model = AdaBoostClassifier(base_estimator = clf, n_estimators=15)
# parameters = {'base_estimator__alpha': [0,0.01,0.02,0.03,0.04,0.05,0.06], 'base_estimator__norm': [True, False]}
# support vector machine:
# linear svm
# clf = LinearSVC(penalty = "l2", loss = "hinge", C = 2, class_weight = "balanced", max_iter = 5000)
# model = AdaBoostClassifier(base_estimator = clf, n_estimators=5,algorithm='SAMME')
# parameters = {"base_estimator__C": [0.1,0.3,0.6,1,2,3]}
print("Performing grid search ...")
# get best model parameters:
clf = GridSearchCV(model, parameters, cv = 3)
clf.fit(x_train_transformed, y_train)
###############################################################################
# train models based on on all but one of the images and test on the remaining
# one. Do this for all combinations of images.
# Save images and some resulting statistics.
# save path:
save_path = im_path_foci + "\Results Model Validation"
# set model:
# neural network:
# model = MLPClassifier(alpha=0.1, batch_size = 2000, learning_rate = "adaptive", learning_rate_init = 0.1, max_iter = 300, tol = 10**-4, early_stopping = True)
model = clf.best_estimator_
im_stats = []
# create data sets leaving out one image:
print("Training model (leave one out) ...")
for im in range(len(x_data)):
print("Current Image:" + str(im+1))
removed_im = [im]
x_train_transformed, y_train, idx, s1, s2, p, mnmx = image_pipeline(x_data,y_data,removed_im,var_max = 0.95, sampling = 1)
    # use some defined model and train it with the x-features:
model.fit(x_train_transformed, y_train)
# create variables for test image:
x_vals_im = pd.DataFrame(x_data[removed_im[0]])
y_vals_im = pd.DataFrame(y_data[removed_im[0]])
# rescale data
x_image_transformed = s1.transform(x_vals_im)
# do PCA to reduce parameter number:
x_image_transformed = p.transform(x_image_transformed)
# cumulated sum of variance explained. take only data explaining 95% of
# variance
x_image_transformed = x_image_transformed[:,idx]
x_image_transformed = s2.transform(x_image_transformed)
x_image_transformed = mnmx.transform(x_image_transformed)
#predict labels:
y_test_pred = model.predict(x_image_transformed)
# prediction and confusion matrix
conf_mat = confusion_matrix(y_vals_im, y_test_pred)
v1 = model.score(x_image_transformed, y_vals_im)
v2 = precision_score(y_vals_im, y_test_pred) # sensitivity
v3 = recall_score(y_vals_im, y_test_pred)
im_stats.extend(["Image_"+str(removed_im[0]),conf_mat,v1,v2,v3])
# apply model to test data:
files = GoToFiles(im_path_foci,"\Results Model Validation")
image = imread(files[removed_im[0]])
temp_var = image.copy()
for i in range(len(y_test_pred)):
if y_test_pred[i] == True:
temp_var[coords[removed_im[0]][i][0],coords[removed_im[0]][i][1]] = 255
files = GoToFiles(save_path)
save_name = "Im_"+ str(removed_im[0]+1) + "_" + model_name + ".png"
imsave(save_name,temp_var)
# write the statistics data for each analyzed image:
import csv
files = GoToFiles(save_path)
with open("image_statistics.txt", 'w') as output:
wr = csv.writer(output,lineterminator='\n')
for val in im_stats:
wr.writerow([val])
###############################################################################
# train specific model on whole dataset and save fitted model.
# save path:
print("Training model (all images) ...")
save_path = im_path_foci + "\Trained Models" + "\\" + model_name
# set model:
# neural network:
# model = MLPClassifier(alpha=0.1, batch_size = 2000, learning_rate = "adaptive", learning_rate_init = 0.1, max_iter = 300, tol = 10**-4, early_stopping = True)
model = clf.best_estimator_
removed_im = []
x_train_transformed, y_train, idx, s1, s2, p, mnmx = image_pipeline(x_data,y_data,removed_im,var_max = 0.95, sampling = 1)
files = GoToFiles(im_path_foci,"\Trained Models" + "\\" + model_name)
GoToFiles(save_path)
# save scalings:
save_name = "PCA.p"
import pickle
with open(save_name, "wb") as fp: #Pickling
pickle.dump(p, fp)
save_name ="STD_Scaler1.p"
with open(save_name, "wb") as fp: #Pickling
pickle.dump(s1, fp)
save_name = "STD_Scaler2.p"
with open(save_name, "wb") as fp: #Pickling
pickle.dump(s2, fp)
save_name = "MinMax_Scaler.p"
with open(save_name, "wb") as fp: #Pickling
pickle.dump(mnmx, fp)
save_name = "idx.p"
with open(save_name, "wb") as fp: #Pickling
pickle.dump(idx, fp)
# use some defined model and train it with the x-features:
model.fit(x_train_transformed, y_train)
files = GoToFiles(save_path)
# save trained model:
save_name = model_name +"_Trained"+ ".p"
with open(save_name, "wb") as fp: #Pickling
pickle.dump(model, fp)
| python |
import textwrap
import uuid
from moviepy.editor import *
import os
from moviepy.video.tools.drawing import *
os.chdir('../')
cwd = os.getcwd()
os.chdir('src')
TITLE_FONT_SIZE = 40
FONT_SIZE = 35
TITLE_FONT_COLOR = 'white'
BGM_PATH = rf'{cwd}\assets\bgm.mp3'
STATIC_PATH = rf'{cwd}\assets\static2.mp4'
SIZE = (1080, 1920)
BG_COLOR = (16,16,16)
VIDEO_PATH = rf'{cwd}\data\video'
FONT = 'Open-Sans-Semibold'
FONT_AUTHOR = 'Open-Sans-Bold'
BACKGROUND_IMAGE = rf'{cwd}\assets\background_image.jpg'
ARROW_IMAGE = rf'{cwd}\assets\Arrows.png'
def generate_title(text, audio_path):
background_clip = ImageClip(BACKGROUND_IMAGE)
audio_clip = AudioFileClip(audio_path)
font_size = TITLE_FONT_SIZE
wrapped_text = textwrap.fill(text, width=40)
txt_clip = TextClip(wrapped_text,fontsize=font_size, font=FONT_AUTHOR, color=TITLE_FONT_COLOR, align="west")
txt_clip = txt_clip.set_position((165, 213+680-txt_clip.size[1]/2))
clip = CompositeVideoClip([background_clip, txt_clip])
clip.audio = audio_clip
clip.duration = audio_clip.duration
static_clip = VideoFileClip(STATIC_PATH)
clip = concatenate_videoclips([clip, static_clip])
return clip
def generate_clip(post, comment):
text = comment.body
audio_path = comment.body_audio
background_clip = ImageClip(BACKGROUND_IMAGE)
audio_clip = AudioFileClip(audio_path)
font_size = FONT_SIZE
author_font_size = 32
wrapped_text = textwrap.fill(text, width=47)
txt_clip = TextClip(wrapped_text,fontsize=font_size, font=FONT, color=TITLE_FONT_COLOR, align="west", interline=2)
# txt_clip = txt_clip.set_position("center")
txt_clip_pos = (165, 213+680-txt_clip.size[1]/2)
txt_clip = txt_clip.set_position(txt_clip_pos)
author_clip = TextClip(f"{comment.author}", fontsize=author_font_size, font=FONT_AUTHOR, color="white")
author_pos = (txt_clip_pos[0], txt_clip_pos[1] - author_font_size - 42)
author_clip = author_clip.set_position(author_pos)
score_clip = TextClip(f"{comment.score} points", fontsize=author_font_size, font=FONT, color='#818384')
score_pos = (author_pos[0] + author_clip.size[0] + 30, author_pos[1])
score_clip = score_clip.set_position(score_pos)
arrow_clip = ImageClip(ARROW_IMAGE)
arrow_pos = (author_pos[0]-arrow_clip.size[0], author_pos[1]-10)
arrow_clip = arrow_clip.set_position(arrow_pos)
clip = CompositeVideoClip([background_clip, txt_clip, author_clip, score_clip, arrow_clip])
clip.audio = audio_clip
clip.duration = audio_clip.duration
static_clip = VideoFileClip(STATIC_PATH)
clip = concatenate_videoclips([clip, static_clip])
return clip
def generate_video(context):
post = context["post"]
clips = []
clips.append(generate_title(post.title, post.title_audio))
for comment in post.comments:
comment_clip = generate_clip(post, comment)
# overlay reply
if comment.reply:
# TODO this
pass
clips.append(comment_clip)
video = concatenate_videoclips(clips)
background_audio_clip = AudioFileClip(BGM_PATH)
background_audio_clip = afx.audio_loop(background_audio_clip, duration=video.duration)
background_audio_clip = background_audio_clip.fx(afx.volumex, 0.1)
video.audio = CompositeAudioClip([video.audio, background_audio_clip])
video_id = uuid.uuid4()
path = fr"{VIDEO_PATH}\{video_id}.mp4"
context["video_path"] = path
context["video_id"] = video_id
video.write_videofile(path, fps=24, codec='libx264',bitrate='6291456', threads=4)
| python |
from __future__ import absolute_import
from flask import request, abort
from flask.views import MethodView
from huskar_sdk_v2.consts import OVERALL
from more_itertools import first
from huskar_api import settings
from huskar_api.models import huskar_client
from huskar_api.models.auth import Authority
from huskar_api.models.const import ROUTE_DEFAULT_INTENT
from huskar_api.models.exceptions import OutOfSyncError, EmptyClusterError
from huskar_api.models.route import RouteManagement
from huskar_api.models.utils import retry
from huskar_api.service.admin.application_auth import (
check_application_auth, check_application)
from huskar_api.service.admin.exc import NoAuthError
from huskar_api.service.utils import check_cluster_name
from .utils import login_required, api_response, audit_log
class ServiceRouteView(MethodView):
@login_required
def get(self, application_name, cluster_name):
"""Gets the outgoing route of specific cluster.
Example of response::
{
"status": "SUCCESS",
"message": "",
"data": {
"route": [
{"application_name": "base.foo", "intent": "direct",
"cluster_name": "alta1-channel-stable-1"},
{"application_name": "base.bar", "intent": "direct",
"cluster_name": "alta1-channel-stable-1"},
{"application_name": "base.baz", "intent": "direct",
"cluster_name": null},
]
}
}
:param application_name: The name of source application.
:param cluster_name: The name of source cluster.
:<header Authorization: Huskar Token (See :ref:`token`)
:status 200: The result is in the response.
"""
check_application(application_name)
check_cluster_name(cluster_name, application_name)
facade = RouteManagement(huskar_client, application_name, cluster_name)
route = sorted({
'application_name': route[0], 'intent': route[1],
'cluster_name': route[2],
} for route in facade.list_route())
return api_response({'route': route})
@login_required
def put(self, application_name, cluster_name, destination):
"""Changes the outgoing route of specific cluster.
:param application_name: The name of source application.
:param cluster_name: The name of source cluster.
:param destination: The name of destination application.
:form intent: The intent of route. (``direct``)
:form cluster_name: The name of destination cluster.
:<header Authorization: Huskar Token (See :ref:`token`)
:status 200: Operation success.
"""
# forbidden request when cluster_name in FORCE_ROUTING_CLUSTERS
if cluster_name in settings.FORCE_ROUTING_CLUSTERS:
abort(403, 'Can not modify {}\'s value'.format(cluster_name))
self._check_auth(application_name, destination)
check_cluster_name(cluster_name, application_name)
self._put(application_name, cluster_name, destination)
return api_response()
@login_required
def delete(self, application_name, cluster_name, destination):
"""Discards the outgoing route of specific cluster.
:param application_name: The name of source application.
:param cluster_name: The name of source cluster.
:param destination: The name of destination application.
:form intent: The intent of route. (``direct``)
:<header Authorization: Huskar Token (See :ref:`token`)
:status 200: Operation success.
"""
self._check_auth(application_name, destination)
check_cluster_name(cluster_name, application_name)
self._delete(application_name, cluster_name, destination)
return api_response()
def _get_intent(self):
intent = request.form.get('intent', ROUTE_DEFAULT_INTENT)
if intent not in settings.ROUTE_INTENT_LIST:
intent_list = u', '.join(settings.ROUTE_INTENT_LIST)
abort(400, u'intent must be one of %s' % intent_list)
return intent
def _check_auth(self, application_name, dest_application_name):
try:
check_application_auth(dest_application_name, Authority.WRITE)
except NoAuthError:
check_application_auth(application_name, Authority.WRITE)
@retry(OutOfSyncError, interval=1, max_retry=3)
def _put(self, application_name, cluster_name, dest_application_name):
dest_cluster_name = request.form['cluster_name'].strip()
check_cluster_name(dest_cluster_name, dest_application_name)
intent = self._get_intent()
facade = RouteManagement(huskar_client, application_name, cluster_name)
try:
facade.set_route(dest_application_name, dest_cluster_name, intent)
except EmptyClusterError as e:
abort(400, unicode(e))
audit_log.emit(
audit_log.types.UPDATE_ROUTE, application_name=application_name,
cluster_name=cluster_name, intent=intent,
dest_application_name=dest_application_name,
dest_cluster_name=dest_cluster_name)
@retry(OutOfSyncError, interval=1, max_retry=3)
def _delete(self, application_name, cluster_name, dest_application_name):
intent = self._get_intent()
facade = RouteManagement(huskar_client, application_name, cluster_name)
dest_cluster_name = facade.discard_route(dest_application_name, intent)
audit_log.emit(
audit_log.types.DELETE_ROUTE, application_name=application_name,
cluster_name=cluster_name, intent=intent,
dest_application_name=dest_application_name,
dest_cluster_name=dest_cluster_name)
class ServiceDefaultRouteView(MethodView):
@login_required
def get(self, application_name):
"""Gets the default route policy of specific application.
Example of response::
{
"status": "SUCCESS",
"message": "",
"data": {
"default_route": {
"overall": {
"direct": "channel-stable-2"
},
"altb1": {
"direct": "channel-stable-1"
}
},
"global_default_route": {
"direct": "channel-stable-2"
}
}
}
:param application_name: The name of specific application.
:<header Authorization: Huskar Token (See :ref:`token`)
:status 200: The result is in the response.
"""
check_application(application_name)
facade = RouteManagement(huskar_client, application_name, None)
default_route = facade.get_default_route()
return api_response({
'default_route': default_route,
'global_default_route': settings.ROUTE_DEFAULT_POLICY})
@login_required
@retry(OutOfSyncError, interval=1, max_retry=3)
def put(self, application_name):
"""Creates or updates a default route policy of specific application.
:param application_name: The name of specific application.
:form ezone: Optional. The ezone of default route. Default: ``overall``
:form intent: Optional. The intent of default route. Default:
``direct``
:form cluster_name: The name of destination cluster. The cluster name
must not be ezone prefixed.
:<header Authorization: Huskar Token (See :ref:`token`)
:status 200: Operation success.
"""
check_application_auth(application_name, Authority.WRITE)
ezone = request.form.get('ezone') or OVERALL
intent = request.form.get('intent') or ROUTE_DEFAULT_INTENT
cluster_name = request.form['cluster_name']
check_cluster_name(cluster_name, application_name)
facade = RouteManagement(huskar_client, application_name, None)
try:
default_route = facade.set_default_route(
ezone, intent, cluster_name)
except ValueError as e:
# TODO: Use a better validator instead
return api_response(
status='InvalidArgument', message=first(e.args, '')), 400
audit_log.emit(
audit_log.types.UPDATE_DEFAULT_ROUTE,
application_name=application_name, ezone=ezone, intent=intent,
cluster_name=cluster_name)
return api_response({
'default_route': default_route,
'global_default_route': settings.ROUTE_DEFAULT_POLICY})
@login_required
def delete(self, application_name):
"""Discards a default route policy of specific application.
:param application_name: The name of specific application.
:form ezone: Optional. The ezone of default route. Default: ``overall``
:form intent: Optional. The intent of default route. Default:
``direct``
:<header Authorization: Huskar Token (See :ref:`token`)
:status 200: Operation success.
"""
check_application_auth(application_name, Authority.WRITE)
ezone = request.form.get('ezone') or OVERALL
intent = request.form.get('intent') or ROUTE_DEFAULT_INTENT
facade = RouteManagement(huskar_client, application_name, None)
try:
default_route = facade.discard_default_route(ezone, intent)
except ValueError as e:
# TODO: Use a better validator instead
return api_response(
status='InvalidArgument', message=first(e.args, '')), 400
audit_log.emit(
audit_log.types.DELETE_DEFAULT_ROUTE,
application_name=application_name, ezone=ezone, intent=intent)
return api_response({
'default_route': default_route,
'global_default_route': settings.ROUTE_DEFAULT_POLICY})
| python |
import os
import numpy as np
import glob
import fire
import pandas as pd
from PIL import Image
import platform
# fix for windows
if platform.system() == 'Windows':
print('INFO: Path to openslide ddl is manually added to the path.')
openslide_path = r'C:\Users\ls19k424\Documents\openslide-win64-20171122\bin'
os.environ['PATH'] = openslide_path + ";" + os.environ['PATH']
import openslide
from wsi_to_png import PngExtractor
class TMAPngExtractor(PngExtractor):
"""
    This object extracts (patches of) an mrxs file to png format.
:param file_path: string
path to the mrxs file with the TMA spots
:param output_path: string
path to the output folder. The output format is the same name as the mrxs file,
with an appendix if multiple patches are extracted.
:param coord_csv: string
Path to the csv file. Expects this set of headers: "Centroid X (pixels)", "Centroid Y (pixels)", "Radius (pixels)" (in pixel values)
:param level: int (optional)
Level of the mrxs file that should be used for the conversion (default is 0).
    :param overwrite: overrides existing output
:param adjust_coord: default True. Adjusts the QuPath coordinates for the missing white border. (not necessary for ASAP extracted coordinates)
"""
def __init__(self, file_path: str, output_path: str, coord_csv: str, level: int = 0, overwrite: bool = False, adjust_coord: bool = True):
# initiate properties from parent class
super().__init__(file_path=file_path, output_path=output_path, level=level, overwrite=overwrite)
# instantiate class parameters
self.adjust_coord = adjust_coord
self.coord_csv = coord_csv
# overwrite
@property
def coord_files(self):
if self.coord_csv:
return glob.glob(os.path.join(self.coord_csv, f'*{self.staining}.xml')) if os.path.isdir(
self.coord_csv) else [self.coord_csv]
else:
return None
# overwrite
@property
def files_to_process(self):
# we only have one file to process
if len(self.wsi_files) == 1:
filename = os.path.splitext(os.path.basename(self.file_path))[0]
output_file_name = os.path.join(self.output_path,
f'{filename}-level{self.level}-TMAid')
# skip existing files, if overwrite = False
if not self.overwrite and os.path.isfile(f'{output_file_name}.png'):
print(f'File {output_file_name} already exists. Output saving is skipped. To overwrite add --overwrite.')
else:
return output_file_name, self.file_path, self.coord_csv
def _crop_wsi(self, wsi):
# This function crops the white space around the WSI away, so that it fits together with the
# coordinates extracted from QuPath. This is not necessary if the coordinates come from ASAP
#incorrect_WSI = wsi.read_region((0, 0), self.level, wsi.level_dimensions[self.level])
x, y = wsi.properties[openslide.PROPERTY_NAME_BOUNDS_X], wsi.properties[openslide.PROPERTY_NAME_BOUNDS_Y]
dim = (int(int(x)), int(int(y)))
w, h = wsi.properties[openslide.PROPERTY_NAME_BOUNDS_WIDTH], wsi.properties[
openslide.PROPERTY_NAME_BOUNDS_HEIGHT]
wh = (int(int(w) / 2 ** self.level), int(int(h) / 2 ** self.level))
return wsi.read_region(location=dim, level=self.level, size=wh)
# overwrite
def process_files(self):
# process the files with coordinates
if os.path.isfile(self.file_path) and os.path.isfile(self.coord_csv):
output_file_path_prefix, mrxs_path, coord_path = self.files_to_process
assert os.path.isfile(mrxs_path)
wsi_img = openslide.open_slide(mrxs_path)
if self.adjust_coord:
x, y = wsi_img.properties[openslide.PROPERTY_NAME_BOUNDS_X], wsi_img.properties[openslide.PROPERTY_NAME_BOUNDS_Y]
coords = self.parse_csv(coord_path, adjust_x=int(x), adjust_y=int(y))
else:
coords = self.parse_csv(coord_path)
# iterate over the patch-coordinates(s)
for tma_id, coord in coords:
output_file_path = f'{output_file_path_prefix}{tma_id}.png'
# skip existing files, if overwrite = False
if not self.overwrite and os.path.isfile(output_file_path):
print(f'File {output_file_path} already exists. Output saving is skipped. To overwrite add --overwrite.')
else:
# extract the patch
# coord = [[12578.9619, 43432.1758], [15987.166, 43432.1758], [15987.166, 46571.3086], [12578.9619, 46571.3086]]
png = self.extract_crop(wsi_img, coord)
# save the image
print(f'Saving image {output_file_path}')
Image.fromarray(png[:, :, :3]).save(output_file_path)
else:
# Something went wrong
print('mrxs and/or csv file paths are invalid.')
def parse_csv(self, coord_csv, adjust_x=0, adjust_y=0):
# reads the csv file and retrieves the coordinates and the TMA spot index
# coordinates have to be returned as [tl, tr, br, bl] ((0,0) is top-left)
csv = pd.read_csv(coord_csv, sep=';')
inds_coords = [self._get_inds_coords(row, adjust_x, adjust_y) for index, row in csv.iterrows()]
return [i for i in inds_coords if i] # remove None entries
def _get_inds_coords(self, csv_row, adjust_x=0, adjust_y=0):
# coordinates have to be returned as [[top-left], [top-right], [bottom-right], [bottom-left]] ((0,0) is top-left)
# only get coordinates if there is an id
if not np.isnan(csv_row['Core Unique ID']):
c_x, c_y = csv_row['Centroid X (pixels)'], csv_row['Centroid Y (pixels)']
# adjust coordinates, if we work with QuPath-extracted coordinates
radius = csv_row['Radius (pixels)']
coords = [[c_x - radius + adjust_x, c_y - radius + adjust_y], [c_x + radius + adjust_x, c_y - radius + adjust_y],
[c_x + radius + adjust_x, c_y + radius + adjust_y], [c_x - radius + adjust_x, c_y + radius + adjust_y]]
id = int(csv_row['Core Unique ID'])
return id, coords
def extract_tma(file_path: str, coord_csv: str, output_path: str, level: int = 0, overwrite: bool = False, adjust_coord: bool = True):
png_extractor = TMAPngExtractor(file_path=file_path, coord_csv=coord_csv, output_path=output_path, level=level,
overwrite=overwrite, adjust_coord=adjust_coord)
# process the files
png_extractor.process_files()
if __name__ == '__main__':
fire.Fire(extract_tma)
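# Hedged CLI sketch: python-fire exposes the arguments of extract_tma as
# command-line flags, so an invocation could look roughly like the following
# (the script name and all paths are hypothetical placeholders):
#   python tma_extractor.py --file_path /data/slides/array1.mrxs \
#       --coord_csv /data/coords/array1.csv --output_path /data/out \
#       --level 0 --overwrite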
| python |
#client.py
#!/usr/bin/python # This is client.py file
import socket # Import socket module
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 3333 # Reserve a port for your service.
msg = ''
s.connect((host, port))
while msg != 'SAIR':
print('Digite Mensagem:')
msg = input()
print('Mensagem enviada.')
    s.send(msg.encode()) # Need to convert the string into a byte array
print('Esperando resposta.')
resposta = s.recv(1024)
print('Resposta Recebida:',resposta.decode())
print('Desconectando')
s.close() | python |
"""
Calculations that deal with seismic moment tensors.
Notes from Lay and Wallace Chapter 8:
* Decomposition 1: Mij = isotropic + deviatoric
* Decomposition 2: Mij = isotropic + 3 vector dipoles
* Decomposition 3: Mij = isotropic + 3 double couples
* Decomposition 4: Mij = isotropic + 3 CLVDs
* Decomposition 5: Mij = isotropic + major DC + minor DC
* Decomposition 6: Mij = isotropic + DC + CLVD
The most useful in practice are Decomposition 1 and Decomposition 6.
"""
import numpy as np
def get_MT(mrr, mtt, mpp, mrt, mrp, mtp):
"""Build a matrix from the six components of the moment tensor"""
MT = np.array([[mrr, mrt, mrp], [mrt, mtt, mtp], [mrp, mtp, mpp]]);
return MT;
def diagonalize_MT(MT):
"""Return a diagonal matrix whose elements are the ordered eigenvalues."""
eigvals, eigvecs = np.linalg.eig(MT);
eigvals = sorted(eigvals)[::-1];
return np.diag(eigvals);
def get_deviatoric_MT(MT):
"""Get deviatoric MT (returns a matrix)"""
iso_MT = get_iso_MT(MT);
M_dev = np.subtract(MT, iso_MT);
return M_dev;
def get_iso_MT(MT):
"""Return the isotropic moment tensor (returns a matrix)"""
x = (1 / 3) * np.trace(MT);
iso_MT = np.multiply(np.eye(3), x);
return iso_MT
def get_clvd_dc_from_deviatoric_MT(MT):
"""
Return the dc and clvd components of a deviatoric MT, from Shearer Equation 9.14.
    Returns two matrices.
"""
eigenvalues = np.diag(MT);
assert(eigenvalues[0] > eigenvalues[1] > eigenvalues[2]), ValueError("Deviatoric eigenvalues out of order.")
dc_component = (1/2)*(eigenvalues[0]-eigenvalues[2]);
clvd_component = eigenvalues[1]*(1/2);
M_dc = np.diag([dc_component, 0, -dc_component]);
M_clvd = np.diag([-clvd_component, 2*clvd_component, -clvd_component]);
return M_clvd, M_dc;
def decompose_iso_dc_clvd(MT):
"""
A useful function to decompose a full moment tensor into an isotropic part, a double-couple, and a CLVD component.
Returns three matrices.
"""
diag_MT = diagonalize_MT(MT); # equivalent to a coordinate transformation
M_iso = get_iso_MT(diag_MT); # get the trace
M_dev = get_deviatoric_MT(diag_MT);
M_dev = diagonalize_MT(M_dev); # diagonalized in the proper order
M_clvd, M_dc = get_clvd_dc_from_deviatoric_MT(M_dev);
return M_iso, M_clvd, M_dc;
# def get_separate_scalar_moments(MT):
# """return isotropic, clvd, and double couple moments. Not frequently used."""
# M_iso, M_clvd, M_dc = decompose_iso_dc_clvd(MT);
# iso_moment = abs(M_iso[0][0]);
# clvd_moment = abs(M_clvd[0][0]);
# dc_moment = abs(M_dc[0][0]);
# return iso_moment, clvd_moment, dc_moment;
def get_total_scalar_moment(MT):
"""Shearer Equation 9.8: quadratic sum of element of moment tensor components, in newton-meters"""
MT = np.divide(MT, 1e16); # done to prevent computer buffer overflow
total = 0;
for i in range(3):
for j in range(3):
total = total + MT[i][j]*MT[i][j];
Mo = (1/np.sqrt(2)) * np.sqrt(total);
Mo = np.multiply(Mo, 1e16);
return Mo;
def get_percent_double_couple(MT):
"""Get the percent double couple and percent clvd moment from a deviatoric moment tensor.
When isotropic term is involved, this can get more complicated and there are several approaches.
See Shearer equation 9.17 for epsilon.
See Vavrycuk, 2001 for other approaches when isotropic component is involved. """
m_dev = diagonalize_MT(get_deviatoric_MT(MT));
epsilon = np.diag(m_dev)[1] / np.max([np.abs(np.diag(m_dev)[0]), np.abs(np.diag(m_dev)[2])]);
fraction = epsilon * 2;
perc_clvd = 100 * (abs(fraction));
perc_dc = 100 - perc_clvd;
return perc_dc, perc_clvd;
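# Minimal usage sketch: the six tensor components below are made-up values
# (in newton-meters), chosen only to illustrate the decomposition call chain.
if __name__ == "__main__":
    MT = get_MT(mrr=1.0e17, mtt=-0.4e17, mpp=-0.6e17,
                mrt=0.2e17, mrp=-0.1e17, mtp=0.3e17);
    M_iso, M_clvd, M_dc = decompose_iso_dc_clvd(MT);
    print("Scalar moment (N-m):", get_total_scalar_moment(MT));
    print("Percent DC / CLVD:", get_percent_double_couple(MT));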
| python |
#!/usr/bin/python
import sys,os
FUNC_NAME_LEN_MAX = 48
def ext_time_sec(l):
l = l.strip()
tks = l.split(" ")
try:
return float(tks[-1][:-1])
except:
print 'Invalid line to extract time duration: ' + l;
return None
def print_array_info(arr,detail=False):
arr.sort(reverse=True)
n = len(arr)
s = sum(arr)
avg = float(s)/float(n)
print 'sum: %-*f Len: %-*d Avg: %f' % (12,s,6,n,avg)
if detail:
tenths = [arr[i] for i in range(0,n,n/10)]
ratios = [sum([arr[j] for j in range(i,min(i+n/10,n))])/s for i in range(0,n,n/10)]
head = [arr[i] for i in range(min(10,n))]
tail = [arr[i] for i in range(max(-10,0-n),0)]
print 'Tenths: ' + str(tenths)
print 'Ratios: ' + str(ratios)
print ' Head: ' + str(head)
print ' Tail: ' + str(tail)
return [n,s,avg]
# This simulates the bottom-up, summary-based program analysis and estimates the amount of time we can save compared to the top-down approach.
# NOTE that the time saving is just an upper bound: even if we use function summaries, at each callsite we still need to apply the summary,
# which also costs some time (i.e. in this function we assume no extra cost for summary application).
stk = []
visited_funcs = {}
def calc_bottom_up_time(name,lvl,t):
global stk,visited_funcs
n = {'name':name,'level':lvl,'time':t,'reduction':0.0}
i = len(stk) - 1
s = 0.0
while i >= 0 and stk[i]['level'] > lvl:
s += stk[i]['reduction']
i -= 1
#If we have already visited current func, we have its summary and save the time to analyze it again.
if visited_funcs.has_key(name):
s = t
else:
visited_funcs[name] = 1
n['reduction'] = s
#push to stack and do reduction.
stk = stk[:i+1]
stk.append(n)
def time_analysis(tl):
global stk
t_inst = {
'visitLoadInst' : [],
'visitStoreInst' : [],
'visitGetElementPtrInst' : [],
}
ft = {}
with open(tl,'r') as f:
for l in f:
if not l.startswith('[TIMING]'):
continue
if l.find('All main anlysis done') >= 0:
break
if l.find('End func') >= 0:
#Statistics about the function analysis time.
#E.g.
#[TIMING] End func(5) svm_has_high_real_mode_segbase in: 121127
#Get the func name and its call depth
tks = l.split(' ')
ls = l[l.find('(')+1 : l.find(')')]
if not ls.isdigit() or len(tks) < 6:
print 'Invalid line to time a function execution: ' + l
continue
level = int(ls)
nm = tks[3]
t = ext_time_sec(l)
ft.setdefault(level,{}).setdefault(nm,[])
ft[level][nm] += [t]
calc_bottom_up_time(nm,level,t)
else:
#Statistics about the inst analysis time.
for ki in t_inst:
if l.find(ki) >= 0:
t = ext_time_sec(l)
if t:
t_inst[ki] += [t]
#Ok, it's the time to show the statistics.
#For insts:
print '=============INSTS============='
for ki in t_inst:
print '===== %-*s:' % (20,ki),
print_array_info(t_inst[ki],True)
#For funcs:
f_cnt = {}
print '=============FUNCS============='
for lvl in sorted(ft.keys()):
print '======== LEVEL: ' + str(lvl)
names = sorted(ft[lvl].keys(),key=lambda x:sum(ft[lvl][x]),reverse=True)
for nm in names:
print '%-*s' % (FUNC_NAME_LEN_MAX,nm),
[n,s,avg] = print_array_info(ft[lvl][nm])
[on,oavg] = f_cnt.setdefault(nm,[0,0.0])
f_cnt[nm] = [n+on,(float(n)*avg+float(on)*oavg)/float(n+on)]
print '=============DUPLICATED FUNCS============='
dcnt = 0
for nm in sorted(f_cnt.keys(),key = lambda x : f_cnt[x][0]*f_cnt[x][1]):
[n,avg] = f_cnt[nm]
if n > 1:
dcnt += 1
print '%-*s cnt: %-*d avg: %-*f total: %-*f' % (FUNC_NAME_LEN_MAX,nm,5,n,12,avg,12,n*avg)
print 'Ratio: %d/%d' % (dcnt,len(f_cnt))
print stk
orig = 0.0
save = 0.0
for n in stk:
orig += n['time']
save += n['reduction']
print 'Total: %f, Bottom-Up Saving: %f, After Saving: %f' % (orig,save,orig-save)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'Usage: ./time_analyzer.py log'
else:
time_analysis(sys.argv[1])
| python |
from stockfish import Stockfish
class StockfishPlayer:
def __init__(self, stockfishpath):
conf = {
"Write Debug Log": "false",
"Contempt": 0,
"Min Split Depth": 0,
"Threads": 1,
"Ponder": "false",
"Hash": 16,
"MultiPV": 1,
"Skill Level": 20,
"Move Overhead": 30,
"Minimum Thinking Time": 20,
"Slow Mover": 80,
"UCI_Chess960": "false",
}
self.stockfish = Stockfish(stockfishpath, parameters=conf)
def get_best_move(self, fen):
self.stockfish.set_fen_position(fen)
return self.stockfish.get_best_move()
| python |
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2017 Dan Persons ([email protected])
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from os import popen
import threading
from argparse import ArgumentParser
__version__ = '0.1'
def parseargs():
"""set config options"""
parser = ArgumentParser()
parser.add_argument('--version', action='version',
version='%(prog)s ' + str(__version__))
parser.add_argument('-n', action='store',
default='100', dest='tries',
help=('set the number of passwords to check'))
parser.add_argument('--file', action='store',
default='rand-wordlist.txt',
help=('set the wordlist file'))
parser.add_argument('--hydra-args', action='store',
default='', dest='hargs',
help=('pass additional arguments to hydra'))
parser.add_argument('--generate', action='store_true',
help=('generate a random wordlist to use'))
parser.add_argument('--user', action='store',
default='root',
help=('set the username to use (default: root)'))
parser.add_argument('--service', action='store',
default='ssh',
help=('set the service to target (default: ssh)'))
parser.add_argument('hosts', nargs='*',
metavar='HOST',
help=('set the target host'))
args = parser.parse_args()
return args
def wordlistgen(outfile):
"""Generate a 10 word random wordlist"""
# Use completely random characters to minimize chance of a success
popen('pwgen -sy1 10 10 > ' + outfile)
def wordlistsim(wordlist, host, user, service, runs, hargs):
"""Simulate a brute force attack by repeating a 10 word list"""
for n in range(int(runs)):
print('Starting run ' + str(n + 1) + '/' + str(runs) + \
' on host ' + host + '.')
o = popen('hydra -t 4 -l ' + user + ' ' + hargs + ' -P ' + \
wordlist + ' ' + host + ' ' + service).read()
def wordlistsimstart(wordlist, hosts, username, servicename, tries, hargs):
"""Start brute force attack simulations (one thread per host)"""
# Set the number of runs
with open(wordlist, 'r') as f:
lines = f.readlines()
nruns = int(tries) // len(lines)
del(lines)
print('Target hosts: ' + str(hosts) + '\n' + \
'User: ' + username + '\n' + \
'Service: ' + servicename + '\n' + \
'Max # of tries: ' + tries + '\n' + \
'Additional hydra args: ' + hargs)
# Start one thread per host and work in parallel
for host in hosts:
thread = threading.Thread(name=host,
target=wordlistsim,
args=(wordlist, host, username,
servicename, nruns,
hargs))
thread.start()
def runsim():
"""Run wordlist simulator program"""
args = parseargs()
if args.generate:
wordlistgen(args.file)
else:
wordlistsimstart(args.file, args.hosts, args.user, args.service,
args.tries, args.hargs)
def main():
runsim()
if __name__ == "__main__":
runsim()
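# Hedged usage sketch (the script name below is a placeholder): generate the
# random wordlist first, then point the simulator at lab hosts you own, e.g.
#   ./wordlist_sim.py --generate --file rand-wordlist.txt
#   ./wordlist_sim.py -n 200 --file rand-wordlist.txt --user root --service ssh 192.0.2.10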
| python |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, Dict
from ax.core.base import Base
if TYPE_CHECKING: # pragma: no cover
# import as module to make sphinx-autodoc-typehints happy
from ax import core # noqa F401
class Runner(Base, ABC):
"""Abstract base class for custom runner classes"""
@abstractmethod
def run(self, trial: "core.base_trial.BaseTrial") -> Dict[str, Any]:
"""Deploys a trial based on custom runner subclass implementation.
Args:
trial: The trial to deploy.
Returns:
Dict of run metadata from the deployment process.
"""
pass # pragma: no cover
def stop(self, trial: "core.base_trial.BaseTrial") -> None:
"""Stop a trial based on custom runner subclass implementation.
Optional to implement
Args:
trial: The trial to deploy.
"""
pass
@property
def staging_required(self) -> bool:
"""Whether the trial goes to staged or running state once deployed."""
return False
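# A minimal sketch of a concrete Runner subclass, kept as a comment so this module's
# behaviour is unchanged. The class name and the metadata keys below are illustrative
# assumptions, not part of the Ax API:
#
#   class LocalRunner(Runner):
#       def run(self, trial):
#           # Deploy `trial` to your execution backend here and return run metadata.
#           return {"deployed": True}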
| python |
"""
AUTHOR - Atharva Deshpande
GITHUB - https://github.com/AtharvaD11
QUESTION LINK - https://www.codechef.com/LRNDSA01/problems/FCTRL
"""
"""
ALGORITHM - The factorial of every n >= 5 has trailing zeroes. We repeatedly divide n by 5 until the quotient drops below 5,
summing the quotients as we go. The total of all the quotients is the number of trailing zeroes.
"""
# *****************************************
t = int(input())
for _ in range(t):
n = int(input())
zeroes = 0
quo = n
while True:
quo = quo//5
zeroes += quo
if quo < 5:
break
print(zeroes)
# *******************************************
| python |
from mlflow.pyfunc import PyFuncModel
from mlserver.types import InferenceRequest
from mlserver_mlflow import MLflowRuntime
from mlserver_mlflow.encoding import DefaultOutputName
def test_load(runtime: MLflowRuntime):
assert runtime.ready
assert type(runtime._model) == PyFuncModel
async def test_predict(runtime: MLflowRuntime, inference_request: InferenceRequest):
response = await runtime.predict(inference_request)
outputs = response.outputs
assert len(outputs) == 1
assert outputs[0].name == DefaultOutputName
| python |
#!/usr/bin/env python
from __future__ import print_function
import math
def intercept(pt, dt, pb, n):
return math.pow((pb * float(dt)**(n+1.0))/float(pt), 1.0/(n+1.0))
def main():
for i in range(0, 4):
print(intercept(45, 30, 30, i))
if __name__ == '__main__':
main()
| python |
from django.apps import AppConfig
class EmailAppConfig(AppConfig):
name = 'app.emails'
label = 'email_app'
verbose_name = 'Emails App'
default_app_config = 'app.emails.EmailAppConfig'
| python |
from django import template
# These are custom template filters.
register = template.Library()
@register.filter(name='author_url_converter')
def author_url_converter(value):
    # Take the author's API URL and return the equivalent front-facing profile URL by stripping the 'api/' segment.
return value.split('api/')[0] + value.split('api/')[-1]
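# Illustrative example (the URL below is an assumption, not taken from the project):
#   author_url_converter('http://example.com/api/authors/1/')
#   -> 'http://example.com/authors/1/'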
| python |
import socket
import re
from threading import Thread
from threading import Lock
import os
from mimetypes import MimeTypes
import Queue
class ClientHandler:
def __init__(self, clientSocket, clientAddress):
print "New thread."
self.mSocket = clientSocket
self.mSocket.settimeout(15)
self.mAddress = clientAddress
self.mRequestQueue = Queue.Queue()
self.mStop = False
self.mStopLock = Lock()
self.mThread = Thread(target=self.doInBackground)
self.mRequestRe = re.compile("^(.|\r\n)*?(GET (.)* HTTP\\/1\\.(1|0)\r\n(.|\r\n)+?\r\n\r\n)")
self.mGetCounter = 0
self.mShouldStop = False
def runLoop(self):
self.mBuffer = ''
try:
while not self.mShouldStop:
self.mSocket.settimeout(30)
data = self.mSocket.recv(1024)
if not data:
break
self.mBuffer = self.mBuffer + data
#Check if the buffer contains requests
self.parseReceive()
#Send requested files if we have them
self.sendFile()
print "Thread handled: " + str(self.mGetCounter) + " gets."
except socket.timeout:
if len(self.mBuffer.strip()) != 0:
self.sendBadRequest()
else:
self.sendTimeout()
except socket.error:
            try:
                self.mSocket.close()
            except socket.error:
                # Socket is already closed or unusable; nothing more to do.
                pass
def parseReceive(self):
while True:
#Check if the regex matches
matches = self.mRequestRe.match(self.mBuffer)
if not matches:
break
#This one is the part of the string we need
match = matches.groups()[1]
if not match:
break
else:
#Make a request based on string
request = HttpRequest(match)
if not request.mInvalid:
self.mGetCounter = self.mGetCounter + 1
#Get path and make it safe
path = str(request.mPath)
if path.startswith("/"):
path = "." + path
#Respond with index if they enter a directory
if path.endswith("/"):
path = path + "index.html"
request.mPath = path
self.mRequestQueue.put(request)
#Remove this match from buffer
self.mBuffer = self.mBuffer[len(match):]
def sendFile(self):
#If we don't need to send files, return
if self.mRequestQueue.empty():
return
#Get some values
request = self.mRequestQueue.get()
filepath = request.getURL()
connection = "close"
if request.containsKey("Connection"):
connection = request.getParam("Connection")
#Check if file exists
if not os.path.isfile(filepath):
            print "File not found: " + filepath
self.sendNotFound()
if connection.lower() == "close":
self.mShouldStop = True
return
try:
#Open file
file = open(filepath, "rb")
size = os.path.getsize(filepath)
#Get mime type
mime = MimeTypes()
mime_type = mime.guess_type(filepath)
if file.closed:
self.sendNotFound()
if connection.lower() == "close":
self.mShouldStop = True
return
#Send header
self.mSocket.send("HTTP/1.1 200 OK\r\nContent-Length: " + str(size) + "\r\nContent-Type: " + str(mime_type[0]) + "\r\n" + "Connection: " + connection + "\r\n\r\n")
#Init
bufferData = file.read(5024)
self.mSocket.send(bufferData)
#Keep sending as long as we have data
while bufferData:
bufferData = file.read(5024)
if not bufferData:
break
self.mSocket.send(bufferData)
#Do a non blocking read to support more than one get request per socket
self.mSocket.setblocking(0)
try:
data = self.mSocket.recv(1024)
#If we have data, try to parse requests
if data:
self.mBuffer = self.mBuffer + data
self.parseReceive()
except:
data = False
#Enable blocking again
self.mSocket.setblocking(1)
file.close()
if connection.lower() == "close":
print "Shutting down connection because of connection: close"
self.mSocket.close()
self.mShouldStop = True
except:
self.sendBadRequest()
print "Exception in send."
#Work off the files recursive because we check for more requests in the send loop
self.sendFile()
def sendNotFound(self):
self.mSocket.send("HTTP/1.1 404 Not Found\r\n\r\n<html><body>404 File not found.</body></html>")
def sendBadRequest(self):
try:
self.mSocket.send("HTTP/1.1 400 Bad Request\r\n\r\n<html><body>400 Bad Request.</body></html>")
self.mSocket.close()
self.mShouldStop = True
except:
print "Exception occurred when sending 400 Bad request."
def sendTimeout(self):
try:
self.mSocket.send("HTTP/1.1 408 Request Timeout\r\n\r\n<html><body>408 Request Timeout.</body></html>")
self.mSocket.close()
self.mShouldStop = True
except:
print "Exception occurred when sending 408 Request Timeout."
    def doInBackground(self):
        self.runLoop()
self.mSocket.close()
def getStop(self):
self.mStopLock.acquire()
retVal = self.mStop
self.mStopLock.release()
return retVal
def setStop(self, value):
self.mStopLock.acquire()
self.mStop = value
self.mStopLock.release()
def execute(self):
self.mThread.start()
def join(self):
self.mThread.join()
class HttpRequest:
def __init__(self, text):
self.mInvalid = True
self.mParams = dict()
self.mPath = False
self.parse(text)
def init(self, method, url, http1_1):
self.mHeaders = dict()
#If the url is empty, we need to add a / for root
if not url[2]:
self.setUrl("/")
else:
self.setUrl(url[2])
self.setHost(url[1])
self.setMethod(str(method).strip())
self.setHttp1_1(http1_1)
def setHeader(self, headerName, headerValue):
self.mHeaders[headerName] = headerValue
def clearHeaders(self):
self.mHeaders.clear()
def setHost(self, host):
self.setHeader("Host", host)
def setUrl(self, url):
self.mUrl = url
def setMethod(self, method):
self.mMethod = method
def setHttp1_1(self, http1_1):
if http1_1:
self.mVersion = "1.1"
else:
self.mVersion = "1.0"
def containsKey(self, key):
return (key in self.mParams)
def getParam(self, key):
return self.mParams[key]
def getHeaderQuery(self):
toRet = ""
for key, value in self.mHeaders.iteritems():
toRet = toRet + str(key) + ": " + str(value) + "\r\n"
return toRet
def getIdentifierQuery(self):
toRet = ""
toRet = toRet + str(self.mMethod) + " " + self.mUrl + " HTTP/" + self.mVersion + "\r\n"
return toRet
def getHttpRequest(self):
toRet = self.getIdentifierQuery()
toRet = toRet + self.getHeaderQuery()
toRet = toRet + "\r\n\r\n"
return toRet
def getURL(self):
return self.mPath
def parse(self, text):
self.mInvalid = False
#Split on \r\n
splits = re.split("\r\n", text)
httpFound = False
for split in splits:
#If we have not found the first line
if not httpFound:
#Check if the line matches the first line of an HTTP request
if re.match("GET .* HTTP\\/1\\.(1|0)", split):
httpFound = True
url = split[4:].strip()
versNo = url[len(url) - 9:]
url = url[0: len(url) - len(versNo)].strip()
versNo = versNo[6:].strip()
self.mPath = url
if versNo == "1.0":
self.mHttpVersion = 10
elif versNo == "1.1":
self.mHttpVersion = 11
elif versNo == "2.0":
self.mHttpVersion = 20
else:
continue
#If we have found the first line
else:
#We should be able to split on ":"
if re.match(".*:.*", split):
headerSplit = re.split(":", split)
left = headerSplit[0].strip()
right = ""
#There might be more than one ":", just concatenate
for i in range(1, len(headerSplit)):
if i == 1:
right = headerSplit[i]
else:
right = right + ":" + headerSplit[i]
right = right.strip()
self.mParams[left] = right
class HttpServer:
def __init__(self):
self.mInvalid = False
self.mPort = 8080
def open(self):
self.mSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.mSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.mSocket.bind(("localhost", self.mPort))
self.mSocket.listen(1)
self.mStop = False
def close(self):
self.mSocket.close()
def runServer(self):
self.open()
counter = 0
while True:
clientSocket, clientAddress = self.mSocket.accept()
#Make new handler
clientHandler = ClientHandler(clientSocket, clientAddress)
#Start it
clientHandler.execute()
counter = counter + 1
print "Sockets opened: " + str(counter)
server = HttpServer()
server.runServer()
| python |
# load the train and test data
# train the algorithm
# save the metrics and params
import os
import warnings
import sys
import pandas as pd
import numpy as np
from sklearn.metrics import precision_recall_fscore_support, accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from get_data import read_params
import argparse
import joblib
import json
warnings.filterwarnings("ignore")
def eval_metrics(actual, pred):
    # precision_recall_fscore_support returns (precision, recall, fscore, support) arrays
    scores_array = precision_recall_fscore_support(actual, pred)
    accuracy = accuracy_score(actual, pred)
    return scores_array[0], scores_array[1], accuracy
def train_and_evaluate(config_path):
config = read_params(config_path)
test_data_path = config["split_data"]["test_path"]
train_data_path = config["split_data"]["train_path"]
random_state = config["base"]["random_state"]
model_dir = config["model_dir"]
n_estimators_rd = config["estimators"]["RandomForestClassifier"]["params"]["n_estimators"]
min_samples_split_rd = config["estimators"]["RandomForestClassifier"]["params"]["min_samples_split"]
min_samples_leaf_rd = config["estimators"]["RandomForestClassifier"]["params"]["min_samples_leaf"]
max_features_rd = config["estimators"]["RandomForestClassifier"]["params"]["max_features"]
max_depth_rd = config["estimators"]["RandomForestClassifier"]["params"]["max_depth"]
criterion_rd = config["estimators"]["RandomForestClassifier"]["params"]["criterion"]
target = [config["base"]["target_col"]]
train = pd.read_csv(train_data_path, sep=",")
test = pd.read_csv(test_data_path, sep=",")
train_y = train[target]
test_y = test[target]
selected_features = ["Contract", "OnlineSecurity", "TechSupport", "tenure", "MonthlyCharges", "SeniorCitizen", "Dependents"]
train_x = train[selected_features]
test_x = test[selected_features]
classifier = RandomForestClassifier(
n_estimators=n_estimators_rd,
min_samples_split=min_samples_split_rd,
min_samples_leaf=min_samples_leaf_rd,
max_features=max_features_rd,
max_depth=max_depth_rd,
criterion=criterion_rd,
random_state=random_state)
classifier.fit(train_x, train_y)
predicted_qualities = classifier.predict(test_x)
(precision, recall, accuracy) = eval_metrics(test_y, predicted_qualities)
print(" Precision: %s" % precision[0])
print(" Recall: %s" % recall[0])
print(" Model_Score: %s" % accuracy)
#####################################################
scores_file = config["reports"]["scores"]
params_file = config["reports"]["params"]
with open(scores_file, "w") as f:
scores = {
"Precision": precision[0],
"Recall": recall[0],
"Model_Score": accuracy
}
json.dump(scores, f, indent=4)
with open(params_file, "w") as f:
params = {
"n_estimators":n_estimators_rd,
"min_samples_split":min_samples_split_rd,
"min_samples_leaf":min_samples_leaf_rd,
"max_features":max_features_rd,
"max_depth":max_depth_rd,
"criterion":criterion_rd
}
json.dump(params, f, indent=4)
#####################################################
os.makedirs(model_dir, exist_ok=True)
model_path = os.path.join(model_dir, "model.joblib")
joblib.dump(classifier, model_path)
if __name__=="__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", default="params.yaml")
parsed_args = args.parse_args()
train_and_evaluate(config_path=parsed_args.config) | python |
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
from matplotlib import cm
import numpy as np
# 6 October 2021
# https://matplotlib.org/3.1.0/tutorials/colors/colormap-manipulation.html
# https://stackoverflow.com/questions/11647261/create-a-colormap-with-white-centered-around-zero
# https://matplotlib.org/stable/api/_as_gen/matplotlib.colors.TwoSlopeNorm.html#matplotlib.colors.TwoSlopeNorm
epic_orbl = LinearSegmentedColormap.from_list(name='EpicOrangeBlue',
# RGBA (red, green, blue, alpha)
colors =[(1, 0.549019608, 0, 1), # darkorange
(1, 1., 1, 1),
(0.325490196, 0.2, 0.929411765, 1)], # royalblue
N=255
)
tol_precip_colors = [
"#90C987",
"#4EB256",
"#7BAFDE",
"#6195CF",
"#F7CB45",
"#EE8026",
"#DC050C",
"#A5170E",
"#72190E",
"#882E72",
"#000000"
]
precip_colormap = ListedColormap(tol_precip_colors)
# own colormaps
epic_cmaps = {
'epic_orbl' : epic_orbl,
'epic_precip' : precip_colormap
}
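# A small usage sketch, kept as a comment (the matplotlib.pyplot import and the data
# values are illustrative assumptions):
#
#   import matplotlib.pyplot as plt
#   data = np.random.randn(20, 20)
#   plt.imshow(data, cmap=epic_cmaps['epic_orbl'])
#   plt.colorbar()
#   plt.show()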
| python |
from flask import Blueprint, jsonify, redirect, render_template
from flask_user import current_user, login_required, roles_accepted
from app.models.pizza_models import Restaurant, PromoCode
# When using a Flask app factory we must use a blueprint to avoid needing 'app' for '@app.route'
restaurants_blueprint = Blueprint('restaurants', __name__, template_folder='templates')
@restaurants_blueprint.route('/')
@login_required
def index():
restaurants = Restaurant.query.all()
return render_template('pages/restaurants/index.html',restaurants=restaurants);
@restaurants_blueprint.route('/<slug>')
@login_required
def view(slug):
restaurant = Restaurant.query.filter_by(slug=slug).first_or_404()
promo_count = PromoCode.query.filter_by(restaurant_id=restaurant.id).count()
return render_template('pages/restaurants/view.html', restaurant=restaurant, promo_count=promo_count); | python |
'''
Created on 2015/11/08
@author: _
'''
from inspect import getargspec
ORIGINAL_ARGSPEC_ATTRIBUTE = "__originArgSpec__"
def getOriginalArgSpec(func):
'''
    Returns the ArgSpec of the given function
    @param func: the target function
    @return: ArgSpec instance of the function
'''
if not hasattr(func, ORIGINAL_ARGSPEC_ATTRIBUTE):
return getargspec(func)
else:
return func.__originArgSpec__
def inheritOriginalArgSpec(func, originFunc):
'''
    Saves the ArgSpec of originFunc onto func
    @param func: the target function
    @param originFunc: the original function
    @return: func, with the ArgSpec of the original function saved in the attribute ORIGINAL_ARGSPEC_ATTRIBUTE
'''
func.__originArgSpec__ = getOriginalArgSpec(originFunc)
return func
| python |
from rift.data.handler import get_handler
from rift import log
LOG = log.get_logger()
TENANT_COLLECTION = "tenants"
class Tenant(object):
def __init__(self, tenant_id, name=None):
self.tenant_id = tenant_id
self.name = name
def as_dict(self):
return {
"tenant_id": self.tenant_id,
"name": self.name,
}
@classmethod
def build_tenant_from_dict(cls, tenant_dict):
kwargs = {
'tenant_id': tenant_dict.get("tenant_id"),
'name': tenant_dict.get("name")
}
return Tenant(**kwargs)
@classmethod
def save_tenant(cls, tenant):
db_handler = get_handler()
db_handler.insert_document(
object_name=TENANT_COLLECTION, document=tenant.as_dict()
)
@classmethod
def get_tenant(cls, tenant_id):
db_handler = get_handler()
tenant_dict = db_handler.get_document(
object_name=TENANT_COLLECTION,
query_filter={"tenant_id": tenant_id})
# Create Tenant if it doesn't exist
if not tenant_dict:
LOG.info('Tenant {0} not found. Creating...'.format(tenant_id))
tenant = cls(tenant_id)
cls.save_tenant(tenant)
else:
tenant = Tenant.build_tenant_from_dict(tenant_dict)
return tenant
@classmethod
def update_tenant(cls, tenant):
db_handler = get_handler()
db_handler.update_document(
object_name=TENANT_COLLECTION,
document=tenant.as_dict(),
query_filter={"tenant_id": tenant.tenant_id}
)
| python |
DB_NAME = "server.db"
ANALYTICS_NAME_JSON = "analytics.json"
ANALYTICS_NAME_PNG = "analytics.png"
| python |
from typing import List
class RemoveCoveredIntervals:
def get_intervals(self, intervals: List[List[int]]) -> int:
        # Sort by start ascending; for equal starts put the longer interval first,
        # so any interval it covers appears after it.
        intervals.sort(key=lambda x: (x[0], -x[1]))
        resultIntervals = [intervals[0]]
        for current_left, current_right in intervals[1:]:
            previous_left, previous_right = resultIntervals[-1]
            # Skip intervals fully covered by the last interval we kept.
            if previous_left <= current_left and previous_right >= current_right:
                continue
            resultIntervals.append([current_left, current_right])
return len(resultIntervals)
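# Illustrative usage (input values are an assumption, not from the original source):
#   RemoveCoveredIntervals().get_intervals([[1, 4], [3, 6], [2, 8]])  # -> 2
#   ([3, 6] is covered by [2, 8], so two intervals remain)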
| python |
# coding: utf-8
'''
Created on Oct 20, 2014
@author: tmahrt
To be used in conjunction with get_pitch_and_intensity.praat.
For brevity, 'pitch_and_intensity' is referred to as 'PI'
'''
import os
from os.path import join
import math
import io
from praatio import dataio
from praatio import tgio
from praatio.utilities import utils
from praatio.utilities import myMath
from praatio import praatio_scripts
class OverwriteException(Exception):
def __str__(self):
return ("Performing this operation will result in the pitch files "
"being overwritten. Please change the output directory "
"to an alternative location or add a suffix to the output. ")
def _extractPIPiecewise(inputFN, outputFN, praatEXE,
minPitch, maxPitch, tgFN, tierName,
tmpOutputPath, sampleStep=0.01, silenceThreshold=0.03,
forceRegenerate=True, undefinedValue=None,
medianFilterWindowSize=0, pitchQuadInterp=False):
'''
Extracts pitch and int from each labeled interval in a textgrid
This has the benefit of being faster than using _extractPIFile if only
labeled regions need to have their pitch values sampled, particularly
for longer files.
Returns the result as a list. Will load the serialized result
if this has already been called on the appropriate files before
'''
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
windowSize = medianFilterWindowSize
assert(os.path.exists(inputFN))
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
utils.makeDir(tmpOutputPath)
splitAudioList = praatio_scripts.splitAudioOnTier(inputFN,
tgFN,
tierName,
tmpOutputPath,
False)
allPIList = []
for start, _, fn in splitAudioList:
tmpTrackName = os.path.splitext(fn)[0] + ".txt"
piList = _extractPIFile(join(tmpOutputPath, fn),
join(tmpOutputPath, tmpTrackName),
praatEXE, minPitch, maxPitch,
sampleStep, silenceThreshold,
forceRegenerate=True,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp)
piList = [("%0.3f" % (float(time) + start), str(pV), str(iV))
for time, pV, iV in piList]
allPIList.extend(piList)
allPIList = [",".join(row) for row in allPIList]
with open(outputFN, "w") as fd:
fd.write("\n".join(allPIList) + "\n")
piList = loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
return piList
def _extractPIFile(inputFN, outputFN, praatEXE,
minPitch, maxPitch, sampleStep=0.01, silenceThreshold=0.03,
forceRegenerate=True, tgFN=None, tierName=None,
undefinedValue=None, medianFilterWindowSize=0,
pitchQuadInterp=False):
'''
Extracts pitch and intensity values from an audio file
Returns the result as a list. Will load the serialized result
if this has already been called on the appropriate files before
'''
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
assert(os.path.exists(inputFN))
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
# The praat script uses append mode, so we need to clear any prior
# result
if os.path.exists(outputFN):
os.remove(outputFN)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
if tgFN is None or tierName is None:
argList = [inputFN, outputFN, sampleStep,
minPitch, maxPitch, silenceThreshold, -1, -1,
medianFilterWindowSize, doInterpolation]
scriptName = "get_pitch_and_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
else:
argList = [inputFN, outputFN, tgFN, tierName, sampleStep,
minPitch, maxPitch, silenceThreshold,
medianFilterWindowSize, doInterpolation]
scriptName = "get_pitch_and_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
piList = loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
return piList
def extractIntensity(inputFN, outputFN, praatEXE,
minPitch, sampleStep=0.01, forceRegenerate=True,
undefinedValue=None):
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
assert(os.path.exists(inputFN))
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
# The praat script uses append mode, so we need to clear any prior
# result
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [inputFN, outputFN, sampleStep,
minPitch, -1, -1]
scriptName = "get_intensity.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
iList = loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
return iList
def extractPitchTier(wavFN, outputFN, praatEXE,
minPitch, maxPitch, sampleStep=0.01,
silenceThreshold=0.03, forceRegenerate=True,
medianFilterWindowSize=0,
pitchQuadInterp=False):
'''
Extract pitch at regular intervals from the input wav file
Data is output to a text file and then returned in a list in the form
[(timeV1, pitchV1), (timeV2, pitchV2), ...]
sampleStep - the frequency to sample pitch at
silenceThreshold - segments with lower intensity won't be analyzed
for pitch
forceRegenerate - if running this function for the same file, if False
just read in the existing pitch file
pitchQuadInterp - if True, quadratically interpolate pitch
'''
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
assert(os.path.exists(wavFN))
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [wavFN, outputFN, sampleStep,
minPitch, maxPitch, silenceThreshold,
medianFilterWindowSize, doInterpolation]
scriptName = "get_pitchtier.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
pitchTier = dataio.open2DPointObject(outputFN)
return pitchTier
def extractPitch(wavFN, outputFN, praatEXE,
minPitch, maxPitch, sampleStep=0.01,
silenceThreshold=0.03, forceRegenerate=True,
undefinedValue=None, medianFilterWindowSize=0,
pitchQuadInterp=False):
'''
Extract pitch at regular intervals from the input wav file
Data is output to a text file and then returned in a list in the form
[(timeV1, pitchV1), (timeV2, pitchV2), ...]
sampleStep - the frequency to sample pitch at
silenceThreshold - segments with lower intensity won't be analyzed
for pitch
forceRegenerate - if running this function for the same file, if False
just read in the existing pitch file
undefinedValue - if None remove from the dataset, otherset set to
undefinedValue
pitchQuadInterp - if True, quadratically interpolate pitch
'''
outputPath = os.path.split(outputFN)[0]
utils.makeDir(outputPath)
if pitchQuadInterp is True:
doInterpolation = 1
else:
doInterpolation = 0
assert(os.path.exists(wavFN))
firstTime = not os.path.exists(outputFN)
if firstTime or forceRegenerate is True:
if os.path.exists(outputFN):
os.remove(outputFN)
argList = [wavFN, outputFN, sampleStep,
minPitch, maxPitch, silenceThreshold, -1, -1,
medianFilterWindowSize, doInterpolation]
scriptName = "get_pitch.praat"
scriptFN = join(utils.scriptsPath, scriptName)
utils.runPraatScript(praatEXE, scriptFN, argList)
piList = loadTimeSeriesData(outputFN, undefinedValue=undefinedValue)
return piList
def extractPI(inputFN, outputFN, praatEXE,
minPitch, maxPitch, sampleStep=0.01,
silenceThreshold=0.03, forceRegenerate=True,
tgFN=None, tierName=None, tmpOutputPath=None,
undefinedValue=None, medianFilterWindowSize=0,
pitchQuadInterp=False):
'''
Extracts pitch and intensity from a file wholesale or piecewise
If the parameters for a tg are passed in, this will only extract labeled
segments in a tier of the tg. Otherwise, pitch will be extracted from
the entire file.
male: minPitch=50; maxPitch=350
female: minPitch=75; maxPitch=450
'''
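    # Hypothetical call, for illustration only (the file paths and the location of the
    # praat executable are assumptions):
    #   piList = extractPI("speaker.wav", "speaker_pi.txt", "/usr/bin/praat",
    #                      minPitch=75, maxPitch=450)
    #   # -> [[time, f0, intensity], ...] sampled every `sampleStep` seconds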
outputPath = os.path.split(outputFN)[0]
windowSize = medianFilterWindowSize
if tgFN is None or tierName is None:
piList = _extractPIFile(inputFN, outputFN,
praatEXE, minPitch, maxPitch,
sampleStep, silenceThreshold, forceRegenerate,
undefinedValue=undefinedValue,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp)
else:
if tmpOutputPath is None:
tmpOutputPath = join(outputPath, "piecewise_output")
piList = _extractPIPiecewise(inputFN, outputFN,
praatEXE, minPitch, maxPitch,
tgFN, tierName, tmpOutputPath, sampleStep,
silenceThreshold, forceRegenerate,
undefinedValue=undefinedValue,
medianFilterWindowSize=windowSize,
pitchQuadInterp=pitchQuadInterp)
return piList
def loadTimeSeriesData(fn, undefinedValue=None):
'''
For reading the output of get_pitch_and_intensity or get_intensity
Data should be of the form
[(time1, value1a, value1b, ...),
(time2, value2a, value2b, ...), ]
'''
name = os.path.splitext(os.path.split(fn)[1])[0]
try:
with io.open(fn, "r", encoding='utf-8') as fd:
data = fd.read()
except IOError:
print("No pitch track for: %s" % name)
raise
dataList = data.splitlines()
dataList = [row.split(',') for row in dataList if row != '']
# The new praat script includes a header
if dataList[0][0] == "time":
dataList = dataList[1:]
newDataList = []
for row in dataList:
time = float(row.pop(0))
entry = [time, ]
doSkip = False
for value in row:
if '--' in value:
if undefinedValue is not None:
value = undefinedValue
else:
doSkip = True
break
else:
value = float(value)
entry.append(value)
if doSkip is True:
continue
newDataList.append(entry)
dataList = newDataList
return dataList
def generatePIMeasures(dataList, tgFN, tierName, doPitch,
medianFilterWindowSize=None):
'''
Generates processed values for the labeled intervals in a textgrid
nullLabelList - labels to ignore in the textgrid. Defaults to ["",]
if 'doPitch'=true get pitch measures; if =false get rms intensity
'''
tg = tgio.openTextgrid(tgFN)
piData = tg.tierDict[tierName].getValuesInIntervals(dataList)
outputList = []
for interval, entryList in piData:
label = interval[0]
if doPitch:
tmpValList = [f0Val for _, f0Val, _ in entryList]
f0Measures = getPitchMeasures(tmpValList, tgFN, label,
medianFilterWindowSize, True)
outputList.append(list(f0Measures))
else:
tmpValList = [intensityVal for _, _, intensityVal in entryList]
tmpValList = [intensityVal for intensityVal in tmpValList
if intensityVal != 0.0]
rmsIntensity = 0
if len(tmpValList) != 0:
rmsIntensity = myMath.rms(tmpValList)
outputList.append([rmsIntensity, ])
return outputList
def getPitchMeasures(f0Values, name=None, label=None,
medianFilterWindowSize=None,
filterZeroFlag=False,):
'''
Get various measures (min, max, etc) for the passed in list of pitch values
name is the name of the file. Label is the label of the current interval.
Both of these labels are only used debugging and can be ignored if desired.
medianFilterWindowSize: None -> no median filtering
filterZeroFlag:True -> zero values are removed
'''
if name is None:
name = "unspecified"
if label is None:
label = "unspecified"
if medianFilterWindowSize is not None:
f0Values = myMath.medianFilter(f0Values, medianFilterWindowSize,
useEdgePadding=True)
if filterZeroFlag:
f0Values = [f0Val for f0Val in f0Values if int(f0Val) != 0]
if len(f0Values) == 0:
myStr = u"No pitch data for file: %s, label: %s" % (name, label)
print(myStr.encode('ascii', 'replace'))
counts = 0
meanF0 = 0
maxF0 = 0
minF0 = 0
rangeF0 = 0
variance = 0
std = 0
else:
counts = float(len(f0Values))
meanF0 = sum(f0Values) / counts
maxF0 = max(f0Values)
minF0 = min(f0Values)
rangeF0 = maxF0 - minF0
variance = sum([(val - meanF0) ** 2 for val in f0Values]) / counts
std = math.sqrt(variance)
return (meanF0, maxF0, minF0, rangeF0, variance, std)
def detectPitchErrors(pitchList, maxJumpThreshold=0.70, tgToMark=None):
'''
Detect pitch halving and doubling errors.
If a textgrid is passed in, it adds the markings to the textgrid
'''
assert(maxJumpThreshold >= 0.0 and maxJumpThreshold <= 1.0)
errorList = []
for i in range(1, len(pitchList)):
lastPitch = pitchList[i - 1][1]
currentPitch = pitchList[i][1]
ceilingCutoff = currentPitch / maxJumpThreshold
floorCutoff = currentPitch * maxJumpThreshold
if((lastPitch <= floorCutoff) or (lastPitch >= ceilingCutoff)):
currentTime = pitchList[i][0]
errorList.append([currentTime, currentPitch / lastPitch])
if tgToMark is not None:
tierName = "pitch errors"
assert(tierName not in tgToMark.tierNameList)
pointTier = tgio.PointTier(tierName, errorList,
tgToMark.minTimestamp,
tgToMark.maxTimestamp)
tgToMark.addTier(pointTier)
return errorList, tgToMark
| python |
"""
In a row of seats, 1 represents a person sitting in that seat, and 0 represents that the seat is empty. There is
at least one empty seat, and at least one person sitting. Alex wants to sit in the seat such that the distance
between him and the closest person to him is maximized. Return that maximum distance to closest person.
Example 1:
Input: [1,0,0,0,1,0,1]
Output: 2
Explanation: If Alex sits in the second open seat (seats[2]), then the closest person has distance 2. If Alex
sits in any other open seat, the closest person has distance 1. Thus, the maximum distance to the
closest person is 2.
Example 2:
Input: [1,0,0,0]
Output: 3
Explanation: If Alex sits in the last seat, the closest person is 3 seats away. This is the maximum distance
possible, so the answer is 3.
Constraints:
1. 2 <= seats.length <= 20000
2. seats contains only 0s or 1s, at least one 0, and at least one 1.
"""
class Solution:
def maxDistToClosest(self, seats):
res, prev = 0, None
for i in range(len(seats)):
            if seats[i]:
                if prev is None:
                    # Leading run of empty seats: the best distance is the seat's index.
                    res = i
                else:
                    # Between two occupied seats, the best spot is the midpoint.
                    res = max(res, (i - prev) // 2)
                prev = i
return max(res, i - prev)
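# Illustrative usage (inputs taken from the problem statement above):
#   Solution().maxDistToClosest([1, 0, 0, 0, 1, 0, 1])  # -> 2
#   Solution().maxDistToClosest([1, 0, 0, 0])           # -> 3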
| python |
import re
import string
import shortuuid
alphabet = string.ascii_lowercase + string.digits
su = shortuuid.ShortUUID(alphabet=alphabet)
def shortuuid_random():
return str(su.random(length=8))
def to_snake_case(words):
regex_pattern = r'(?<!^)(?=[A-Z])'
if isinstance(words, str):
return re.sub(regex_pattern, '_', words.strip('_')).lower()
elif isinstance(words, list):
return [
re.sub(regex_pattern, '_', word).lower()
for word in words
]
else:
print('Helper to_snake_case function received bad input. Expected String or List.')
def get_fields_from_list_of_dicts(data: list):
top_level_columns = []
for row in data:
top_level_columns.extend(list(row.keys()))
return get_unique_fields_from_list(top_level_columns)
def get_unique_fields_from_list(fields: list):
return sorted(list(set(fields)))
def get_column_order_by_least_null(df) -> list:
null_count = df.isnull().sum(axis=0).sort_values()
return null_count.index.tolist()
| python |
try:
    from collections.abc import MutableSet  # collections.MutableSet was removed in Python 3.10
except ImportError:  # Python 2 fallback
    from collections import MutableSet
from . import helpers
FORWARD = 1 # used to look at Node children
BACKWARD = -1 # used to look at Node parents
class NodeSet(MutableSet):
"""
A mutable set which automatically populates parent/child node sets.
For example, if this NodeSet contains `children` nodes and a new node was added, that
node's `parent` NodeSet will automatically be populated with the owner of this NodeSet.
"""
__slots__ = ('owner', 'items', 'direction')
def __init__(self, owner, items, direction):
"""
:type owner: Node
:type items: set[Node]
:type direction: int
"""
self.owner = owner
self.items = set()
self.direction = direction
self.update(items)
def __iter__(self):
return iter(self.items)
def __len__(self):
return len(self.items)
def add(self, value):
"""
Adds the node to this NodeSet and populates the node's NodeSet with the owner
of this NodeSet.
:type value: Node
"""
if value not in self:
value.direction(self.direction * -1).items.add(self.owner)
return self.items.add(value)
def discard(self, value):
"""
Removes the node from this NodeSet and removes this NodeSet's owner from the
node's NodeSets.
:type value: Node
"""
if value in self:
value.direction(self.direction * -1).items.discard(self.owner)
return self.items.discard(value)
def update(self, nodes):
for node in nodes:
self.add(node)
def discard_many(self, nodes):
for node in nodes:
self.discard(node)
def one(self, raise_on_empty=False):
"""
Returns an item from this NodeSet if there is only one item.
:type raise_on_empty: bool
:rtype: Node | None
:raises: ValueError
"""
if not self.items and raise_on_empty:
raise ValueError('Called NodeSet.one on empty set')
elif len(self.items) > 1:
raise ValueError('Called NodeSet.one on set with multiple values')
return next(iter(self.items), None)
def __contains__(self, x):
return self.items.__contains__(x)
def __repr__(self):
return 'NodeSet{}'.format(tuple(self.items))
class Node(object):
__slots__ = ('parents', 'children', 'data')
def __init__(self, data=None, parents=None, children=None):
self.parents = NodeSet(self, [] if parents is None else parents, BACKWARD)
self.children = NodeSet(self, [] if children is None else children, FORWARD)
self.data = data
def __repr__(self):
return '<{} {}>'.format(type(self).__name__, self.data)
@property
def connections(self):
"""
Returns all parents and children associated with this Node.
:rtype: set[Node]
"""
return set(list(self.parents) + list(self.children))
def direction(self, direction):
"""
        Returns this node's parents if direction is BACKWARD; otherwise returns its children.
        :type direction: int
:rtype: NodeSet
"""
return self.parents if direction == BACKWARD else self.children
def depth_first_traversal(self, callback, direction, obj=None):
"""
Executes a depth-first traversal from this node in a given direction. Raising
a StopIteration will terminate the traversal.
:type callback: (Node, object) -> ()
:type direction: int
:type obj: Any
:return: Returns `obj` (or None if no `obj` is supplied).
:rtype: Any
"""
return helpers.depth_first_traversal_for_node(node=self, callback=callback, direction=direction, obj=obj)
def breadth_first_traversal(self, callback, direction, obj=None):
"""
Executes a breadth-first traversal from this node in a given direction. Raising
a StopIteration will terminate the traversal.
:type callback: (Node, object) -> ()
:type direction: int
:type obj: Any
:return: Returns `obj` (or None if no `obj` is supplied).
:rtype: Any
"""
return helpers.breadth_first_traversal_for_node(node=self, callback=callback, direction=direction, obj=obj)
def walk_links(self, callback, direction, obj=None):
"""
Walks the each link for this node. Raising a StopIteration will terminate the
traversal.
:type callback: (Node, Node, object) -> ()
:type direction: int
:type obj: Any
:return: Returns `obj` (or None if no `obj` is supplied).
:rtype: Any
"""
return helpers.walk_links_for_node(node=self, callback=callback, direction=direction, obj=obj)
def root(self):
"""
Returns the root node of this node if it only has one root node.
:rtype: Node
:raises: ValueError
"""
roots = self.roots()
if len(roots) > 1:
raise ValueError('Node.root is not applicable when the node has multiple roots')
return next(iter(roots))
def gather_nodes(self, direction=None):
"""
Returns all nodes in the tree. Nodes can be restricted by specifying a direction.
:type direction: int
:rtype: set[Node]
"""
return helpers.gather_nodes(node=self, direction=direction)
def flatten(self, direction=None):
"""
Returns a list of node lists representing a path on the tree.
:type direction: int | None
:rtype: list[list[treestruct.Node]]
"""
return helpers.flatten_from_node(node=self, direction=direction)
def roots(self):
"""
Returns all roots (any parent nodes with no parents) of this node.
:rtype: set[Node]
"""
return helpers.roots_for_node(node=self)
def leaves(self):
"""
Returns all leaves (any child nodes with no children) of this node.
:rtype: set[Node]
"""
return helpers.leaves_for_node(node=self)
def delete(self, direction=None):
"""
Removes this node from the NodeSets of connected nodes. If direction is
given, only remove the node from the connected nodes in the given direction.
:type direction: int
:rtype: Node
"""
return helpers.delete_node_relationships(node=self, direction=direction)
def clone(self):
"""
Clones the node and all its child nodes and forms a new root.
:rtype: Node
"""
return helpers.clone_subtree(node=self, cls=type(self))
def find_all(self, condition, direction=None):
"""
Returns all nodes which match the given condition.
:type condition: (Node) -> bool
:type direction: int
:rtype: set[Node]
"""
return helpers.find_nodes(node=self, condition=condition, direction=direction)
def find(self, condition, direction=None, raise_on_empty=False):
"""
Returns a single node which matches the given condition.
:type condition: (Node) -> bool
:type direction: int
:type raise_on_empty: bool
:rtype: Node | None
:raises: ValueError
"""
return helpers.find_node(node=self, condition=condition, direction=direction, raise_on_empty=raise_on_empty)
def to_dict(self, data_converter=None):
"""
Converts this node's complete structure into a dictionary.
:type data_converter: (Any) -> (Any) | None
:rtype: list[dict]
"""
return helpers.to_dict_from_node(node=self, data_converter=data_converter)
@classmethod
def from_dict(cls, tree_dict, data_converter=None):
"""
Converts a dict into a tree of Nodes, with the return value being the
root node.
:param tree_dict: dict
:type data_converter: (Any) -> (Any) | None
:rtype: Node
"""
return helpers.from_dict(tree_dict=tree_dict, data_converter=data_converter, cls=cls)
@classmethod
def from_nodes(cls, nodes):
"""
Creates a flat tree structure from a list of nodes. It is assumed that the first Node
in the list is the root and each subsequent Node is a child. Any existing parents or
children will be disregarded.
:type nodes: collections.Sequence[Node]
:rtype: Node
"""
return helpers.node_from_node_sequence(nodes=nodes, cls=cls)
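# A minimal usage sketch, kept as a comment (the data values are illustrative
# assumptions):
#
#   root = Node(data='root')
#   child = Node(data='child', parents={root})
#   # Adding `root` as a parent automatically registers `child` in root.children:
#   assert child in root.children and root in child.parents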
| python |
# -*- coding: utf-8 -*-
"""
__title__ = 'Draw the tree diagram of classes, main groups and subgroups'
__author__ = xiongliff
__mtime__ = '2019/7/16'
"""
import json
import os
from innojoy_calculate.common import const
from innojoy_calculate.utils import read_xls_data
class Node:
    def __init__(self, id, value, is_leaf, parent_id):
self.id =id
self.value = value
self.is_leaf = is_leaf
self.parent_id = parent_id
def get_value(self):
return self.value
def get_parent_id(self):
return self.parent_id
def get_id(self):
return self.id
def get_is_leaf(self):
return self.is_leaf
def add_value(self):
self.value += 1
def generate_tree_node_data(base_data_list):
"""
    Read base_data_list and build the corresponding tree structure by section, main class,
    subclass, main group and subgroup, stored in a dict.
    Each node has an id (the classification number), a value (its occurrence count) and a
    parent_id (the classification level it belongs to).
:param base_data_list:
:return:
"""
    # Stores {id: node}
id_node_dict = dict()
for base_data in base_data_list:
        # Read the column that holds the classification numbers
class_number_value = base_data[const.CLASS_NBR]
if isinstance(class_number_value, basestring) and class_number_value.strip() != "":
class_number_string_list = class_number_value.split(";")
if class_number_string_list:
for class_nbr_string in class_number_string_list:
                    # Section number: the first character of the classification code
part_class_nbr = class_nbr_string[0]
                    # Create a root node if part_class_nbr is new; otherwise increment its count
if part_class_nbr not in id_node_dict.keys():
part_class_nbr_node = Node(part_class_nbr, 1, False, None)
id_node_dict[part_class_nbr] = part_class_nbr_node
else:
id_node_dict[part_class_nbr].add_value()
                    # Main class number: the first 3 characters of the classification code
main_category_class_nbr = class_nbr_string[0:2]
                    # Create a node if main_category_class_nbr is new; otherwise increment its count
if main_category_class_nbr not in id_node_dict.keys():
main_category_class_nbr_node = Node(main_category_class_nbr, 1, False, part_class_nbr)
id_node_dict[main_category_class_nbr] = main_category_class_nbr_node
else:
id_node_dict[main_category_class_nbr].add_value()
                    # Subclass number: the first 4 characters of the classification code
sub_category_class_nbr = class_nbr_string[0:3]
                    # Create a node if sub_category_class_nbr is new; otherwise increment its count
if sub_category_class_nbr not in id_node_dict.keys():
sub_category_class_nbr_node = Node(sub_category_class_nbr, 1, False, main_category_class_nbr)
id_node_dict[sub_category_class_nbr] = sub_category_class_nbr_node
else:
id_node_dict[sub_category_class_nbr].add_value()
                    # Main group number: the part of the classification code before the "/"
main_group_class_nbr = class_nbr_string.split("/")[0]
                    # Create a node if main_group_class_nbr is new; otherwise increment its count
if main_group_class_nbr not in id_node_dict.keys():
main_group_class_nbr_node = Node(main_group_class_nbr, 1, False, sub_category_class_nbr)
id_node_dict[main_group_class_nbr] = main_group_class_nbr_node
else:
id_node_dict[main_group_class_nbr].add_value()
                    # Subgroup number: the part of the classification code before the "("
sub_group_class_nbr = class_nbr_string.split("(")[0]
                    # Create a node if sub_group_class_nbr is new; otherwise increment its count
if sub_group_class_nbr not in id_node_dict.keys():
sub_group_class_nbr_node = Node(sub_group_class_nbr, 1, True, main_group_class_nbr)
id_node_dict[sub_group_class_nbr] = sub_group_class_nbr_node
else:
id_node_dict[sub_group_class_nbr].add_value()
return id_node_dict
def generate_tree_display(id_node_dict):
"""
    Build the data structure required for display from id_node_dict:
    {name: ...,
     value: ...,
     children: []
     }
:param id_node_dict:
:return:
"""
node_list = id_node_dict.values()
root_node_list = list()
    # First find the root nodes, i.e. nodes whose parent_id is None
for node in node_list:
if not node.get_parent_id():
root_node = dict()
root_node['name'] = node.get_id()
root_node['value'] = node.get_value()
root_node['children'] = list()
root_node_list.append(root_node)
for root_node in root_node_list:
add_children_node(root_node, node_list)
    # If len(root_node_list) > 1, add a unified root node; otherwise use the single root
result_root_node = dict()
if len(root_node_list) > 1:
result_root_node['name'] = ''
result_root_node['value'] = ''
result_root_node['children'] = root_node_list
else:
result_root_node = root_node_list[0]
return result_root_node
def add_children_node(root_node, node_list):
for node in node_list:
if node.get_parent_id() == root_node['name']:
son_node = dict()
son_node['name'] = node.get_id()
son_node['value'] = node.get_value()
if not node.get_is_leaf():
son_node['children'] = list()
add_children_node(son_node, node_list)
root_node['children'].append(son_node)
def save_tree_data(base_base_data_list):
id_node_dict = generate_tree_node_data(base_base_data_list)
tree_data = generate_tree_display(id_node_dict)
try:
os.remove(const.JSON_PATH)
except WindowsError:
pass
file = open(const.JSON_PATH,'w')
file.write('display_tree = ')
json.dump(tree_data, file, ensure_ascii=False)
file.close()
if __name__ == '__main__':
# root_node = dict()
# root_node['name'] = 'B'
# root_node['value'] = 12
# root_node['children']= list()
# node1 = Node('B1',2,False,'B')
# node2 = Node('B2',3,False,'B')
# node3 = Node('B1C',5,True,'B1')
# node_list =[node1,node2,node3]
# add_children_node(root_node,node_list)
# print root_node
"""
    How to use: after save_tree_data finishes, the tree-structure data is saved as a js file at const.JSON_PATH.
    To view the generated tree diagram, open test.html in the innojoy_calculate directory (right click -> open with a browser).
"""
base_data_list = read_xls_data.read_data()
save_tree_data(base_data_list) | python |
# -*- coding: utf-8 -*-
"""Generate the Resilient customizations required for fn_task_utils"""
from __future__ import print_function
from resilient_circuits.util import *
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_task_utils package"""
reload_params = {"package": u"fn_task_utils",
"incident_fields": [],
"action_fields": [u"task_utils_task_name"],
"function_params": [u"incident_id", u"task_id", u"task_name", u"task_utils_note_body", u"task_utils_note_type", u"task_utils_payload"],
"datatables": [],
"message_destinations": [u"fn_task_utils"],
"functions": [u"task_utils_add_note", u"task_utils_close_task", u"task_utils_create", u"task_utils_update_task"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"task_utils_add_note_to_task", u"task_utils_close_task", u"task_utils_create_custom_task", u"task_utils_mark_task_optional"],
"actions": [u"Example: Task Utils - Add Note to Task", u"Example: Task Utils - Close Task", u"Example: Task Utils - Create Custom Task", u"Example: Task Utils - Make this Task Optional"],
"incident_artifact_types": []
}
return reload_params
def customization_data(client=None):
"""Produce any customization definitions (types, fields, message destinations, etc)
that should be installed by `resilient-circuits customize`
"""
# This import data contains:
# Action fields:
# task_utils_task_name
# Function inputs:
# incident_id
# task_id
# task_name
# task_utils_note_body
# task_utils_note_type
# task_utils_payload
# Message Destinations:
# fn_task_utils
# Functions:
# task_utils_add_note
# task_utils_close_task
# task_utils_create
# task_utils_update_task
# Workflows:
# task_utils_add_note_to_task
# task_utils_close_task
# task_utils_create_custom_task
# task_utils_mark_task_optional
# Rules:
# Example: Task Utils - Add Note to Task
# Example: Task Utils - Close Task
# Example: Task Utils - Create Custom Task
# Example: Task Utils - Make this Task Optional
yield ImportDefinition(u"""
eyJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMSwgIm1pbm9yIjogMCwgImJ1aWxkX251bWJl
ciI6IDQyMzUsICJ2ZXJzaW9uIjogIjMxLjAuNDIzNSJ9LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9u
IjogMiwgImlkIjogMTIsICJleHBvcnRfZGF0ZSI6IDE1NjAyNDE3NTY1NDUsICJmaWVsZHMiOiBb
eyJpZCI6IDIyMywgIm5hbWUiOiAiaW5jX3RyYWluaW5nIiwgInRleHQiOiAiU2ltdWxhdGlvbiIs
ICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDAsICJ0b29sdGlwIjogIldoZXRoZXIgdGhlIGlu
Y2lkZW50IGlzIGEgc2ltdWxhdGlvbiBvciBhIHJlZ3VsYXIgaW5jaWRlbnQuICBUaGlzIGZpZWxk
IGlzIHJlYWQtb25seS4iLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgImhpZGVfbm90aWZpY2F0
aW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6
IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6
ICJjM2YwZTNlZC0yMWUxLTRkNTMtYWZmYi1mZTVjYTMzMDhjY2EiLCAib3BlcmF0aW9ucyI6IFtd
LCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IHRydWUs
ICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJp
bmNpZGVudC9pbmNfdHJhaW5pbmciLCAidGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVkIjogZmFs
c2V9LCB7ImlkIjogMjk3LCAibmFtZSI6ICJ0YXNrX3V0aWxzX3Rhc2tfbmFtZSIsICJ0ZXh0Ijog
IlRhc2sgVXRpbHMgVGFzayBOYW1lIiwgInByZWZpeCI6ICJwcm9wZXJ0aWVzIiwgInR5cGVfaWQi
OiA2LCAidG9vbHRpcCI6ICIiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAidGV4
dCIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0
X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJu
YWwiOiBmYWxzZSwgInV1aWQiOiAiZWI5YjBmYWYtZGNmMi00YTI1LThmOGUtYjY0NTRiZGM1YWIw
IiwgIm9wZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10s
ICJyZWFkX29ubHkiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFs
c2UsICJleHBvcnRfa2V5IjogImFjdGlvbmludm9jYXRpb24vdGFza191dGlsc190YXNrX25hbWUi
LCAidGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVkIjogZmFsc2V9LCB7ImlkIjogMjk4LCAibmFt
ZSI6ICJ0YXNrX3V0aWxzX3BheWxvYWQiLCAidGV4dCI6ICJ0YXNrX3V0aWxzX3BheWxvYWQiLCAi
cHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMSwgInRvb2x0aXAiOiAiQSBKU09OIE9iamVjdCB3
aGljaCBtYXkgY29udGFpbiB0aGUgUGhhc2UsIEluc3RydWN0aW9uIFNldCBvciBBc3NpZ25lZCBV
c2VyIHZhbHVlcyBmb3IgYSBuZXcgdGFzayIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlw
ZSI6ICJ0ZXh0YXJlYSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFs
c2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZh
bHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1aWQiOiAiMzYxZjQyMGYtMzE4NC00NDI2LTlmZGMt
NzY0OWQ1OGVhYzkyIiwgIm9wZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAi
dmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmlj
aF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5IjogIl9fZnVuY3Rpb24vdGFza191dGlsc19wYXls
b2FkIiwgInRlbXBsYXRlcyI6IFt7ImlkIjogMywgIm5hbWUiOiAiQ3JlYXRlIE5ldyBPcHRpb25h
bCBUYXNrIiwgInRlbXBsYXRlIjogeyJmb3JtYXQiOiAidGV4dCIsICJjb250ZW50IjogIntcblwi
bmFtZVwiOiBcIk9wdGlvbmFsIFRhc2sgQ3JlYXRlZCB1c2luZyBUYXNrIFV0aWxzIEludGVncmF0
aW9uXCJcblwicmVxdWlyZWRcIiA6IGZhbHNlXG59XG4ifSwgInV1aWQiOiAiZWI4ZTdiNzUtMTFm
Yy00Y2M1LWIwZTItMWJmODllNjA2NmRlIn0sIHsiaWQiOiAyLCAibmFtZSI6ICJDcmVhdGUgTmV3
IFJlcXVpcmVkIFRhc2sgd2l0aCBJbnN0cnVjdGlvbnMgVGV4dCIsICJ0ZW1wbGF0ZSI6IHsiZm9y
bWF0IjogInRleHQiLCAiY29udGVudCI6ICJ7XG5cIm5hbWVcIjogXCJSZXF1aXJlZCBUYXNrIENy
ZWF0ZWQgdXNpbmcgVGFzayBVdGlscyBJbnRlZ3JhdGlvblwiLFxuXCJpbnN0cl90ZXh0XCI6IFwi
UGxlYXNlIGNvbXBsZXRlIHRoaXMgcmVxdWlyZWQgdGFzay5cIixcblwicmVxdWlyZWRcIiA6IHRy
dWVcbn1cbiJ9LCAidXVpZCI6ICIxNzZhMmIwNS03NGYwLTRlNDctOTUxZC05ZTA5NDg3MWIwMmIi
fSwgeyJpZCI6IDEsICJuYW1lIjogIlVwZGF0ZSBFeGlzdGluZyBUYXNrIE1hcmtpbmcgaXQgYXMg
b3B0aW9uYWwiLCAidGVtcGxhdGUiOiB7ImZvcm1hdCI6ICJ0ZXh0IiwgImNvbnRlbnQiOiAie1xu
XCJyZXF1aXJlZFwiOiBmYWxzZVxufSJ9LCAidXVpZCI6ICJjZDc2ZTZlYy1hYTZkLTRmYTMtOTM0
ZS04ZmFhMjY5MGM0NjMifV0sICJkZXByZWNhdGVkIjogZmFsc2V9LCB7ImlkIjogMzAxLCAibmFt
ZSI6ICJ0YXNrX2lkIiwgInRleHQiOiAidGFza19pZCIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9p
ZCI6IDExLCAidG9vbHRpcCI6ICIiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAi
bnVtYmVyIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRl
ZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJp
bnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICJiYTMxODI2MS1lZDZhLTRhMzgtYTE4Ny05ZTBiNjhk
MTYwNGYiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMi
OiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQi
OiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi90YXNrX2lkIiwgInRlbXBsYXRlcyI6
IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwgeyJpZCI6IDI5OSwgIm5hbWUiOiAidGFza19uYW1l
IiwgInRleHQiOiAidGFza19uYW1lIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTEsICJ0
b29sdGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgImhp
ZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2Vu
X2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZh
bHNlLCAidXVpZCI6ICJlOWFmMDc3YS0zYjFjLTRmZjctYmVmYi04ZGZlZGQzYzdkYmMiLCAib3Bl
cmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRf
b25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4
cG9ydF9rZXkiOiAiX19mdW5jdGlvbi90YXNrX25hbWUiLCAidGVtcGxhdGVzIjogW10sICJkZXBy
ZWNhdGVkIjogZmFsc2V9LCB7ImlkIjogMzAyLCAibmFtZSI6ICJpbmNpZGVudF9pZCIsICJ0ZXh0
IjogImluY2lkZW50X2lkIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTEsICJ0b29sdGlw
IjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAicmVxdWly
ZWQiOiAiYWx3YXlzIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxz
ZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFs
c2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI4MTFlOTlkNy1kMTk0LTRjZTgtODZjYy1h
ZmY1ZTAxYWI4NWMiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2
YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNo
X3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9pbmNpZGVudF9pZCIsICJ0
ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sIHsiaWQiOiAzMDAsICJuYW1lIjog
InRhc2tfdXRpbHNfbm90ZV90eXBlIiwgInRleHQiOiAidGFza191dGlsc19ub3RlX3R5cGUiLCAi
cHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMSwgInRvb2x0aXAiOiAiQSBmaWVsZCB1c2VkIHRv
IHNwZWNpZnkgd2hlcmUgdGhlIGZpZWxkIHRhc2tfdXRpbHNfbm90ZV9ib2R5IGlzIHBsYWludGV4
dCBvciBodG1sLiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJzZWxlY3QiLCAi
cmVxdWlyZWQiOiAiYWx3YXlzIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4i
OiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9u
IjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI1ZTIzYjE0NS0yYmVjLTRkODIt
YWQ1MC1kYzExNWYzNTUzNjAiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjog
e30sICJ2YWx1ZXMiOiBbeyJ2YWx1ZSI6IDE4MDAsICJsYWJlbCI6ICJ0ZXh0IiwgImVuYWJsZWQi
OiB0cnVlLCAicHJvcGVydGllcyI6IG51bGwsICJ1dWlkIjogIjZhMmU0ZTEzLTdmMGMtNGRhYi1h
NDYwLTg2YjA0NzY5Y2VlYiIsICJoaWRkZW4iOiBmYWxzZSwgImRlZmF1bHQiOiB0cnVlfSwgeyJ2
YWx1ZSI6IDE4MDEsICJsYWJlbCI6ICJodG1sIiwgImVuYWJsZWQiOiB0cnVlLCAicHJvcGVydGll
cyI6IG51bGwsICJ1dWlkIjogIjU2MWZkMGY5LWM1NTctNGQ2My1hOGZjLTIwNTM5ZWE4Yzg2YSIs
ICJoaWRkZW4iOiBmYWxzZSwgImRlZmF1bHQiOiBmYWxzZX1dLCAicmVhZF9vbmx5IjogZmFsc2Us
ICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJf
X2Z1bmN0aW9uL3Rhc2tfdXRpbHNfbm90ZV90eXBlIiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVj
YXRlZCI6IGZhbHNlfSwgeyJpZCI6IDMwMywgIm5hbWUiOiAidGFza191dGlsc19ub3RlX2JvZHki
LCAidGV4dCI6ICJ0YXNrX3V0aWxzX25vdGVfYm9keSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9p
ZCI6IDExLCAidG9vbHRpcCI6ICJBIFRleHQgZmllbGQgdXNlZCB0byBzcGVjaWZ5IHRoZSBub3Rl
IHRoYXQgd2lsbCBiZSBhZGRlZCB0byBhIGdpdmVuIFRhc2suIEFjY2VwdHMgdGV4dCBvciBodG1s
IGFuZCBpcyBwYXJzZWQgYmFzZWQgb24gdGhlIHJlc3VsdCBvZiB0YXNrX3V0aWxzX25vdGVfdHlw
ZS4gRGVmYXVsdCBpcyB0ZXh0IiwgInBsYWNlaG9sZGVyIjogIiIsICJpbnB1dF90eXBlIjogInRl
eHQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVs
dF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImludGVy
bmFsIjogZmFsc2UsICJ1dWlkIjogIjQ0YzRjNjAwLWYyMmQtNGJmNS1iYmY0LWVmYWQzNThmMDJm
NSIsICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtd
LCAicmVhZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZh
bHNlLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL3Rhc2tfdXRpbHNfbm90ZV9ib2R5IiwgInRl
bXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfV0sICJpbmNpZGVudF90eXBlcyI6IFt7
InVwZGF0ZV9kYXRlIjogMTU2MDI0MTc1NzcyNCwgImNyZWF0ZV9kYXRlIjogMTU2MDI0MTc1Nzcy
NCwgInV1aWQiOiAiYmZlZWMyZDQtMzc3MC0xMWU4LWFkMzktNGEwMDA0MDQ0YWEwIiwgImRlc2Ny
aXB0aW9uIjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJleHBvcnRfa2V5
IjogIkN1c3RvbWl6YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJuYW1lIjogIkN1c3RvbWl6
YXRpb24gUGFja2FnZXMgKGludGVybmFsKSIsICJlbmFibGVkIjogZmFsc2UsICJzeXN0ZW0iOiBm
YWxzZSwgInBhcmVudF9pZCI6IG51bGwsICJoaWRkZW4iOiBmYWxzZSwgImlkIjogMH1dLCAicGhh
c2VzIjogW10sICJhdXRvbWF0aWNfdGFza3MiOiBbXSwgIm92ZXJyaWRlcyI6IFtdLCAibWVzc2Fn
ZV9kZXN0aW5hdGlvbnMiOiBbeyJuYW1lIjogImZuX3Rhc2tfdXRpbHMiLCAicHJvZ3JhbW1hdGlj
X25hbWUiOiAiZm5fdGFza191dGlscyIsICJkZXN0aW5hdGlvbl90eXBlIjogMCwgImV4cGVjdF9h
Y2siOiB0cnVlLCAidXNlcnMiOiBbImFsZnJlZEB3YXluZWNvcnAuY29tIl0sICJ1dWlkIjogImM2
OTdkOTc3LWMxNzMtNDJlMS1iNmQ4LTg1ODBiNTkxZjBjYiIsICJleHBvcnRfa2V5IjogImZuX3Rh
c2tfdXRpbHMifV0sICJhY3Rpb25zIjogW3siaWQiOiAyNywgIm5hbWUiOiAiRXhhbXBsZTogVGFz
ayBVdGlscyAtIEFkZCBOb3RlIHRvIFRhc2siLCAidHlwZSI6IDEsICJvYmplY3RfdHlwZSI6ICJp
bmNpZGVudCIsICJjb25kaXRpb25zIjogW10sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVzc2FnZV9k
ZXN0aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsidGFza191dGlsc19hZGRfbm90ZV90b190
YXNrIl0sICJ2aWV3X2l0ZW1zIjogW10sICJ0aW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQi
OiAiZTUyMzE3NDEtMTk2Yy00MzAzLTg3MmYtNDI1NDc4MDVlZDU1IiwgImV4cG9ydF9rZXkiOiAi
RXhhbXBsZTogVGFzayBVdGlscyAtIEFkZCBOb3RlIHRvIFRhc2siLCAibG9naWNfdHlwZSI6ICJh
bGwifSwgeyJpZCI6IDI4LCAibmFtZSI6ICJFeGFtcGxlOiBUYXNrIFV0aWxzIC0gQ2xvc2UgVGFz
ayIsICJ0eXBlIjogMSwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgImNvbmRpdGlvbnMiOiBb
XSwgImF1dG9tYXRpb25zIjogW10sICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdLCAid29ya2Zs
b3dzIjogWyJ0YXNrX3V0aWxzX2Nsb3NlX3Rhc2siXSwgInZpZXdfaXRlbXMiOiBbeyJzdGVwX2xh
YmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZp
ZWxkX3R5cGUiOiAiYWN0aW9uaW52b2NhdGlvbiIsICJjb250ZW50IjogImViOWIwZmFmLWRjZjIt
NGEyNS04ZjhlLWI2NDU0YmRjNWFiMCIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9XSwgInRp
bWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6ICIzNWVhNTcyYi1iN2M3LTQ2NTMtYjRmMC1l
ZmI4Zjg2MWI3YTMiLCAiZXhwb3J0X2tleSI6ICJFeGFtcGxlOiBUYXNrIFV0aWxzIC0gQ2xvc2Ug
VGFzayIsICJsb2dpY190eXBlIjogImFsbCJ9LCB7ImlkIjogMzAsICJuYW1lIjogIkV4YW1wbGU6
IFRhc2sgVXRpbHMgLSBDcmVhdGUgQ3VzdG9tIFRhc2siLCAidHlwZSI6IDEsICJvYmplY3RfdHlw
ZSI6ICJpbmNpZGVudCIsICJjb25kaXRpb25zIjogW10sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVz
c2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsidGFza191dGlsc19jcmVhdGVf
Y3VzdG9tX3Rhc2siXSwgInZpZXdfaXRlbXMiOiBbeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3df
aWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiYWN0aW9u
aW52b2NhdGlvbiIsICJjb250ZW50IjogImViOWIwZmFmLWRjZjItNGEyNS04ZjhlLWI2NDU0YmRj
NWFiMCIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9XSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2
NDAwLCAidXVpZCI6ICI4YWYxYmMyZi01Y2JhLTQ1OWYtYTE1ZC1lNjkzNWU1NWEzN2YiLCAiZXhw
b3J0X2tleSI6ICJFeGFtcGxlOiBUYXNrIFV0aWxzIC0gQ3JlYXRlIEN1c3RvbSBUYXNrIiwgImxv
Z2ljX3R5cGUiOiAiYWxsIn0sIHsiaWQiOiAzMSwgIm5hbWUiOiAiRXhhbXBsZTogVGFzayBVdGls
cyAtIE1ha2UgdGhpcyBUYXNrIE9wdGlvbmFsIiwgInR5cGUiOiAxLCAib2JqZWN0X3R5cGUiOiAi
dGFzayIsICJjb25kaXRpb25zIjogW10sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVzc2FnZV9kZXN0
aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsidGFza191dGlsc19tYXJrX3Rhc2tfb3B0aW9u
YWwiXSwgInZpZXdfaXRlbXMiOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6
ICJiYjFkNzU4NS04ZWE0LTQwYmEtYWI4Mi1hZDEwNzNjNjNjZGUiLCAiZXhwb3J0X2tleSI6ICJF
eGFtcGxlOiBUYXNrIFV0aWxzIC0gTWFrZSB0aGlzIFRhc2sgT3B0aW9uYWwiLCAibG9naWNfdHlw
ZSI6ICJhbGwifV0sICJsYXlvdXRzIjogW10sICJub3RpZmljYXRpb25zIjogbnVsbCwgInRpbWVm
cmFtZXMiOiBudWxsLCAibG9jYWxlIjogbnVsbCwgImluZHVzdHJpZXMiOiBudWxsLCAicmVndWxh
dG9ycyI6IG51bGwsICJnZW9zIjogbnVsbCwgInRhc2tfb3JkZXIiOiBbXSwgImFjdGlvbl9vcmRl
ciI6IFtdLCAidHlwZXMiOiBbXSwgInNjcmlwdHMiOiBbXSwgImluY2lkZW50X2FydGlmYWN0X3R5
cGVzIjogW10sICJ3b3JrZmxvd3MiOiBbeyJ3b3JrZmxvd19pZCI6IDUsICJuYW1lIjogIkV4YW1w
bGU6IFRhc2sgVXRpbHMgLSBNYXJrIFRhc2sgYSBPcHRpb25hbCIsICJwcm9ncmFtbWF0aWNfbmFt
ZSI6ICJ0YXNrX3V0aWxzX21hcmtfdGFza19vcHRpb25hbCIsICJvYmplY3RfdHlwZSI6ICJ0YXNr
IiwgImRlc2NyaXB0aW9uIjogIkFuIGV4YW1wbGUgd29ya2Zsb3cgd2hpY2ggaXMgaW52b2tlZCBv
biBhIFRhc2sgb2JqZWN0IHNldHRpbmcgaXRzIFJlcXVpcmVkIGF0dHJpYnV0ZSB0byBmYWxzZS4i
LCAiY3JlYXRvcl9pZCI6ICJhbGZyZWRAd2F5bmVjb3JwLmNvbSIsICJsYXN0X21vZGlmaWVkX2J5
IjogImFsZnJlZEB3YXluZWNvcnAuY29tIiwgImxhc3RfbW9kaWZpZWRfdGltZSI6IDE1NTgwOTIw
NDE3MzEsICJleHBvcnRfa2V5IjogInRhc2tfdXRpbHNfbWFya190YXNrX29wdGlvbmFsIiwgInV1
aWQiOiAiOTJhYWMzODAtY2IzMy00NTRkLWExMmYtOTVlZmUxNTU0ZWY3IiwgImNvbnRlbnQiOiB7
IndvcmtmbG93X2lkIjogInRhc2tfdXRpbHNfbWFya190YXNrX29wdGlvbmFsIiwgInhtbCI6ICI8
P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1s
bnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6
YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5z
Om9tZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpv
bWdkaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVz
aWxpZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRw
Oi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMu
b3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3
dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJ0YXNrX3V0aWxzX21hcmtfdGFza19v
cHRpb25hbFwiIGlzRXhlY3V0YWJsZT1cInRydWVcIiBuYW1lPVwiRXhhbXBsZTogVGFzayBVdGls
cyAtIE1hcmsgVGFzayBhIE9wdGlvbmFsXCI+PGRvY3VtZW50YXRpb24+QW4gZXhhbXBsZSB3b3Jr
ZmxvdyB3aGljaCBpcyBpbnZva2VkIG9uIGEgVGFzayBvYmplY3Qgc2V0dGluZyBpdHMgUmVxdWly
ZWQgYXR0cmlidXRlIHRvIGZhbHNlLjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0
YXJ0RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMTJlbDZpeDwvb3V0Z29p
bmc+PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzFjYmk1azJcIiBu
YW1lPVwiVGFzayBVdGlsczogVXBkYXRlIFRhc2tcIiByZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9u
XCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6ZnVuY3Rpb24gdXVpZD1cImJkOGRmNTNh
LTY0NDMtNDBmMS05MGY5LWViNTk5Mzk5NjkyNVwiPntcImlucHV0c1wiOntcIjM2MWY0MjBmLTMx
ODQtNDQyNi05ZmRjLTc2NDlkNThlYWM5MlwiOntcImlucHV0X3R5cGVcIjpcInN0YXRpY1wiLFwi
c3RhdGljX2lucHV0XCI6e1wibXVsdGlzZWxlY3RfdmFsdWVcIjpbXSxcInRleHRfY29udGVudF92
YWx1ZVwiOntcImZvcm1hdFwiOlwidGV4dFwiLFwiY29udGVudFwiOlwie1xcblxcXCJyZXF1aXJl
ZFxcXCI6IGZhbHNlXFxufVwifX19fSxcInByZV9wcm9jZXNzaW5nX3NjcmlwdFwiOlwiaW5wdXRz
LnRhc2tfaWQgPSB0YXNrLmlkXFxuaW5wdXRzLmluY2lkZW50X2lkID0gaW5jaWRlbnQuaWRcXG5c
In08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1
ZW5jZUZsb3dfMTJlbDZpeDwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xMGR6aXBi
PC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3df
MTJlbDZpeFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNl
cnZpY2VUYXNrXzFjYmk1azJcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMWlkcGYxd1wiPjxp
bmNvbWluZz5TZXF1ZW5jZUZsb3dfMTBkemlwYjwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVu
Y2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzEwZHppcGJcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFz
a18xY2JpNWsyXCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMWlkcGYxd1wiLz48dGV4dEFubm90YXRp
b24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxv
dyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRp
b25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1c
IlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0g
aWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRl
ZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9
XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdk
YzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+
PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwi
IHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hh
cGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0
XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9
XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hh
cGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBp
ZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhz
aTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUz
XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+
PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18xY2JpNWsyXCIgaWQ9
XCJTZXJ2aWNlVGFza18xY2JpNWsyX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdp
ZHRoPVwiMTAwXCIgeD1cIjQ2NFwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1u
ZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMTJlbDZpeFwiIGlkPVwiU2Vx
dWVuY2VGbG93XzEyZWw2aXhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBl
PVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMzMxXCIgeHNp
OnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIzMzFc
IiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1c
IjQ2NFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxh
YmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjBcIiB4PVwiMzQ2XCIgeT1c
IjE5OS41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQ
TU5TaGFwZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzFpZHBmMXdcIiBpZD1cIkVuZEV2ZW50XzFp
ZHBmMXdfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI3
ODVcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwi
MTNcIiB3aWR0aD1cIjBcIiB4PVwiODAzXCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+
PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5j
ZUZsb3dfMTBkemlwYlwiIGlkPVwiU2VxdWVuY2VGbG93XzEwZHppcGJfZGlcIj48b21nZGk6d2F5
cG9pbnQgeD1cIjU2NFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdk
aTp3YXlwb2ludCB4PVwiNzg1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+
PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwi
IHg9XCI2NzQuNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5F
ZGdlPjwvYnBtbmRpOkJQTU5QbGFuZT48L2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25z
PiIsICJ2ZXJzaW9uIjogMX0sICJhY3Rpb25zIjogW119LCB7IndvcmtmbG93X2lkIjogMywgIm5h
bWUiOiAiRXhhbXBsZTogVGFzayBVdGlscyAtIEFkZCBOb3RlIHRvIFRhc2siLCAicHJvZ3JhbW1h
dGljX25hbWUiOiAidGFza191dGlsc19hZGRfbm90ZV90b190YXNrIiwgIm9iamVjdF90eXBlIjog
ImluY2lkZW50IiwgImRlc2NyaXB0aW9uIjogIkFuIGV4YW1wbGUgd29ya2Zsb3cgd2hpY2ggdGFr
ZXMgYSBUYXNrJ3MgSUQgYW5kIGEgYWRkcyBlaXRoZXIgYSBuZXcgdGV4dCBvciByaWNodGV4dCBu
b3RlIHRvIHRoYXQgdGFzay4iLCAiY3JlYXRvcl9pZCI6ICJhbGZyZWRAd2F5bmVjb3JwLmNvbSIs
ICJsYXN0X21vZGlmaWVkX2J5IjogImFsZnJlZEB3YXluZWNvcnAuY29tIiwgImxhc3RfbW9kaWZp
ZWRfdGltZSI6IDE1NjAyNDE1OTcxOTcsICJleHBvcnRfa2V5IjogInRhc2tfdXRpbHNfYWRkX25v
dGVfdG9fdGFzayIsICJ1dWlkIjogIjBlNGNjOWI0LTdiMTktNDIwYy04MDUxLWIzYzhhYWZiNzhh
YiIsICJjb250ZW50IjogeyJ3b3JrZmxvd19pZCI6ICJ0YXNrX3V0aWxzX2FkZF9ub3RlX3RvX3Rh
c2siLCAieG1sIjogIjw/eG1sIHZlcnNpb249XCIxLjBcIiBlbmNvZGluZz1cIlVURi04XCI/Pjxk
ZWZpbml0aW9ucyB4bWxucz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQv
TU9ERUxcIiB4bWxuczpicG1uZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAw
NTI0L0RJXCIgeG1sbnM6b21nZGM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUy
NC9EQ1wiIHhtbG5zOm9tZ2RpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQv
RElcIiB4bWxuczpyZXNpbGllbnQ9XCJodHRwOi8vcmVzaWxpZW50LmlibS5jb20vYnBtblwiIHht
bG5zOnhzZD1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hXCIgeG1sbnM6eHNpPVwi
aHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWEtaW5zdGFuY2VcIiB0YXJnZXROYW1lc3Bh
Y2U9XCJodHRwOi8vd3d3LmNhbXVuZGEub3JnL3Rlc3RcIj48cHJvY2VzcyBpZD1cInRhc2tfdXRp
bHNfYWRkX25vdGVfdG9fdGFza1wiIGlzRXhlY3V0YWJsZT1cInRydWVcIiBuYW1lPVwiRXhhbXBs
ZTogVGFzayBVdGlscyAtIEFkZCBOb3RlIHRvIFRhc2tcIj48ZG9jdW1lbnRhdGlvbj48IVtDREFU
QVtBbiBleGFtcGxlIHdvcmtmbG93IHdoaWNoIHRha2VzIGEgVGFzaydzIElEIGFuZCBhIGFkZHMg
ZWl0aGVyIGEgbmV3IHRleHQgb3IgcmljaHRleHQgbm90ZSB0byB0aGF0IHRhc2suXV0+PC9kb2N1
bWVudGF0aW9uPjxzdGFydEV2ZW50IGlkPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5n
PlNlcXVlbmNlRmxvd18wcDlrb2NsPC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNr
IGlkPVwiU2VydmljZVRhc2tfMDkyaXZ1bVwiIG5hbWU9XCJUYXNrIFV0aWxzOiBBZGQgTm90ZVwi
IHJlc2lsaWVudDp0eXBlPVwiZnVuY3Rpb25cIj48ZXh0ZW5zaW9uRWxlbWVudHM+PHJlc2lsaWVu
dDpmdW5jdGlvbiB1dWlkPVwiZGE2NTNhM2QtOTY0YS00ZTI4LWE2ZTAtYzQzNmRiMGI2Nzc0XCI+
e1wiaW5wdXRzXCI6e1wiNWUyM2IxNDUtMmJlYy00ZDgyLWFkNTAtZGMxMTVmMzU1MzYwXCI6e1wi
aW5wdXRfdHlwZVwiOlwic3RhdGljXCIsXCJzdGF0aWNfaW5wdXRcIjp7XCJtdWx0aXNlbGVjdF92
YWx1ZVwiOltdLFwic2VsZWN0X3ZhbHVlXCI6XCI2YTJlNGUxMy03ZjBjLTRkYWItYTQ2MC04NmIw
NDc2OWNlZWJcIn19LFwiNDRjNGM2MDAtZjIyZC00YmY1LWJiZjQtZWZhZDM1OGYwMmY1XCI6e1wi
aW5wdXRfdHlwZVwiOlwic3RhdGljXCIsXCJzdGF0aWNfaW5wdXRcIjp7XCJtdWx0aXNlbGVjdF92
YWx1ZVwiOltdLFwidGV4dF92YWx1ZVwiOlwidGVzdFwifX19LFwicHJlX3Byb2Nlc3Npbmdfc2Ny
aXB0XCI6XCJpbnB1dHMuaW5jaWRlbnRfaWQgPSBpbmNpZGVudC5pZCBcIn08L3Jlc2lsaWVudDpm
dW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMHA5a29j
bDwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18wYzBxM3BpPC9vdXRnb2luZz48L3Nl
cnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMHA5a29jbFwiIHNvdXJj
ZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VUYXNrXzA5Mml2
dW1cIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMGJhd3NpMFwiPjxpbmNvbWluZz5TZXF1ZW5j
ZUZsb3dfMGMwcTNwaTwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93IGlkPVwiU2Vx
dWVuY2VGbG93XzBjMHEzcGlcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18wOTJpdnVtXCIgdGFy
Z2V0UmVmPVwiRW5kRXZlbnRfMGJhd3NpMFwiLz48dGV4dEFubm90YXRpb24gaWQ9XCJUZXh0QW5u
b3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJlPC90ZXh0Pjwv
dGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIHNv
dXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRBbm5vdGF0aW9u
XzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJCUE1ORGlhZ3Jh
bV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRcIiBpZD1cIkJQ
TU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFydEV2ZW50XzE1
NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0
PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFi
ZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIxNTdcIiB5PVwi
MjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1O
U2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9XCJUZXh0QW5u
b3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwiIHdpZHRoPVwi
MTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1O
RWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFzc29jaWF0aW9u
XzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBlPVwib21nZGM6
UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNpOnR5cGU9XCJv
bWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5kaTpCUE1OU2hh
cGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18wOTJpdnVtXCIgaWQ9XCJTZXJ2aWNlVGFza18w
OTJpdnVtX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwiMTAwXCIgeD1c
IjQzN1wiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBt
bkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMHA5a29jbFwiIGlkPVwiU2VxdWVuY2VGbG93XzBwOWtv
Y2xfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRc
IiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiNDM3XCIgeHNpOnR5cGU9XCJvbWdkYzpQ
b2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9
XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIzMTcuNVwiIHk9XCIxODRcIi8+PC9icG1uZGk6QlBNTkxh
YmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiRW5k
RXZlbnRfMGJhd3NpMFwiIGlkPVwiRW5kRXZlbnRfMGJhd3NpMF9kaVwiPjxvbWdkYzpCb3VuZHMg
aGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjc2OFwiIHk9XCIxODhcIi8+PGJwbW5kaTpC
UE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI3ODZc
IiB5PVwiMjI3XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5k
aTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18wYzBxM3BpXCIgaWQ9XCJTZXF1
ZW5jZUZsb3dfMGMwcTNwaV9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNTM3XCIgeHNpOnR5cGU9
XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI2NTRcIiB4c2k6
dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjY1NFwi
IHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwi
NzY4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFi
ZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCI2NjlcIiB5PVwi
MTk5LjVcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjwvYnBtbmRpOkJQ
TU5QbGFuZT48L2JwbW5kaTpCUE1ORGlhZ3JhbT48L2RlZmluaXRpb25zPiIsICJ2ZXJzaW9uIjog
M30sICJhY3Rpb25zIjogW119LCB7IndvcmtmbG93X2lkIjogNiwgIm5hbWUiOiAiRXhhbXBsZTog
VGFzayBVdGlscyAtIENsb3NlIFRhc2siLCAicHJvZ3JhbW1hdGljX25hbWUiOiAidGFza191dGls
c19jbG9zZV90YXNrIiwgIm9iamVjdF90eXBlIjogImluY2lkZW50IiwgImRlc2NyaXB0aW9uIjog
IkFuIGV4YW1wbGUgd29ya2Zsb3cgd2hpY2ggdGFrZXMgYSBUYXNrIG5hbWUgZnJvbSBhbiBhY3Rp
dml0eSBmaWVsZCBhbmQgYXR0ZW1wdHMgdG8gY2xvc2UgdGhlIHRhc2suIiwgImNyZWF0b3JfaWQi
OiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAibGFzdF9tb2RpZmllZF9ieSI6ICJhbGZyZWRAd2F5
bmVjb3JwLmNvbSIsICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTU4MDk3NDM3ODkwLCAiZXhwb3J0
X2tleSI6ICJ0YXNrX3V0aWxzX2Nsb3NlX3Rhc2siLCAidXVpZCI6ICIwYWZkMzM1ZC00NmJjLTQz
OWItOTYyOS1mZGNmOWE0OTE2YzAiLCAiY29udGVudCI6IHsid29ya2Zsb3dfaWQiOiAidGFza191
dGlsc19jbG9zZV90YXNrIiwgInhtbCI6ICI8P3htbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9
XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9C
UE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBtbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3Nw
ZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9tZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3Nw
ZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdkaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVj
L0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxpZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0u
Y29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwi
IHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMub3JnLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIg
dGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3dy5jYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3Mg
aWQ9XCJ0YXNrX3V0aWxzX2Nsb3NlX3Rhc2tcIiBpc0V4ZWN1dGFibGU9XCJ0cnVlXCIgbmFtZT1c
IkV4YW1wbGU6IFRhc2sgVXRpbHMgLSBDbG9zZSBUYXNrXCI+PGRvY3VtZW50YXRpb24+QW4gZXhh
bXBsZSB3b3JrZmxvdyB3aGljaCB0YWtlcyBhIFRhc2sgbmFtZSBmcm9tIGFuIGFjdGl2aXR5IGZp
ZWxkIGFuZCBhdHRlbXB0cyB0byBjbG9zZSB0aGUgdGFzay48L2RvY3VtZW50YXRpb24+PHN0YXJ0
RXZlbnQgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzFm
dHo0Y2U8L291dGdvaW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2aWNlVGFz
a18wYmNkY3Z4XCIgbmFtZT1cIlRhc2sgVXRpbHM6IENsb3NlIFRhc2tcIiByZXNpbGllbnQ6dHlw
ZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6ZnVuY3Rpb24gdXVp
ZD1cIjcyMjc5OThhLTE2ZDMtNDZmNi1iYzg4LWNkYzYyNWZmOTlkYVwiPntcImlucHV0c1wiOnt9
LFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCJpbnB1dHMuaW5jaWRlbnRfaWQgPSBpbmNpZGVu
dC5pZFxcbmlucHV0cy50YXNrX25hbWUgPSBydWxlLnByb3BlcnRpZXMudGFza191dGlsc190YXNr
X25hbWVcIn08L3Jlc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWlu
Zz5TZXF1ZW5jZUZsb3dfMWZ0ejRjZTwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18w
ZWd3aTdpPC9vdXRnb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5j
ZUZsb3dfMWZ0ejRjZVwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJl
Zj1cIlNlcnZpY2VUYXNrXzBiY2RjdnhcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMHB3YmZs
b1wiPjxpbmNvbWluZz5TZXF1ZW5jZUZsb3dfMGVnd2k3aTwvaW5jb21pbmc+PC9lbmRFdmVudD48
c2VxdWVuY2VGbG93IGlkPVwiU2VxdWVuY2VGbG93XzBlZ3dpN2lcIiBzb3VyY2VSZWY9XCJTZXJ2
aWNlVGFza18wYmNkY3Z4XCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMHB3YmZsb1wiLz48dGV4dEFu
bm90YXRpb24gaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3
b3JrZmxvdyBoZXJlPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNz
b2NpYXRpb25fMXNldWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdl
dFJlZj1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRp
YWdyYW0gaWQ9XCJCUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9
XCJ1bmRlZmluZWRcIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVs
ZW1lbnQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwi
PjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIx
ODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9
XCI5MFwiIHg9XCIxNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpC
UE1OU2hhcGU+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8x
a3h4aXl0XCIgaWQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBo
ZWlnaHQ9XCIzMFwiIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpC
UE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVq
NDhcIiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2
OVwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4
PVwiMTUzXCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBN
TkVkZ2U+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18wYmNkY3Z4
XCIgaWQ9XCJTZXJ2aWNlVGFza18wYmNkY3Z4X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4
MFwiIHdpZHRoPVwiMTAwXCIgeD1cIjM4NVwiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBl
PjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWZ0ejRjZVwiIGlk
PVwiU2VxdWVuY2VGbG93XzFmdHo0Y2VfZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhz
aTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMzg1
XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+
PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyOTEuNVwiIHk9XCIx
ODRcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNo
YXBlIGJwbW5FbGVtZW50PVwiRW5kRXZlbnRfMHB3YmZsb1wiIGlkPVwiRW5kRXZlbnRfMHB3YmZs
b19kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjY5NFwi
IHk9XCIxODhcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wi
IHdpZHRoPVwiMFwiIHg9XCI3MTJcIiB5PVwiMjI3XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2Jw
bW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxv
d18wZWd3aTdpXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMGVnd2k3aV9kaVwiPjxvbWdkaTp3YXlwb2lu
dCB4PVwiNDg1XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndh
eXBvaW50IHg9XCI2OTRcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBt
bmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1c
IjU4OS41XCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+
PC9icG1uZGk6QlBNTlBsYW5lPjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+Iiwg
InZlcnNpb24iOiAyfSwgImFjdGlvbnMiOiBbXX0sIHsid29ya2Zsb3dfaWQiOiAyLCAibmFtZSI6
ICJFeGFtcGxlOiBUYXNrIFV0aWxzIC0gQ3JlYXRlIEN1c3RvbSBUYXNrIiwgInByb2dyYW1tYXRp
Y19uYW1lIjogInRhc2tfdXRpbHNfY3JlYXRlX2N1c3RvbV90YXNrIiwgIm9iamVjdF90eXBlIjog
ImluY2lkZW50IiwgImRlc2NyaXB0aW9uIjogIkFuIGV4YW1wbGUgd29ya2Zsb3cgdXNlZCB0byBk
ZW1vbnN0cmF0ZSBob3cgeW91IGNhbiBjcmVhdGUgYSBjdXN0b20gdGFzayB1c2luZyB0aGUgVGFz
ayBVdGlscyBJbnRlZ3JhdGlvbi4gVGhpcyBleGFtcGxlIHdvcmtmbG93IHNob3dzIGhvdyB5b3Ug
Y2FuIHVzZSB0aGUgUHJlLVByb2Nlc3Npbmcgc2NyaXB0IHRvIHByZXBhcmUgYSBjdXN0b20gSlNP
TiBQYXlsb2FkIGJhc2VkIG9uIHlvdXIgdXNlIGNhc2UuIiwgImNyZWF0b3JfaWQiOiAiYWxmcmVk
QHdheW5lY29ycC5jb20iLCAibGFzdF9tb2RpZmllZF9ieSI6ICJhbGZyZWRAd2F5bmVjb3JwLmNv
bSIsICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTU4OTU4NDEzNDU4LCAiZXhwb3J0X2tleSI6ICJ0
YXNrX3V0aWxzX2NyZWF0ZV9jdXN0b21fdGFzayIsICJ1dWlkIjogIjYxYzA5NWU1LWRiOTktNGRi
OS1hYTBhLThiOTQxMjVmMTk3OCIsICJjb250ZW50IjogeyJ3b3JrZmxvd19pZCI6ICJ0YXNrX3V0
aWxzX2NyZWF0ZV9jdXN0b21fdGFzayIsICJ4bWwiOiAiPD94bWwgdmVyc2lvbj1cIjEuMFwiIGVu
Y29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5zPVwiaHR0cDovL3d3dy5vbWcub3Jn
L3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93d3cub21n
Lm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93d3cub21n
Lm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21nZGk9XCJodHRwOi8vd3d3Lm9tZy5v
cmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2lsaWVudD1cImh0dHA6Ly9yZXNpbGll
bnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxT
Y2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYS1pbnN0
YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cuY2FtdW5kYS5vcmcvdGVzdFwiPjxw
cm9jZXNzIGlkPVwidGFza191dGlsc19jcmVhdGVfY3VzdG9tX3Rhc2tcIiBpc0V4ZWN1dGFibGU9
XCJ0cnVlXCIgbmFtZT1cIkV4YW1wbGU6IFRhc2sgVXRpbHMgLSBDcmVhdGUgQ3VzdG9tIFRhc2tc
Ij48ZG9jdW1lbnRhdGlvbj5BbiBleGFtcGxlIHdvcmtmbG93IHVzZWQgdG8gZGVtb25zdHJhdGUg
aG93IHlvdSBjYW4gY3JlYXRlIGEgY3VzdG9tIHRhc2sgdXNpbmcgdGhlIFRhc2sgVXRpbHMgSW50
ZWdyYXRpb24uIFRoaXMgZXhhbXBsZSB3b3JrZmxvdyBzaG93cyBob3cgeW91IGNhbiB1c2UgdGhl
IFByZS1Qcm9jZXNzaW5nIHNjcmlwdCB0byBwcmVwYXJlIGEgY3VzdG9tIEpTT04gUGF5bG9hZCBi
YXNlZCBvbiB5b3VyIHVzZSBjYXNlLjwvZG9jdW1lbnRhdGlvbj48c3RhcnRFdmVudCBpZD1cIlN0
YXJ0RXZlbnRfMTU1YXN4bVwiPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMXFuZHNvMTwvb3V0Z29p
bmc+PC9zdGFydEV2ZW50PjxzZXJ2aWNlVGFzayBpZD1cIlNlcnZpY2VUYXNrXzA5eGd3YnJcIiBu
YW1lPVwiVGFzayBVdGlsczogQ3JlYXRlIEN1c3RvbSBUYXNrXCIgcmVzaWxpZW50OnR5cGU9XCJm
dW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9XCJj
NDI4MWJkNC1kODcyLTQ1NjItYjdjOC1iMGUzNzlhZWE4YWRcIj57XCJpbnB1dHNcIjp7XCIzNjFm
NDIwZi0zMTg0LTQ0MjYtOWZkYy03NjQ5ZDU4ZWFjOTJcIjp7XCJpbnB1dF90eXBlXCI6XCJzdGF0
aWNcIixcInN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJ0ZXh0X2Nv
bnRlbnRfdmFsdWVcIjp7XCJmb3JtYXRcIjpcInRleHRcIixcImNvbnRlbnRcIjpcIntcXG5cXFwi
cmVxdWlyZWRcXFwiOiBmYWxzZSxcXG5cXFwiaW5zdHJfdGV4dFxcXCI6IFxcXCJDbG9zZSBvdXQg
dGhpcyByZXF1aXJlZCBUYXNrXFxcIixcXG5cXFwicGhhc2VfaWRcXFwiOiBcXFwiSW5pdGlhbFxc
XCJcXG59XCJ9fX19LFwicHJlX3Byb2Nlc3Npbmdfc2NyaXB0XCI6XCIjIyMjIyMjIyMjIyMjIyMj
IyMjIyMjIyMjIyMjIyMjIyMjIyMjIyNcXG4jIyMgRGVmaW5lIHByZS1wcm9jZXNzaW5nIGZ1bmN0
aW9ucyAjIyNcXG4jIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyMjIyNcXG5wYXls
b2FkID0ge1xcblxcXCJyZXF1aXJlZFxcXCI6IFRydWUsXFxuXFxcImluc3RyX3RleHRcXFwiOiBc
XFwiQ2xvc2Ugb3V0IHRoaXMgcmVxdWlyZWQgVGFza1xcXCIsXFxuXFxcInBoYXNlX2lkXFxcIjog
XFxcIkluaXRpYWxcXFwiXFxufVxcblxcbmRlZiBkaWN0X3RvX2pzb25fc3RyKGQpOlxcbiAgXFxc
IlxcXCJcXFwiRnVuY3Rpb24gdGhhdCBjb252ZXJ0cyBhIGRpY3Rpb25hcnkgaW50byBhIEpTT04g
c3RyaW5nc2VsZi5cXG4gICAgIFN1cHBvcnRzIGJhc2VzdHJpbmcsIGJvb2wgYW5kIGludC5cXG4g
ICAgIElmIHRoZSB2YWx1ZSBpcyBOb25lLCBpdCBzZXRzIGl0IHRvIEZhbHNlXFxcIlxcXCJcXFwi
XFxuXFxuICBqc29uX3N0ciA9ICdcXFwieyB7MH0gfVxcXCInXFxuICBqc29uX2VudHJ5ID0gJ1xc
XCJ7MH1cXFwiOnsxfSdcXG4gIGpzb25fZW50cnlfc3RyID0gJ1xcXCJ7MH1cXFwiOlxcXCJ7MX1c
XFwiJ1xcbiAgZW50cmllcyA9IFtdIFxcbiAgXFxuICBmb3IgZW50cnkgaW4gZDpcXG4gICAga2V5
ID0gZW50cnlcXG4gICAgdmFsdWUgPSBkW2VudHJ5XVxcbiAgICBcXG4gICAgICBcXG4gICAgaWYg
dmFsdWUgaXMgTm9uZTpcXG4gICAgICB2YWx1ZSA9IEZhbHNlXFxuICAgICAgXFxuICAgIFxcbiAg
ICBpZiBpc2luc3RhbmNlKHZhbHVlLCBiYXNlc3RyaW5nKTpcXG4gICAgICBlbnRyaWVzLmFwcGVu
ZChqc29uX2VudHJ5X3N0ci5mb3JtYXQoa2V5LCB2YWx1ZSkpXFxuICAgIFxcbiAgICBlbGlmIGlz
aW5zdGFuY2UodmFsdWUsIGJvb2wpOlxcbiAgICAgIHZhbHVlID0gJ3RydWUnIGlmIHZhbHVlID09
IFRydWUgZWxzZSAnZmFsc2UnXFxuICAgICAgZW50cmllcy5hcHBlbmQoanNvbl9lbnRyeS5mb3Jt
YXQoa2V5LCB2YWx1ZSkpXFxuICAgIFxcbiAgICBlbHNlOlxcbiAgICAgIGVudHJpZXMuYXBwZW5k
KGpzb25fZW50cnkuZm9ybWF0KGtleSwgdmFsdWUpKVxcbiAgXFxuICByZXR1cm4gJ3snICsgJywn
LmpvaW4oZW50cmllcykgKyAnfSdcXG5cXG4jIElmIHlvdSBkb24ndCBhbHJlYWR5IGhhdmUgc29t
ZXRoaW5nIGluIHRhc2tfdXRpbHNfcGF5bG9hZFxcbmlmIGlucHV0cy50YXNrX3V0aWxzX3BheWxv
YWQgIT0gTm9uZTogXFxuICAjIHByZXBhcmUgYSBKU09OIHBheWxvYWQgdXNpbmcgYWJvdmUgY29k
ZTsgXFxuICBpbnB1dHMudGFza191dGlsc19wYXlsb2FkID0gZGljdF90b19qc29uX3N0cihwYXls
b2FkKVxcblxcbiMgVGFrZSB0aGUgaW5jaWRlbnQgaWQgZnJvbSB0aGlzIGluY2lkZW50XFxuaW5w
dXRzLmluY2lkZW50X2lkID0gaW5jaWRlbnQuaWRcXG5cXG4jIElmIHlvdSBzcGVjaWZpZWQgYSB2
YWx1ZSBpbiB0aGUgQWN0aXZpdHkgRmllbGQgdGhlbiB1c2UgaXQgZm9yIHRhc2tfbmFtZVxcbmlm
IHJ1bGUucHJvcGVydGllcy50YXNrX3V0aWxzX3Rhc2tfbmFtZSAhPSBOb25lOlxcbiAgaW5wdXRz
LnRhc2tfbmFtZSA9IHJ1bGUucHJvcGVydGllcy50YXNrX3V0aWxzX3Rhc2tfbmFtZVwifTwvcmVz
aWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVudHM+PGluY29taW5nPlNlcXVlbmNlRmxv
d18xcW5kc28xPC9pbmNvbWluZz48b3V0Z29pbmc+U2VxdWVuY2VGbG93XzF5N3lnczQ8L291dGdv
aW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBpZD1cIlNlcXVlbmNlRmxvd18xcW5kc28x
XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRh
c2tfMDl4Z3diclwiLz48ZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8wOXQxYXMzXCI+PGluY29taW5n
PlNlcXVlbmNlRmxvd18xeTd5Z3M0PC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cg
aWQ9XCJTZXF1ZW5jZUZsb3dfMXk3eWdzNFwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzA5eGd3
YnJcIiB0YXJnZXRSZWY9XCJFbmRFdmVudF8wOXQxYXMzXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1c
IlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIj48dGV4dD5TdGFydCB5b3VyIHdvcmtmbG93IGhlcmU8
L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRpb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1
ajQ4XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFu
bm90YXRpb25fMWt4eGl5dFwiLz48L3Byb2Nlc3M+PGJwbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQ
TU5EaWFncmFtXzFcIj48YnBtbmRpOkJQTU5QbGFuZSBicG1uRWxlbWVudD1cInVuZGVmaW5lZFwi
IGlkPVwiQlBNTlBsYW5lXzFcIj48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0
RXZlbnRfMTU1YXN4bVwiIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5k
cyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwiMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRp
OkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1
N1wiIHk9XCIyMjNcIi8+PC9icG1uZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBt
bmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIiBpZD1c
IlRleHRBbm5vdGF0aW9uXzFreHhpeXRfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIg
d2lkdGg9XCIxMDBcIiB4PVwiOTlcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBt
bmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiQXNzb2NpYXRpb25fMXNldWo0OFwiIGlkPVwiQXNz
b2NpYXRpb25fMXNldWo0OF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9
XCJvbWdkYzpQb2ludFwiIHk9XCIyMjBcIi8+PG9tZ2RpOndheXBvaW50IHg9XCIxNTNcIiB4c2k6
dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1ORWRnZT48YnBtbmRp
OkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VUYXNrXzA5eGd3YnJcIiBpZD1cIlNlcnZp
Y2VUYXNrXzA5eGd3YnJfZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIx
MDBcIiB4PVwiNDMwXCIgeT1cIjE2NlwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1O
RWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xcW5kc28xXCIgaWQ9XCJTZXF1ZW5jZUZs
b3dfMXFuZHNvMV9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdk
YzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI0MzBcIiB4c2k6dHlwZT1c
Im9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRz
IGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjMxNFwiIHk9XCIxODRcIi8+PC9icG1uZGk6
QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50
PVwiRW5kRXZlbnRfMDl0MWFzM1wiIGlkPVwiRW5kRXZlbnRfMDl0MWFzM19kaVwiPjxvbWdkYzpC
b3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjc4MVwiIHk9XCIxODhcIi8+PGJw
bW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9
XCI3OTlcIiB5PVwiMjI3XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+
PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xeTd5Z3M0XCIgaWQ9
XCJTZXF1ZW5jZUZsb3dfMXk3eWdzNF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNTMwXCIgeHNp
OnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI3ODFc
IiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJlbD48
b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjY1NS41XCIgeT1cIjE4
NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBNTlBs
YW5lPjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+IiwgInZlcnNpb24iOiAzfSwg
ImFjdGlvbnMiOiBbXX1dLCAicm9sZXMiOiBbXSwgIndvcmtzcGFjZXMiOiBbXSwgImZ1bmN0aW9u
cyI6IFt7ImlkIjogMzQsICJuYW1lIjogInRhc2tfdXRpbHNfYWRkX25vdGUiLCAiZGlzcGxheV9u
YW1lIjogIlRhc2sgVXRpbHM6IEFkZCBOb3RlIiwgImRlc2NyaXB0aW9uIjogeyJmb3JtYXQiOiAi
dGV4dCIsICJjb250ZW50IjogIkEgZnVuY3Rpb24gd2hpY2ggdGFrZXMgaW4gdGhlIElEIG9mIGFu
IGV4aXN0aW5nIFRhc2sgYW5kIHRoZW4gYWRkcyBlaXRoZXIgYSBwbGFpbiBvciByaWNodGV4dCBu
b3RlIHRvIHRoZSBUYXNrLiJ9LCAiZGVzdGluYXRpb25faGFuZGxlIjogImZuX3Rhc2tfdXRpbHMi
LCAiZXhwb3J0X2tleSI6ICJ0YXNrX3V0aWxzX2FkZF9ub3RlIiwgInV1aWQiOiAiZGE2NTNhM2Qt
OTY0YS00ZTI4LWE2ZTAtYzQzNmRiMGI2Nzc0IiwgInZlcnNpb24iOiAxLCAiY3JlYXRvciI6IHsi
aWQiOiAzOSwgInR5cGUiOiAidXNlciIsICJuYW1lIjogImFsZnJlZEB3YXluZWNvcnAuY29tIiwg
ImRpc3BsYXlfbmFtZSI6ICJBbGZyZWQgUGVubnl3b3J0aCJ9LCAibGFzdF9tb2RpZmllZF9ieSI6
IHsiaWQiOiAzOSwgInR5cGUiOiAidXNlciIsICJuYW1lIjogImFsZnJlZEB3YXluZWNvcnAuY29t
IiwgImRpc3BsYXlfbmFtZSI6ICJBbGZyZWQgUGVubnl3b3J0aCJ9LCAibGFzdF9tb2RpZmllZF90
aW1lIjogMTU1ODA5MjA0MDU0MCwgInZpZXdfaXRlbXMiOiBbeyJzdGVwX2xhYmVsIjogbnVsbCwg
InNob3dfaWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAi
X19mdW5jdGlvbiIsICJjb250ZW50IjogIjgxMWU5OWQ3LWQxOTQtNGNlOC04NmNjLWFmZjVlMDFh
Yjg1YyIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAi
c2hvd19pZiI6IG51bGwsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJf
X2Z1bmN0aW9uIiwgImNvbnRlbnQiOiAiYmEzMTgyNjEtZWQ2YS00YTM4LWExODctOWUwYjY4ZDE2
MDRmIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJz
aG93X2lmIjogbnVsbCwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9f
ZnVuY3Rpb24iLCAiY29udGVudCI6ICJlOWFmMDc3YS0zYjFjLTRmZjctYmVmYi04ZGZlZGQzYzdk
YmMiLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlfSwgeyJzdGVwX2xhYmVsIjogbnVsbCwgInNo
b3dfaWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19m
dW5jdGlvbiIsICJjb250ZW50IjogIjVlMjNiMTQ1LTJiZWMtNGQ4Mi1hZDUwLWRjMTE1ZjM1NTM2
MCIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hv
d19pZiI6IG51bGwsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1
bmN0aW9uIiwgImNvbnRlbnQiOiAiNDRjNGM2MDAtZjIyZC00YmY1LWJiZjQtZWZhZDM1OGYwMmY1
IiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZX1dLCAid29ya2Zsb3dzIjogW3sid29ya2Zsb3df
aWQiOiAzLCAibmFtZSI6ICJFeGFtcGxlOiBUYXNrIFV0aWxzIC0gQWRkIE5vdGUgdG8gVGFzayIs
ICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJ0YXNrX3V0aWxzX2FkZF9ub3RlX3RvX3Rhc2siLCAib2Jq
ZWN0X3R5cGUiOiAiaW5jaWRlbnQiLCAiZGVzY3JpcHRpb24iOiBudWxsLCAidXVpZCI6IG51bGws
ICJhY3Rpb25zIjogW119XX0sIHsiaWQiOiAzNSwgIm5hbWUiOiAidGFza191dGlsc19jbG9zZV90
YXNrIiwgImRpc3BsYXlfbmFtZSI6ICJUYXNrIFV0aWxzOiBDbG9zZSBUYXNrIiwgImRlc2NyaXB0
aW9uIjogeyJmb3JtYXQiOiAidGV4dCIsICJjb250ZW50IjogIkEgZnVuY3Rpb24gd2hpY2ggd2ls
bCBhdHRlbXB0IHRvIGNsb3NlIGVpdGhlciBhIFN5c3RlbSBvciBDdXN0b20gdGFzayB1c2luZyB0
aGUgUkVTVCBBUEkuIn0sICJkZXN0aW5hdGlvbl9oYW5kbGUiOiAiZm5fdGFza191dGlscyIsICJl
eHBvcnRfa2V5IjogInRhc2tfdXRpbHNfY2xvc2VfdGFzayIsICJ1dWlkIjogIjcyMjc5OThhLTE2
ZDMtNDZmNi1iYzg4LWNkYzYyNWZmOTlkYSIsICJ2ZXJzaW9uIjogMSwgImNyZWF0b3IiOiB7Imlk
IjogMzksICJ0eXBlIjogInVzZXIiLCAibmFtZSI6ICJhbGZyZWRAd2F5bmVjb3JwLmNvbSIsICJk
aXNwbGF5X25hbWUiOiAiQWxmcmVkIFBlbm55d29ydGgifSwgImxhc3RfbW9kaWZpZWRfYnkiOiB7
ImlkIjogMzksICJ0eXBlIjogInVzZXIiLCAibmFtZSI6ICJhbGZyZWRAd2F5bmVjb3JwLmNvbSIs
ICJkaXNwbGF5X25hbWUiOiAiQWxmcmVkIFBlbm55d29ydGgifSwgImxhc3RfbW9kaWZpZWRfdGlt
ZSI6IDE1NTgwOTIwNDA1NDAsICJ2aWV3X2l0ZW1zIjogW3sic3RlcF9sYWJlbCI6IG51bGwsICJz
aG93X2lmIjogbnVsbCwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9f
ZnVuY3Rpb24iLCAiY29udGVudCI6ICI4MTFlOTlkNy1kMTk0LTRjZTgtODZjYy1hZmY1ZTAxYWI4
NWMiLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlfSwgeyJzdGVwX2xhYmVsIjogbnVsbCwgInNo
b3dfaWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19m
dW5jdGlvbiIsICJjb250ZW50IjogImJhMzE4MjYxLWVkNmEtNGEzOC1hMTg3LTllMGI2OGQxNjA0
ZiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hv
d19pZiI6IG51bGwsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1
bmN0aW9uIiwgImNvbnRlbnQiOiAiZTlhZjA3N2EtM2IxYy00ZmY3LWJlZmItOGRmZWRkM2M3ZGJj
IiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZX1dLCAid29ya2Zsb3dzIjogW3sid29ya2Zsb3df
aWQiOiA2LCAibmFtZSI6ICJFeGFtcGxlOiBUYXNrIFV0aWxzIC0gQ2xvc2UgVGFzayIsICJwcm9n
cmFtbWF0aWNfbmFtZSI6ICJ0YXNrX3V0aWxzX2Nsb3NlX3Rhc2siLCAib2JqZWN0X3R5cGUiOiAi
aW5jaWRlbnQiLCAiZGVzY3JpcHRpb24iOiBudWxsLCAidXVpZCI6IG51bGwsICJhY3Rpb25zIjog
W119XX0sIHsiaWQiOiAzNiwgIm5hbWUiOiAidGFza191dGlsc19jcmVhdGUiLCAiZGlzcGxheV9u
YW1lIjogIlRhc2sgVXRpbHM6IENyZWF0ZSBDdXN0b20gVGFzayIsICJkZXNjcmlwdGlvbiI6IHsi
Zm9ybWF0IjogInRleHQiLCAiY29udGVudCI6ICJBIGZ1bmN0aW9uIHdoaWNoIGNhbiBiZSB1c2Vk
IHRvIGNyZWF0ZSBhIGN1c3RvbSB0YXNrIHVzaW5nIHRoZSBSRVNUIEFQSS4ifSwgImRlc3RpbmF0
aW9uX2hhbmRsZSI6ICJmbl90YXNrX3V0aWxzIiwgImV4cG9ydF9rZXkiOiAidGFza191dGlsc19j
cmVhdGUiLCAidXVpZCI6ICJjNDI4MWJkNC1kODcyLTQ1NjItYjdjOC1iMGUzNzlhZWE4YWQiLCAi
dmVyc2lvbiI6IDEsICJjcmVhdG9yIjogeyJpZCI6IDM5LCAidHlwZSI6ICJ1c2VyIiwgIm5hbWUi
OiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAiZGlzcGxheV9uYW1lIjogIkFsZnJlZCBQZW5ueXdv
cnRoIn0sICJsYXN0X21vZGlmaWVkX2J5IjogeyJpZCI6IDM5LCAidHlwZSI6ICJ1c2VyIiwgIm5h
bWUiOiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAiZGlzcGxheV9uYW1lIjogIkFsZnJlZCBQZW5u
eXdvcnRoIn0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTU4MDkyMDQwNTQwLCAidmlld19pdGVt
cyI6IFt7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6IG51bGwsICJlbGVtZW50IjogImZp
ZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgImNvbnRlbnQiOiAiODExZTk5
ZDctZDE5NC00Y2U4LTg2Y2MtYWZmNWUwMWFiODVjIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxz
ZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVsbCwgImVsZW1lbnQiOiAiZmll
bGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAiY29udGVudCI6ICJlOWFmMDc3
YS0zYjFjLTRmZjctYmVmYi04ZGZlZGQzYzdkYmMiLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNl
fSwgeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVs
ZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJjb250ZW50IjogIjM2MWY0MjBm
LTMxODQtNDQyNi05ZmRjLTc2NDlkNThlYWM5MiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9
XSwgIndvcmtmbG93cyI6IFt7IndvcmtmbG93X2lkIjogMiwgIm5hbWUiOiAiRXhhbXBsZTogVGFz
ayBVdGlscyAtIENyZWF0ZSBDdXN0b20gVGFzayIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJ0YXNr
X3V0aWxzX2NyZWF0ZV9jdXN0b21fdGFzayIsICJvYmplY3RfdHlwZSI6ICJpbmNpZGVudCIsICJk
ZXNjcmlwdGlvbiI6IG51bGwsICJ1dWlkIjogbnVsbCwgImFjdGlvbnMiOiBbXX1dfSwgeyJpZCI6
IDM3LCAibmFtZSI6ICJ0YXNrX3V0aWxzX3VwZGF0ZV90YXNrIiwgImRpc3BsYXlfbmFtZSI6ICJU
YXNrIFV0aWxzOiBVcGRhdGUgVGFzayIsICJkZXNjcmlwdGlvbiI6IHsiZm9ybWF0IjogInRleHQi
LCAiY29udGVudCI6ICJBIGZ1bmN0aW9uIHdoaWNoIHRha2VzIGluIHRoZSBJRCBvZiBhbiBleGlz
dGluZyBUYXNrIGFuZCBhIHRhc2tfdXRpbHNfcGF5bG9hZCB3aGljaCBpcyBhIEpTT04gU3RyaW5n
IG9mIHRoZSB0YXNrIGRldGFpbHMgdG8gdXBkYXRlLiJ9LCAiZGVzdGluYXRpb25faGFuZGxlIjog
ImZuX3Rhc2tfdXRpbHMiLCAiZXhwb3J0X2tleSI6ICJ0YXNrX3V0aWxzX3VwZGF0ZV90YXNrIiwg
InV1aWQiOiAiYmQ4ZGY1M2EtNjQ0My00MGYxLTkwZjktZWI1OTkzOTk2OTI1IiwgInZlcnNpb24i
OiAxLCAiY3JlYXRvciI6IHsiaWQiOiAzOSwgInR5cGUiOiAidXNlciIsICJuYW1lIjogImFsZnJl
ZEB3YXluZWNvcnAuY29tIiwgImRpc3BsYXlfbmFtZSI6ICJBbGZyZWQgUGVubnl3b3J0aCJ9LCAi
bGFzdF9tb2RpZmllZF9ieSI6IHsiaWQiOiAzOSwgInR5cGUiOiAidXNlciIsICJuYW1lIjogImFs
ZnJlZEB3YXluZWNvcnAuY29tIiwgImRpc3BsYXlfbmFtZSI6ICJBbGZyZWQgUGVubnl3b3J0aCJ9
LCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU1ODA5MjA0MDU0MCwgInZpZXdfaXRlbXMiOiBbeyJz
dGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlk
IiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJjb250ZW50IjogIjgxMWU5OWQ3LWQxOTQt
NGNlOC04NmNjLWFmZjVlMDFhYjg1YyIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0
ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6IG51bGwsICJlbGVtZW50IjogImZpZWxkX3V1aWQi
LCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgImNvbnRlbnQiOiAiYmEzMTgyNjEtZWQ2YS00
YTM4LWExODctOWUwYjY4ZDE2MDRmIiwgInNob3dfbGlua19oZWFkZXIiOiBmYWxzZX0sIHsic3Rl
cF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVsbCwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIs
ICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAiY29udGVudCI6ICJlOWFmMDc3YS0zYjFjLTRm
ZjctYmVmYi04ZGZlZGQzYzdkYmMiLCAic2hvd19saW5rX2hlYWRlciI6IGZhbHNlfSwgeyJzdGVw
X2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwg
ImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJjb250ZW50IjogIjM2MWY0MjBmLTMxODQtNDQy
Ni05ZmRjLTc2NDlkNThlYWM5MiIsICJzaG93X2xpbmtfaGVhZGVyIjogZmFsc2V9XSwgIndvcmtm
bG93cyI6IFt7IndvcmtmbG93X2lkIjogNSwgIm5hbWUiOiAiRXhhbXBsZTogVGFzayBVdGlscyAt
IE1hcmsgVGFzayBhIE9wdGlvbmFsIiwgInByb2dyYW1tYXRpY19uYW1lIjogInRhc2tfdXRpbHNf
bWFya190YXNrX29wdGlvbmFsIiwgIm9iamVjdF90eXBlIjogInRhc2siLCAiZGVzY3JpcHRpb24i
OiBudWxsLCAidXVpZCI6IG51bGwsICJhY3Rpb25zIjogW119XX1dfQ==
"""
) | python |
"""This module contains the class declaration for the MapViewWidget."""
from math import pi, sin, cos
from PySide6.QtCore import QPoint, QPointF, Qt
from PySide6.QtGui import QCursor, QPainter, QPainterPath, QPen
from PySide6.QtWidgets import QApplication, QWidget
from sailsim.sailor.Commands import Waypoint
# Map constants
ZoomInFactor = 1.25
ZoomOutFactor = 1 / ZoomInFactor
ScrollStep = 10
def pointsToPath(points, jump=1):
"""Convert a pointlist into a QPainterPath."""
path = QPainterPath()
path.moveTo(QPointF(points[0][0], -points[0][1]))
for i in range(0, len(points), jump)[1:]:
point = points[i]
path.lineTo(QPointF(point[0], -point[1]))
return path
def boatPainterPath():
"""Return the QPainterPath for drawing a boat."""
boat = QPainterPath()
boat.moveTo(0, -2)
boat.cubicTo(QPointF(1, -.5), QPointF(1, .5), QPointF(.8, 2.2))
boat.lineTo(-.8, 2.2)
boat.cubicTo(QPointF(-1, .5), QPointF(-1, -.5), QPointF(0, -2))
return boat
class MapViewWidget(QWidget):
"""Map Widget that displays the boat and its path."""
windowWidth = 0
windowHeight = 0
offset = QPoint()
scale = 4
lastDragPos = QPoint()
waypointsLink = QPainterPath()
waypoints = QPainterPath()
path = QPainterPath()
# Boat properties
boatPos = QPointF(0, 0)
boatDir = 0
boatMainSailAngle = 0
boatRudderAngle = 0
    # Display properties
displayWaypointLink = True
displayWaypoints = True
displayPath = True
displayMainSail = True
displayRudder = True
def __init__(self, parent=None):
super(MapViewWidget, self).__init__(parent)
self.setWindowTitle("MapViewWidget")
self.setCursor(Qt.CrossCursor)
self.resize(550, 400)
def paintEvent(self, event):
painter = QPainter(self)
painter.setRenderHint(QPainter.Antialiasing, True)
painter.translate(self.offset)
painter.scale(self.scale, self.scale)
if self.displayWaypointLink:
painter.setPen(QPen(Qt.gray, .1, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
painter.drawPath(self.waypointsLink)
if self.displayWaypoints:
painter.setPen(QPen(Qt.blue, .1, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
painter.drawPath(self.waypoints)
if self.displayPath:
painter.setPen(QPen(Qt.darkGray, 4 / self.scale, Qt.SolidLine, Qt.RoundCap, Qt.RoundJoin))
painter.drawPath(self.path)
painter.translate(self.boatPos)
painter.rotate(self.boatDir)
painter.setPen(Qt.NoPen)
painter.setBrush(Qt.black)
painter.drawPath(boatPainterPath())
if self.displayMainSail:
painter.setPen(QPen(Qt.green, 0.1, Qt.SolidLine, Qt.RoundCap))
painter.drawLine(QPointF(0, 0), QPointF(sin(self.boatMainSailAngle), cos(self.boatMainSailAngle)) * 2)
if self.displayRudder:
painter.setPen(QPen(Qt.blue, 0.1, Qt.SolidLine, Qt.RoundCap))
painter.drawLine(QPointF(0, 2.2), QPointF(sin(self.boatRudderAngle), cos(self.boatRudderAngle)) * 0.5 + QPointF(0, 2.2))
def setPath(self, path):
"""Change the path and updates the painter."""
self.path = path
self.update()
def setWaypoints(self, commands):
"""Display the waypoints in a commandList on mapView."""
if len(commands) > 0:
wpPath = QPainterPath()
# TODO find out boat starting coordinates
wpLinkList = [[0, 0]]
for command in commands:
if isinstance(command, Waypoint):
wpPath.addEllipse(QPoint(command.destX, -command.destY), command.radius, command.radius)
wpLinkList.append([command.destX, command.destY])
self.waypointsLink = pointsToPath(wpLinkList)
self.waypoints = wpPath
self.update()
def viewFrame(self, frame):
"""Set the boat to a position saved in a frame given."""
self.setBoat(frame.boatPosX, frame.boatPosY, frame.boatDirection, frame.boatMainSailAngle, frame.boatRudderAngle)
def setBoat(self, posX, posY, direction, mainSailAngle, rudderAngle):
"""Set boat to a position given."""
self.boatPos = QPointF(posX, -posY)
self.boatDir = direction / pi * 180
self.boatMainSailAngle = -mainSailAngle
self.boatRudderAngle = rudderAngle
self.update()
def resizeEvent(self, event):
"""Keep center of the map in the center."""
width, height = event.size().width(), event.size().height()
        self.offset -= QPoint((self.windowWidth - width) // 2, (self.windowHeight - height) // 2)  # integer division: QPoint expects ints
self.windowWidth, self.windowHeight = width, height
def keyPressEvent(self, event):
"""Move mapView according to the button pressed."""
# TODO is this working?
if event.key() == Qt.Key_Plus:
self.zoom(ZoomInFactor)
elif event.key() == Qt.Key_Minus:
self.zoom(ZoomOutFactor)
elif event.key() == Qt.Key_Left:
self.scroll(+ScrollStep, 0)
elif event.key() == Qt.Key_Right:
self.scroll(-ScrollStep, 0)
elif event.key() == Qt.Key_Down:
self.scroll(0, -ScrollStep)
elif event.key() == Qt.Key_Up:
self.scroll(0, +ScrollStep)
else:
super(MapViewWidget, self).keyPressEvent(event)
def wheelEvent(self, event):
"""Zoom in and out when mouse wheel is moved."""
numDegrees = event.angleDelta().y() / 8
numSteps = numDegrees / 32
self.zoom(pow(ZoomInFactor, numSteps))
def mousePressEvent(self, event):
if event.buttons() == Qt.LeftButton:
self.lastDragPos = QPoint(event.pos())
def mouseMoveEvent(self, event):
if event.buttons() & Qt.LeftButton:
self.offset += event.pos() - self.lastDragPos
self.lastDragPos = QPoint(event.pos())
self.update()
def zoom(self, zoomFactor):
"""Zoom the mapView and keep mouse in the same spot."""
self.scale *= zoomFactor
self.offset += (self.mapFromGlobal(QCursor.pos()) - self.offset) * (1 - zoomFactor)
self.update()
def scroll(self, deltaX, deltaY):
"""Translate mapView."""
self.offset += QPoint(deltaX, deltaY)
self.update()
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
widget = MapViewWidget()
widget.show()
    r = app.exec()
sys.exit(r)
| python |
from .uia_control import UIAControl
class HeaderItem(UIAControl):
CONTROL_TYPE = "HeaderItem"
| python |
#!/usr/bin/env python3
# This is awful. What it does is scan the specified root paths, recursively,
# for zip files (checking magic number, not extension), and unzips those which
# contain only a single file. The file's name inside the zip is ignored, except
# for the extension, which is combined with the extension-less name of the zip.
# The original zip is then moved to {root}/__unzipped/{zip_path}
# This is done so unzips preserve renames done to the zip files after the
# disk imaging program has written them.
# usage: $0 ROOTDIR+
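# For instance (hypothetical paths): run on ROOTDIR /data where
# /data/floppies/disk01.zip contains a single member IMAGE.IMA; this writes
# /data/floppies/disk01.ima (stamped with the earlier of the zip's and the
# member's mtimes) and moves the zip to /data/__unzipped/floppies/disk01.zip.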
import calendar
import itertools
import os
import shutil
import sys
import zipfile
UNZIPPED_DIRNAME = "__unzipped"
def strip_exts(s):
# yes, this is wrong and stupid and you should use os.path.splitext, but
# this covers the mess i made of naming conventions
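    # Roughly: trailing dot-segments of 2-3 characters (extension-like) are
    # dropped. Illustrative examples: "backup.tar.gz" -> "backup",
    # "photos_2019.zip" -> "photos_2019", ".config.backup.zip" -> ".config.backup".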
bits = s.split(".")
if len(bits) == 1:
return s
if bits[0] == "":
prefix = bits[0:2]
domain = bits[2:]
else:
prefix = bits[0:1]
domain = bits[1:]
truncated = itertools.dropwhile(lambda s: 2 <= len(s) <= 3, reversed(domain))
return ".".join(itertools.chain(prefix, reversed(list(truncated))))
def unzip_image(zip_path, move_to_on_success=None):
with zipfile.ZipFile(zip_path, "r") as archive:
members = archive.infolist()
if len(members) != 1:
print("Contains multiple files: {0}".format(zip_path), file=sys.stderr)
return
base_name = strip_exts(zip_path)
member = members[0]
_, member_ext = os.path.splitext(member.filename)
out_name = base_name + member_ext.lower()
print("{0} >> {1}".format(member.filename, out_name))
with open(out_name, "wb") as out_stream:
zstream = archive.open(member, "r")
try:
out_stream.write(zstream.read())
finally:
zstream.close()
volume_mtime = os.path.getmtime(zip_path)
member_mtime = calendar.timegm(member.date_time)
earliest = min(volume_mtime, member_mtime)
os.utime(out_name, (earliest, earliest))
if move_to_on_success:
root = os.path.dirname(move_to_on_success)
relpath = os.path.relpath(zip_path, root)
move_dest = os.path.join(move_to_on_success, relpath)
print("{0} -> {1}".format(zip_path, move_dest))
move_parent = os.path.dirname(move_dest)
try:
os.makedirs(move_parent)
except EnvironmentError:
if os.path.isdir(move_parent):
pass
else:
raise
shutil.move(zip_path, move_dest)
def main():
for root in sys.argv[1:]:
success_path = os.path.join(root, UNZIPPED_DIRNAME)
for path, dirnames, filenames in os.walk(root):
try:
dirnames.pop(dirnames.index(UNZIPPED_DIRNAME))
except ValueError:
pass
for filename in filenames:
filepath = os.path.join(path, filename)
if zipfile.is_zipfile(filepath):
unzip_image(filepath, success_path)
if __name__ == '__main__':
main()
| python |
nun = int(input('Enter a number to see its multiplication table: '))
print('-' * 12)
print('{} x {:2} = {}'.format(nun, 1, nun*1))
print('{} x {:2} = {}'.format(nun, 2, nun*2))
print('{} x {:2} = {}'.format(nun, 3, nun*3))
print('{} x {:2} = {}'.format(nun, 4, nun*4))
print('{} x {:2} = {}'.format(nun, 5, nun*5))
print('{} x {:2} = {}'.format(nun, 6, nun*6))
print('{} x {:2} = {}'.format(nun, 7, nun*7))
print('{} x {:2} = {}'.format(nun, 8, nun*8))
print('{} x {:2} = {}'.format(nun, 9, nun*9))
print('{} x {:2} = {}'.format(nun, 10, nun*10))
print('-' * 12)
# Copyright (C) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See LICENSE in project root for information.
from mmlspark.vw._VowpalWabbitContextualBandit import _VowpalWabbitContextualBandit, _VowpalWabbitContextualBanditModel
from pyspark.ml.common import inherit_doc
from pyspark import SparkContext, SQLContext
from pyspark.sql import DataFrame
from pyspark.ml.wrapper import JavaWrapper
from pyspark.ml.common import _py2java, _java2py
def to_java_params(sc, model, pyParamMap):
paramMap = JavaWrapper._new_java_obj("org.apache.spark.ml.param.ParamMap")
for param, value in pyParamMap.items():
java_param = model._java_obj.getParam(param.name)
java_value = _py2java(sc, value)
paramMap.put([java_param.w(java_value)])
return paramMap
@inherit_doc
class VowpalWabbitContextualBandit(_VowpalWabbitContextualBandit):
def _create_model(self, java_model):
model = VowpalWabbitContextualBanditModel()
model._java_obj = java_model
model._transfer_params_from_java()
return model
def setInitialModel(self, model):
"""
Initialize the estimator with a previously trained model.
"""
self._java_obj.setInitialModel(model._java_obj.getModel())
def parallelFit(self, dataset, param_maps):
sc = SparkContext._active_spark_context
self._transfer_params_to_java()
javaParamMaps = [to_java_params(sc, self, x) for x in param_maps]
javaModels = self._java_obj.parallelFit(dataset._jdf, javaParamMaps)
return [self._copyValues(self._create_model(x)) for x in javaModels]
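# A minimal usage sketch for parallelFit (assumes a DataFrame `train_df` and that
# the estimator exposes a `learningRate` param; check the generated
# _VowpalWabbitContextualBandit base class for the actual parameter names):
#
#     from pyspark.ml.tuning import ParamGridBuilder
#     cb = VowpalWabbitContextualBandit()
#     grid = ParamGridBuilder().addGrid(cb.learningRate, [0.1, 0.5]).build()
#     models = cb.parallelFit(train_df, grid)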
@inherit_doc
class VowpalWabbitContextualBanditModel(_VowpalWabbitContextualBanditModel):
def saveNativeModel(self, filename):
"""
Save the native model to a local or WASB remote location.
"""
self._java_obj.saveNativeModel(filename)
def getNativeModel(self):
"""
Get the binary native VW model.
"""
return self._java_obj.getModel()
def getReadableModel(self):
return self._java_obj.getReadableModel()
def getPerformanceStatistics(self):
ctx = SparkContext._active_spark_context
sql_ctx = SQLContext.getOrCreate(ctx)
return DataFrame(self._java_obj.getPerformanceStatistics(), sql_ctx)
| python |
"""
/*
* -----------------------------------------------------------------
* $Revision: 1.3 $
* $Date: 2010/12/01 23:08:49 $
* -----------------------------------------------------------------
* Programmer(s): Allan Taylor, Alan Hindmarsh and
* Radu Serban @ LLNL
* -----------------------------------------------------------------
* Example (serial):
*
* This example solves a nonlinear system that arises from a system
* of partial differential equations. The PDE system is a food web
* population model, with predator-prey interaction and diffusion
* on the unit square in two dimensions. The dependent variable
* vector is the following:
*
* 1 2 ns
* c = (c , c , ..., c ) (denoted by the variable cc)
*
* and the PDE's are as follows:
*
* i i
* 0 = d(i)*(c + c ) + f (x,y,c) (i=1,...,ns)
* xx yy i
*
* where
*
* i ns j
* f (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The number np is both the
* number of prey and predator species. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* b(i) = BB * (1 + alpha * x * y) (i <= np)
* b(i) =-BB * (1 + alpha * x * y) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED ( i > np)
*
* The various scalar parameters are set using define's or in
* routine InitUserData.
*
* The boundary conditions are: normal derivative = 0, and the
* initial guess is constant in x and y, but the final solution
* is not.
*
* The PDEs are discretized by central differencing on an MX by
* MY mesh.
*
* The nonlinear system is solved by KINSOL using the method
* specified in local variable globalstrat.
*
* The preconditioner matrix is a block-diagonal matrix based on
* the partial derivatives of the interaction terms f only.
*
* Constraints are imposed to make all components of the solution
* positive.
* -----------------------------------------------------------------
* References:
*
* 1. Peter N. Brown and Youcef Saad,
* Hybrid Krylov Methods for Nonlinear Systems of Equations
* LLNL report UCRL-97645, November 1987.
*
* 2. Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems,
* Lawrence Livermore National Laboratory Report UCRL-95088,
* Rev. 1, June 1987, and Journal of Applied Mathematics and
* Computation, Vol. 31 (May 1989), pp. 40-91. (Presents a
* description of the time-dependent version of this test
* problem.)
* -----------------------------------------------------------------
*/
"""
import numpy as np
from pySundials.kinsol import Kinsol, denseGETRF, denseGETRS
from pySundials.sundials import NvectorNdarrayFloat64
np.seterr(all='raise')
#/* Problem Constants */
NUM_SPECIES = 6# /* must equal 2*(number of prey or predators)
# number of prey = number of predators */
#define PI RCONST(3.1415926535898) /* pi */
MX =5 #/* MX = number of x mesh points */
MY =5 #/* MY = number of y mesh points */
NSMX =NUM_SPECIES * MX
NEQ =NSMX * MY #/* number of equations in the system */
AA =1.0 #/* value of coefficient AA in above eqns */
EE =10000. #/* value of coefficient EE in above eqns */
GG =0.5e-6 #/* value of coefficient GG in above eqns */
BB =1.0 #/* value of coefficient BB in above eqns */
DPREY =1.0 #/* value of coefficient dprey above */
DPRED =0.5 #/* value of coefficient dpred above */
ALPHA =1.0 #/* value of coefficient alpha above */
AX =1.0 #/* total range of x variable */
AY =1.0 #/* total range of y variable */
FTOL =1.e-7 #/* ftol tolerance */
STOL =1.e-13 #/* stol tolerance */
THOUSAND =1000.0 #/* one thousand */
ZERO =0. #/* 0. */
ONE =1.0 #/* 1. */
TWO =2.0 #/* 2. */
PREYIN =1.0 #/* initial guess for prey concentrations. */
PREDIN =30000.0 #/* initial guess for predator concs. */
CORRECT_VALUES_BL = [1.16428,1.16428,1.16428,34927.48780,34927.48780,34927.48780]
CORRECT_VALUES_TR = [1.25797,1.25797,1.25797,37736.66420,37736.66420,37736.66420]
#/* User-defined vector access macro: IJ_Vptr */
#/* IJ_Vptr is defined in order to translate from the underlying 3D structure
# of the dependent variable vector to the 1D storage scheme for an N-vector.
# IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
# indices is = 0, jx = i, jy = j. */
#define IJ_Vptr(vv,i,j) (&NV_Ith_S(vv, i*NUM_SPECIES + j*NSMX))
#/* Type : UserData
# contains preconditioner blocks, pivot arrays, and problem constants */
class FoodWeb(Kinsol):
def __init__(self, mx, my, num_species, ax, ay, uround ):
self.mx = mx
self.my = my
self.ns = num_species
self.np = num_species/2
self.ax = ax
self.ay = ay
self.dx = ax/(mx-1)
self.dy = ay/(my-1)
self.uround = uround
self.sqruround = np.sqrt(uround)
#/*
# * Allocate memory for data structure of type UserData
# */
self.P = np.empty( (mx,my,num_species,num_species), dtype=np.float64 )
self.pivot = np.zeros( (mx,my,num_species), dtype=np.int32 )
self.acoef = np.empty( (num_species,num_species) )
self.bcoef = np.empty( num_species )
self.cox = np.empty( num_species )
self.coy = np.empty( num_species )
self.rates = NvectorNdarrayFloat64((mx,my,num_species))
# /* Set up the coefficients a and b plus others found in the equations */
for i in range(self.np):
a1 = (i,self.np)
a2 = (i+self.np,0)
for j in range(self.np):
self.acoef[i,self.np+j] = -GG
self.acoef[i+self.np,j] = EE
self.acoef[i,j] = ZERO
self.acoef[i+self.np,self.np+j] = ZERO
self.acoef[i,i] = -AA
self.acoef[i+self.np,i+self.np] = - AA
self.bcoef[i] = BB
self.bcoef[i+self.np] = -BB
self.cox[i] = DPREY/self.dx**2
self.cox[i+self.np] = DPRED/self.dx**2
self.coy[i] = DPREY/self.dy**2
self.coy[i+self.np] = DPRED/self.dy**2
def SetInitialProfiles(self, cc, sc):
ctemp = np.empty(self.ns)
stemp = np.empty(self.ns)
ctemp[:self.np] = PREYIN
stemp[:self.np] = ONE
ctemp[self.np:] = PREDIN
stemp[self.np:] = 0.00001
for i in range(self.ns):
cc.data[:,:,i] = ctemp[i]
sc.data[:,:,i] = stemp[i]
def WebRate(self, xx, yy, jx, jy, cc, rates ):
for i in range(self.ns):
rates[jx,jy,i] = np.dot(cc[jx,jy,:], self.acoef[i,:])
fac = ONE + ALPHA * xx * yy
for i in range(self.ns):
rates[jx,jy,i] = cc[jx,jy,i] * (self.bcoef[i]*fac+rates[jx,jy,i])
def RhsFn(self, cc, fval):
"""
/*
* System function for predator-prey system
*/
"""
delx = self.dx
dely = self.dy
try:
#/* Loop over all mesh points, evaluating rate array at each point*/
for jy in range(self.my):
yy = dely*jy
#/* Set lower/upper index shifts, special at boundaries. */
idyl = 1 if jy != 0 else -1
idyu = 1 if jy != (MY-1) else -1
for jx in range(self.mx):
xx = delx*jx
#/* Set left/right index shifts, special at boundaries. */
idxl = 1 if jx != 0 else -1
idxr = 1 if jx != (MX-1) else -1
#/* Get species interaction rate array at (xx,yy) */
self.WebRate(xx, yy, jx, jy, cc.data, self.rates.data)
dcyli = cc.data[jx,jy,:] - cc.data[jx,jy-idyl,:]
dcyui = cc.data[jx,jy+idyu,:] - cc.data[jx,jy,:]
dcxli = cc.data[jx,jy,:] - cc.data[jx-idxl,jy,:]
dcxri = cc.data[jx+idxr,jy,:] - cc.data[jx,jy,:]
#/* Compute the total rate value at (xx,yy) */
fval.data[jx,jy,:] = self.coy*(dcyui-dcyli) + self.cox*(dcxri-dcxli) + self.rates.data[jx,jy,:]
except Exception as e:
print e, e.message
return -1
#print fval.data.min(), fval.data.max()
return 0
def SpilsPrecSetup(self, cc, cscale, fval, fscale,
vtemp1, vtemp2):
"""
/*
* Preconditioner setup routine. Generate and preprocess P.
*/
"""
#realtype r, r0, uround, sqruround, xx, yy, delx, dely, csave, fac;
#realtype *cxy, *scxy, **Pxy, *ratesxy, *Pxycol, perturb_rates[NUM_SPECIES];
#long int i, j, jx, jy, ret;
#UserData data;
delx = self.dx
dely = self.dy
uround = self.uround
sqruround = self.sqruround
fac = fval.WL2Norm( fscale )
perturb_rates = np.empty_like( self.rates.data )
r0 = THOUSAND * uround * fac * NEQ
if r0 == ZERO: r0 = ONE
#/* Loop over spatial points; get size NUM_SPECIES Jacobian block at each */
for jy in range(self.my):
yy = jy*dely
for jx in range(self.mx):
xx = jx*delx
# Pxy = self.P[ix,iy]
#Pxy = (data->P)[jx][jy];
#cxy = IJ_Vptr(cc,jx,jy);
#scxy= IJ_Vptr(cscale,jx,jy);
#ratesxy = IJ_Vptr((data->rates),jx,jy);
#/* Compute difference quotients of interaction rate fn. */
for j in range(self.ns):
csave = cc.data[jx,jy,j] # /* Save the j,jx,jy element of cc */
r = max(sqruround*abs(csave), r0/cscale.data[jx,jy,j])
cc.data[jx,jy,j] += r # /* Perturb the j,jx,jy element of cc */
fac = ONE/r
self.WebRate(xx, yy, jx, jy, cc.data, perturb_rates )
#/* Restore j,jx,jy element of cc */
cc.data[jx,jy,j] = csave
#/* Load the j-th column of difference quotients */
for i in range(self.ns):
self.P[jx,jy,j,i] = (perturb_rates[jx,jy,i] - self.rates.data[jx,jy,i])*fac
#/* Do LU decomposition of size NUM_SPECIES preconditioner block */
                # P should be order='F' from the outset; alternatively use
                # scipy for this calculation.
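                # Note: self.P[jx,jy,:,:] is C-contiguous, so its transpose is
                # already Fortran-ordered and np.asfortranarray returns a view
                # rather than a copy; the LU factors written by denseGETRF thus
                # end up in self.P (and the pivots in self.pivot) for reuse in
                # SpilsPrecSolve.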
P = np.asfortranarray(self.P[jx,jy,:,:].T)
ret = denseGETRF(P, self.pivot[jx,jy,:])
#if ret != 0: return 1
return 0
def SpilsPrecSolve(self, cc, cscale, fval, fscale, vv, ftem):
"""
/*
* Preconditioner solve routine
*/
"""
#print 'SpilsPrecSolve'
for jx in range(self.mx):
for jy in range(self.my):
#For each (jx,jy), solve a linear system of size NUM_SPECIES.
#vxy is the address of the corresponding portion of the vector vv;
#Pxy is the address of the corresponding block of the matrix P;
#piv is the address of the corresponding block of the array pivot.
piv = self.pivot[jx,jy,:]
Pxy = self.P[jx,jy,:,:]
#print Pxy, piv
P = np.asfortranarray(self.P[jx,jy,:,:].T)
denseGETRS(P, self.pivot[jx,jy,:], vv.data[jx,jy,:])
return 0
def PrintFinalStats(self, ):
"""
Print final statistics contained in iopt
"""
nni = self.GetNumNonlinSolvIters()
nfe = self.GetNumFuncEvals()
nli = self.SpilsGetNumLinIters()
npe = self.SpilsGetNumPrecEvals()
nps = self.SpilsGetNumPrecSolves()
ncfl = self.SpilsGetNumConvFails()
nfeSG = self.SpilsGetNumFuncEvals()
print "Final Statistics.. "
print "nni = %5ld nli = %5ld"%(nni, nli)
print "nfe = %5ld nfeSG = %5ld"%(nfe, nfeSG)
print "nps = %5ld npe = %5ld ncfl = %5ld"%(nps, npe, ncfl)
#/* Functions Called by the KINSOL Solver */
#
#static int func(N_Vector cc, N_Vector fval, void *user_data);
#
#static int PrecSetupBD(N_Vector cc, N_Vector cscale,
# N_Vector fval, N_Vector fscale,
# void *user_data,
# N_Vector vtemp1, N_Vector vtemp2);
#
#static int PrecSolveBD(N_Vector cc, N_Vector cscale,
# N_Vector fval, N_Vector fscale,
# N_Vector vv, void *user_data,
# N_Vector ftem);
#
#/* Private Helper Functions */
#
#static UserData AllocUserData(void);
#static void InitUserData(UserData data);
#static void FreeUserData(UserData data);
#static void SetInitialProfiles(N_Vector cc, N_Vector sc);
#static void PrintHeader(int globalstrategy, int maxl, int maxlrst,
# realtype fnormtol, realtype scsteptol);
#static void PrintOutput(N_Vector cc);
#static void PrintFinalStats(void *kmem);
#static void WebRate(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
# void *user_data);
#static realtype DotProd(long int size, realtype *x1, realtype *x2);
#static int check_flag(void *flagvalue, char *funcname, int opt);
#/*
# *--------------------------------------------------------------------
# * MAIN PROGRAM
# *--------------------------------------------------------------------
# */
def testKinFoodWeb():
#int globalstrategy;
#realtype fnormtol, scsteptol;
#N_Vector cc, sc, constraints;
#UserData data;
#int flag, maxl, maxlrst;
#void *kmem;
data = FoodWeb(MX,MY, NUM_SPECIES, AX,AY, 1E-10)
#/* Create serial vectors of length NEQ */
cc = NvectorNdarrayFloat64((MX,MY, NUM_SPECIES))
sc = NvectorNdarrayFloat64((MX,MY, NUM_SPECIES))
constraints = NvectorNdarrayFloat64((MX,MY, NUM_SPECIES))
constraints.Constant(TWO)
data.SetInitialProfiles(cc, sc);
fnormtol=FTOL
scsteptol=STOL
#/* Call KINCreate/KINInit to initialize KINSOL.
#A pointer to KINSOL problem memory is returned and stored in kmem. */
#/* Vector cc passed as template vector. */
data.SetConstraints(constraints)
#/* Call KINSpgmr to specify the linear solver KINSPGMR with preconditioner
#routines PrecSetupBD and PrecSolveBD. */
maxl = 15
maxlrst = 2
strategy = 'none'
data.initSolver(cc)
data.setupIndirectLinearSolver(solver='spgmr', maxl=maxl, user_pre=True)
data.funcNormTol=FTOL
data.scaledStepTol=STOL
data.spilsMaxRestarts=maxlrst
#/* Print out the problem size, solution parameters, initial guess. */
PrintHeader(strategy, maxl, maxlrst, fnormtol, scsteptol);
#/* Call KINSol and print output concentration profile */
print 'Solving...'
flag = data.Solve(cc,sc,sc, strategy=strategy)
# flag = KINSol(kmem, /* KINSol memory block */
# cc, /* initial guess on input; solution vector */
    #                 globalstrategy,      /* global strategy choice */
# sc, /* scaling vector, for the variable cc */
# sc); /* scaling vector for function values fval */
print "\n\nComputed equilibrium species concentrations:\n"
PrintAndAssertOutput(cc)
#/* Print final statistics and free memory */
data.PrintFinalStats()
return 0
def PrintHeader(globalstrategy, maxl, maxlrst, fnormtol, scsteptol):
"""
/*
* Print first lines of output (problem description)
*/
"""
print "\nPredator-prey test problem -- KINSol (serial version)\n"
print "Mesh dimensions = %d X %d"%(MX, MY)
print "Number of species = %d"% NUM_SPECIES
print "Total system size = %d\n"% NEQ
print "Flag globalstrategy = %s "%globalstrategy
print "Linear solver is SPGMR with maxl = %d, maxlrst = %d"%(maxl, maxlrst)
print "Preconditioning uses interaction-only block-diagonal matrix"
print "Positivity constraints imposed on all components"
print "Tolerance parameters: fnormtol = %g scsteptol = %g"%(fnormtol, scsteptol)
print "\nInitial profile of concentration"
print "At all mesh points: %g %g %g %g %g %g"%(PREYIN, PREYIN, PREYIN,
PREDIN, PREDIN, PREDIN)
def PrintAndAssertOutput(cc):
"""
/*
* Print sampled values of current cc
*/
"""
from nose.tools import assert_almost_equal
jy = 0
jx = 0
print "\nAt bottom left:"
#/* Print out lines with up to 6 values per line */
for _is in range(NUM_SPECIES):
print " %g"% cc.data[jx,jy,_is],
assert_almost_equal(CORRECT_VALUES_BL[_is], cc.data[jx,jy,_is], places=4)
jy = MY-1
jx = MX-1
print "\n\nAt top right:"
#/* Print out lines with up to 6 values per line */
for _is in range(NUM_SPECIES):
print " %g"% cc.data[jx,jy,_is],
assert_almost_equal(CORRECT_VALUES_TR[_is], cc.data[jx,jy,_is], places=4)
print "\n"
if __name__ == '__main__':
testKinFoodWeb()
| python |
class NetworkNode:
nodeClass = 'IoT'
def __init__(self, serialNumber, os, ip, location='5th Floor Storage Room'):
self.serialNumber = serialNumber
self.os = os
        self.ip = ip
        self.location = location
def nodeInfo(self):
print('-' * 79)
return '{} {} {} {}'.format(self.serialNumber, self.os, self.ip, self.location)
def nodeType(self):
return '{}'.format(self.nodeClass)
node01 = NetworkNode('FTX8675309', 'IOS-XE', '10.0.0.11')
print(node01.nodeInfo())
print(node01.nodeType()) | python |
import json
import os
from collections import OrderedDict
def load_characters():
fname = os.path.join(os.path.dirname(__file__), 'charactersdata.json')
with open(fname, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
def load_locations():
fname = os.path.join(os.path.dirname(__file__), 'locationsdata.json')
    with open(fname, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
def load_bending():
fname = os.path.join(os.path.dirname(__file__), 'bendingdata.json')
    with open(fname, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict) | python |
from behave import use_step_matcher, step, when, then
use_step_matcher("re")
@when(r'I want to calculate (?P<number_a>\d) and (?P<number_b>\d)')
def step_impl(context, number_a, number_b):
context.number_a = int(number_a)
context.number_b = int(number_b)
@step('use addition method')
def use_addition_method(context):
context.result = context.number_a + context.number_b
@then(r'result is (?P<expected_result>\d)')
def result_assertion(context, expected_result):
assert context.result == int(expected_result), 'Result is incorrect\nExpected: {}\nGot: {}'.format(expected_result,
context.result)
@step('use multiplication method')
def use_multiplication_method(context):
if 'input_data' in context:
for i in context.input_data:
i['actual'] = i['a'] * i['b']
else:
context.result = context.number_a * context.number_b
@step('this step is for incorrect expected result')
def step_impl(context):
assert context.result == 7, 'Result is incorrect\nExpected: {}\nGot: {}'.format(7, context.result)
@then("all table data calculated correctly")
def step_impl(context):
for i in context.input_data:
assert i['expected'] == i['actual'], 'Result is incorrect\nExpected: {}\nGot: {}'.format(i['expected'],
i['actual'])
@when("I want to calculate numbers from table")
def step_impl(context):
step_table = context.table
context.input_data = []
for row in step_table:
data = {
'a': int(row['number_a']),
'b': int(row['number_b']),
'expected': int(row['expected'])
}
context.input_data.append(dict(data))
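# Illustrative feature-file steps these definitions match (scenario values are
# hypothetical, not part of the original suite):
#   When I want to calculate 4 and 5
#   And use addition method
#   Then result is 9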
| python |
from django.apps import AppConfig
class DeliverymanagementConfig(AppConfig):
name = 'DeliveryManagement'
| python |
import groups
import config
import discord
import log.logging
from discord import Option
from discord.ext import commands
from utility import EncodeDrawCode
class Draw(commands.Cog):
def __init__(self, bot):
self.bot = bot
@groups.fun.command()
async def draw(
ctx: commands.Context,
code: Option(
str,
description="Width x Hight : 1 = Blue | 0 = Black", # noqa: F722
required=True
)
):
"""Draws the spesified code to the screen using emotes"""
await log.logging.Info(
f"{ctx.author.name}#{ctx.author.discriminator} ({ctx.author.id})"
" executed Draw in Fun"
)
if "x" not in code.lower() or ":" not in code:
await ctx.respond(
":anger: Grrrr, invalid draw code format",
ephemeral=True
)
return
message = await EncodeDrawCode(code)
if not message:
await ctx.respond(
":anger: Grrrr, Failed to encode message, it may be out of "
"bounds of your dimensions or has invalid characters",
ephemeral=True
)
return
emb = discord.Embed(
title="Drawing",
description=message,
color=config.embed_color
)
emb.add_field(name="Code", value=code)
await ctx.respond(embed=emb)
def setup(bot):
bot.add_cog(Draw(bot))
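# Note: the option description and the validation above imply a draw code of the
# form "<width>x<height>:<pixels>", e.g. "2x2:1010" (an assumption for
# illustration); the exact encoding is handled by EncodeDrawCode in utility.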
| python |
from math import ceil
def find_median(arr):
n = len(arr)
    median = find_median_util(arr, n//2 + 1, 0, n-1)
print(median)
def find_median_util(arr, k, low, high):
m = partition(arr, low, high)
length = m - low + 1
if length == k:
return arr[m]
if length > k:
return find_median_util(arr, k, low, m-1)
else:
return find_median_util(arr, k-length, m+1, high)
def partition(arr, low, high):
pivot = get_pivot_val(arr, low, high)
while low < high:
while arr[low] < pivot:
low += 1
while arr[high] > pivot:
high -= 1
if arr[low] == arr[high]:
low += 1
# swap
elif low < high:
temp = arr[low]
arr[low] = arr[high]
arr[high] = temp
return high
def get_pivot_val(arr, low, high):
    if high - low + 1 <= 9:
        # Sort only the small sub-range and return its median as the pivot
        small = sorted(arr[low:high + 1])
        return small[len(small) // 2]
medians = [0] * int(ceil(( high - low +1 )/5))
median_index = 0
while high >= low:
temp = [0]* min(5, (high - low + 1))
for i in range(0, len(temp)):
if low <= high :
temp[i] = arr[low]
low += 1
        temp.sort()
        medians[median_index] = temp[len(temp) // 2]
median_index += 1
return get_pivot_val(medians, 0, len(medians) - 1)
arr = [25, 24, 33, 39, 3, 18, 19, 31, 23, 49, 45, 16, 1, 29, 40, 22, 15, 20, 24, 4, 13, 34]
find_median(arr) | python |
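# For the sample array above n = 22, so the call selects the (n//2 + 1) = 12th
# smallest value, which is 24 for these inputs.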
# ------------------------------------------------------------------------------
# CodeHawk Binary Analyzer
# Author: Henny Sipma
# ------------------------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2021 Aarno Labs LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ------------------------------------------------------------------------------
"""Basis for dictionary records in the MIPS dictionary."""
from typing import Callable, cast, Dict, List, Tuple, Type, TypeVar, TYPE_CHECKING
from chb.app.BDictionary import BDictionary
import chb.util.fileutil as UF
from chb.util.IndexedTable import IndexedTableValue
if TYPE_CHECKING:
from chb.api.InterfaceDictionary import InterfaceDictionary
from chb.mips.MIPSDictionary import MIPSDictionary
class MIPSDictionaryRecord(IndexedTableValue):
def __init__(
self,
mipsd: "MIPSDictionary",
ixval: IndexedTableValue) -> None:
IndexedTableValue.__init__(self, ixval.index, ixval.tags, ixval.args)
self._mipsd = mipsd
@property
def mipsd(self) -> "MIPSDictionary":
return self._mipsd
@property
def bd(self) -> BDictionary:
return self.mipsd.bd
@property
def ixd(self) -> "InterfaceDictionary":
return self.mipsd.ixd
MdR = TypeVar("MdR", bound=MIPSDictionaryRecord, covariant=True)
class MIPSDictionaryRegistry:
def __init__(self) -> None:
self.register: Dict[Tuple[type, str], Type[MIPSDictionaryRecord]] = {}
def register_tag(
self,
tag: str,
anchor: type) -> Callable[[type], type]:
def handler(t: type) -> type:
self.register[(anchor, tag)] = t
return t
return handler
def mk_instance(
self,
md: "MIPSDictionary",
ixval: IndexedTableValue,
anchor: Type[MdR]) -> MdR:
tag = ixval.tags[0]
if (anchor, tag) not in self.register:
raise UF.CHBError("Unknown mipsdictionary type: " + tag)
instance = self.register[(anchor, tag)](md, ixval)
return cast(MdR, instance)
mipsregistry: MIPSDictionaryRegistry = MIPSDictionaryRegistry()
| python |
"""init
Revision ID: 51b36c819d5f
Revises:
Create Date: 2019-12-07 22:55:45.980654
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '51b36c819d5f'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('prize',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('logo', sa.String(length=128), nullable=True),
sa.Column('description', sa.String(length=128), nullable=False),
sa.Column('score', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_prize'))
)
op.create_table('station',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=128), nullable=False),
sa.Column('score', sa.Integer(), nullable=False),
sa.PrimaryKeyConstraint('id', name=op.f('pk_station'))
)
op.create_index(op.f('ix_station_name'), 'station', ['name'], unique=False)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('email', sa.String(length=128), nullable=False),
sa.Column('password', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id', name=op.f('pk_user'))
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=False)
op.create_table('token',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('station_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.Column('value', sa.String(length=128), nullable=True),
sa.ForeignKeyConstraint(['station_id'], ['station.id'], name=op.f('fk_token_station_id_station')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_token'))
)
op.create_table('usedtoken',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('user_id', sa.Integer(), nullable=False),
sa.Column('token_id', sa.Integer(), nullable=False),
sa.Column('timestamp', sa.DateTime(), nullable=False),
sa.Column('score', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['token_id'], ['token.id'], name=op.f('fk_usedtoken_token_id_token')),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], name=op.f('fk_usedtoken_user_id_user')),
sa.PrimaryKeyConstraint('id', name=op.f('pk_usedtoken'))
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('usedtoken')
op.drop_table('token')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_index(op.f('ix_station_name'), table_name='station')
op.drop_table('station')
op.drop_table('prize')
# ### end Alembic commands ###
| python |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import ew
from ew import jinja2_ew
from allura.lib import validators as v
from forgetracker import model
class BinForm(ew.SimpleForm):
template = 'jinja:forgetracker:templates/tracker_widgets/bin_form.html'
defaults = dict(
ew.SimpleForm.defaults,
submit_text="Save Bin")
class hidden_fields(ew.NameList):
_id = jinja2_ew.HiddenField(
validator=v.Ming(model.Bin), if_missing=None)
class fields(ew.NameList):
summary = jinja2_ew.TextField(
label='Bin Name',
validator=v.UnicodeString(not_empty=True))
terms = jinja2_ew.TextField(
label='Search Terms',
validator=v.UnicodeString(not_empty=True))
| python |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn as sk
import copy
import warnings
import pickle
import joblib
warnings.filterwarnings("ignore")
import math
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.utils import shuffle
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score,make_scorer
from numpy.random import seed
seed(1)
df = pd.read_csv('covid-dataset.csv')
df = df.astype({"label":'string'})
X=df.drop("label",axis=1).values[0:399]
X=X.astype(int)
y=df["label"].values[0:399]
print(X.shape)
print(X,y)
from sklearn.preprocessing import LabelEncoder
enc = LabelEncoder()
y = enc.fit_transform(y)
from imblearn.over_sampling import SMOTE
smt = SMOTE(k_neighbors = 2)
X_train_res, y_train_res = smt.fit_resample(X, y)
X, y = shuffle(X_train_res, y_train_res)
for i in [10]:
originalclass = []
predictedclass = []
def classification_report_with_accuracy_score(y_true, y_pred):
originalclass.extend(y_true)
predictedclass.extend(y_pred)
return accuracy_score(y_true, y_pred)
model = KNeighborsClassifier()
n_neighbors = range(1, 21, 2)
weights = ['uniform', 'distance']
metric = ['euclidean', 'manhattan', 'minkowski']
grid = dict(n_neighbors=n_neighbors,weights=weights,metric=metric)
cv = StratifiedKFold(n_splits=i, shuffle=True, random_state=1)
grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
grid_search.fit(X,y)
print(grid_search.best_params_)
nested_score = cross_val_score(grid_search, X, y, cv=cv, scoring=make_scorer(classification_report_with_accuracy_score))
print(classification_report(originalclass, predictedclass))
print(np.mean(nested_score))
joblib.dump(grid_search,open('model.pkl','wb'))
| python |
from constants import CURRENCY_SYMBOLS_MAP
class CurrencySymbols(object):
# Checks if the input currency name exists in the map
    # If it exists, return the symbol
@staticmethod
def get_symbol(currency):
if not currency:
return None
else:
return CURRENCY_SYMBOLS_MAP.get(currency)
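# Illustrative usage (assumes CURRENCY_SYMBOLS_MAP contains an entry such as 'USD' -> '$'):
#   CurrencySymbols.get_symbol('USD')   # -> '$'
#   CurrencySymbols.get_symbol(None)    # -> None
#   CurrencySymbols.get_symbol('XYZ')   # -> None for unknown codes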
| python |
class WorkflowException(Exception):
pass
class UnsupportedRequirement(WorkflowException):
pass
| python |
# BSD 3-Clause License
#
# Copyright (c) 2017 xxxx
# All rights reserved.
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ============================================================================
import os
import argparse
import random
import shutil
from shutil import copyfile
from misc import printProgressBar
def rm_mkdir(dir_path):
if os.path.exists(dir_path):
shutil.rmtree(dir_path)
print('Remove path - %s'%dir_path)
os.makedirs(dir_path)
print('Create path - %s'%dir_path)
def main(config):
rm_mkdir(config.train_path)
rm_mkdir(config.train_GT_path)
rm_mkdir(config.valid_path)
rm_mkdir(config.valid_GT_path)
rm_mkdir(config.test_path)
rm_mkdir(config.test_GT_path)
filenames = os.listdir(config.origin_data_path)
data_list = []
GT_list = []
for filename in filenames:
ext = os.path.splitext(filename)[-1]
if ext =='.jpg':
filename = filename.split('_')[-1][:-len('.jpg')]
data_list.append('ISIC_'+filename+'.jpg')
GT_list.append('ISIC_'+filename+'_segmentation.png')
num_total = len(data_list)
num_train = int((config.train_ratio/(config.train_ratio+config.valid_ratio+config.test_ratio))*num_total)
num_valid = int((config.valid_ratio/(config.train_ratio+config.valid_ratio+config.test_ratio))*num_total)
num_test = num_total - num_train - num_valid
print('\nNum of train set : ',num_train)
print('\nNum of valid set : ',num_valid)
print('\nNum of test set : ',num_test)
Arange = list(range(num_total))
random.shuffle(Arange)
for i in range(num_train):
idx = Arange.pop()
src = os.path.join(config.origin_data_path, data_list[idx])
dst = os.path.join(config.train_path,data_list[idx])
copyfile(src, dst)
src = os.path.join(config.origin_GT_path, GT_list[idx])
dst = os.path.join(config.train_GT_path, GT_list[idx])
copyfile(src, dst)
printProgressBar(i + 1, num_train, prefix = 'Producing train set:', suffix = 'Complete', length = 50)
for i in range(num_valid):
idx = Arange.pop()
src = os.path.join(config.origin_data_path, data_list[idx])
dst = os.path.join(config.valid_path,data_list[idx])
copyfile(src, dst)
src = os.path.join(config.origin_GT_path, GT_list[idx])
dst = os.path.join(config.valid_GT_path, GT_list[idx])
copyfile(src, dst)
printProgressBar(i + 1, num_valid, prefix = 'Producing valid set:', suffix = 'Complete', length = 50)
for i in range(num_test):
idx = Arange.pop()
src = os.path.join(config.origin_data_path, data_list[idx])
dst = os.path.join(config.test_path,data_list[idx])
copyfile(src, dst)
src = os.path.join(config.origin_GT_path, GT_list[idx])
dst = os.path.join(config.test_GT_path, GT_list[idx])
copyfile(src, dst)
printProgressBar(i + 1, num_test, prefix = 'Producing test set:', suffix = 'Complete', length = 50)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# model hyper-parameters
parser.add_argument('--train_ratio', type=float, default=0.7)
parser.add_argument('--valid_ratio', type=float, default=0.1)
parser.add_argument('--test_ratio', type=float, default=0.2)
# data path
parser.add_argument('--origin_data_path', type=str, default='../ISIC/dataset/ISIC2018_Task1-2_Training_Input')
parser.add_argument('--origin_GT_path', type=str, default='../ISIC/dataset/ISIC2018_Task1_Training_GroundTruth')
parser.add_argument('--train_path', type=str, default='./dataset/train/')
parser.add_argument('--train_GT_path', type=str, default='./dataset/train_GT/')
parser.add_argument('--valid_path', type=str, default='./dataset/valid/')
parser.add_argument('--valid_GT_path', type=str, default='./dataset/valid_GT/')
parser.add_argument('--test_path', type=str, default='./dataset/test/')
parser.add_argument('--test_GT_path', type=str, default='./dataset/test_GT/')
config = parser.parse_args()
print(config)
main(config) | python |
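    # Example invocation (script and path names are illustrative):
    #   python dataset.py --train_ratio 0.7 --valid_ratio 0.1 --test_ratio 0.2 \
    #       --origin_data_path ./ISIC2018_Task1-2_Training_Input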
from .model import TapisModel
from .searchable import SearchableCommand
| python |
from cfnlint.core.BaseYamlLinter import BaseYamlLinter
class Linter(BaseYamlLinter):
def lint(self):
resources = self.file_as_yaml['Resources']
lintingrules = self.lintingrules['ResourceTypes']
for resource_name, resource in resources.items():
# Skip custom resources, cannot be linted
if resource['Type'].startswith("Custom::"):
continue
# Check if the resource is supported
if resource['Type'] not in lintingrules:
self.errors.append(
"Unsupported resource: %s (%s)" % (resource_name, resource['Type'])
)
continue
# Check if there are rules specified for the resource
if not lintingrules[resource['Type']]:
continue
            if 'Properties' not in resource:
continue
# Check mandatory properties
resource_properties = resource['Properties']
rule_properties = lintingrules[resource['Type']]['Properties']
            # If there are no property rules defined, skip
if not rule_properties:
continue
# Validate all properties in the rules
for rule_property, rules in rule_properties.items():
if rules['Required']:
# Check if the property is specified
if rule_property not in resource_properties:
self.errors.append(
'Mandatory property "{}" not specified for {} ({}). See: {}'
.format(rule_property, resource_name, resource['Type'], rules['Documentation'])
)
continue
if rule_property in resource_properties:
property_value = resource_properties[rule_property]
# Check if it has is a valid value
if rules:
if 'AllowedValues' in rules:
if rules['AllowedValues']:
if property_value not in rules['AllowedValues']:
self.errors.append(
'Invalid property value "{}" specified for {}.{} See: {}'
.format(property_value, resource_name, rule_property, rules['Documentation'])
)
continue
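# Illustrative shape of the linting rules this class consumes (resource and
# property names below are hypothetical):
#   {'ResourceTypes': {
#       'AWS::S3::Bucket': {
#           'Properties': {
#               'BucketName': {'Required': False,
#                              'AllowedValues': None,
#                              'Documentation': 'https://docs.aws.amazon.com/...'}}}}}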
| python |
from rest_framework import serializers
from app_idea_feed.serializers import IdeaFeedItemSerializer
from user_profiles_api.serializers import UserProfileSerializer
from app_user_actions.serializers import UserLikedIdeasSerializer, \
IdeaLikedByUsersSerializer, IdeaCommentByUsersSerializer
from app_user_actions.models import UserLikedIdeas, UserFavouriteIdeas
from app_idea_feed.models import IdeaFeedItem
class ViewLikedIdeasSerializer(serializers.ModelSerializer):
"""Serializes user liked ideas"""
user_profile = UserProfileSerializer()
idea_feed = IdeaFeedItemSerializer()
class Meta:
model = UserLikedIdeas
fields = ('id', 'user_profile', 'idea_feed')
extra_kwargs = {'user_profile': {'read_only': True}}
class ViewFavouriteIdeasSerializer(serializers.ModelSerializer):
"""Serializes user favourite ideas"""
user_profile = UserProfileSerializer()
idea_feed = IdeaFeedItemSerializer()
class Meta:
model = UserFavouriteIdeas
fields = ('id', 'user_profile', 'idea_feed')
extra_kwargs = {'user_profile': {'read_only': True}}
class ViewIdeasFeed(serializers.ModelSerializer):
"""Serializes ideas feed for view"""
user_profile = UserProfileSerializer()
class Meta:
model = IdeaFeedItem
fields = ('id', 'user_profile', 'name', 'description', 'theme', 'department',
'rag_status', 'tags', 'stage', 'contributor', 'created_on')
extra_kwargs = {'user_profile': {'read_only': True}}
class ViewIdeasCompleteFeed(serializers.ModelSerializer):
"""Serializes complete ideas feed for view with likes and comments"""
user_profile = UserProfileSerializer()
liked_idea = IdeaLikedByUsersSerializer(many=True)
comments_idea = IdeaCommentByUsersSerializer(many=True)
class Meta:
model = IdeaFeedItem
fields = ('id', 'user_profile', 'liked_idea', 'comments_idea', 'name', 'description',
'theme', 'department', 'rag_status', 'tags', 'stage', 'contributor', 'created_on')
extra_kwargs = {'user_profile': {'read_only': True}} | python |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from .models import Customer, CreditCard, Transaction, Product, ScheduledPayment
class CustomerAdmin(admin.ModelAdmin):
"""CustomerAdmin"""
list_display = ('owner_id', 'name', 'mobile', 'email', 'company')
class ProductAdmin(admin.ModelAdmin):
"""ServiceAdmin"""
list_display = ('title', 'description', 'currency', 'price')
class TransactionAdmin(admin.ModelAdmin):
list_display = ('owner_id', 'customer', 'transaction_id', 'card', 'product', 'status', 'result_code', 'result_description', 'currency', 'price')
search_fields = ('transaction_id','registration_id',)
# list_filter = ('status', 'result_code', 'is_initial',)
# inlines = [ScheduledPaymentInline]
class CreditCardAdmin(admin.ModelAdmin):
list_display = ('owner_id', 'bin', 'cardholder_name', 'expiry_month', 'expiry_year', 'last_four_digits')
class ScheduledPaymentAdmin(admin.ModelAdmin):
list_display = ('customer', 'card', 'product', 'amount', 'status', 'scheduled_date')
admin.site.register(Customer, CustomerAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(CreditCard, CreditCardAdmin)
admin.site.register(Transaction, TransactionAdmin)
admin.site.register(ScheduledPayment, ScheduledPaymentAdmin)
| python |
from dunderfile import __FILE__, __LINE__, __func__, helper
assert __FILE__() == __file__
assert helper.__FILE__ == __FILE__()
assert helper.__LINE__ == __LINE__()
# Because we're at top-level in the module.
assert __func__() == '<module>'
def my_function():
assert __func__() == 'my_function'
assert __func__() == helper.__func__
my_function()
def nested_functions():
def inner_function():
assert __func__() == "inner_function"
assert __func__(depth=1) == "nested_functions"
nested_functions()
| python |
import requests
from datetime import datetime
def str_parse_time(string):
"""Parses given string into time"""
r = requests.get("https://dateparser.piyush.codes/fromstr", params={"message": string})
data = r.json()
return data["message"]
def format_time(time):
"""Formats the time"""
format = time.strftime("%a, %b %d, %Y %X")
days = round((datetime.utcnow() - time).total_seconds() / 86400)
format += f"\n*{days} {'days' if days != 1 else 'day'} ago*"
return format | python |
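# Illustrative result (exact output depends on locale and the current date):
#   format_time(datetime(2021, 1, 1))
#   -> "Fri, Jan 01, 2021 00:00:00" followed by an "*N days ago*" line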
import django.forms
from .models import City
class CityForm(django.forms.ModelForm):
class Meta:
model = City
fields = ['name']
widgets = {'name': django.forms.TextInput(attrs={'class': 'input', 'placeholder': 'City Name'})}
| python |
from nose.tools import *
from pysb.core import Model, SelfExporter
import pickle
def with_model(func):
"""Decorate a test to set up and tear down a Model."""
def inner(*args, **kwargs):
model = Model(func.__name__, _export=False)
# manually set up SelfExporter, targeting func's globals
selfexporter_state = SelfExporter.do_export
SelfExporter.do_export = True
SelfExporter.default_model = model
SelfExporter.target_module = func.__module__
SelfExporter.target_globals = func.__globals__
SelfExporter.target_globals['model'] = model
try:
# call the actual test function
func(*args, **kwargs)
finally:
# clean up the globals
SelfExporter.cleanup()
SelfExporter.do_export = selfexporter_state
return make_decorator(func)(inner)
def serialize_component_list(model, filename):
"""Serialize (pickle) the components of the given model to a file. This can
later be used to compare the state of the model against a previously
validated state using :py:func:`check_model_against_component_list`.
"""
    # pickle requires a binary-mode file handle
    with open(filename, 'wb') as f:
        pickle.dump(list(model.all_components().values()), f)
def check_model_against_component_list(model, component_list):
"""Check the components of the given model against the provided list
of components, asserting that they are equal. Useful for testing a
model against a previously validated (and serialized) state.
Currently checks equality by performing a string comparison of the
repr() of each component, however, this may be revised to use alternative
measures of equality in the future.
To serialize the list of components to create a record of a
validated state, see :py:func:`serialize_component_list`.
"""
assert len(model.all_components()) == len(component_list), \
"Model %s does not have the same " \
"number of components as the previously validated version. " \
"The validated model has %d components, current model has " \
"%d components." % \
        (model.name, len(component_list), len(model.all_components()))
model_components = list(model.all_components().values())
for i, comp in enumerate(component_list):
model_comp_str = repr(model_components[i])
comp_str = repr(comp)
assert comp_str == model_comp_str, \
"Model %s does not match reference version: " \
"Mismatch at component %d: %s in the reference model not " \
"equal to %s in the current model." \
% (model.name, i, comp_str, model_comp_str)
assert True
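# Illustrative use of the decorator (a sketch; `Monomer` comes from pysb.core and
# the test body is hypothetical):
#
#   @with_model
#   def test_simple_model():
#       Monomer('A')                      # added to the implicit `model`
#       assert len(model.monomers) == 1   # `model` is injected by the decorator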
| python |
# Copyright (C) 2007 Samuel Abels
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import re
from SpiffWorkflow.Operators import *
from SpiffWorkflow.Task import Task
from SpiffWorkflow.Exception import WorkflowException
from TaskSpec import TaskSpec
class MultiChoice(TaskSpec):
"""
This class represents an if condition where multiple conditions may match
at the same time, creating multiple outgoing branches.
This task has one or more inputs, and one or more incoming branches.
This task has one or more outputs.
"""
def __init__(self, parent, name, **kwargs):
"""
Constructor.
parent -- a reference to the parent (TaskSpec)
name -- a name for the pattern (string)
"""
TaskSpec.__init__(self, parent, name, **kwargs)
self.cond_taskspecs = []
self.choice = None
def connect(self, taskspec):
"""
Convenience wrapper around connect_if() where condition is set to None.
"""
return self.connect_if(None, taskspec)
def connect_if(self, condition, taskspec):
"""
Connects a taskspec that is executed if the condition DOES match.
condition -- a condition (Condition)
taskspec -- the conditional taskspec
"""
assert taskspec is not None
self.outputs.append(taskspec)
self.cond_taskspecs.append((condition, taskspec))
taskspec._connect_notify(self)
def test(self):
"""
Checks whether all required attributes are set. Throws an exception
if an error was detected.
"""
TaskSpec.test(self)
if len(self.cond_taskspecs) < 1:
raise WorkflowException(self, 'At least one output required.')
for condition, task in self.cond_taskspecs:
if task is None:
raise WorkflowException(self, 'Condition with no task.')
if condition is None:
continue
def _on_trigger(self, my_task, choice):
"""
Lets a caller narrow down the choice by using a Choose trigger.
"""
self.choice = choice
def _predict_hook(self, my_task):
my_task._update_children(self.outputs, Task.MAYBE)
def _on_complete_hook(self, my_task):
"""
Runs the task. Should not be called directly.
Returns True if completed, False otherwise.
"""
# Find all matching conditions.
outputs = []
for condition, output in self.cond_taskspecs:
if condition is not None and not condition._matches(my_task):
continue
if self.choice is not None and output.name not in self.choice:
continue
outputs.append(output)
my_task._update_children(outputs)
return True
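# Illustrative wiring of a MultiChoice spec (a sketch; Equal and Attrib come from
# SpiffWorkflow.Operators and the task spec names are hypothetical):
#
#   review = MultiChoice(workflow_spec, 'review_outcome')
#   review.connect_if(Equal(Attrib('status'), 'approved'), publish_task)
#   review.connect_if(Equal(Attrib('status'), 'rejected'), notify_task)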
| python |
ascii_snek = """\
--..,_ _,.--.
`'.'. .'`__ o `;__.
'.'. .'.'` '---'` `
'.`'--....--'`.'
`'--....--'`
"""
def main():
print(ascii_snek)
if __name__ == '__main__':
main() | python |
if __name__ == '__main__':
from pino.ino import Arduino, Comport, PinMode, PinState
from pino.config import Config
# from pino.ui.clap import PinoCli
from time import sleep
# com = Comport().set_baudrate(115200) \
# .set_port("/dev/ttyACM0") \
# .set_inofile("$HOME/Experimental/pino/example/proto.ino") \
# .deploy() \
# .connect(1.15)
# loop = 10
# interval = 0.5
config = Config("./example/config.yml")
# config = PinoCli().get_config()
com = Comport() \
.apply_settings(config.get_comport()) \
.deploy() \
.connect()
arduino = Arduino(com)
arduino.set_pinmode(13, PinMode.OUTPUT)
variables = config.get_experimental()
loop = variables.get("loop", 10)
interval = variables.get("interval", 0.5)
for _ in range(loop):
arduino.digital_write(13, PinState.HIGH)
sleep(interval)
arduino.digital_write(13, PinState.LOW)
sleep(interval)
| python |
""""""
from sys import stdout
from typing import List, Dict, Optional, Union, Tuple, Any, Iterable
import logging
import json
from itertools import product
import pandas as pd
from pandas import DataFrame
from shimoku_api_python.exceptions import ApiClientError
from .data_managing_api import DataValidation
from .explorer_api import (
BusinessExplorerApi, CreateExplorerAPI, CascadeExplorerAPI,
MultiCreateApi, ReportExplorerApi, DeleteExplorerApi, UniverseExplorerApi
)
from .data_managing_api import DataManagingApi
from .app_type_metadata_api import AppTypeMetadataApi
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logging.basicConfig(
stream=stdout,
datefmt='%Y-%m-%d %H:%M',
format='%(asctime)s | %(levelname)s | %(message)s'
)
class PlotAux:
_get_business = BusinessExplorerApi.get_business
_get_business_apps = BusinessExplorerApi.get_business_apps
get_business_apps = BusinessExplorerApi.get_business_apps
get_universe_businesses = UniverseExplorerApi.get_universe_businesses
get_report = ReportExplorerApi.get_report
_get_report_with_data = ReportExplorerApi._get_report_with_data
_update_report = ReportExplorerApi.update_report
update_report = ReportExplorerApi.update_report
get_report_data = ReportExplorerApi.get_report_data
_find_app_by_name_filter = CascadeExplorerAPI.find_app_by_name_filter
_find_app_type_by_name_filter = (
CascadeExplorerAPI.find_app_type_by_name_filter
)
# TODO this shit has to be fixed
get_universe_app_types = CascadeExplorerAPI.get_universe_app_types
_get_universe_app_types = CascadeExplorerAPI.get_universe_app_types
_get_app_reports = CascadeExplorerAPI.get_app_reports
_get_app_by_type = CascadeExplorerAPI.get_app_by_type
_get_app_by_name = CascadeExplorerAPI.get_app_by_name
_find_business_by_name_filter = CascadeExplorerAPI.find_business_by_name_filter
_create_report = CreateExplorerAPI.create_report
_create_app_type = CreateExplorerAPI.create_app_type
_create_normalized_name = CreateExplorerAPI._create_normalized_name
_create_key_name = CreateExplorerAPI._create_key_name
_create_app = CreateExplorerAPI.create_app
_create_business = CreateExplorerAPI.create_business
_get_app_type_by_name = AppTypeMetadataApi.get_app_type_by_name
_update_report_data = DataManagingApi.update_report_data
_append_report_data = DataManagingApi.append_report_data
_transform_report_data_to_chart_data = DataManagingApi._transform_report_data_to_chart_data
_is_report_data_empty = DataManagingApi._is_report_data_empty
_convert_dataframe_to_report_entry = DataManagingApi._convert_dataframe_to_report_entry
_create_report_entries = DataManagingApi._create_report_entries
_validate_table_data = DataValidation._validate_table_data
_validate_tree_data = DataValidation._validate_tree_data
_validate_data_is_pandarable = DataValidation._validate_data_is_pandarable
_create_app_type_and_app = MultiCreateApi.create_app_type_and_app
_delete_report = DeleteExplorerApi.delete_report
_delete_app = DeleteExplorerApi.delete_app
_delete_report_entries = DeleteExplorerApi.delete_report_entries
class PlotApi(PlotAux):
"""
"""
def __init__(self, api_client, **kwargs):
self.api_client = api_client
if kwargs.get('business_id'):
self.business_id: Optional[str] = kwargs['business_id']
else:
self.business_id: Optional[str] = None
@staticmethod
def _validate_filters(filters: Dict) -> None:
# Check the filters is built properly
try:
if filters.get('update_filter_type'):
cols: List[str] = ['row', 'column', 'filter_cols', 'update_filter_type']
assert sorted(list(filters.keys())) == sorted(cols)
else:
old_cols: List[str] = ['row', 'column', 'filter_cols']
new_cols: List[str] = ['order', 'filter_cols']
assert (
sorted(list(filters.keys())) == sorted(old_cols)
or
sorted(list(filters.keys())) == sorted(new_cols)
)
except AssertionError:
raise KeyError(
f'filters object must contain the keys'
f'"exists", "row", "column", "filter_cols" | '
f'Provided keys are: {list(filters.keys())}'
)
def _find_target_reports(
self, menu_path: str,
grid: Optional[str] = None,
order: Optional[int] = None,
component_type: Optional[str] = None,
by_component_type: bool = True,
) -> List[Dict]:
type_map = {
'alert_indicator': 'INDICATORS',
'indicator': 'INDICATORS',
'table': None,
'stockline': 'STOCKLINECHART',
'html': 'HTML',
'MULTIFILTER': 'MULTIFILTER',
}
if component_type in type_map.keys():
component_type = type_map[component_type]
else:
component_type = 'ECHARTS'
by_grid: bool = False
if grid:
by_grid = True
elif order is not None:
pass
else:
raise ValueError(
'Row and Column or Order must be specified'
)
name, path_name = self._clean_menu_path(menu_path=menu_path)
app: Dict = self._get_app_by_name(
business_id=self.business_id,
name=name,
)
app_id: str = app['id']
reports: List[Dict] = self._get_app_reports(
business_id=self.business_id, app_id=app_id,
)
# Delete specific components in a path / grid
# or all of them whatsoever is its component_type
if by_component_type:
target_reports: List[Dict] = [
report
for report in reports
if (
report['path'] == path_name
and report['grid'] == grid
and report['reportType'] == component_type
)
]
elif by_grid: # Whatever is the reportType delete it
target_reports: List[Dict] = [
report
for report in reports
if (
report['path'] == path_name
and report['grid'] == grid
)
]
else:
target_reports: List[Dict] = [
report
for report in reports
if (
report['path'] == path_name
and report['order'] == order
)
]
return target_reports
def _get_component_order(self, app_id: str, path_name: str) -> int:
"""Set an ascending report.Order to new path created
If a report in the same path exists take its order
otherwise find the higher report.Order and set it +1
as the report.Order of the new path
"""
reports_ = self._get_app_reports(
business_id=self.business_id,
app_id=app_id,
)
try:
order_temp = max([report['order'] for report in reports_])
except ValueError:
order_temp = 0
path_order: List[int] = [
report['order']
for report in reports_
if report['path'] == path_name
]
if path_order:
return min(path_order)
else:
return order_temp + 1
def _clean_menu_path(self, menu_path: str) -> Tuple[str, str]:
"""Break the menu path in the apptype or app normalizedName
and the path normalizedName if any"""
# remove empty spaces
menu_path: str = menu_path.strip()
# replace "_" for www protocol it is not good
menu_path = menu_path.replace('_', '-')
try:
assert len(menu_path.split('/')) <= 2 # we allow only one level of path
except AssertionError:
raise ValueError(
f'We only allow one subpath in your request | '
f'you introduced {menu_path} it should be maximum '
f'{"/".join(menu_path.split("/")[:1])}'
)
# Split AppType or App Normalized Name
normalized_name: str = menu_path.split('/')[0]
name: str = (
' '.join(normalized_name.split('-'))
)
try:
path_normalized_name: str = menu_path.split('/')[1]
path_name: str = (
' '.join(path_normalized_name.split('-'))
)
except IndexError:
path_name = None
return name, path_name
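    # Illustrative behaviour of _clean_menu_path (example values):
    #   'product-suite/results' -> ('product suite', 'results')
    #   'sales'                 -> ('sales', None)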
def _create_chart(
self, data: Union[str, DataFrame, List[Dict]],
menu_path: str, report_metadata: Dict,
order: Optional[int] = None,
rows_size: Optional[int] = None,
cols_size: Optional[int] = None,
padding: Optional[int] = None,
overwrite: bool = True,
real_time: bool = False,
) -> str:
"""
:param data:
:param menu_path:
:param report_metadata:
:param row: Only required for Overwrite
:param column: Only required for Overwrite
:param report_type: Only required for Overwrite
:param overwrite: Whether to Update (delete) any report in
the same menu_path and grid position or not
"""
if order is not None and rows_size and cols_size:
report_metadata['order'] = order
report_metadata['sizeRows'] = rows_size
report_metadata['sizeColumns'] = cols_size
if padding:
report_metadata['sizePadding'] = padding
name, path_name = self._clean_menu_path(menu_path=menu_path)
try:
d: Dict[str, Dict] = self._create_app_type_and_app(
business_id=self.business_id,
app_type_metadata={'name': name},
app_metadata={},
)
app: Dict = d['app']
except ApiClientError: # Business admin user
app: Dict = self._get_app_by_name(business_id=self.business_id, name=name)
if not app:
app: Dict = self._create_app(
business_id=self.business_id, name=name,
)
app_id: str = app['id']
if order is not None: # elif order fails when order = 0!
kwargs = {'order': order}
elif report_metadata.get('grid'):
kwargs = {'grid': report_metadata.get('grid'), 'order': 0}
else:
raise ValueError(
'Row and Column or Order must be specified to overwrite a report'
)
report_metadata.update({'path': path_name})
report_metadata.update(kwargs)
if report_metadata.get('dataFields'):
report_metadata['dataFields'] = (
json.dumps(report_metadata['dataFields'])
)
if overwrite:
self.delete(
menu_path=menu_path,
by_component_type=False,
**kwargs
)
report: Dict = self._create_report(
business_id=self.business_id,
app_id=app_id,
report_metadata=report_metadata,
real_time=real_time,
)
report_id: str = report['id']
try:
if data:
self._update_report_data(
business_id=self.business_id,
app_id=app_id,
report_id=report_id,
report_data=data,
)
except ValueError:
if not data.empty:
self._update_report_data(
business_id=self.business_id,
app_id=app_id,
report_id=report_id,
report_data=data,
)
return report_id
def _create_trend_chart(
self, echart_type: str,
data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str], # first layer
menu_path: str,
row: Optional[int] = None, # TODO to deprecate
column: Optional[int] = None, # TODO to deprecate
order: Optional[int] = None,
rows_size: Optional[int] = None,
cols_size: Optional[int] = None,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
overwrite: bool = True,
) -> str:
"""For Linechart, Barchart, Stocklinechart, Scatter chart, and alike
Example
-------------------
input
data:
val_a, val_b,
mon, 7,
tue, 10,
wed, 11,
thu, 20,
fri, 27,
x: 'val_a'
y: 'val_b'
menu_path: 'purchases/weekly'
row: 2
column: 1
title: 'Purchases by week'
color: None
option_modifications: {}
:param echart_type:
:param data:
:param x:
:param y:
        :param menu_path: it contains the `app_name/path` for instance "product-suite/results"
and it will use the AppType ProductSuite (if it does not it will create it)
then it will check if the App exists, if not create it and finally create
the report with the specific path "results"
:param row:
:param column:
:param title:
:param option_modifications:
:param filters: To create a filter for every specified column
"""
cols: List[str] = [x] + y
self._validate_table_data(data, elements=cols)
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[cols] # keep only x and y
df.rename(columns={x: 'xAxis'}, inplace=True)
# Default
option_modifications_temp = {
"legend": {"type": "scroll"},
"toolbox": {"orient": "vertical", "top": 20},
'series': {'smooth': True}
}
# TODO this will be done in FE
# https://trello.com/c/GXRYHEsO/
num_size: int = len(df[y].max())
if num_size > 6:
            margin: int = 12 * (num_size - 6)  # 12 pixels per extra character
option_modifications_temp["yAxis"] = {
"axisLabel": {"margin": margin},
}
if option_modifications:
if not option_modifications.get('legend'):
option_modifications.update({"legend": {"type": "scroll"}})
if not option_modifications.get('toolbox'):
option_modifications['toolbox'] = {"orient": "vertical", "top": 20}
elif not option_modifications.get('toolbox').get('orient'):
option_modifications['toolbox'].update({"orient": "vertical", "top": 20})
if not option_modifications.get('series'):
option_modifications['series'] = {'smooth': True}
elif not option_modifications.get('series').get('smooth'):
option_modifications['series'].update({'smooth': True})
else:
option_modifications = option_modifications_temp
# TODO we have two titles now, take a decision
# one in dataFields the other as field
data_fields: Dict = self._set_data_fields(
title='', subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
)
data_fields['type'] = echart_type
report_metadata: Dict = {
'reportType': 'ECHARTS',
'dataFields': data_fields,
'title': title,
}
if row and column:
report_metadata['grid'] = f'{row}, {column}'
if filters:
raise NotImplementedError
return self._create_chart(
data=df,
menu_path=menu_path, overwrite=overwrite,
report_metadata=report_metadata, order=order,
rows_size=rows_size, cols_size=cols_size, padding=padding,
)
def _create_multifilter_reports(
self, data: Union[str, DataFrame, List[Dict]], filters: Dict,
) -> Iterable:
"""
Create chunks of the data to create N reports for every filter combination
"""
df: DataFrame = self._validate_data_is_pandarable(data)
filter_cols: List[str] = filters['filter_cols']
select_filter: Dict[str, str] = {
v: f'Select{index + 1}'
for index, v in enumerate(filter_cols)
}
# Create all combinations
# https://stackoverflow.com/questions/18497604/combining-all-combinations-of-two-lists-into-a-dict-of-special-form
d: Dict = {}
for filter_name in filter_cols:
d[filter_name] = df[filter_name].unique().tolist()
filter_combinations = [
dict(zip((list(d.keys())), row))
for row in product(*list(d.values()))
]
for filter_combination in filter_combinations:
df_temp = df.copy()
filter_element: Dict = {}
for filter_, value in filter_combination.items():
filter_element[select_filter[filter_]] = value
df_temp = df_temp[df_temp[filter_] == value]
if df_temp.empty:
break
if df_temp.empty:
continue
# Get rid of NaN columns based on the filters
df_temp = df_temp.dropna(axis=1)
yield df_temp, filter_element
def _update_filter_report(
self, filter_row: Optional[int],
filter_column: Optional[int],
filter_order: Optional[int],
filter_elements: List,
menu_path: str,
update_type: str = 'concat',
) -> None:
""""""
filter_reports: List[Dict] = (
self._find_target_reports(
menu_path=menu_path,
grid=f'{filter_row}, {filter_column}',
order=filter_order,
component_type='MULTIFILTER',
by_component_type=True,
)
)
try:
assert len(filter_reports) == 1
filter_report = filter_reports[0]
except AssertionError:
raise ValueError(
f'The Filter you are defining does not exist in the specified position | '
f'{len(filter_reports)} | row {filter_row} | column {filter_column}'
)
filter_report_data: Dict = json.loads(
filter_report['chartData']
)
# Here we append old and new reportId
df_filter_report_data: pd.DataFrame = pd.DataFrame(filter_report_data)
df_filter_elements: pd.DataFrame = pd.DataFrame(filter_elements)
if update_type == 'concat':
df_chart_data: pd.DataFrame = pd.concat([
df_filter_report_data,
df_filter_elements,
])
elif update_type == 'append':
df_chart_data: pd.DataFrame = pd.merge(
df_filter_report_data, df_filter_elements,
how='left', on=[
c
for c in df_filter_report_data.columns
if 'Select' in c
], suffixes=('_old', '_new')
)
df_chart_data['reportId'] = (
df_chart_data['reportId_old']
+
df_chart_data['reportId_new']
)
df_chart_data.drop(
columns=['reportId_old', 'reportId_new'],
axis=1, inplace=True,
)
else:
raise ValueError(
f'update_type can only be "concat" or "append" | '
f'Value provided is {update_type}'
)
chart_data: List[Dict] = df_chart_data.to_dict(orient='records')
del df_chart_data
report_metadata: Dict = {
'reportType': 'MULTIFILTER',
'grid': f'{filter_row}, {filter_column}',
'title': '',
}
self._create_chart(
data=chart_data,
menu_path=menu_path,
report_metadata=report_metadata,
overwrite=True,
)
def _create_trend_charts_with_filters(
self, data: Union[str, DataFrame, List[Dict]],
filters: Dict, **kwargs,
):
""""""
filter_elements: List[Dict] = []
self._validate_filters(filters=filters)
# We are going to save all the reports one by one
for df_temp, filter_element in (
self._create_multifilter_reports(
data=data, filters=filters,
)
):
kwargs_: Dict = kwargs.copy()
cols: List[str] = df_temp.columns
kwargs_['y'] = [
value for value in kwargs_['y']
if value in cols
]
report_id = self._create_trend_chart(
data=df_temp, overwrite=False, **kwargs_,
)
filter_element['reportId'] = [report_id]
filter_elements.append(filter_element)
update_filter_type: Optional[str] = filters.get('update_filter_type')
filter_row: Optional[int] = filters.get('row')
filter_column: Optional[int] = filters.get('column')
filter_order: Optional[int] = filters.get('order')
if update_filter_type:
# concat is to add new filter options
# append is to add new reports to existing filter options
try:
assert update_filter_type in ['concat', 'append']
except AssertionError:
raise ValueError(
f'update_filter_type must be one of both: "concat" or "append" | '
f'Value provided: {update_filter_type}'
)
self._update_filter_report(
filter_row=filter_row,
filter_column=filter_column,
filter_order=filter_order,
filter_elements=filter_elements,
menu_path=kwargs['menu_path'],
update_type=update_filter_type,
)
else:
report_metadata: Dict = {
'reportType': 'MULTIFILTER',
'title': '',
}
if filter_row and filter_column:
report_metadata['grid'] = f'{filter_row}, {filter_column}'
elif filter_order is not None:
report_metadata['order'] = filter_order
else:
raise ValueError('Either row and column or order must be provided')
self._create_chart(
data=filter_elements,
menu_path=kwargs['menu_path'],
report_metadata=report_metadata,
order=filter_order,
overwrite=True,
)
def _create_trend_charts(
self, data: Union[str, DataFrame, List[Dict]],
filters: Optional[Dict], **kwargs,
):
"""
Example
-----------------
filters: Dict = {
'exists': False,
'row': 1, 'column': 1,
'filter_cols': [
'seccion', 'frecuencia', 'region',
],
}
"""
if filters:
self._create_trend_charts_with_filters(
data=data, filters=filters, **kwargs
)
else:
self._create_trend_chart(data=data, **kwargs)
def _set_data_fields(
self, title: str, subtitle: str,
x_axis_name: str, y_axis_name: str,
option_modifications: Dict,
) -> Dict:
""""""
chart_options: Dict = {
'title': title if title else "",
'subtitle': subtitle if subtitle else "",
'legend': True,
'tooltip': True,
'axisPointer': True,
'toolbox': {
'saveAsImage': True,
'restore': True,
'dataView': False,
'dataZoom': True,
'magicType': False,
},
'xAxis': {
'name': x_axis_name if x_axis_name else "",
'type': 'category',
},
'yAxis': {
'name': y_axis_name if y_axis_name else "",
'type': 'value',
},
'dataZoom': True,
'smooth': True,
}
data_fields: Dict = {
'chartOptions': chart_options,
}
if option_modifications:
for k, v in option_modifications.items():
if k == 'optionModifications':
data_fields[k] = v
else:
data_fields['chartOptions'][k] = v
return data_fields
def set_business(self, business_id: str):
""""""
# If the business id does not exists it raises an ApiClientError
_ = self._get_business(business_id)
self.business_id: str = business_id
def set_new_business(self, name: str):
""""""
business: Dict = self._create_business(name=name)
self.business_id: str = business['id']
def set_path_orders(
self, app_name: str, path_order: Dict[str, int],
) -> None:
"""
:param app_name: the App name
:param path_order: example {'test': 0, 'more-test': 1}
"""
app: Dict = self._get_app_by_name(
business_id=self.business_id,
name=app_name,
)
app_id = app['id']
reports = self._get_app_reports(
business_id=self.business_id,
app_id=app_id,
)
for report in reports:
path: str = report['path']
# TODO we need to use something else besides `order`
order: int = path_order.get(path)
if order:
self.update_report(
business_id=self.business_id,
app_id=app_id,
report_id=report['id'],
# TODO this needs to be replaced
report_metadata={'order': order},
)
def append_data_to_trend_chart(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str],
component_type: str,
menu_path: str,
row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None,
) -> None:
"""Append new data"""
allowed_components_type: List[str] = [
'line', 'bar', 'scatter', 'predictive_line',
]
if component_type not in allowed_components_type:
raise ValueError(
f'{component_type} not allowed | '
f'Must be one of {allowed_components_type}'
)
cols: List[str] = [x] + y
self._validate_table_data(data, elements=cols)
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[cols] # keep only x and y
df.rename(columns={x: 'xAxis'}, inplace=True)
if row and column:
target_reports: List[Dict] = (
self._find_target_reports(
menu_path=menu_path, grid=f'{row}, {column}',
component_type=component_type,
)
)
else:
target_reports: List[Dict] = (
self._find_target_reports(
menu_path=menu_path, order=order,
component_type=component_type,
)
)
# TODO for multifilter we will need to iterate on this
assert len(target_reports) == 1
for report in target_reports:
self._append_report_data(
business_id=self.business_id,
app_id=report['appId'],
report_id=report['id'],
report_data=df,
)
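# Hypothetical usage sketch: append new rows to an existing line chart located
# by its menu_path and order (data and column names are made up):
#
#   s.append_data_to_trend_chart(
#       data=new_rows_df,
#       x='date', y=['sales'],
#       component_type='line',
#       menu_path='my-app/my-path',
#       order=0,
#   )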
# TODO move part of it to get_reports_by_path_grid_and_type() in report_metadata_api.py
def delete(
self, menu_path: str,
grid: Optional[str] = None,
order: Optional[int] = None,
row: Optional[int] = None,
column: Optional[int] = None,
component_type: Optional[str] = None,
by_component_type: bool = True,
) -> None:
"""In cascade find the reports that match the query
and delete them all
"""
if grid:
kwargs = {'grid': grid}
elif order is not None:
kwargs = {'order': order}
elif row and column:
kwargs = {'grid': f'{row}, {column}'}
else:
raise ValueError('Either Row and Column or Order must be specified')
target_reports: List[Dict] = (
self._find_target_reports(
menu_path=menu_path,
component_type=component_type,
by_component_type=by_component_type,
**kwargs,
)
)
for report in target_reports:
self._delete_report(
business_id=self.business_id,
app_id=report['appId'],
report_id=report['id']
)
def delete_path(self, menu_path: str) -> None:
"""In cascade delete an App or Path and all the reports within it
If menu_path contains an "{App}/{Path}" then it removes the path
otherwise it removes the whole app
"""
name, path_name = self._clean_menu_path(menu_path=menu_path)
app: Dict = self._get_app_by_name(
business_id=self.business_id,
name=name,
)
if not app:
return
app_id: str = app['id']
reports: List[Dict] = self._get_app_reports(
business_id=self.business_id, app_id=app_id,
)
if path_name:
target_reports: List[Dict] = [
report
for report in reports
if report['path'] == path_name
]
else:
target_reports: List[Dict] = reports
for report in target_reports:
self._delete_report(
business_id=self.business_id,
app_id=app_id,
report_id=report['id']
)
else:
if '/' not in menu_path:
self._delete_app(
business_id=self.business_id,
app_id=app_id,
)
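# Hypothetical usage sketch (names are made up): delete a single path or a
# whole app, following the docstring convention above:
#
#   s.delete_path(menu_path='my-app/my-path')   # removes only that path
#   s.delete_path(menu_path='my-app')           # removes the whole app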
def table(
self, data: Union[str, DataFrame, List[Dict]],
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
title: Optional[str] = None, # second layer
filter_columns: Optional[List[str]] = None,
sort_table_by_col: Optional[Dict] = None,
horizontal_scrolling: bool = False,
overwrite: bool = True,
):
"""
{
"Product": null,
"Monetary importance": {
"field": "stringField2",
"filterBy": ["High", "Medium", "Low"]
"defaultOrder": "asc",
},
"Purchase soon": {
"field": "stringField3",
"filterBy": ["Yes", "No"]
},
}
"""
def _calculate_table_extra_map() -> Dict[str, str]:
"""
Example
----------------
input
filter_columns = ["Monetary importance"]
sort_table_by_col = {'date': 'asc'}
output
filters_map = {
'Monetary importance': 'stringField1',
'date': 'stringField2',
}
"""
filters_map: Dict[str, str] = {}
key_prefix_name: str = 'stringField'
if sort_table_by_col:
field_cols: List[str] = filter_columns + list(sort_table_by_col.keys())
else:
field_cols: List[str] = filter_columns
if field_cols:
for index, filter_column in enumerate(field_cols):
filters_map[filter_column] = f'{key_prefix_name}{index + 1}'
return filters_map
else:
return {}
def _calculate_table_filter_fields() -> Dict[str, List[str]]:
"""
Example
----------------
input
df
x, y, Monetary importance,
1, 2, high,
2, 2, high,
10, 9, low,
2, 1, high,
4, 6, medium,
filter_columns = ["Monetary importance"]
output
filter_fields = {
'Monetary importance': ['high', 'medium', 'low'],
}
"""
filter_fields_: Dict[str, List[str]] = {}
if filter_columns:
for filter_column in filter_columns:
values: List[str] = df[filter_column].unique().tolist()
try:
assert len(values) <= 20
except AssertionError:
raise ValueError(
f'At maximum a table may have 20 different values in a filter | '
f'You provided {len(values)} | '
f'You provided {values}'
)
filter_fields_[filter_column] = values
return filter_fields_
else:
return {}
def _calculate_table_data_fields() -> Dict:
"""
Example
-------------
input
df
x, y, Monetary importance,
1, 2, high,
2, 2, high,
10, 9, low,
2, 1, high,
4, 6, medium,
filters_map = {
'stringField1': 'Monetary importance',
}
filter_fields = {
'Monetary importance': ['high', 'medium', 'low'],
}
output
{
"Product": null,
"Monetary importance": {
"field": "stringField1",
"filterBy": ["high", "medium", "low"]
},
}
"""
data_fields: Dict = {}
cols: List[str] = df.columns.tolist()
if sort_table_by_col:
cols_to_sort_by: List[str] = list(sort_table_by_col.keys())
else:
cols_to_sort_by: List[str] = []
for col in cols:
if col in filter_fields:
data_fields[col] = {
'field': extra_map[col],
'filterBy': filter_fields[col],
}
else:
data_fields[col] = None
if col in cols_to_sort_by:
if data_fields:
if data_fields[col]:
data_fields[col].update(
{
'field': extra_map[col],
"defaultOrder": sort_table_by_col[col],
}
)
else:
data_fields[col] = {
'field': extra_map[col],
"defaultOrder": sort_table_by_col[col],
}
else:
data_fields[col] = {
'field': extra_map[col],
"defaultOrder": sort_table_by_col[col],
}
return json.dumps(data_fields)
df: DataFrame = self._validate_data_is_pandarable(data)
if sort_table_by_col:
try:
assert len(sort_table_by_col) == 1
except AssertionError:
raise ValueError(
f'Currently we can only sort tables by one column | '
f'You passed {len(sort_table_by_col)} columns'
)
# This is for the responsive part of the application
# by default 6 is the maximum for average desktop screensize
# beyond that it starts creating horizontal scrolling
if horizontal_scrolling:
if len(df.columns) > 6:
raise ValueError(
f'Tables with more than 6 columns are not allowed'
)
extra_map: Dict[str, str] = _calculate_table_extra_map()
filter_fields: Dict[str, List[str]] = _calculate_table_filter_fields()
name, path_name = self._clean_menu_path(menu_path=menu_path)
try:
d: Dict[str, Dict] = self._create_app_type_and_app(
business_id=self.business_id,
app_type_metadata={'name': name},
app_metadata={},
)
app: Dict = d['app']
except ApiClientError: # Business admin user
app: Dict = self._get_app_by_name(business_id=self.business_id, name=name)
if not app:
app: Dict = self._create_app(
business_id=self.business_id, name=name,
)
app_id: str = app['id']
order: int = self._get_component_order(
app_id=app_id, path_name=path_name,
)
report_metadata: Dict[str, Any] = {
'title': title,
'path': path_name,
'order': order,
'dataFields': _calculate_table_data_fields(),
}
if row and column:
report_metadata['grid'] = f'{row}, {column}'
if overwrite:
if not row and not column and not order:
raise ValueError(
'Row, Column or Order must be specified to overwrite a report'
)
if report_metadata.get('grid'):
self.delete(
menu_path=menu_path,
grid=report_metadata.get('grid'),
by_component_type=False,
)
else:
self.delete(
menu_path=menu_path,
order=order,
by_component_type=False,
)
report: Dict = self._create_report(
business_id=self.business_id,
app_id=app_id,
report_metadata=report_metadata,
)
report_id: str = report['id']
report_entry_filter_fields: Dict[str, List[str]] = {
extra_map[extra_name]: values
for extra_name, values in filter_fields.items()
}
# We do not allow NaN values for report Entry
df = df.fillna('')
report_entries: List[Dict] = (
self._convert_dataframe_to_report_entry(
df=df, filter_map=extra_map,
sort_table_by_col=sort_table_by_col,
filter_fields=report_entry_filter_fields
)
)
self._update_report_data(
business_id=self.business_id,
app_id=app_id,
report_id=report_id,
report_data=report_entries,
)
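# Hypothetical usage sketch for `table` (data, names and menu_path are made up):
#
#   s.table(
#       data=df,                      # any pandarable data
#       menu_path='my-app/my-path',
#       order=0,
#       filter_columns=['Monetary importance'],
#       sort_table_by_col={'date': 'asc'},
#   )
#
# Each filter column may hold at most 20 distinct values and only one sort
# column is supported, per the checks above.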
def html(
self, html: str, menu_path: str,
title: Optional[str] = None,
row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[str] = None,
):
report_metadata: Dict = {
'reportType': 'HTML',
'order': order if order else 1,
'title': title if title else '',
}
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=[{'value': html}],
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def iframe(
self, menu_path: str, url: str,
row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[str] = None,
title: Optional[str] = None,
height: Optional[int] = None,
):
report_metadata: Dict = {
'reportType': 'IFRAME',
'dataFields': {
'url': url,
'height': height if height else 600
},
'order': order if order else 1,
'title': title if title else '',
}
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=[],
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
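# Hypothetical usage sketch (URL and menu_path are made up):
#
#   s.iframe(
#       menu_path='my-app/my-path',
#       url='https://example.com/embedded-dashboard',
#       order=0, height=800,
#   )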
def bar(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str], # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a barchart
"""
# TODO this only works for single bar:
"""
'xAxis': {
'axisLabel': {
'inside': True,
'color': '#ffffff'
},
'axisTick': {
'show': False
}
},
'color': '#002FD8', # put multicolor
"""
option_modifications: Dict[str, Any] = {
'dataZoom': False,
'optionModifications': {
'series': {
'itemStyle': {
'borderRadius': [9, 9, 0, 0]
}
},
# 'color': '#002FD8', # TODO put multicolor
'emphasis': {
'itemStyle': {'color': '#29D86F'}
},
}
}
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=y,
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='bar',
)
)
def horizontal_barchart(
self, data: Union[str, DataFrame, List[Dict]],
x: List[str], y: str, # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a Horizontal barchart
https://echarts.apache.org/examples/en/editor.html?c=bar-y-category
"""
option_modifications: Dict[str, Any] = {
'dataZoom': False,
'xAxis': {'type': 'value'},
'yAxis': {'type': 'category'},
'optionModifications': {'yAxis': {'boundaryGap': True}},
}
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=y,
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='bar',
)
)
def zero_centered_barchart(
self, data: Union[str, DataFrame, List[Dict]],
x: List[str], y: str, # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a Horizontal barchart
https://echarts.apache.org/examples/en/editor.html?c=bar-y-category
"""
option_modifications: Dict[str, Any] = {
'dataZoom': False,
'xAxis': {
'type': 'value',
'position': 'top',
'splitLine': {
'lineStyle': {'type': 'dashed'}
}
},
'yAxis': {
'type': 'category',
'axisLine': {'show': False},
'axisLabel': {'show': False},
'axisTick': {'show': False},
'splitLine': {'show': False},
},
'optionModifications': {'yAxis': {'boundaryGap': True}},
}
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=y,
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='bar',
)
)
def line(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str], # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None, # third layer
):
""""""
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=y,
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='line',
)
)
def predictive_line(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str], # first layer
menu_path: str,
min_value_mark: Any, max_value_mark: Any,
color_mark: str = 'rgba(255, 173, 177, 0.4)',
row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
filters: Optional[Dict] = None,
):
"""
:param data:
:param x:
:param y:
:param menu_path:
:param row:
:param column:
:param min_value_mark:
:param max_value_mark:
:param color_mark: RGBA code
:param title:
:param x_axis_name:
:param y_axis_name:
:param filters:
"""
option_modifications = {
'optionModifications': {
'series': {
'smooth': True,
'markArea': {
'itemStyle': {
'color': color_mark
},
'data': [
[
{
'name': 'Prediction',
'xAxis': min_value_mark
},
{
'xAxis': max_value_mark
}
],
],
}
},
}
}
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=y,
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='line',
)
)
def line_with_confidence_area(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: str, # above_band_name: str, below_band_name: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
filters: Optional[Dict] = None,
):
"""
https://echarts.apache.org/examples/en/editor.html?c=line-stack
option = {
title: {
text: 'Stacked Line'
},
tooltip: {
trigger: 'axis'
},
legend: {
data: ['Email', 'Union Ads', 'Video Ads', 'Direct', 'Search Engine']
},
grid: {
left: '3%',
right: '4%',
bottom: '3%',
containLabel: true
},
toolbox: {
feature: {
saveAsImage: {}
}
},
xAxis: {
type: 'category',
boundaryGap: false,
data: ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
},
yAxis: {
type: 'value'
},
series: [
{
name: 'Email',
type: 'line',
stack: 'Total',
data: [120, 132, 101, 134, 90, 230, 210]
},
{
name: 'Union Ads',
type: 'line',
stack: 'Total',
lineStyle: {
opacity: 0
},
stack: 'confidence-band',
symbol: 'none',
data: [220, 182, 191, 234, 290, 330, 310]
},
{
name: 'Video Ads',
type: 'line',
stack: 'Total',
data: [150, 232, 201, 154, 190, 330, 410]
},
{
name: 'Direct',
type: 'line',
data: [320, 332, 301, 334, 390, 330, 320]
},
{
name: 'Search Engine',
type: 'line',
lineStyle: {
opacity: 0
},
areaStyle: {
color: '#ccc'
},
stack: 'confidence-band',
symbol: 'none',
data: [820, 932, 901, 934, 1290, 1330, 1320]
}
]
};
"""
option_modifications = {
'series': [{
'smooth': True,
'lineStyle': {
'opacity': 0
},
'areaStyle': {
'color': '#ccc'
},
'stack': 'confidence-band',
'symbol': 'none',
}, ],
}
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=[y],
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='line',
)
)
def scatter_with_confidence_area(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: str, # above_band_name: str, below_band_name: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
filters: Optional[Dict] = None,
):
"""
https://echarts.apache.org/examples/en/editor.html?c=line-stack
option = {
title: {
text: 'Stacked Line'
},
tooltip: {
trigger: 'axis'
},
legend: {
data: ['Email', 'Union Ads', 'Video Ads', 'Direct', 'Search Engine']
},
grid: {
left: '3%',
right: '4%',
bottom: '3%',
containLabel: true
},
toolbox: {
feature: {
saveAsImage: {}
}
},
xAxis: {
type: 'category',
boundaryGap: false,
data: ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
},
yAxis: {
type: 'value'
},
series: [
{
name: 'Email',
type: 'line',
stack: 'Total',
data: [120, 132, 101, 134, 90, 230, 210]
},
{
name: 'Union Ads',
type: 'line',
stack: 'Total',
lineStyle: {
opacity: 0
},
stack: 'confidence-band',
symbol: 'none',
data: [220, 182, 191, 234, 290, 330, 310]
},
{
name: 'Video Ads',
type: 'line',
stack: 'Total',
data: [150, 232, 201, 154, 190, 330, 410]
},
{
name: 'Direct',
type: 'line',
data: [320, 332, 301, 334, 390, 330, 320]
},
{
name: 'Search Engine',
type: 'line',
lineStyle: {
opacity: 0
},
areaStyle: {
color: '#ccc'
},
stack: 'confidence-band',
symbol: 'none',
data: [820, 932, 901, 934, 1290, 1330, 1320]
}
]
};
"""
option_modifications = {
'series': [{
'smooth': True,
'lineStyle': {
'opacity': 0
},
'areaStyle': {
'color': '#ccc'
},
'stack': 'confidence-band',
'symbol': 'none',
}, ],
}
return self._create_trend_chart(
data=data, x=x, y=[y], menu_path=menu_path,
row=row, column=column,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='scatter',
filters=filters,
)
def stockline(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str], # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
""""""
self._validate_table_data(data, elements=[x] + y)
df: DataFrame = self._validate_data_is_pandarable(data)
data_fields: Dict = {
"key": x,
"labels": {
"key": x_axis_name,
"value": y_axis_name,
"hideKey": False,
"hideValue": False
},
"values": y,
"dataZoomX": True,
"smooth": True,
"symbol": "circle",
}
report_metadata: Dict = {
'reportType': 'STOCKLINECHART',
'title': title if title else '',
'dataFields': data_fields,
}
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def scatter(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str], # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
""""""
try:
assert 2 <= len(y) <= 3
except Exception:
raise ValueError(f'y provided has {len(y)} dimensions | It must have 2 or 3')
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=y,
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='scatter',
)
)
def bubble_chart(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str], z: str, # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None, # to create filters
):
""""""
return self._create_trend_chart(
data=data, x=x, y=y, menu_path=menu_path,
row=row, column=column,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='scatter',
filters=filters,
)
def indicator(
self, data: Union[str, DataFrame, List[Dict]], value: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
target_path: Optional[str] = None,
set_title: Optional[str] = None,
header: Optional[str] = None,
footer: Optional[str] = None,
color: Optional[str] = None,
align: Optional[str] = None,
multi_column: int = 4,
real_time: bool = False,
):
"""
:param data:
:param value:
:param menu_path:
:param row:
:param column:
:param order:
:param rows_size:
:param cols_size:
:param padding:
:param target_path:
:param set_title: the title of the set of indicators
:param header:
:param footer:
:param color:
:param align: to align center, left or right a component
:param multi_column: how many indicators are allowed by column
:param real_time:
"""
mandatory_elements: List[str] = [
header, value, target_path,
]
mandatory_elements = [element for element in mandatory_elements if element]
extra_elements: List[str] = [footer, color, align]
extra_elements = [element for element in extra_elements if element]
self._validate_table_data(data, elements=mandatory_elements)
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[mandatory_elements + extra_elements] # keep only the required columns
cols_to_rename: Dict[str, str] = {
header: 'title',
footer: 'description',
value: 'value',
color: 'color',
align: 'align',
}
if target_path:
cols_to_rename.update({target_path: 'targetPath'})
cols_to_rename = {
col_to_rename: v
for col_to_rename, v in cols_to_rename.items()
if col_to_rename in mandatory_elements + extra_elements
}
df.rename(columns=cols_to_rename, inplace=True)
# map each user-provided column back to its canonical name before filling defaults
for extra_element in extra_elements:
    if extra_element == align:
        df['align'] = df['align'].fillna('right')
    elif extra_element == color:
        df['color'] = df['color'].fillna('black')
    elif extra_element == footer:
        df['description'] = df['description'].fillna('')
    else:
        raise ValueError(f'{extra_element} is not solved')
report_metadata: Dict = {
'reportType': 'INDICATORS',
'title': set_title if set_title else ''
}
# TODO align is not working well yet
# By default Shimoku assigns 4 indicators per row
# the following lines adjust it to the nature of the data
# and the multi_column variable
len_df: int = len(df)
columns: int = 4
if len_df < multi_column:
columns: int = len_df
elif multi_column != 4:
columns: int = multi_column
data_fields: Dict = {'dataFields': {'columns': columns}}
report_metadata.update(data_fields)
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
real_time=real_time,
)
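# Hypothetical usage sketch: one indicator per row of `data`, laid out in up to
# `multi_column` columns (field names and values are made up):
#
#   data = [
#       {'header': 'Revenue', 'kpi': 1200, 'note': 'monthly'},
#       {'header': 'Churn', 'kpi': '2%', 'note': 'monthly'},
#   ]
#   s.indicator(
#       data=data, value='kpi', header='header', footer='note',
#       menu_path='my-app/my-path', order=0,
#   )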
def alert_indicator(
self, data: Union[str, DataFrame, List[Dict]],
value: str, target_path: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
set_title: Optional[str] = None,
header: Optional[str] = None,
footer: Optional[str] = None,
color: Optional[str] = None,
multi_column: int = 4,
):
""""""
elements: List[str] = [header, footer, value, color, target_path]
elements = [element for element in elements if element]
self._validate_table_data(data, elements=elements)
return self.indicator(
data=data, value=value,
menu_path=menu_path, row=row, column=column,
order=order, cols_size=cols_size, rows_size=rows_size, padding=padding,
target_path=target_path,
set_title=set_title,
header=header,
footer=footer, color=color,
multi_column=multi_column,
)
def pie(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: str, # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a Piechart
"""
self._validate_table_data(data, elements=[x, y])
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[[x, y]] # keep only x and y
df.rename(columns={x: 'name', y: 'value'}, inplace=True)
report_metadata: Dict = {
'reportType': 'ECHARTS',
'dataFields': {'type': 'pie'},
'title': title,
}
if filters:
raise NotImplementedError
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def radar(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: List[str], # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
# subtitle: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a RADAR
"""
self._validate_table_data(data, elements=[x] + y)
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[[x] + y] # keep only x and y
df.rename(columns={x: 'name'}, inplace=True)
data_fields: Dict = {
'type': 'radar',
}
if option_modifications:
data_fields['optionModifications'] = option_modifications
report_metadata: Dict = {
'reportType': 'ECHARTS',
'dataFields': data_fields,
'title': title,
}
if filters:
raise NotImplementedError
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def tree(
self, data: Union[str, List[Dict]],
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a Tree
"""
self._validate_tree_data(data[0], vals=['name', 'value', 'children'])
report_metadata: Dict = {
'reportType': 'ECHARTS',
'dataFields': {'type': 'tree'},
'title': title,
}
if filters:
raise NotImplementedError
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def treemap(
self, data: Union[str, List[Dict]],
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a Treemap
"""
self._validate_tree_data(data[0], vals=['name', 'value', 'children'])
report_metadata: Dict = {
'title': title,
'reportType': 'ECHARTS',
'dataFields': {'type': 'treemap'},
}
if filters:
raise NotImplementedError
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def sunburst(
self, data: List[Dict],
name: str, children: str, value: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a Sunburst
"""
self._validate_tree_data(data[0], vals=['name', 'children'])
report_metadata: Dict = {
'reportType': 'ECHARTS',
'title': title,
'dataFields': {'type': 'sunburst'},
}
if filters:
raise NotImplementedError
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def candlestick(
self, data: Union[str, DataFrame, List[Dict]],
x: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
""""""
y = ['open', 'close', 'highest', 'lowest']
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=y,
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='candlestick',
)
)
def heatmap(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: str, value: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
""""""
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[[x, y, value]] # keep only x and y
df.rename(columns={x: 'xAxis', y: 'yAxis', value: 'value'}, inplace=True)
option_modifications: Dict = {
"toolbox": {"orient": "horizontal", "top": 0},
}
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=[y, value],
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='heatmap',
)
)
def cohort(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: str, value: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
filters: Optional[Dict] = None,
):
""""""
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[[x, y, value]] # keep only x and y
df.rename(columns={x: 'xAxis', y: 'yAxis', value: 'value'}, inplace=True)
option_modifications: Dict = {
"toolbox": {"orient": "horizontal", "top": 0},
"xAxis": {"axisLabel": {"margin": '10%'}},
'optionModifications': {
'grid': {
'bottom': '20%',
# 'top': '10%'
},
"visualMap": {
'calculable': True,
"inRange": {
"color": ['#cfb1ff', '#0000ff']
},
},
},
}
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=x, y=[y, value],
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='heatmap',
)
)
def predictive_cohort(self):
""""""
raise NotImplementedError
def sankey(
self, data: Union[str, DataFrame, List[Dict]],
source: str, target: str, value: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
""""""
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[[source, target, value]] # keep only x and y
df.rename(
columns={
source: 'source',
target: 'target',
value: 'value',
},
inplace=True,
)
report_metadata: Dict = {
'title': title,
'reportType': 'ECHARTS',
'dataFields': {'type': 'sankey'},
}
if filters:
raise NotImplementedError
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def funnel(
self, data: Union[str, DataFrame, List[Dict]],
name: str, value: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
""""""
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[[name, value]] # keep only x and y
df.rename(
columns={
name: 'name',
value: 'value',
},
inplace=True,
)
return self._create_trend_charts(
data=data, filters=filters,
**dict(
x=name, y=[value],
menu_path=menu_path, row=row, column=column,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='funnel',
)
)
def speed_gauge(
self, data: Union[str, DataFrame, List[Dict]],
name: str, value: str,
menu_path: str,
min: int, max: int,
row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
# subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
) -> str:
"""
option = {
series: [
{
type: 'gauge',
startAngle: 190,
endAngle: -10,
min: 0,
max: 80,
pointer: {
show: true
},
progress: {
show: true,
overlap: false,
roundCap: true,
clip: false,
itemStyle: {
borderWidth: 0,
borderColor: '#464646'
}
},
axisLine: {
lineStyle: {
width: 10
}
},
splitLine: {
show: true,
distance: 0,
length: 5
},
axisTick: {
show: true
},
axisLabel: {
show: true,
distance: 30
},
data: gaugeData,
title: {
fontSize: 14,
offsetCenter: ['0%', '30%'],
},
anchor: {
show: true,
showAbove: true,
size: 25,
itemStyle: {
borderWidth: 10
}
},
detail: {
bottom: 10,
width: 10,
height: 14,
fontSize: 14,
color: 'auto',
borderRadius: 20,
borderWidth: 0,
formatter: '{value}%',
offsetCenter: [0, '45%']
}
}
]
}
"""
self._validate_table_data(data, elements=[name, value])
df: DataFrame = self._validate_data_is_pandarable(data)
title: str = (
    title if title
    else f'{df[name].to_list()[0]}: {df[value].to_list()[0]}'
)
df = df[[name, value]] # keep only x and y
df.rename(
columns={
name: 'name',
value: 'value',
},
inplace=True,
)
data_fields: Dict = {
'type': 'gauge',
'optionModifications': {
'series': {
'startAngle': 190,
'endAngle': -10,
'min': min,
'max': max,
'pointer': {
'show': True
},
'progress': {
'show': True,
'overlap': False,
'roundCap': True,
'clip': False,
'itemStyle': {
'borderWidth': 0,
'borderColor': '#464646'
}
},
'axisLine': {
'lineStyle': {
'width': 10
}
},
'splitLine': {
'show': True,
'distance': 0,
'length': 5
},
'axisTick': {
'show': True,
},
'axisLabel': {
'show': True,
'distance': 30
},
'title': {
'show': False,
},
'anchor': {
'show': True,
'showAbove': True,
'size': 25,
'itemStyle': {
'borderWidth': 10
}
},
'detail': {
'show': False,
}
},
},
}
if option_modifications:
raise NotImplementedError
report_metadata: Dict = {
'reportType': 'ECHARTS',
'dataFields': data_fields,
'title': title,
}
if filters:
raise NotImplementedError
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def ring_gauge(
self, data: Union[str, DataFrame, List[Dict]],
name: str, value: str,
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
# subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
) -> str:
""""""
self._validate_table_data(data, elements=[name, value])
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[[name, value]] # keep only x and y
df.rename(
columns={
name: 'name',
value: 'value',
},
inplace=True,
)
data_fields: Dict = {
'type': 'gauge',
}
if option_modifications:
data_fields['optionModifications'] = option_modifications
report_metadata: Dict = {
'reportType': 'ECHARTS',
'dataFields': data_fields,
'title': title,
}
if filters:
raise NotImplementedError
if row and column:
report_metadata['grid'] = f'{row}, {column}'
return self._create_chart(
data=data,
menu_path=menu_path,
order=order, rows_size=rows_size, cols_size=cols_size, padding=padding,
report_metadata=report_metadata,
)
def themeriver(
self, data: Union[str, DataFrame, List[Dict]],
x: str, y: str, name: str, # first layer
menu_path: str, row: Optional[int] = None, column: Optional[int] = None, # report creation
order: Optional[int] = None, rows_size: Optional[int] = None, cols_size: int = 12,
padding: Optional[List[int]] = None,
title: Optional[str] = None, # second layer
subtitle: Optional[str] = None,
x_axis_name: Optional[str] = None,
y_axis_name: Optional[str] = None,
option_modifications: Optional[Dict] = None, # third layer
filters: Optional[Dict] = None,
):
"""Create a barchart
"""
df: DataFrame = self._validate_data_is_pandarable(data)
df = df[[x, y, name]] # keep only x and y
df.rename(
columns={
name: 'name',
y: 'value',
},
inplace=True,
)
y = [y, name]
self._create_trend_chart(
data=df, x=x, y=y, menu_path=menu_path,
row=row, column=column,
title=title, subtitle=subtitle,
x_axis_name=x_axis_name,
y_axis_name=y_axis_name,
option_modifications=option_modifications,
echart_type='themeriver',
filters=filters,
)
def stacked_barchart(self):
raise NotImplementedError
| python |
import numpy as np
from pyscf.pbc import scf as pbchf
from pyscf.pbc import dft as pbcdft
import ase
import ase.lattice
import ase.dft.kpoints
def run_hf(cell, exxdiv=None):
"""Run a gamma-point Hartree-Fock calculation."""
mf = pbchf.RHF(cell, exxdiv=exxdiv)
mf.verbose = 7
print(mf.scf())
return mf
def run_dft(cell):
"""Run a gamma-point DFT (LDA) calculation."""
mf = pbcdft.RKS(cell)
mf.xc = 'lda,vwn'
mf.verbose = 7
print(mf.scf())
return mf
def run_khf(cell, nmp=[1,1,1], gamma=False, kshift=np.zeros(3), exxdiv=None):
"""Run a k-point-sampling Hartree-Fock calculation."""
scaled_kpts = ase.dft.kpoints.monkhorst_pack(nmp)
if gamma:
for i in range(3):
if nmp[i] % 2 == 0:
scaled_kpts[:,i] += 0.5/nmp[i]
# Move first kpt to the Gamma pt
scaled_kpts -= scaled_kpts[0,:]
# Shift by kshift
scaled_kpts += kshift
# Put back in BZ
print "Before shifting back"
print scaled_kpts
scaled_kpts -= 1.0*np.round(scaled_kpts/1.0)
print "After shifting back"
print scaled_kpts
abs_kpts = cell.get_abs_kpts(scaled_kpts)
kmf = pbchf.KRHF(cell, abs_kpts, exxdiv=exxdiv)
kmf.verbose = 7
print(kmf.scf())
return kmf
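# Illustrative note (assumed example values): for nmp = [2, 2, 2] the
# Monkhorst-Pack grid is {-0.25, +0.25}^3 in scaled coordinates; with
# gamma=True each even axis is shifted by 0.5/nmp[i] = 0.25, giving
# {0.0, 0.5}^3, which contains the Gamma point. Any extra `kshift` is then
# applied on top and the grid is wrapped back into the first Brillouin zone.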
def run_kdft(cell, nmp=[1,1,1], gamma=False):
"""Run a k-point-sampling DFT (LDA) calculation."""
scaled_kpts = ase.dft.kpoints.monkhorst_pack(nmp)
if gamma:
for i in range(3):
if nmp[i] % 2 == 0:
scaled_kpts[:,i] += 0.5/nmp[i]
abs_kpts = cell.get_abs_kpts(scaled_kpts)
kmf = pbcdft.KRKS(cell, abs_kpts)
kmf.xc = 'lda,vwn'
kmf.verbose = 7
print(kmf.scf())
return kmf
if __name__ == '__main__':
from helpers import get_ase_diamond_primitive, build_cell
ase_atom = get_ase_diamond_primitive()
cell = build_cell(ase_atom)
run_hf(cell)
run_dft(cell)
run_khf(cell)
run_kdft(cell)
| python |
from django.core.management.base import BaseCommand, CommandError
from django.contrib.sites.models import Site
import urllib.request
import json
import pytz
import datetime
from django.utils import timezone
import html
import uuid
from time import sleep
from apps.pages.models import SuperintendentMessage
from apps.images.models import NewsThumbnail, ContentBanner
from apps.objects.models import User
import apps.common.functions
class Command(BaseCommand):
site = Site.objects.get(domain='www.slcschools.org')
host = 'https://www1.slcschools.org'
req = urllib.request.Request(host + '/rest/supermessages')
resp = urllib.request.urlopen(req,timeout=600)
supermessagejson = resp.read().decode('utf8')
supermessage = json.loads(supermessagejson)
webmaster = User.objects.get(username='[email protected]')
for article in supermessage:
sleep(2)
article_uuid = uuid.UUID(article['uuid'])
body = article['body']
summary = article['body_1']
author_date = timezone.datetime(int(article['created_1']),int(article['created_2']), int(article['created_3']), hour=int(article['created_4']), minute=int(article['created_5']), tzinfo=timezone.utc)
message, created = SuperintendentMessage.objects.get_or_create(uuid=article_uuid, defaults={'author_date':author_date,'deleted':0,'create_user':webmaster,'update_user':webmaster,'published':1,'url':'/tempnewsurl', 'site': site })
message.body=body
message.summary=summary
message.author_date=author_date
message.deleted=False
message.create_user=webmaster
message.update_user=webmaster
message.published=True
message.save()
print(message)
if article['field_article_image'] != '':
newsthumbimage, created = NewsThumbnail.objects.get_or_create(uuid=uuid.uuid5(message.uuid, article['field_article_image']), defaults={'related_node':message.page_node,'title':message.title + ' Thumbnail','deleted':0,'create_user':webmaster,'update_user':webmaster, 'published':1,'url':'/tempnewsthumburl','parent':message.page_node, 'site': site})
newsthumbimage.related_node = message.page_node
newsthumbimage.title = message.title + ' Thumbnail'
newsthumbimage.deleted = 0
newsthumbimage.create_user = webmaster
newsthumbimage.update_user = webmaster
newsthumbimage.published = 1
newsthumbimage.parent = message.page_node
newsthumbimage.alttext=article['field_article_image_2']
thumbreq = urllib.request.Request(article['field_article_image_1'])
thumbresp = urllib.request.urlopen(thumbreq,timeout=600)
imagedata = thumbresp
original_file, original_extension = apps.common.functions.findfileext_media(article['field_article_image_1'])
newsthumbimage.image_file.save(original_file + original_extension, imagedata)
newsthumbimage.save()
| python |
import os
import unittest
import sqlite3
import datetime
import pandas
from gtfspy.gtfs import GTFS
from gtfspy.filter import FilterExtract
from gtfspy.filter import remove_all_trips_fully_outside_buffer
from gtfspy.import_gtfs import import_gtfs
import hashlib
class TestGTFSFilter(unittest.TestCase):
def setUp(self):
self.gtfs_source_dir = os.path.join(os.path.dirname(__file__), "test_data")
self.gtfs_source_dir_filter_test = os.path.join(self.gtfs_source_dir, "filter_test_feed/")
# self.G = GTFS.from_directory_as_inmemory_db(self.gtfs_source_dir)
# some preparations:
self.fname = self.gtfs_source_dir + "/test_gtfs.sqlite"
self.fname_copy = self.gtfs_source_dir + "/test_gtfs_copy.sqlite"
self.fname_filter = self.gtfs_source_dir + "/test_gtfs_filter_test.sqlite"
self._remove_temporary_files()
self.assertFalse(os.path.exists(self.fname_copy))
conn = sqlite3.connect(self.fname)
import_gtfs(self.gtfs_source_dir, conn, preserve_connection=True, print_progress=False)
conn_filter = sqlite3.connect(self.fname_filter)
import_gtfs(self.gtfs_source_dir_filter_test, conn_filter, preserve_connection=True, print_progress=False)
self.G = GTFS(conn)
self.G_filter_test = GTFS(conn_filter)
self.hash_orig = hashlib.md5(open(self.fname, 'rb').read()).hexdigest()
def _remove_temporary_files(self):
for fn in [self.fname, self.fname_copy, self.fname_filter]:
if os.path.exists(fn) and os.path.isfile(fn):
os.remove(fn)
def tearDown(self):
self._remove_temporary_files()
def test_copy(self):
# do a simple copy
FilterExtract(self.G, self.fname_copy, update_metadata=False).create_filtered_copy()
# check that the copying has been properly performed:
hash_copy = hashlib.md5(open(self.fname_copy, 'rb').read()).hexdigest()
self.assertTrue(os.path.exists(self.fname_copy))
self.assertEqual(self.hash_orig, hash_copy)
def test_filter_change_metadata(self):
# A simple test that changing update_metadata to True, does update some stuff:
FilterExtract(self.G, self.fname_copy, update_metadata=True).create_filtered_copy()
# check that the copying has been properly performed:
hash_orig = hashlib.md5(open(self.fname, 'rb').read()).hexdigest()
hash_copy = hashlib.md5(open(self.fname_copy, 'rb').read()).hexdigest()
self.assertTrue(os.path.exists(self.fname_copy))
self.assertNotEqual(hash_orig, hash_copy)
os.remove(self.fname_copy)
def test_filter_by_agency(self):
FilterExtract(self.G, self.fname_copy, agency_ids_to_preserve=['DTA']).create_filtered_copy()
hash_copy = hashlib.md5(open(self.fname_copy, 'rb').read()).hexdigest()
self.assertNotEqual(self.hash_orig, hash_copy)
G_copy = GTFS(self.fname_copy)
agency_table = G_copy.get_table("agencies")
assert "EXA" not in agency_table['agency_id'].values, "EXA agency should not be preserved"
assert "DTA" in agency_table['agency_id'].values, "DTA agency should be preserved"
routes_table = G_copy.get_table("routes")
assert "EXR1" not in routes_table['route_id'].values, "EXR1 route_id should not be preserved"
assert "AB" in routes_table['route_id'].values, "AB route_id should be preserved"
trips_table = G_copy.get_table("trips")
assert "EXT1" not in trips_table['trip_id'].values, "EXR1 route_id should not be preserved"
assert "AB1" in trips_table['trip_id'].values, "AB1 route_id should be preserved"
calendar_table = G_copy.get_table("calendar")
assert "FULLW" in calendar_table['service_id'].values, "FULLW service_id should be preserved"
# stop_times
stop_times_table = G_copy.get_table("stop_times")
# 01:32:45 corresponds to 3600 + (32 * 60) + 45 [in day seconds]
assert 3600 + (32 * 60) + 45 not in stop_times_table['arr_time'].values
os.remove(self.fname_copy)
def test_filter_by_start_and_end_full_range(self):
# untested tables with filtering: stops, shapes
# test filtering by start and end time, copy full range
FilterExtract(self.G, self.fname_copy, start_date=u"2007-01-01", end_date=u"2011-01-01", update_metadata=False).create_filtered_copy()
G_copy = GTFS(self.fname_copy)
dsut_end = G_copy.get_day_start_ut("2010-12-31")
dsut_to_trip_I = G_copy.get_tripIs_within_range_by_dsut(dsut_end, dsut_end + 24 * 3600)
self.assertGreater(len(dsut_to_trip_I), 0)
os.remove(self.fname_copy)
def test_filter_end_date_not_included(self):
# the end date should not be included:
FilterExtract(self.G, self.fname_copy, start_date="2007-01-02", end_date="2010-12-31").create_filtered_copy()
hash_copy = hashlib.md5(open(self.fname_copy, 'rb').read()).hexdigest()
self.assertNotEqual(self.hash_orig, hash_copy)
G_copy = GTFS(self.fname_copy)
dsut_end = G_copy.get_day_start_ut("2010-12-31")
dsut_to_trip_I = G_copy.get_tripIs_within_range_by_dsut(dsut_end, dsut_end + 24 * 3600)
self.assertEqual(len(dsut_to_trip_I), 0)
calendar_copy = G_copy.get_table("calendar")
max_date_calendar = max([datetime.datetime.strptime(el, "%Y-%m-%d")
for el in calendar_copy["end_date"].values])
min_date_calendar = min([datetime.datetime.strptime(el, "%Y-%m-%d")
                         for el in calendar_copy["start_date"].values])
end_date_not_included = datetime.datetime.strptime("2010-12-31", "%Y-%m-%d")
start_date_not_included = datetime.datetime.strptime("2007-01-01", "%Y-%m-%d")
self.assertLess(max_date_calendar, end_date_not_included, msg="the last date should not be included in calendar")
self.assertLess(start_date_not_included, min_date_calendar)
os.remove(self.fname_copy)
def test_filter_spatially(self):
# test that the db is split by a given spatial boundary
FilterExtract(self.G, self.fname_copy, buffer_lat=36.914893, buffer_lon=-116.76821, buffer_distance_km=50).create_filtered_copy()
G_copy = GTFS(self.fname_copy)
stops_table = G_copy.get_table("stops")
self.assertNotIn("FUR_CREEK_RES", stops_table['stop_id'].values)
self.assertIn("AMV", stops_table['stop_id'].values)
self.assertEqual(len(stops_table['stop_id'].values), 8)
conn_copy = sqlite3.connect(self.fname_copy)
stop_ids_df = pandas.read_sql('SELECT stop_id from stop_times '
'left join stops '
'on stops.stop_I = stop_times.stop_I', conn_copy)
stop_ids = stop_ids_df["stop_id"].values
self.assertNotIn("FUR_CREEK_RES", stop_ids)
self.assertIn("AMV", stop_ids)
trips_table = G_copy.get_table("trips")
self.assertNotIn("BFC1", trips_table['trip_id'].values)
routes_table = G_copy.get_table("routes")
self.assertNotIn("BFC", routes_table['route_id'].values)
# cases:
# whole trip excluded
# whole route excluded
# whole agency excluded
# part of trip excluded
# part of route excluded
# part of agency excluded
# not removing stops from a trip that returns into area
# test higher-order removals
# stop A preserved
# -> stop B preserved
# -> stop C preserved
def test_filter_spatially_2(self):
n_rows_before = {
"routes": 4,
"stop_times": 14,
"trips": 4,
"stops": 6,
"shapes": 4
}
n_rows_after_1000 = { # within "soft buffer" in the feed data
"routes": 1,
"stop_times": 2,
"trips": 1,
"stops": 2,
"shapes": 0
}
n_rows_after_3000 = { # within "hard buffer" in the feed data
"routes": len(["t1", "t3", "t4"]),
"stop_times": 11,
"trips": 4,
"stops": len({"P", "H", "V", "L", "B"}),
# for some reason, the first "shapes": 4
}
paris_lat = 48.832781
paris_lon = 2.360734
SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL = \
"SELECT trips.trip_I, shape_id, min(shape_break) as min_shape_break, max(shape_break) as max_shape_break FROM trips, stop_times WHERE trips.trip_I=stop_times.trip_I GROUP BY trips.trip_I"
trip_min_max_shape_seqs = pandas.read_sql(SELECT_MIN_MAX_SHAPE_BREAKS_BY_TRIP_I_SQL, self.G_filter_test.conn)
for distance_km, n_rows_after in zip([1000, 3000], [n_rows_after_1000, n_rows_after_3000]):
try:
os.remove(self.fname_copy)
except FileNotFoundError:
pass
FilterExtract(self.G_filter_test,
self.fname_copy,
buffer_lat=paris_lat,
buffer_lon=paris_lon,
buffer_distance_km=distance_km).create_filtered_copy()
for table_name, n_rows in n_rows_before.items():
self.assertEqual(len(self.G_filter_test.get_table(table_name)), n_rows, "Row counts before differ in " + table_name + ", distance: " + str(distance_km))
G_copy = GTFS(self.fname_copy)
for table_name, n_rows in n_rows_after.items():
table = G_copy.get_table(table_name)
self.assertEqual(len(table), n_rows, "Row counts after differ in " + table_name + ", distance: " + str(distance_km) + "\n" + str(table))
# assert that stop_times are resequenced starting from one
counts = pandas.read_sql("SELECT count(*) FROM stop_times GROUP BY trip_I ORDER BY trip_I", G_copy.conn)
max_values = pandas.read_sql("SELECT max(seq) FROM stop_times GROUP BY trip_I ORDER BY trip_I", G_copy.conn)
self.assertTrue((counts.values == max_values.values).all())
def test_remove_all_trips_fully_outside_buffer(self):
stops = self.G.stops()
stop_1 = stops[stops['stop_I'] == 1]
n_trips_before = len(self.G.get_table("trips"))
remove_all_trips_fully_outside_buffer(self.G.conn, float(stop_1.lat), float(stop_1.lon), 100000)
self.assertEqual(len(self.G.get_table("trips")), n_trips_before)
# 0.002 (=max 2 meters from the stop), rounding errors can take place...
remove_all_trips_fully_outside_buffer(self.G.conn, float(stop_1.lat), float(stop_1.lon), 0.002)
self.assertEqual(len(self.G.get_table("trips")), 2) # value "2" comes from the data
| python |
#!/usr/bin/python3
#
# Elliptic Curve test code
#
# Copyright (c) 2018 Alexei A. Smekalkine <[email protected]>
#
# SPDX-License-Identifier: BSD-2-Clause
#
import time
import ec, ec_swj, ecdsa, ecgost
from field import Fp
count = 100
rounds = 0
test_ecdsa = True
test_ecgost = True
test_swj = True
o = ecgost.group ('ecgost-test-a')
d = 0x7A929ADE789BB9BE10ED359DD39A72C11B60961F49397EEE1D19CE9891EC3B28
e = 0x2DFBC1B372D89A1188C09C52E0EEC61FCE52032AB1022E8E67ECE6672B043EE5
k = 0x77105C9B20BCD3122823C8CF6FCC7B956DE33814E95B7FE64FED924594DCEAB3
def test (name, d, P):
start = time.perf_counter ()
for i in range (count):
Q = d * P
delta = (time.perf_counter () - start) / count * 1000
print ('{}: {:.2f} ms'.format (name, delta))
for i in range (rounds):
P = ec.Point (o.curve, o.x, o.y)
test ('swa ', d, P)
P = ec.SecurePoint (o.curve, o.x, o.y)
test ('swa-sec', d, P)
P = ec_swj.Point (o.curve, o.x, o.y)
test ('swj ', d, P)
P = ec_swj.SecurePoint (o.curve, o.x, o.y)
test ('swj-sec', d, P)
print ()
if test_ecdsa:
ecdsa.test ()
# GOST R 34.10-2012
if test_ecgost:
ecgost.test ()
if test_swj:
P = ec_swj.SecurePoint (o.curve, o.x, o.y)
Q = d * P
C = k * P
print ('P =', P)
print ('qP =', o.q * P)
print ('Q = dP =', Q)
print ('C = kP =', C)
| python |
from django.urls import path
from departamento.views import IndexView, DetalleDepartamentoView, Register, ExitoRegistro
from django.contrib.auth.decorators import login_required
app_name = "departamento"
urlpatterns = [
    path("", login_required(IndexView.as_view()), name="index_departamento"),
    path("crear/", login_required(Register.as_view()), name="crear_departamento"),
    path("editar/<pk>/", login_required(DetalleDepartamentoView.as_view()), name="editar_departamento"),
]
| python |
import hashlib
# https://en.wikipedia.org/wiki/Linear_congruential_generator
class lcg(object):
def __init__(self, seed=1):
self.state = seed
def _random(self):
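        # Classic ANSI C rand() constants: a = 1103515245, c = 12345, modulus 2**31.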
self.state = (self.state * 1103515245 + 12345) & 0x7FFFFFFF
return self.state
def random(self):
return self._random() / 2147483647. # 0x7FFFFFFF in decimal
def randint(self, a, b):
rng = self._random() % (b - a + 1)
return rng + a
def choice(self, seq):
return seq[int(self.random() * len(seq))] # raises IndexError if seq is empty
def shuffle(self, seq):
for i in reversed(range(1, len(seq))):
# pick an element in x[:i+1] with which to exchange x[i]
j = int(self.random() * (i+1))
seq[i], seq[j] = seq[j], seq[i]
def serialize(self):
return self.state
def deserialize(self, seed):
self.state = seed
class StaticRandom(object):
def __init__(self, seed=0):
self.set_seed(seed)
def set_seed(self, seed):
print('Setting Seed %d' % seed)
self.seed = seed
self.combat_random = lcg(seed)
self.growth_random = lcg(seed + 1)
self.other_random = lcg(seed + 2)
r = StaticRandom()
def strhash(s: str) -> int:
"""
Converts a string to a corresponding integer
"""
h = hashlib.md5(s.encode('utf-8'))
h = int(h.hexdigest(), base=16)
return h
def set_seed(seed):
r.set_seed(seed)
def get_combat():
return r.combat_random.randint(0, 99)
def get_growth():
return r.growth_random.randint(0, 99)
def get_levelup(u_id, lvl):
superseed = strhash(u_id) + lvl + r.seed
return lcg(superseed)
def get_combat_random_state():
return r.combat_random.state
def set_combat_random_state(state):
r.combat_random.state = state
def shuffle(lst):
r.other_random.shuffle(lst)
return lst
def get_other(a, b):
return r.other_random.randint(a, b)
def get_other_random_state():
return r.other_random.state
def set_other_random_state(state):
r.other_random.state = state
# === Returns the index chosen from a list of weights (weighted random choice)
def weighted_choice(choices, generator=None):
if generator:
rn = generator.randint(0, sum(choices) - 1)
else:
rn = r.growth_random.randint(0, sum(choices) - 1)
upto = 0
for index, w in enumerate(choices):
upto += w
if upto > rn:
return index
assert False, "Shouldn't get here"
if __name__ == '__main__':
print(get_combat())
state = r.combat_random.serialize()
print(get_combat())
print(get_combat())
r.combat_random.deserialize(state)
print(get_combat())
print(get_combat())
l = [1, 2, 3, 4, 5, 6, 7]
print(l)
shuffle(l)
print(l)
l = [1, 2, 3, 4, 5, 6, 7]
print(shuffle(l))
l = [1, 2, 3, 4, 5, 6, 7]
print(shuffle(l))
l = [1, 2, 3, 4, 5, 6, 7]
print(shuffle(l))
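    # Illustrative check of weighted_choice (added example): with weights [1, 2, 7],
    # index 2 should be returned most often.
    weights = [1, 2, 7]
    print([weighted_choice(weights) for _ in range(20)])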
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Aug 1 23:01:34 2017
@author: kt12
"""
# Reversed
""" If there is no __reversed__ function, Python will call __len__ and
__getitem__ which are used to define a sequence
"""
normal_list = [1,2,3,4,5]
class CustomSequence():
def __len__(self):
return 5
def __getitem__(self, index):
return "x{0}".format(index)
class FunkyBackwards():
def __reversed__(self):
return "BACKWARDS!"
for seq in normal_list, CustomSequence(), FunkyBackwards():
print("\n{}: ".format(seq.__class__.__name__), end="")
for item in reversed(seq):
print(item, end=", ")
# Enumerate
import sys
filename = sys.argv[1]
with open(filename) as file:
for index, line in enumerate(file):
print("{0}: {1}".format(index+1, line), end='')
# Context manager
""" __enter__ and __exit__ turn file object into a context manager
"""
class StringJoiner(list):
def __enter__(self):
return self
def __exit__(self, type, value, tb):
self.result = ''.join(self)
import random, string
with StringJoiner() as joiner:
for i in range(15):
joiner.append(random.choice(string.ascii_letters))
print(joiner.result)
# Default arguments
""" Default arguments are evaluated when the function is first interpreted,
not when it is called"""
number = 5
def funky_function(number=number):
print(number)
number = 6
funky_function(8)
funky_function()
print(number)
""" Default arguments are tricky with empty containers such as lists etc """
def hello(b=[]):
b.append('a')
print(b)
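# Calling it twice shows the pitfall: the same default list object is reused.
hello()  # prints ['a']
hello()  # prints ['a', 'a']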
# Variable argument lists
def get_pages(*links):
for link in links:
# download the link with urllib
print(link)
# kwargs are frequently used in configuration setups
class Options:
default_options = {
'port': 21,
'host': 'localhost',
'username': None,
'password': None,
'debug': False,
}
def __init__(self, **kwargs):
self.options = dict(Options.default_options)
self.options.update(kwargs)
def __getitem__(self, key):
return self.options[key]
options = Options(username='dusty', password='drowssap', debug=True)
options['debug']
options['port']
options['username']
import shutil
import os.path
def augmented_move(target_folder, *filenames,
verbose=False, **specific):
"""Move all filenames into the target_folder, allowing
specific treatment of certain files."""
def print_verbose(message, filename):
"""print the mesage only if verbose is enabled"""
if verbose:
print(message.fomat(filename))
for filename in filenames:
target_path = os.path.join(target_folder, filename)
if filename in specific:
if specific[filename] == 'ignore':
print_verbose("Ignoring {0}", filename)
elif specific[filename] == 'copy':
print_verbose("Copying {0}", filename)
shutil.copyfile(filename, target_path)
else:
print_verbose("Moving {0}", filename)
shutil.move(filename, target_path)
# Unpacking arguments
def show_args(arg1, arg2, arg3="THREE"):
print(arg1, arg2, arg3)
some_args = range(3)
more_args = {
"arg1": "ONE",
"arg2": "TWO"}
print("Unpacking a sequence:", end=" ")
show_args(*some_args)
print("Unpacking a dict:", end=" ")
show_args(**more_args)
# Functions are objects too
def my_function():
print("The Function Was Called")
my_function.description = 'A silly function'
def second_function():
print("The second was called")
second_function.description = "A sillier function."
def another_function(function):
print("The description:", end=" ")
print(function.description)
print("The name:", end=" ")
print(function.__name__)
print("The class:", end=" ")
print(function.__class__)
print("Now I'll call the function passed in")
function()
another_function(my_function)
another_function(second_function)
# Event driven timer
import datetime
import time
class TimedEvent:
def __init__(self, endtime, callback):
self.endtime = endtime
self.callback = callback
def ready(self):
return self.endtime <= datetime.datetime.now()
class Timer:
def __init__(self):
self.events = []
def call_after(self, delay, callback):
end_time = datetime.datetime.now() + \
datetime.timedelta(seconds=delay)
self.events.append(TimedEvent(end_time, callback))
def run(self):
while True:
ready_events = (e for e in self.events if e.ready())
for event in ready_events:
event.callback(self)
self.events.remove(event)
time.sleep(0.5)
# Set of callbacks to test the timer
# Need to be in the correct directory to use timer.py
from timer import Timer
import datetime
def format_time(message, *args):
now = datetime.datetime.now().strftime("%I:%M:%S")
print(message.format(*args, now=now))
def one(timer):
format_time("{now}: Called One")
def two(timer):
format_time("{now}: Called Two")
def three(timer):
format_time("{now}: Called Three")
class Repeater:
def __init__(self):
self.count = 0
def repeater(self, timer):
format_time("{now}: repeat {0}", self.count)
self.count += 1
timer.call_after(5, self.repeater)
timer = Timer()
timer.call_after(1, one)
timer.call_after(2, one)
timer.call_after(2, two)
timer.call_after(4, two)
timer.call_after(3, three)
timer.call_after(6, three)
repeater = Repeater()
timer.call_after(5, repeater.repeater)
format_time("{now}: Starting")
timer.run()
# Using functions as attributes
# Make Repeater callable
""" Only implement the __call__ function on an object if the object is
meant to be treated like a function """
class Repeater:
def __init__(self):
self.count = 0
def __call__(self, timer):
format_time("{now}: repeat {0}", self.count)
self.count += 1
timer.call_after(5, self)
timer = Timer()
timer.call_after(5, Repeater())
format_time("{now}: Starting")
timer.run()
# Mailing list manager
import smtplib
from email.mime.text import MIMEText
def send_email(subject, message, from_addr, *to_addrs,
host='localhost', port=1025, headers=None):
headers = {} if headers is None else headers
email = MIMEText(message)
email['Subject'] = subject
email['From'] = from_addr
for header, value in headers.items():
email[header] = value
sender = smtplib.SMTP(host, port)
for addr in to_addrs:
del email['To']
email['To'] = addr
sender.sendmail(from_addr, addr, email.as_string())
sender.quit()
send_email("A model subject", "The message contents",
"[email protected]", "[email protected]", "[email protected]")
# Email group mgmt system
# Store email addresses in a set container
from collections import defaultdict
class MailingList:
'''Manage groups of email addresses for sending emails'''
def __init__(self, data_file):
self.email_map = defaultdict(set)
self.data_file = data_file
def add_to_group(self, email, group):
self.email_map[email].add(group)
def emails_in_group(self, *groups):
groups = set(groups)
emails = set()
for e, g in self.email_map.items():
if g & groups: # short for g.intersection(groups)
                emails.add(e)
return emails
def send_mailing(self, subject, message, from_addr,
*groups, headers=None):
emails = self.emails_in_group(*groups)
send_email(subject, message, from_addr,
*emails, headers=headers)
# Save emails to data
def save(self):
with open(self.data_file, 'w') as file:
for email, groups in self.email_map.items():
file.write(
'{} {}\n'.format(email, ','.join(groups))
)
def load(self):
self.email_map = defaultdict(set)
try:
with open(self.data_file) as file:
for line in file:
email, groups = line.strip().split(' ')
groups = set(groups.split(','))
self.email_map[email] = groups
except IOError:
pass
# Support context manager
def __enter__(self):
self.load()
return self
    def __exit__(self, exc_type, exc_value, traceback):
self.save() | python |
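# A minimal usage sketch (hypothetical file name and addresses), relying on the
# context manager so that load() and save() are called automatically:
#
#     with MailingList('addresses.db') as ml:
#         ml.add_to_group('friend1@example.com', 'friends')
#         ml.send_mailing("What's up", "hey friends, how's it going",
#                         'me@example.com', 'friends')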
def mkdir_p(path):
"""Make directories for the full path, like mkdir -p."""
import os
import errno
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
mkdir_p('my_dir/hi/test')
print('Run, check via: find my_dir')
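# Note: on Python 3.2+, os.makedirs(path, exist_ok=True) achieves much the same effect.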
| python |
#!/usr/bin/env python
"""
patser_annotate_genome.py
search all chromosomes in a genome sequence file for specified matrix. Annotate hits as rows in sqlite table
"""
import sys
import sqlite3
import fasta_subseq_2
import patser_tools
#from multiprocessing import Pool
#from pprint import pprint as pp
class searchObj(object):
def __init__(self,
chrObj=None,
seq_name = None,
matrix=None,
matrix_name=None,
annotation=None):
self.chrObj = chrObj
self.seq = None
self.seq_name = seq_name
if chrObj:
self.seq = chrObj['sequence'][0:(chrObj['sequence'].length - 1)]
self.matrix = matrix
self.matrix_name = matrix_name
self.annotation = annotation
print "created %s object" % chrObj['ID']
def patSearch(self):
annot=None
try:
annot = patser_tools.makePatserAnnotation(sequence=self.seq,matrix=self.matrix)
except Exception:
print "warning: Exception for seq %s" % (self.seq)
annot = None
self.annotation = annot
def search(s):
print "starting search %s..." % s.chrObj["ID"]
s.patSearch()
if s.annotation:
print "search complete: %s" % s.chrObj["ID"]
return s
else:
print "search failed: %s!" % s.chrObj["ID"]
return None
def _main(args):
if len(args) != 4:
print "usage: patser_annotate_genome_noxgrid.py <machi_db> <genome_seq> <matrix_file> <matrix_name>"
sys.exit(0)
#processes = int(args[4])
# setup database
conn = sqlite3.connect(args[0])
conn.row_factory = sqlite3.Row
cur = conn.cursor()
cur.execute("""CREATE TABLE IF NOT EXISTS matrix (matrix_key INTEGER PRIMARY KEY,
name TEXT,
file TEXT)""")
cur.execute("""CREATE TABLE IF NOT EXISTS patser_hit (patser_hit_key INTEGER PRIMARY KEY,
chr TEXT,
start INT,
end INT,
strand INT,
score FLOAT,
pval FLOAT,
matrix_key INT)""")
cur.execute("""SELECT * FROM matrix WHERE file = ?""",(args[2],))
matrix_exists = cur.fetchall()
mtx_id = None
if not matrix_exists:
cur.execute("""INSERT INTO matrix VALUES (NULL,?,?)""", (args[3],args[2]))
mtx_id = cur.lastrowid
else:
mtx_id = matrix_exists[0]["matrix_key"]
# open fasta
fasta = fasta_subseq_2.FastaDB()
fasta.openFastaFile(args[1])
jobs = []
for (name,chr) in fasta.items():
srch = searchObj(chrObj=chr,
seq_name = name,
matrix=args[2],
matrix_name=args[3])
print srch
jobs.append(srch)
print jobs
#pool = Pool(processes)
#results = pool.imap(search,jobs)
for j in jobs:
s = search(j)
print "inserting %s, %i tags" % (s.seq_name, len(s.annotation.getAllFeatures()))
for feature in s.annotation.getAllFeatures():
print >> sys.stderr, feature
cur.execute("INSERT INTO patser_hit VALUES (NULL,?,?,?,?,?,?,?)",
(s.seq_name,feature.start,feature.end,feature.tags["strand"],feature.tags["score"],feature.tags["pval"],mtx_id))
conn.commit()
conn.close()
if __name__ == "__main__":
_main(sys.argv[1:])
| python |
acesso = 2502
while True:
    senha = int(input('enter your password: '))
    if senha == acesso:
        print('access granted')
        break
    else:
        print('access denied')
| python |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Settings/Master/Pokemon/CameraAttributes.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Settings/Master/Pokemon/CameraAttributes.proto',
package='POGOProtos.Settings.Master.Pokemon',
syntax='proto3',
serialized_pb=_b('\n9POGOProtos/Settings/Master/Pokemon/CameraAttributes.proto\x12\"POGOProtos.Settings.Master.Pokemon\"\x97\x01\n\x10\x43\x61meraAttributes\x12\x15\n\rdisk_radius_m\x18\x01 \x01(\x02\x12\x19\n\x11\x63ylinder_radius_m\x18\x02 \x01(\x02\x12\x19\n\x11\x63ylinder_height_m\x18\x03 \x01(\x02\x12\x19\n\x11\x63ylinder_ground_m\x18\x04 \x01(\x02\x12\x1b\n\x13shoulder_mode_scale\x18\x05 \x01(\x02\x62\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_CAMERAATTRIBUTES = _descriptor.Descriptor(
name='CameraAttributes',
full_name='POGOProtos.Settings.Master.Pokemon.CameraAttributes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='disk_radius_m', full_name='POGOProtos.Settings.Master.Pokemon.CameraAttributes.disk_radius_m', index=0,
number=1, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cylinder_radius_m', full_name='POGOProtos.Settings.Master.Pokemon.CameraAttributes.cylinder_radius_m', index=1,
number=2, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cylinder_height_m', full_name='POGOProtos.Settings.Master.Pokemon.CameraAttributes.cylinder_height_m', index=2,
number=3, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cylinder_ground_m', full_name='POGOProtos.Settings.Master.Pokemon.CameraAttributes.cylinder_ground_m', index=3,
number=4, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='shoulder_mode_scale', full_name='POGOProtos.Settings.Master.Pokemon.CameraAttributes.shoulder_mode_scale', index=4,
number=5, type=2, cpp_type=6, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=98,
serialized_end=249,
)
DESCRIPTOR.message_types_by_name['CameraAttributes'] = _CAMERAATTRIBUTES
CameraAttributes = _reflection.GeneratedProtocolMessageType('CameraAttributes', (_message.Message,), dict(
DESCRIPTOR = _CAMERAATTRIBUTES,
__module__ = 'POGOProtos.Settings.Master.Pokemon.CameraAttributes_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Settings.Master.Pokemon.CameraAttributes)
))
_sym_db.RegisterMessage(CameraAttributes)
# @@protoc_insertion_point(module_scope)
| python |
import os
import sys
import platform
import pandas as pd
import easygui as eg
mtst_programs = pd.read_csv('mtst_programs.csv')
current_os = platform.system()
# change default file open directory depending on operating system
if current_os == 'Windows':
default_dir = "C:\\Users\\%USERNAME%\\Documents\\{}*.xlsx"
elif current_os == 'Linux':
current_user = os.getlogin()
default_dir = "/home/"+current_user+"/Documents/{}*.xlsx"
else:
default_dir = "Documents/{}*.xlsx"
def program_select():
""" buttonbox for selecting whether you are creating applications for MTST or PHAS """
msg = """Select a graduate program..."""
title = 'Program Selection'
choices = ['MTST', 'PHAS', 'Cancel']
reply = eg.buttonbox(choices=choices, title=title, msg=msg)
if reply in ['MTST', 'PHAS']:
return reply
else: # if 'Cancel' is selected
raise RuntimeError
def get_spreadsheet(program: str) -> str:
""" Accepts the program name as a string and searches for the corresponding Excel workbook """
filetypes = [[".xls", ".xlsx", "Microsoft Excel workbooks"]]
if program == 'PHAS':
file = eg.fileopenbox(
default=default_dir.format(program),
filetypes=filetypes
)
if file:
return file
else: # if 'Cancel' is selected
raise RuntimeError
elif program == 'MTST':
# select which MTST subprograms we are creating discussions for
mult_choices = mtst_programs['Program Name'].to_list()
msg = 'Choose the MTST programs for which you would like to generate D2L Discussions:'
title = "Choose MTST Programs"
subprograms = eg.multchoicebox(msg=msg, title=title, choices=mult_choices)
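        # NOTE: the MTST branch currently stops after the subprogram selection;
        # no spreadsheet path is selected or returned, so callers receive None.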
def get_heading(df: pd.DataFrame, cols=None):
""" Accepts a pandas DataFrame and list of columns
and creates a new 'heading' column.
:df:
pandas DataFrame
:cols:
list of columns used to make the 'heading'
:returns: df with 'heading' column added
"""
# change missing entries to empty strings
df[cols] = df[cols].fillna('')
df['heading'] = df[cols].agg('-'.join, axis=1)
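    # e.g. cols=['Last Name', 'First Name'] (hypothetical column names) turns a row into 'Doe-Jane'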
return df
def make_discussions(path: str, program_name: str) -> int:
""" Accepts a Microsoft Excel file path, builds pandas DataFrame with the 'heading' column,
and creates the corresponding XML/HTML files for the D2L Discussions.
"""
df = pd.read_excel(path)
if program_name == 'PHAS':
cols = ''
elif program_name == 'MTST':
cols = ''
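        # NOTE: the cols values above are placeholders; get_heading() expects a list of column names to join.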
df = get_heading(df, cols=cols)
print(df['heading'].head())
# do the XML/HTML creation here
return 0
def splash_box():
""" Uses a ccbox to proceed with the creation of Discussions for D2L applications """
msg = """ This tool will help you create the necessary XML/HTML files\n
needed for uploading graduate applications as Discussions in\n
D2L. To continue, you need to first select the graduate program\n
for which you will upload applications. """
title = "Simple D2L MTST/PHAS Application Upload Helper"
if eg.ccbox(msg, title): # show a Continue/Cancel dialog
# user chose Continue
grad_program = program_select()
file_path = get_spreadsheet(grad_program)
if file_path:
return file_path
else:
sys.exit(0)
else: # user chose Cancel
sys.exit(0)
def main_loop():
""" Create the GUI and guide the user through the creation of D2L Discussions """
while True:
try:
file_path = splash_box()
return file_path
except RuntimeError: # this happens when the user cancels the program_select dialog
continue
| python |