import time

to_year = time.strftime("%Y", time.localtime())


# Return the workload info of a single user for a given year; takes a User instance.
def single_counselors_to_dict_with_user(item, year=to_year):
    lic = []
    for i in item.counselors_workload_user:
        if i.year == year:
            lic.append(
                {
                    'id': item.id,
                    'username': item.username,
                    'department': item.department,
                    'workload': item.workload,
                    'name': item.name,
                    'workNumber': item.work_number,
                    'jobCatecory': item.job_catecory,
                    'teacherTitle': item.teacher_title,
                    'teacherTitleNum': item.teacher_title_num,
                    'teacherPostion': item.teacher_postion,
                    'teacherPostionNum': item.teacher_postion_num,
                    'postionStatus': item.postion_status,
                    'notes': i.notes,
                    'totalPeople': i.total_people,
                    'beyondWorkloadPeople': i.beyond_workload_people,
                    'months': i.months,
                    'counselorsBeyondWorkload': i.counselors_beyond_workload,
                    'counselorsBeyondWorkloadScore': i.counselors_beyond_workload_score,
                    'counselorsBeyondWorkloadMoney': i.counselors_beyond_workload_money,
                    'studentsMoney': i.students_money,
                    'totalMoney': i.total_money,
                    'cId': i.id,
                    'year': i.year
                }
            )
    return lic


# Return the updated info of a single user; takes a Counselors instance.
def single_counselors_to_dict(item):
    lic = []
    lic.append(
        {
            'id': item.user.id,
            'username': item.user.username,
            'department': item.user.department,
            'workload': item.user.workload,
            'name': item.user.name,
            'workNumber': item.user.work_number,
            'jobCatecory': item.user.job_catecory,
            'teacherTitle': item.user.teacher_title,
            'teacherTitleNum': item.user.teacher_title_num,
            'teacherPostion': item.user.teacher_postion,
            'teacherPostionNum': item.user.teacher_postion_num,
            'postionStatus': item.user.postion_status,
            'notes': item.notes,
            'totalPeople': item.total_people,
            'beyondWorkloadPeople': item.beyond_workload_people,
            'months': item.months,
            'counselorsBeyondWorkload': item.counselors_beyond_workload,
            'counselorsBeyondWorkloadScore': item.counselors_beyond_workload_score,
            'counselorsBeyondWorkloadMoney': item.counselors_beyond_workload_money,
            'studentsMoney': item.students_money,
            'totalMoney': item.total_money,
            'cId': item.id,
            'year': item.year
        }
    )
    return lic


# Return info for an iterable of Counselors instances.
def counselors_to_dict(item):
    lic = []
    for i in item:
        lic.append(
            {
                'id': i.user.id,
                'username': i.user.username,
                'department': i.user.department,
                'workload': i.user.workload,
                'name': i.user.name,
                'workNumber': i.user.work_number,
                'jobCatecory': i.user.job_catecory,
                'teacherTitle': i.user.teacher_title,
                'teacherTitleNum': i.user.teacher_title_num,
                'teacherPostion': i.user.teacher_postion,
                'teacherPostionNum': i.user.teacher_postion_num,
                'postionStatus': i.user.postion_status,
                'notes': i.notes,
                'totalPeople': i.total_people,
                'beyondWorkloadPeople': i.beyond_workload_people,
                'months': i.months,
                'counselorsBeyondWorkload': i.counselors_beyond_workload,
                'counselorsBeyondWorkloadScore': i.counselors_beyond_workload_score,
                'counselorsBeyondWorkloadMoney': i.counselors_beyond_workload_money,
                'studentsMoney': i.students_money,
                'totalMoney': i.total_money,
                'cId': i.id,
                'year': i.year
            }
        )
    return lic
python
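The three serializers above repeat the same field mapping. A possible consolidation, sketched here only as a suggestion; the helper name `_counselor_payload` is hypothetical and not part of the original module:

# Hypothetical refactor: one payload builder shared by all three serializers.
def _counselor_payload(user, counselor):
    return {
        'id': user.id,
        'username': user.username,
        'department': user.department,
        'workload': user.workload,
        'name': user.name,
        'workNumber': user.work_number,
        'jobCatecory': user.job_catecory,
        'teacherTitle': user.teacher_title,
        'teacherTitleNum': user.teacher_title_num,
        'teacherPostion': user.teacher_postion,
        'teacherPostionNum': user.teacher_postion_num,
        'postionStatus': user.postion_status,
        'notes': counselor.notes,
        'totalPeople': counselor.total_people,
        'beyondWorkloadPeople': counselor.beyond_workload_people,
        'months': counselor.months,
        'counselorsBeyondWorkload': counselor.counselors_beyond_workload,
        'counselorsBeyondWorkloadScore': counselor.counselors_beyond_workload_score,
        'counselorsBeyondWorkloadMoney': counselor.counselors_beyond_workload_money,
        'studentsMoney': counselor.students_money,
        'totalMoney': counselor.total_money,
        'cId': counselor.id,
        'year': counselor.year,
    }


def single_counselors_to_dict_with_user(item, year=to_year):
    return [_counselor_payload(item, i)
            for i in item.counselors_workload_user if i.year == year]


def single_counselors_to_dict(item):
    return [_counselor_payload(item.user, item)]


def counselors_to_dict(items):
    return [_counselor_payload(i.user, i) for i in items]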
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import os
from typing import List

FPP_DIR = os.environ.get("FPP_DIR") or "~/.cache/fpp"
PICKLE_FILE = ".pickle"
SELECTION_PICKLE = ".selection.pickle"
OUTPUT_FILE = ".fpp.sh"
LOGGER_FILE = ".fpp.log"


def assert_dir_created() -> None:
    path = os.path.expanduser(FPP_DIR)
    if os.path.isdir(path):
        return
    try:
        os.makedirs(path)
    except OSError:
        if not os.path.isdir(path):
            raise


def get_pickle_file_path() -> str:
    assert_dir_created()
    return os.path.expanduser(os.path.join(FPP_DIR, PICKLE_FILE))


def get_selection_file_path() -> str:
    assert_dir_created()
    return os.path.expanduser(os.path.join(FPP_DIR, SELECTION_PICKLE))


def get_script_output_file_path() -> str:
    assert_dir_created()
    return os.path.expanduser(os.path.join(FPP_DIR, OUTPUT_FILE))


def get_logger_file_path() -> str:
    assert_dir_created()
    return os.path.expanduser(os.path.join(FPP_DIR, LOGGER_FILE))


def get_all_state_files() -> List[str]:
    # keep this up to date! Note that the script output path is included
    # here even though it also gets cleaned automatically.
    return [
        get_pickle_file_path(),
        get_selection_file_path(),
        get_logger_file_path(),
        get_script_output_file_path(),
    ]
python
import tkinter as tk

if __name__ == '__main__':
    master = tk.Tk()
    dict_entries = {
        'item 1': int,
        'item 2': str,
    }
    master.title('Hello World!')

    i = 0
    dict_tk_entry = {}
    for key, val in dict_entries.items():
        tk.Label(master, text=str(key)).grid(row=i)
        dict_tk_entry[key] = tk.Entry(master)
        dict_tk_entry[key].grid(row=i, column=1)
        i += 1

    # tk.Label(master, text="First").grid(row=0)
    # tk.Label(master, text="Second").grid(row=1)
    # e1 = tk.Entry(master)
    # e2 = tk.Entry(master)
    #
    # e1.grid(row=0, column=1)
    # e2.grid(row=1, column=1)

    master.mainloop()
python
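The type values in `dict_entries` (`int`, `str`) are declared but never used above. A minimal sketch of a submit callback that uses them to validate the entries; `on_submit` and the Submit button are hypothetical additions, placed before `master.mainloop()`:

def on_submit():
    # Cast each entry's text with the type declared in dict_entries.
    values = {}
    for key, caster in dict_entries.items():
        raw = dict_tk_entry[key].get()
        try:
            values[key] = caster(raw)
        except ValueError:
            print(f'invalid value for {key!r}: {raw!r}')
            return
    print(values)

tk.Button(master, text='Submit', command=on_submit).grid(row=i, column=1)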
import base64
import os
import numpy
import sys
import traceback
import queue
import threading
import json
from time import sleep

import pymysql

from database import Database


class MysqlDatabase(Database):

    def __init__(self, crypto, db_path='localhost', db_name='iotdatabase',
                 db_password='abc123', db_user='dbuser', flush=False,
                 dispatchsleep=0):
        Database.__init__(self, crypto, db_path=db_path, flush=flush)
        self.dispatchsleep = dispatchsleep
        self.db_name = db_name
        self.db_password = db_password
        self.db_user = db_user
        self.db = None
        self.dispatcher_db = None
        self.log_db = None

        self.insertion_queue = queue.Queue()
        self.dispatcher_thread = threading.Thread(
            target=self.dispatcher, args=())
        self.dispatcher_thread.start()

        self.log_queue = queue.Queue()
        self.log_thread = threading.Thread(target=self.log_dispatcher, args=())
        self.log_thread.start()

    def __del__(self):
        self.close_db_connection()

    def get_queue_data(self, q):
        # q.get(block=True)
        while 1:
            try:
                input_dict = q.get_nowait()
                return input_dict
            except queue.Empty:
                sleep(0.1)
                continue

    def close_db_connection(self, thread='main'):
        sleep(5 + 2 * self.dispatchsleep)  # wait for dispatchers to finish

        if thread == 'main':
            if not self.db is None:
                if self.db.open == 1:
                    self.db.commit()
                    self.db.close()
                self.db = None

        # can't manipulate these sub connections within the main thread
        if thread == 'log':
            if not self.log_db is None:
                if self.log_db.open == 1:
                    self.log_db.commit()
                    self.log_db.close()
                self.log_db = None

        # can't manipulate these sub connections within the main thread
        if thread == 'dispatcher':
            if not self.dispatcher_db is None:
                if self.dispatcher_db.open == 1:
                    self.dispatcher_db.commit()
                    self.dispatcher_db.close()
                self.dispatcher_db = None

    def open_db_connection(self):
        # idiom to be used for each database connection to use a new
        # database connection each time
        # if not (self.db is None):
        #     self.db.commit()
        #     self.db.close()
        #     self.db = None

        if (self.db is None):
            self.db = pymysql.connect(self.db_path, self.db_user,
                                      self.db_password, self.db_name,
                                      charset='utf8', use_unicode=True)
            self.init_database(self.db)

        if (self.dispatcher_db is None):
            self.dispatcher_db = pymysql.connect(self.db_path, self.db_user,
                                                 self.db_password, self.db_name,
                                                 charset='utf8', use_unicode=True)
            self.init_database(self.dispatcher_db)

        if (self.log_db is None):
            self.log_db = pymysql.connect(self.db_path, self.db_user,
                                          self.db_password, self.db_name,
                                          charset='utf8', use_unicode=True)
            self.init_database(self.log_db)

        return self.db

    def query(self, cursor, q, params=None, thread='main', executemany=False):
        done = False
        while not done:
            try:
                if not (params is None):
                    if executemany == False:
                        result = cursor.execute(q, params)
                    else:
                        result = cursor.executemany(q, params)
                else:
                    result = cursor.execute(q)

                if thread == 'main':
                    self.db.commit()
                elif thread == 'log':
                    self.log_db.commit()
                elif thread == 'dispatcher':
                    self.dispatcher_db.commit()

                done = True
            except:
                e = sys.exc_info()[0]
                print('*** Database error on query ' + str(q) +
                      ' from thread ' + thread + ', retrying: %s' % e)
                traceback.print_exception(*(sys.exc_info()))

                if thread == 'main':
                    self.db.close()
                    self.db = None
                    self.db = self.open_db_connection()
                    cursor = self.db.cursor()
                elif thread == 'log':
                    self.log_db.close()
                    self.log_db = None
                    self.open_db_connection()
                    cursor = self.log_db.cursor()
                elif thread == 'dispatcher':
                    self.dispatcher_db.close()
                    self.dispatcher_db = None
                    self.open_db_connection()
                    cursor = self.dispatcher_db.cursor()

        return result

    def init_database(self, conn):
        if self.flush == True:
            self.flush_database(conn)

        # http://stackoverflow.com/questions/6202726/writing-utf-8-string-to-mysql-with-python
        c = conn.cursor()
        # or utf8 or any other charset you want to handle
        c.execute("SET NAMES utf8mb4;")
        c.execute("SET CHARACTER SET utf8mb4;")  # same as above
        c.execute("SET character_set_connection=utf8mb4;")  # same as above

        # absolute_timestamp was DATETIME for more recent mysql
        c.execute('''CREATE TABLE IF NOT EXISTS IOTD(id INTEGER PRIMARY KEY AUTO_INCREMENT, absolute_timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, relative_timestamp BIGINT, interrogator_timestamp BIGINT, freeform VARBINARY(64535))''')
        # absolute_timestamp was DATETIME for more recent mysql
        c.execute('''CREATE TABLE IF NOT EXISTS AUDIT(id INTEGER PRIMARY KEY AUTO_INCREMENT, absolute_timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP, log TEXT)''')

        conn.commit()

    def flush_database(self, conn):
        c = conn.cursor()
        c.execute('''DROP TABLE IF EXISTS IOTD''')
        conn.commit()
        self.db_log('DROP IOTD')

    def flush_audit(self, conn):
        c = conn.cursor()
        c.execute('''DROP TABLE AUDIT''')
        conn.commit()

    def db_log(self, text):
        row = (text, )
        self.log_queue.put(row)

    # get max data time in the db
    def get_max_rel_time(self):
        conn = self.open_db_connection()
        c = conn.cursor()

        data = []
        result = self.query(c, "SELECT MAX(relative_timestamp) FROM IOTD")
        for row in c:
            d = dict()
            d['max_relative_timestamp'] = row[0]
            data.append(d)

        c.close()
        return data

    def log_dispatcher(self):
        self.open_db_connection()
        conn = self.log_db

        while 1:
            print('Getting data from log dispatcher...')
            row = self.get_queue_data(self.log_queue)
            print('Data received from log dispatcher...')

            c = conn.cursor()
            done = False
            while not done:
                try:
                    c.execute('INSERT INTO AUDIT (log) VALUES (%s)', row)
                    # conn.commit()  # don't bother committing here, let the
                    # main database thread commit
                    done = True
                except:
                    e = sys.exc_info()[0]
                    print('*** Database error on audit insertion, retrying: %s' % e)
                    traceback.print_exception(*(sys.exc_info()))
                    self.log_db.close()
                    self.log_db = None
                    self.open_db_connection()
                    c = self.log_db.cursor()
            c.close()
            sleep(1)

        self.close_db_connection(thread='log')

    def db_encrypt(self, s, counter):
        # counter = int(counter) % 10^16  # counter must be at most 16 digits
        # counter must be at most 16 digits, take rightmost 16 characters
        counter = int(str(counter)[-self.crypto.MAX_COUNTER_DIGITS:])

        if type(s) is int:
            val = str(s)
        elif type(s) is float:
            val = str(s)
        else:
            val = s

        aes = self.crypto.get_db_aes(self.db_password, counter)
        padded = self.crypto.pad(val)
        enc = aes.encrypt(padded)
        b64enc = base64.b64encode(enc)
        return b64enc

    def db_decrypt(self, s, counter):
        # counter = int(counter) % 10^16  # counter must be at most 16 digits
        # counter must be at most 16 digits, take rightmost 16 characters
        counter = int(str(counter)[-self.crypto.MAX_COUNTER_DIGITS:])

        aes = self.crypto.get_db_aes(self.db_password, counter)
        b64dec = base64.b64decode(s)
        dec = aes.decrypt(b64dec)
        unpaddec = self.crypto.unpad(dec)
        unpaddec = unpaddec.decode()
        return unpaddec

    # dispatch insertions from the queue so that the webserver can continue
    # receiving requests; log each request to the Audit
    def dispatcher(self):
        self.open_db_connection()
        conn = self.dispatcher_db

        while 1:
            queuelist = []

            print('Getting data from dispatcher...')
            input_dict = self.get_queue_data(self.insertion_queue)
            print('Data received from dispatcher...')

            queuelist.append(input_dict)
            # print input_dict

            # http://stackoverflow.com/questions/156360/get-all-items-from-thread-queue
            # while we're here, try to pick up any more items that were
            # inserted into the queue
            while 1:
                try:
                    input_dict = self.insertion_queue.get_nowait()
                    queuelist.append(input_dict)
                except queue.Empty:
                    break

            c = conn.cursor()

            rowlist = []
            for input_dict in queuelist:
                # the additional interrogatortime entries are for the
                # encryption function which requires a counter to synchronize
                # stream encryption and decryption; this time should be to the
                # microsecond (6 places after the decimal for seconds) to
                # ensure uniqueness, but can be less precise if the
                # interrogator resolution is lower. relative_time is expected
                # in microseconds, and both relativetime and interrogatortime
                # are assumed to be whole numbers (i.e. epoch time)
                relativetime = input_dict['relativetime']
                interrogatortime = input_dict['interrogatortime']
                freeform = input_dict['freeform']

                freeformjson = json.dumps(freeform)

                db_pw = input_dict['db_pw']
                self.db_password = db_pw

                row = (relativetime, interrogatortime,
                       self.db_encrypt(freeformjson, interrogatortime))
                rowlist.append(row)

            result = self.query(c, 'INSERT INTO IOTD (relative_timestamp, interrogator_timestamp, freeform) VALUES (%s,%s,%s)',
                                rowlist, thread='dispatcher', executemany=True)
            c.close()
            conn.commit()

            # if desired, sleep the dispatcher for a short time to queue up
            # some inserts and give the producer some CPU time
            if self.dispatchsleep > 0:
                sleep(self.dispatchsleep)

        self.close_db_connection(thread='dispatcher')

    # just insert into a queue for the dispatcher to insert in the background
    def insert_row(self, relativetime, interrogatortime, freeform, db_pw=''):
        input_dict = dict()  # read by the consumer dispatcher
        input_dict['relativetime'] = relativetime
        input_dict['interrogatortime'] = interrogatortime
        input_dict['freeform'] = freeform
        input_dict['db_pw'] = db_pw
        self.insertion_queue.put(input_dict)

    # log this request to the Audit
    def fetch_all(self, db_pw=''):
        self.db_password = db_pw

        data = []
        conn = self.open_db_connection()
        c = conn.cursor()

        result = self.query(c, "SELECT id, absolute_timestamp, relative_timestamp, interrogator_timestamp, freeform FROM IOTD ORDER BY interrogator_timestamp ASC")
        for row in c:
            self.db_log('FETCH ' + str(row[0]))
            d = dict()
            d['id'] = row[0]
            d['absolute_timestamp'] = row[1]
            d['relative_timestamp'] = row[2]
            d['interrogator_timestamp'] = row[3]
            d['freeform'] = self.db_decrypt(row[4], row[3])
            data.append(d)

        c.close()
        return data

    # log this request to the Audit
    def fetch_last_window(self, windowsize, db_pw=''):
        self.db_password = db_pw

        data = []
        conn = self.open_db_connection()
        c = conn.cursor()

        input = (windowsize, )
        result = self.query(c, "SELECT id, absolute_timestamp, relative_timestamp, interrogator_timestamp, freeform FROM IOTD ORDER BY interrogator_timestamp DESC LIMIT %s", input)
        for row in c:
            self.db_log('FETCH ' + str(row[0]))
            d = dict()
            d['id'] = row[0]
            d['absolute_timestamp'] = row[1]
            d['relative_timestamp'] = row[2]
            d['interrogator_timestamp'] = row[3]
            d['freeform'] = self.db_decrypt(row[4], row[3])
            data.append(d)

        c.close()
        return data

    # log this request to the Audit
    def fetch_since(self, since, db_pw=''):
        self.db_password = db_pw

        data = []
        conn = self.open_db_connection()
        c = conn.cursor()

        input = (since,)
        result = self.query(c, "SELECT id, absolute_timestamp, relative_timestamp, interrogator_timestamp, freeform FROM IOTD WHERE relative_timestamp >= %s ORDER BY interrogator_timestamp ASC", input)
        for row in c:
            self.db_log('FETCH ' + str(row[0]))
            d = dict()
            d['id'] = row[0]
            d['absolute_timestamp'] = row[1]
            d['relative_timestamp'] = row[2]
            d['interrogator_timestamp'] = row[3]
            d['freeform'] = self.db_decrypt(row[4], row[3])
            data.append(d)

        c.close()
        return data

    # log this request to the Audit
    def fetch_between_window(self, start, end, db_pw=''):
        self.db_password = db_pw

        data = []
        conn = self.open_db_connection()
        c = conn.cursor()

        input = (start, end)
        result = self.query(c, "SELECT id, absolute_timestamp, relative_timestamp, interrogator_timestamp, freeform FROM IOTD WHERE relative_timestamp >= %s AND relative_timestamp <= %s ORDER BY interrogator_timestamp ASC", input)
        for row in c:
            self.db_log('FETCH ' + str(row[0]))
            d = dict()
            d['id'] = row[0]
            d['absolute_timestamp'] = row[1]
            d['relative_timestamp'] = row[2]
            d['interrogator_timestamp'] = row[3]
            d['freeform'] = self.db_decrypt(row[4], row[3])
            data.append(d)

        c.close()
        return data

    # log this request to the Audit
    def fetch_last_n_sec(self, n, db_pw=''):
        self.db_password = db_pw

        data = []
        conn = self.open_db_connection()
        c = conn.cursor()

        self.query(c, "SELECT id, absolute_timestamp, relative_timestamp, interrogator_timestamp, freeform FROM IOTD WHERE absolute_timestamp >= NOW() - INTERVAL %s SECOND ORDER BY interrogator_timestamp ASC", (n,))
        for row in c:
            self.db_log('FETCH ' + str(row[0]))
            d = dict()
            d['id'] = row[0]
            d['absolute_timestamp'] = row[1]
            d['relative_timestamp'] = row[2]
            d['interrogator_timestamp'] = row[3]
            d['freeform'] = self.db_decrypt(row[4], row[3])
            data.append(d)

        c.close()
        return data

    def get_audit(self):
        data = []
        conn = self.open_db_connection()
        c = conn.cursor()

        self.db_log('FETCH AUDIT')
        result = self.query(c, "SELECT id, absolute_timestamp, log FROM AUDIT")
        for row in c:
            d = dict()
            d['id'] = row[0]
            d['absolute_timestamp'] = row[1]
            d['log'] = row[2]
            data.append(d)

        c.close()
        return data

# References:
# http://mysql-python.sourceforge.net/MySQLdb.html#mysqldb
python
import unittest
import asyncio
from pathlib import Path
import glob
import datetime
import os

# Third Party Imports
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import pandas as pd

# Local Imports
from services.workfront import WorkfrontAPI
from services.settings import ENVSettings, DatabaseConfig, Defaults
from services.db_util import DBU


class TestWorkfrontInterface(unittest.TestCase):

    def setUp(self):
        self._helper = DBU()
        self.files_processed = False
        self.init_db_dir_path = ""
        self.database = ""
        self.engine = ""
        self.Base = declarative_base()
        self.metadata = self.Base.metadata
        self.session = scoped_session(sessionmaker())

    def tearDown(self):
        if self.database:
            os.remove(self.database)

    def test_set_up_database_no_sqlite_connection_string(self):
        # Import database settings
        self.database_settings = DatabaseConfig()

        # Check if user passed a connection string
        if not self.database_settings.sqlite_connection_string:
            if os.name == 'nt':
                self.init_db_dir_path = os.path.join(os.environ['temp'], 'workfront')
            else:
                self.init_db_dir_path = os.path.join(os.environ['TMPDIR'], 'workfront')

            # Create a workfront temp dir for the database
            if not os.path.exists(self.init_db_dir_path):
                os.makedirs(self.init_db_dir_path)

            self.database = os.path.join(self.init_db_dir_path, 'test.db')
            self.engine = create_engine(f"sqlite:///{self.database}", echo=False)

            self.session.remove()
            self.session.configure(bind=self.engine, autoflush=False,
                                   expire_on_commit=False)
            self.metadata.drop_all(self.engine)
            self.metadata.create_all(self.engine)

            self.assertTrue(os.path.isfile(self.database))

    def test_settings(self):
        filter = Defaults("hour").obj_filter
        self.assertIsNotNone(filter)
        hour_options = {
            "entryDate": "2019-07-01",
            "entryDate_Mod": "between",
            "entryDate_Range": "$$TODAYb"
        }
        self.assertEqual(filter, hour_options)

    def test_hours_save(self):
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)

        # Accepts parameters for fields and filter option for all data
        hour_api = WorkfrontAPI(objCode='hour')
        hours = hour_api.return_all()
        self.assertEqual(len(hours), hour_api.count_of_objects)


if __name__ == '__main__':
    unittest.main()
python
from threading import Timer


class Delayed(object):
    """ Does a delayed Lua function call """

    def __init__(self, seconds, lua_function, lua, start=True):
        """
        :param seconds: Number of seconds to wait
        :param lua_function: The Lua function to execute
        :param lua: The Lua runtime to execute in
        :param start: Autostart the timer?
        :return:
        """
        self.seconds = seconds
        self.lua_function = lua_function
        self.lua = lua
        self.timer = None
        if start:
            self.start()

    def start(self):
        """
        Start the timer
        :return:
        """
        self.timer = Timer(self.seconds, self._timer_callback)
        self.timer.start()

    def cancel(self):
        """
        Stop/cancel the timer
        :return:
        """
        self.timer.cancel()

    def _timer_callback(self):
        """
        Called when the time has elapsed, calls the Lua function
        :return:
        """
        call_lua = self.lua.eval("""
        function (func)
            func()
        end
        """)
        call_lua(self.lua_function)


class Interval(Delayed):
    """ Periodically call a Lua function """

    def _timer_callback(self):
        """
        Called when the time has elapsed, calls the Lua function,
        reschedules timer
        :return:
        """
        try:
            super(Interval, self)._timer_callback()
        finally:
            self.start()
python
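A minimal usage sketch for the timer classes above, assuming the runtime comes from the `lupa` package (the snippet itself never names a specific Lua binding):

from lupa import LuaRuntime

lua = LuaRuntime()
tick = lua.eval('function() print("tick") end')

Delayed(2, tick, lua)            # fire once after 2 seconds
ticker = Interval(5, tick, lua)  # fire every 5 seconds until cancelled
# ... later ...
ticker.cancel()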
import argparse

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import training
from chainer.training import extensions

from einconv import Einconv

NUM_CLASSES = 10


# Network definition
class MLP(chainer.Chain):

    def __init__(self, graph, shapes, initializer=None):
        super(MLP, self).__init__()
        self.image_size = shapes['image']
        self.shapes = shapes
        with self.init_scope():
            _shapes = shapes.copy()
            _shapes['inch'] = shapes['channels'][0]
            _shapes['outch'] = shapes['channels'][1]
            self.l1 = Einconv(graph, _shapes, initializer)

            _shapes = shapes.copy()
            _shapes['inch'] = shapes['channels'][1]
            _shapes['outch'] = shapes['channels'][2]
            _shapes['image'] = [n // 2 for n in shapes['image']]
            self.l2 = Einconv(graph, _shapes, initializer)

            # n_units -> n_out
            self.l3 = L.Linear(None, shapes['channels'][-1], initializer)

    def forward(self, x):
        shape = [-1] + [1] + self.image_size
        x = F.reshape(x, shape)
        h1 = self.l1(x)
        h1 = F.max_pooling_2d(h1, ksize=2, stride=2)
        h2 = self.l2(h1)
        h2 = F.max_pooling_2d(h2, ksize=2, stride=2)
        h3 = self.l3(h2)
        return h3


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--batchsize', '-b', type=int, default=128,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--channels', nargs='+', type=int, default=[64, 128],
                        help='Numbers of input/output channels')
    parser.add_argument('--filter_size', type=int, default=3)
    parser.add_argument('--weight_decay', type=float, default=1e-6)
    parser.add_argument('--graph', default='1_2_1_1_1_1_1')
    args = parser.parse_args()

    print(args)
    print('GPU: {}'.format(args.gpu))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    shapes = {'channels': [1] + args.channels + [NUM_CLASSES],
              'image': [28, 28],
              'filter': [args.filter_size] * 2,
              'inner_exp': 1,
              'batch': -1}
    model = L.Classifier(MLP(args.graph, shapes))
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.backends.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam(weight_decay_rate=args.weight_decay)
    optimizer.setup(model)

    # Load the Fashion-MNIST dataset
    train, test = chainer.datasets.get_fashion_mnist()

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.updaters.StandardUpdater(
        train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    # Write a log of evaluation statistics for each epoch
    trainer.extend(extensions.LogReport())

    # Print selected entries of the log to stdout
    # Here "main" refers to the target link of the "main" optimizer again, and
    # "validation" refers to the default name of the Evaluator extension.
    # Entries other than 'epoch' are reported by the Classifier link, called by
    # either the updater or the evaluator.
    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    # Run the training
    trainer.run()


if __name__ == '__main__':
    main()
python
""" The :mod:`sklearn.preprocessing` module includes scaling, centering, normalization, binarization methods. """ from ._function_transformer import FunctionTransformer from ._data import Binarizer from ._data import KernelCenterer from ._data import MinMaxScaler from ._data import MaxAbsScaler from ._data import Normalizer from ._data import RobustScaler from ._data import StandardScaler from ._data import QuantileTransformer from ._data import add_dummy_feature from ._data import binarize from ._data import normalize from ._data import scale from ._data import robust_scale from ._data import maxabs_scale from ._data import minmax_scale from ._data import quantile_transform from ._data import power_transform from ._data import PowerTransformer from ._data import PolynomialFeatures from ._encoders import OneHotEncoder from ._encoders import OrdinalEncoder from ._label import label_binarize from ._label import LabelBinarizer from ._label import LabelEncoder from ._label import MultiLabelBinarizer from ._discretization import KBinsDiscretizer __all__ = [ 'Binarizer', 'FunctionTransformer', 'KBinsDiscretizer', 'KernelCenterer', 'LabelBinarizer', 'LabelEncoder', 'MultiLabelBinarizer', 'MinMaxScaler', 'MaxAbsScaler', 'QuantileTransformer', 'Normalizer', 'OneHotEncoder', 'OrdinalEncoder', 'PowerTransformer', 'RobustScaler', 'StandardScaler', 'add_dummy_feature', 'PolynomialFeatures', 'binarize', 'normalize', 'scale', 'robust_scale', 'maxabs_scale', 'minmax_scale', 'label_binarize', 'quantile_transform', 'power_transform', ]
python
import Foundation
import objc
from PyObjCTools.TestSupport import TestCase, min_os_level


class TestNSRegularExpression(TestCase):
    @min_os_level("10.7")
    def testConstants10_7(self):
        self.assertEqual(Foundation.NSRegularExpressionCaseInsensitive, 1 << 0)
        self.assertEqual(
            Foundation.NSRegularExpressionAllowCommentsAndWhitespace, 1 << 1
        )
        self.assertEqual(Foundation.NSRegularExpressionIgnoreMetacharacters, 1 << 2)
        self.assertEqual(Foundation.NSRegularExpressionDotMatchesLineSeparators, 1 << 3)
        self.assertEqual(Foundation.NSRegularExpressionAnchorsMatchLines, 1 << 4)
        self.assertEqual(Foundation.NSRegularExpressionUseUnixLineSeparators, 1 << 5)
        self.assertEqual(Foundation.NSRegularExpressionUseUnicodeWordBoundaries, 1 << 6)

        self.assertEqual(Foundation.NSMatchingReportProgress, 1 << 0)
        self.assertEqual(Foundation.NSMatchingReportCompletion, 1 << 1)
        self.assertEqual(Foundation.NSMatchingAnchored, 1 << 2)
        self.assertEqual(Foundation.NSMatchingWithTransparentBounds, 1 << 3)
        self.assertEqual(Foundation.NSMatchingWithoutAnchoringBounds, 1 << 4)

        self.assertEqual(Foundation.NSMatchingProgress, 1 << 0)
        self.assertEqual(Foundation.NSMatchingCompleted, 1 << 1)
        self.assertEqual(Foundation.NSMatchingHitEnd, 1 << 2)
        self.assertEqual(Foundation.NSMatchingRequiredEnd, 1 << 3)
        self.assertEqual(Foundation.NSMatchingInternalError, 1 << 4)

    @min_os_level("10.7")
    def testMethods10_7(self):
        self.assertArgIsOut(
            Foundation.NSRegularExpression.regularExpressionWithPattern_options_error_,
            2,
        )
        self.assertArgIsOut(
            Foundation.NSRegularExpression.initWithPattern_options_error_, 2
        )
        self.assertArgIsBlock(
            Foundation.NSRegularExpression.enumerateMatchesInString_options_range_usingBlock_,
            3,
            b"v@" + objc._C_NSUInteger + b"o^" + objc._C_NSBOOL,
        )

        self.assertArgIsOut(Foundation.NSDataDetector.dataDetectorWithTypes_error_, 1)
        self.assertArgIsOut(Foundation.NSDataDetector.initWithTypes_error_, 1)
python
from .dispatcher import CommandDispatcher, CommandDispatchError, UnknownCommandError
from .command import Command, TooManyArguments
from .call_match import CallMatch, CallMatchFail
from .call_matcher import CallMatcher
from .syntax_tree.literal import MissingLiteral, MismatchedLiteral, MismatchedLiteralSuggestion
from .syntax_tree.param import MissingParameter, MismatchedParameterType
from .syntax_tree.tail import MissingTail
from .syntax_tree.unordered_group import MissingUnorderedGroup
from .syntax_tree.variant_group import MissingVariant

__all__ = [
    'CommandDispatcher',
    'CommandDispatchError',
    'UnknownCommandError',
    'CallMatch',
    'CallMatcher',
    'CallMatchFail',
    'Command',
    'TooManyArguments',
    'MissingLiteral',
    'MismatchedLiteral',
    'MismatchedLiteralSuggestion',
    'MissingParameter',
    'MismatchedParameterType',
    'MissingTail',
    'MissingUnorderedGroup',
    'MissingVariant',
]
python
import json

from django.test.utils import override_settings
from hc.api.models import Channel
from hc.test import BaseTestCase
from mock import patch


@override_settings(ZENDESK_CLIENT_ID="t1", ZENDESK_CLIENT_SECRET="s1")
class AddZendeskTestCase(BaseTestCase):
    url = "/integrations/add_zendesk/"

    def test_instructions_work(self):
        self.client.login(username="[email protected]", password="password")
        r = self.client.get(self.url)
        self.assertContains(r, "Connect Zendesk Support", status_code=200)

    def test_post_works(self):
        self.client.login(username="[email protected]", password="password")
        r = self.client.post(self.url, {"subdomain": "foo"})
        self.assertEqual(r.status_code, 302)
        self.assertTrue("foo.zendesk.com" in r["Location"])

        # There should now be a key in session
        self.assertTrue("zendesk" in self.client.session)

    @override_settings(ZENDESK_CLIENT_ID=None)
    def test_it_requires_client_id(self):
        self.client.login(username="[email protected]", password="password")
        r = self.client.get(self.url)
        self.assertEqual(r.status_code, 404)

    @patch("hc.front.views.requests.post")
    def test_it_handles_oauth_response(self, mock_post):
        session = self.client.session
        session["zendesk"] = "foo"
        session["subdomain"] = "foodomain"
        session.save()

        oauth_response = {"access_token": "test-token"}
        mock_post.return_value.text = json.dumps(oauth_response)
        mock_post.return_value.json.return_value = oauth_response

        url = self.url + "?code=12345678&state=foo"

        self.client.login(username="[email protected]", password="password")
        r = self.client.get(url, follow=True)
        self.assertRedirects(r, "/integrations/")
        self.assertContains(r, "The Zendesk integration has been added!")

        ch = Channel.objects.get()
        self.assertEqual(ch.zendesk_token, "test-token")
        self.assertEqual(ch.zendesk_subdomain, "foodomain")

        # Session should now be clean
        self.assertFalse("zendesk" in self.client.session)
        self.assertFalse("subdomain" in self.client.session)

    def test_it_avoids_csrf(self):
        session = self.client.session
        session["zendesk"] = "foo"
        session.save()

        url = self.url + "?code=12345678&state=bar"

        self.client.login(username="[email protected]", password="password")
        r = self.client.get(url)
        self.assertEqual(r.status_code, 400)
python
import argparse

from utils import load_data, init_model, train_model, save_model, is_cuda_available


def str2bool(v):
    if isinstance(v, bool):
        return v
    if v.lower() in ('yes', 'true', 't', 'y', '1'):
        return True
    elif v.lower() in ('no', 'false', 'f', 'n', '0'):
        return False
    else:
        raise argparse.ArgumentTypeError('Boolean value expected.')


parser = argparse.ArgumentParser(description='Train a neural network to identify flowers.')
parser.add_argument("data_dir", help="directory of the data", default="")
parser.add_argument("--save_dir", help="directory where the checkpoints will be saved",
                    default="models_checkpoints")
parser.add_argument("--arch", help="architecture of the pre-trained network",
                    choices=['resnet18', 'alexnet', 'vgg16', 'squeezenet1_0',
                             'densenet121', 'inception_v3', 'googlenet',
                             'shufflenet_v2_x1_0', 'mobilenet_v2', 'resnext50_32x4d'],
                    default='densenet121')
parser.add_argument("--learning_rate", type=float, help="learning rate", default=0.003)
parser.add_argument("--drop_rate", type=float, help="drop rate", default=0.2)
parser.add_argument("--hidden_units", type=int, help="hidden units for each hidden layer",
                    nargs='+')
parser.add_argument("--epochs", type=int, help="epochs", default=5)
parser.add_argument("--gpu", type=str2bool, nargs='?', const=True, default=False,
                    help="use cuda instead of cpu")
# additional parameters
parser.add_argument("--batch_size", type=int, help="batch size for the data loader",
                    default=32)

args = parser.parse_args()

data_dir = "./" + args.data_dir
save_dir = "./" + args.save_dir
device = 'cuda' if (args.gpu and is_cuda_available()) else 'cpu'

input_size = 1024
output_size = 102

dataloaders, image_datasets = load_data(data_dir, args.batch_size)
model, optimizer, criterion = init_model(args.arch, input_size, output_size,
                                         args.hidden_units, args.drop_rate,
                                         args.learning_rate)
model, optimizer, criterion, steps = train_model(dataloaders, image_datasets, model,
                                                 optimizer, criterion, device, args.epochs)
save_model(model, save_dir, optimizer, input_size, output_size, args.arch,
           args.drop_rate, args.epochs)
python
import sys, os, traceback, itertools
from os import walk
import json
import subprocess32 as subprocess
from shutil import copyfile
import hashlib as h
from common import *
import uuid

MAX_TASKS_PER_PROBLEM = 20
TOTAL_SMT = 0
TOTAL_PROBLEM = 0
TOTAL_TASK = 0
TOTAL_NO_TASK = 0

from haikunator import Haikunator
haikunator = Haikunator()


"""
Check if the conjunction of the set of clauses is already unsat.
If so, we have to drop the task because the user would never find a solution.
"""
def check_solvable(task):
    lines = list()
    lines.append("\\predicates {")
    # TODO IsOdd(int, int);
    for pred in task[predicate_key]:  # conj with &
        type_sig = ""
        comma = ""
        for arg in pred["args"]:
            type_sig += comma
            comma = ", "
            type_sig += "int"
        lines.append("  {}({});".format(pred["name"], type_sig))
    lines.append("}")
    lines.append("\\problem {")
    conj = ""
    for clause in task[clauses_key]:
        lines.append(conj + clause)
        conj = "& "
    lines.append("-> false")
    # \forall int v0; \forall int v1; (v1 >= 2 | -1 >= v1 | 0 >= v0 | IsOdd(1 + v0, v1))
    lines.append("}")

    tmp_file = "_tmp.pri"
    with open(tmp_file, "w") as princess_file:
        princess_file.write("\n".join(lines))

    output = run_cmd([princess_command, "-timeout=5000", "-clausifier=simple", tmp_file])
    if output and 'output' in output:
        for line in output['output'].splitlines():
            if line.rstrip() == "INVALID":
                log.debug("Task is UNSAT (which is good)")
                return True
            elif line.rstrip() == "VALID":
                log.debug("Task is SAT")
                return False
            elif line.startswith("ERROR:"):
                log.debug("Check failed: {}".format(line))
                return False
            elif line.startswith("CANCELLED/TIMEOUT"):
                log.debug("Timeout")
                return True
        # return True
        log.debug("Check failed: \n{}".format(output['output']))
    return False


def parse_output(output_string):
    problem = dict()
    current_clause_key = ""
    buffer = ""
    for line in output_string.splitlines():
        if "sat" == line:
            problem["solved"] = True
            return problem
        elif line == "unsat":
            problem["solved"] = True
            return problem

        if line == "**All Clauses":
            buffer = ""
            current_clause_key = clauses_key
            problem[current_clause_key] = list()
        elif line == "---":
            if current_clause_key != "":
                if buffer.rstrip() != "true":
                    problem[current_clause_key].append(buffer)
            buffer = ""
        elif line == "**End Clauses":
            buffer = ""
        elif line == "**Instantiated Clauses with Violating Predicates:":
            buffer = ""
            current_clause_key = instantiated_clause_key
            problem[instantiated_clause_key] = list()
        elif line == "**VIOLATED CLAUSES:":
            buffer = ""
            current_clause_key = violated_clause_key
            problem[current_clause_key] = list()
        elif line == "**Other Clauses with Violating Predicates:":
            buffer = ""
            current_clause_key = ""
            # problem[violated_clause_key] = list()
        elif line == "End violated":
            # problem[violated_clause_key].append(buffer)
            buffer = ""
        elif line == "**Violating Predicates:":
            buffer = ""
            problem[predicate_key] = ""
        elif line == "**End Predicates":
            problem[predicate_key] = buffer
            buffer = ""
        elif line == "psat":
            buffer = ""
        else:
            if len(buffer) > 0:
                buffer += "\n"
            buffer += line
    return problem


def parse_predicate_keys(pred_keys):
    parsed_keys = list()
    i = 0
    lastKey = ""
    for line in pred_keys.splitlines():
        parsed_key = dict()
        if i % 2 == 0:
            lastKey = line
        else:
            parsed_key = dict()
            parsed_key["name"] = lastKey[:lastKey.index('(')]
            param_string = lastKey[lastKey.index('(') + 1:lastKey.index(')')]
            parsed_key["args"] = [x.strip() for x in param_string.split(',')]
            parsed_key["assignment"] = line
            parsed_keys.append(parsed_key)
        i += 1
    return parsed_keys


def create_tasks(eldarica_output, problem):
    tasks = list()
    relevant_clauses = list()
    # first add all violated clauses
    # TODO temporarily disabled
    # relevant_clauses += eldarica_output[violated_clause_key]

    # then add each power set of the remaining instantiated clauses
    counter = 0
    subset = eldarica_output[instantiated_clause_key]
    # for subset in powerset(eldarica_output[instantiated_clause_key]):
    #     if len(subset)==0:
    #         continue
    #     if counter>MAX_TASKS_PER_PROBLEM:
    #         break
    task = dict()
    task[problem_id_key] = problem[problem_id_key]
    task[predicate_key] = problem[predicate_key]
    task[clauses_key] = list()
    task[clauses_key] += relevant_clauses
    for clause in subset:
        task[clauses_key].append(clause)

    tid = h.sha224(str(task)).hexdigest()
    task[task_id_key] = str(tid)  # compute the hash before setting the random name

    log.info("Checking satisfiability of task.")
    if check_solvable(task) == True:
        counter += 1
        tasks.append(task)
    else:
        # print "Unsat. Dropping task"
        pass

    if counter == 0:
        log.info("Could not generate new tasks %s",
                 str(json.dumps(eldarica_output, indent=2)))
    return tasks


def create_problem_hash(eldarica_output):
    hash_string = "\n".join(eldarica_output[instantiated_clause_key])
    hash_string += eldarica_output[predicate_key]
    return h.sha224(hash_string).hexdigest()


def check_smt_file(smt_file, out_dir, timeout=5, hint_file=None, problem=None, generate=True):
    global TOTAL_PROBLEM, TOTAL_TASK
    cmd = [eldarica_command, smt_file, "-rt:{}".format(timeout), "-ssol"]
    if hint_file:
        cmd += ["-hints:{}".format(hint_file)]
    stats = run_cmd(cmd)

    eldarica_output = dict()
    if stats and not stats['timed_out']:
        eldarica_output = parse_output(stats["output"])

    generated_tasks = 0
    try:
        log.info("Eldarica says ====\n%s\n=====\n", stats["output"])
        if eldarica_output:
            if "solved" in eldarica_output:
                if problem != None:
                    return problem[problem_id_key], True, 0
                else:
                    return "0", eldarica_output["solved"], 0

            # create a new problem if needed.
            pid = create_problem_hash(eldarica_output)
            unique_problem_name = os.path.join(out_dir, "problem_{}.json".format(pid))

            smt_file_copy = None
            # if we check a known problem, check if anything changed.
            if problem and problem[problem_id_key] != pid:
                # create a new problem but use the old smt file.
                smt_file_copy = problem["smt_file"]
                problem = None

            if os.path.isfile(unique_problem_name):
                with open(unique_problem_name, "r") as data_file:
                    log.warning("Problem already created: %s", str(unique_problem_name))
                    problem = json.load(data_file)

            if problem == None:
                problem = dict()
                problem[problem_id_key] = str(pid)
                if "solved" in eldarica_output:
                    log.info("solved in Eldarica's output")
                    return problem[problem_id_key], True, 0

                problem[clauses_key] = eldarica_output[clauses_key]
                problem[instantiated_clause_key] = eldarica_output[instantiated_clause_key]
                problem[predicate_key] = parse_predicate_keys(eldarica_output[predicate_key])

                if smt_file_copy == None:
                    smt_file_dir = os.path.join(os.path.dirname(out_dir), SMT_DIR_SUFFIX)
                    if not os.path.exists(smt_file_dir):
                        os.makedirs(smt_file_dir)
                    smt_file_copy = os.path.join(smt_file_dir, "smt_{}.smt2".format(str(pid)))
                    copyfile(smt_file, smt_file_copy)
                problem["smt_file"] = smt_file_copy

                problem_file = "problem_{}.json".format(pid)
                with open(os.path.join(out_dir, problem_file), "w") as jo:
                    jo.write(json.dumps(problem, indent=2))
                TOTAL_PROBLEM += 1

            if "solved" in eldarica_output:
                log.info("solved in Eldarica's output")
                return problem[problem_id_key], True, 0

            log.info("Creating tasks.")
            for task in create_tasks(eldarica_output, problem):
                task_file_name = os.path.join(out_dir, "task_{0}.json".format(task[task_id_key]))
                if os.path.isfile(task_file_name):
                    log.info("Task already in DB: %s", str(task_file_name))
                    continue
                task["smt_file"] = problem["smt_file"]
                task["text_name"] = haikunator.haikunate(token_length=0, delimiter=' ')
                if generate:
                    log.info("Generating %s", str(task_file_name))
                    with open(task_file_name, "w") as jo:
                        jo.write(json.dumps(task, indent=2))
                    TOTAL_TASK += 1
                generated_tasks += 1
            log.info("Success. Generated %s tasks", str(generated_tasks))
    except Exception as e:
        log.error("Failed. %s", str(e))
        traceback.print_exc(file=sys.stdout)
        log.error(eldarica_output)

    if problem:
        return problem[problem_id_key], False, generated_tasks
    return "0", False, 0


def generate_problem_file(smt_file_list, out_dir, timeout=10, hint_file=None):
    global TOTAL_PROBLEM, TOTAL_TASK, TOTAL_NO_TASK
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    for smt_file in smt_file_list:
        log.info("Processing %s", str(smt_file))
        uid, solved, gentasks = check_smt_file(smt_file, out_dir, timeout, None, None)
        if gentasks == 0 and solved == False:
            TOTAL_NO_TASK += 1
        # print stats
        # raise e


def get_file_hash(file_name):
    file_data = ""
    with open(file_name, "r") as f:
        file_data = f.read()
    return h.sha224(file_data).hexdigest()


### Only Utility stuff below this point ###

def powerset(iterable):
    "powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"
    s = list(iterable)
    return itertools.chain.from_iterable(itertools.combinations(s, r) for r in range(len(s) + 1))


if __name__ == "__main__":
    if len(sys.argv) < 3:
        print("Requires smt file dir and out dir")
        sys.exit()
    if not os.path.isdir(sys.argv[1]):
        print("SMT dir not a directory: {}".format(sys.argv[1]))
        sys.exit()

    smt_files = []
    for (dirpath, _, filenames) in walk(sys.argv[1]):
        for fn in filenames:
            if ".smt2" in fn:
                TOTAL_SMT += 1
                smt_files.append(os.path.join(dirpath, fn))
        break

    generate_problem_file(smt_files, sys.argv[2])
    print("Files {}, Problems {}, Tasks {}".format(TOTAL_SMT, TOTAL_PROBLEM, TOTAL_TASK))
    print("Problems without tasks {}".format(TOTAL_NO_TASK))
python
from itertools import permutations


def scrambled_letters_and_hash(inp, pwd):
    if not isinstance(pwd, list):
        pwd = list(pwd)
    for inst in inp:
        parts = inst.split()
        nums = [int(a) for a in parts if a.isdigit()]
        if parts[0] == 'swap':
            if parts[1] == 'position':
                x, y = nums[0], nums[1]
            # elif parts[1] == 'letter':
            else:
                x, y = pwd.index(parts[2]), pwd.index(parts[5])
            pwd[x], pwd[y] = pwd[y], pwd[x]
        elif parts[0] == 'rotate':
            if parts[1] == 'based':
                ch = parts[-1]
                c = pwd.index(ch)
                c += (c >= 4) + 1
                c = -(c % len(pwd))
            else:
                c = nums[0]
                if parts[1] == 'right':
                    c = -c
                # elif parts[1] == 'left':
                #     pass
            pwd = pwd[c:] + pwd[:c]
        elif parts[0] == 'reverse':
            x, y = nums[0], nums[1]
            pwd[x:y + 1] = pwd[x:y + 1][::-1]
        elif parts[0] == 'move':
            x, y = nums[0], nums[1]
            c = pwd.pop(x)
            pwd = pwd[:y] + [c] + pwd[y:]
        else:
            print(f'Unrecognised instruction {inst}')
    return ''.join(pwd)


def scrambled_letters_and_hash_find(inst, target):
    pwd = list(target)
    for test in permutations(pwd):
        if scrambled_letters_and_hash(inst, test) == target:
            return ''.join(test)


if __name__ == '__main__':
    with open('input.txt') as instructions_file:
        instructions_list = instructions_file.read().splitlines(keepends=False)

    print(f'Day 21, part 1: {scrambled_letters_and_hash(instructions_list, "abcdefgh")}')
    print(f'Day 21, part 2: {scrambled_letters_and_hash_find(instructions_list, "fbgdceah")}')

    # Day 21, part 1: gcedfahb
    # Day 21, part 2: hegbdcfa
python
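A quick sanity check of the scrambler on hand-written instructions (toy inputs, not from the puzzle file). The inverse search above simply brute-forces all 8! = 40320 permutations of the target, which is cheap enough for part 2:

assert scrambled_letters_and_hash(['swap position 0 with position 1'], 'abc') == 'bac'
assert scrambled_letters_and_hash(['rotate left 1 step'], 'abcd') == 'bcda'
assert scrambled_letters_and_hash(['reverse positions 1 through 3'], 'abcde') == 'adcbe'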
'''Collection of methods used for acquiring data for training and testing.
'''
import os

import numpy as np
import pandas as pd


def get_local_files(base_path):
    '''Gets a list of files in the specified directory and all sub-directories.

    Args:
        base_path (string): The base directory to search for files.

    Returns:
        list: The list of files.
    '''
    files = []
    for file in os.listdir(base_path):
        full_path = base_path + '//' + file
        if os.path.isdir(full_path):
            files.extend(get_local_files(full_path))
        else:
            files.append(file)

    return files


def write_list_to_file(items, path):
    '''Writes the contents of a list to the specified file path.

    Args:
        items (list): The list to write.
        path (string): The file to write to.
    '''
    with open(path, 'w', encoding='utf-8') as f:
        _ = [f.write('%s\n' % item) for item in items]


def get_train_test_data():
    '''Reads the processed and split train/test data.

    Returns:
        x_train (numpy array): The training features.
        y_train (numpy array): The training labels.
        x_eval (numpy array): The evaluation features.
        y_eval (numpy array): The evaluation labels.
        x_test (numpy array): The test features.
        y_test (numpy array): The test labels.
    '''
    return (
        get_processed_data('x_train.csv'),
        get_processed_data('y_train.csv'),
        get_processed_data('x_eval.csv'),
        get_processed_data('y_eval.csv'),
        get_processed_data('x_test.csv'),
        get_processed_data('y_test.csv')
    )


def get_processed_data(name):
    '''Reads the specified file and returns the contents as a flattened array.

    Args:
        name (string): The name (and path) of the file to read.
    '''
    df = pd.read_csv('data/processed/' + name, header=None)
    return np.ravel(df[0].to_numpy())
python
import os
import subprocess


def signfind(x):
    if x < 0:
        return '-'
    else:
        return '+'


def main():
    # soundmeter prints a line such as "  avg:  123"; the fixed offset below
    # assumes that exact output format
    avgnoise = subprocess.check_output('soundmeter --collect --seconds 3 | grep avg', shell=True)
    strbuff = str(avgnoise)
    print(strbuff)
    print('*********************')
    noisevolprev = int(strbuff[14:])
    print(noisevolprev)
    print('*********************')

    while True:
        avgnoise = subprocess.check_output('soundmeter --collect --seconds 3 | grep avg', shell=True)
        strbuff = str(avgnoise)
        print(strbuff)
        noisevolcurr = int(strbuff[14:])
        print('*********************')
        print(noisevolcurr)
        print('*********************')

        update_vol = noisevolcurr - noisevolprev
        noisevolprev = noisevolcurr
        print(update_vol)
        update_vol = update_vol / 70
        print(update_vol)
        os.system("amixer -D pulse sset Master " + str(abs(update_vol)) + "%" + signfind(update_vol))


if __name__ == '__main__':
    main()
python
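Parsing the soundmeter reading with a fixed string offset silently breaks if the tool's output format shifts. A more defensive sketch, assuming the line contains something like `avg: 123`:

import re

def parse_avg(output: bytes) -> int:
    # Accepts e.g. b'  avg:  123\n' and returns 123.
    match = re.search(rb'avg:\s*(\d+)', output)
    if match is None:
        raise ValueError('unexpected soundmeter output: %r' % output)
    return int(match.group(1))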
from app import db


class File(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    file_hash = db.Column(db.String(64), index=True, unique=True)
    block_hash = db.Column(db.String(64))
    block_index = db.Column(db.Integer)
    txn_index = db.Column(db.Integer)
python
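A minimal usage sketch for the model above, assuming `db` is configured the usual Flask-SQLAlchemy way and an application context is active (the hash values are placeholders):

from app import db

db.create_all()  # run inside an app context in Flask-SQLAlchemy 3.x

record = File(file_hash='ab' * 32, block_hash='cd' * 32,
              block_index=1024, txn_index=3)
db.session.add(record)
db.session.commit()

found = File.query.filter_by(file_hash='ab' * 32).first()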
import time
import subprocess
import os
import sys
import RPi.GPIO as GPIO
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor

ROTATION_COUNT = 3600

# Raspberry Pi GPIO ports
LED_GREEN = 0
LED_RED = 0
STEPPER = 0
SWITCH = 0
LASER = 0


class State:
    Ready, Scanning = range(1, 3)


state = State.Ready


def get_picture(camera):
    if camera == 'left':
        subprocess.call(["fswebcam", "-r", "1920x1080", "--no-banner",
                         "-d", "/dev/video0", "/images/image.jpg"])
    elif camera == 'right':
        subprocess.call(["fswebcam", "-r", "1920x1080", "--no-banner",
                         "-d", "/dev/video1", "/images/simage.jpg"])


def rotate_turntable():
    mh = Adafruit_MotorHAT()
    # 200 steps/rev on port 1 (assumed; the original never created the stepper)
    myStepper = mh.getStepper(200, 1)
    myStepper.setSpeed(30)
    myStepper.step(1, Adafruit_MotorHAT.FORWARD, Adafruit_MotorHAT.SINGLE)


def laser(state):
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(24, GPIO.OUT)
    if state:
        GPIO.output(24, 1)
    else:
        GPIO.output(24, 0)


def scanning():
    for index in range(0, ROTATION_COUNT):
        laser(False)
        get_picture('left')
        get_picture('right')
        laser(True)
        get_picture('left')
        get_picture('right')
        rotate_turntable()


def main():
    command = sys.argv[1] if len(sys.argv) > 1 else ""
    if command == "camera-left":
        get_picture("left")
    elif command == "camera-right":
        get_picture("right")
    elif command == "turntable":
        rotate_turntable()
    elif command == "laser-on":
        laser(True)
    elif command == "laser-off":
        laser(False)
    print(command)


if __name__ == "__main__":
    main()
python
import aiohttp
import aiofiles
import asyncio
import os
from timeit import default_timer as timer


async def download_html(session: aiohttp.ClientSession, url: str):
    # Get http
    async with session.get(url, ssl=False) as res:
        filename = f'output/{os.path.basename(url)}.html'
        # Async write to file, using url as filename
        async with aiofiles.open(filename, 'wb') as f:
            while True:
                chunk = await res.content.read(1024)
                if not chunk:
                    break
                await f.write(chunk)
        return await res.release()


async def async_man(url):
    async with aiohttp.ClientSession() as session:
        await download_html(session, url)


def time_start():
    global start
    start = timer()


def time_stop():
    stop = timer() - start
    print(f"Took {format(stop, 'f')} seconds")


def main():
    urls = [
        'http://packtpub.com',
        'http://python.org',
        'http://docs.python.org/3/library/asyncio',
        'http://aiohttp.readthedocs.io',
        'http://google.com'
    ]
    os.makedirs('output', exist_ok=True)  # download_html writes into output/
    loop = asyncio.get_event_loop()
    loop.run_until_complete(
        asyncio.gather(*[async_man(url) for url in urls])
    )


if __name__ == '__main__':
    time_start()
    main()
    time_stop()
python
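The `gather` call above opens one `ClientSession` per URL. A common refinement, sketched here rather than taken from the original, shares a single session and bounds concurrency with a semaphore:

async def download_all(urls, limit=3):
    sem = asyncio.Semaphore(limit)

    async def bounded(session, url):
        # At most `limit` downloads run at once.
        async with sem:
            await download_html(session, url)

    async with aiohttp.ClientSession() as session:
        await asyncio.gather(*[bounded(session, url) for url in urls])

# loop.run_until_complete(download_all(urls))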
# Copyright (c) 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

""" Tests for file_connector.swift """

import os
import unittest
import shutil
import tempfile

import file_connector.swift as gs


class TestPkgInfo(unittest.TestCase):
    """
    Tests for the file_connector.swift PkgInfo class.
    """

    def test_constructor(self):
        pi = gs.PkgInfo('a', 'b', 'c', 'd')
        assert pi.canonical_version == 'a'
        assert pi.name == 'c'
        self.assertEqual(pi.release, 'b')
        assert pi.final == 'd'

    def test_pretty_version(self):
        pi = gs.PkgInfo('a', 'b', 'c', False)
        assert pi.pretty_version == 'a-dev'
        pi = gs.PkgInfo('a', 'b', 'c', True)
        assert pi.pretty_version == 'a'

    def test_save_config(self):
        pi = gs.PkgInfo('a', 'b', 'c', 'd')
        td = tempfile.mkdtemp()
        try:
            sc = os.path.join(td, 'saved_config.txt')
            pi.save_config(sc)
            exp = 'NAME=c\nVERSION=a\nRELEASE=b\n'
            with open(sc, 'r') as f:
                contents = f.read()
            assert contents == exp
        finally:
            shutil.rmtree(td)
python
from machine import Pin

# LED on GPIO 0
l = Pin(0, Pin.OUT)
l.high()
l.low()
l.value()
l.value(1)
l.value(0)

# Button
# GPIO 1 is wired to the button and GND, so read it as an input with a
# pull-up; the pin reads 0 while the button is held down (active low)
b = Pin(1, Pin.IN, Pin.PULL_UP)
b.value()
# with the button pressed
b.value()

# Application: light the LED while the button is pressed
while 1:
    if b.value() == 0:
        l.value(1)
    else:
        l.value(0)
python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from opensourcetest.builtin.autoParamInjection import AutoInjection


class Login(AutoInjection):
    def __init__(self):
        super(Login, self).__init__(self.__class__.__name__)


if __name__ == '__main__':
    ...
python
# my cnctoolbox script!
# c = compiler
# t = gcodetools
# grbl = self.grbl

import math

thickness = 1
steps = 20

gcodes = []
gcodes.append("M3")
gcodes.append("S0")
gcodes.append("G0X0Y0")
gcodes.append("F500")
gcodes.append("G1")

self.new_job()


def spiral(cx, cy, r1, r2, windings, direction):
    gcode = []
    steps_per_winding = 100
    if direction == 1:
        r = r1
    else:
        r = r2
    r_inc = direction * (r2 - r1) / windings / steps_per_winding
    for anglestep in range(0, steps_per_winding * windings):
        r += r_inc
        angle = (direction + direction * anglestep) * 2 * math.pi / steps_per_winding
        x = cx + r * math.cos(angle)
        y = cy + r * math.sin(angle)
        print(angle)
        gcode.append("X{:.3f} Y{:.3f} S255".format(x, y))
    return gcode


self.grbl.preprocessor.do_fractionize_arcs = False
self.grbl.preprocessor.do_fractionize_lines = False

direction = 1
for z in range(1, steps):
    gcodes += spiral(0, 0, 2, 30, 4, direction)
    gcodes.append("G91")
    gcodes.append("G0 Z{} S0".format(-thickness / steps))
    gcodes.append("G90")
    direction *= -1

self.grbl.write(gcodes)
self.set_target("simulator")
self.job_run()
python
# -*- coding: utf-8 -*-
import pytest


@pytest.fixture
def vsphere_host():
    from ati.terraform import vsphere_host
    return vsphere_host


@pytest.fixture
def vsphere_resource():
    return {
        "type": "vsphere_virtual_machine",
        "primary": {
            "id": "12345678",
            "attributes": {
                "custom_configuration_parameters.#": "4",
                "custom_configuration_parameters.python_bin": "/usr/bin/python",
                "custom_configuration_parameters.role": "control",
                "custom_configuration_parameters.ssh_user": "vsphere-user",
                "custom_configuration_parameters.consul_dc": "module_name",
                "disk.#": "1",
                "disk.0.datastore": "main01",
                "disk.0.iops": "0",
                "disk.0.size": "0",
                "disk.0.template": "centos7-base",
                "domain": "domain.com",
                "id": "server01",
                "memory": "2048",
                "name": "mi-control-01",
                "network_interface.#": "1",
                "network_interface.0.adapter_type": "",
                "network_interface.0.ip_address": "5.6.7.8",
                "network_interface.0.ipv4_address": "1.2.3.4",
                "network_interface.0.ipv4_prefix_length": "24",
                "network_interface.0.ipv6_address": "",
                "network_interface.0.ipv6_prefix_length": "64",
                "network_interface.0.label": "VM Network",
                "network_interface.0.subnet_mask": "",
                "time_zone": "Etc/UTC",
                "vcpu": "1"
            }
        }
    }


def test_name(vsphere_resource, vsphere_host):
    name, _, _ = vsphere_host(vsphere_resource, '')
    assert name == "mi-control-01"


@pytest.mark.parametrize('attr,should', {
    'id': 'server01',
    # ansible
    'ansible_ssh_host': '1.2.3.4',
    'ansible_ssh_user': 'vsphere-user',
    'ansible_python_interpreter': '/usr/bin/python',
    # generic
    'public_ipv4': '1.2.3.4',
    'private_ipv4': '1.2.3.4',
    'provider': 'vsphere',
    # mi
    'consul_dc': 'module_name',
    'role': 'control',
}.items())
def test_attrs(vsphere_resource, vsphere_host, attr, should):
    _, attrs, _ = vsphere_host(vsphere_resource, 'module_name')
    assert attr in attrs
    assert attrs[attr] == should
python
from typing import List, Optional, Tuple

import requests

from NotionPy import constants
from NotionPy.utils import parse_into_dict, parse_into_json


class Query:
    """
    Retrieves data from a page or a database, with the option to get it
    back as JSON or as dict-like data.
    """

    TOKEN = None

    def __init__(self) -> None:
        pass

    def page(
        self,
        page_id: str,
        in_json: Optional[bool] = False,
        json_indent: Optional[int] = None,
        print_data: Optional[bool] = False,
    ):
        res = requests.post(
            url=constants.QUERY_PAGE_URL(page_id),
            headers=constants.HEADERS(Query.TOKEN),
        )
        data = (
            parse_into_json(res.json(), json_indent)
            if in_json is True
            else parse_into_dict(parse_into_json(res.json(), json_indent))
        )
        if print_data is True:
            print(data)
        return data

    def db(
        self,
        db_id,
        in_json: Optional[bool] = False,
        json_indent: Optional[int] = None,
        print_data: Optional[bool] = False,
        sort: Optional[List[Tuple]] = None,  # [(prop_name, ascending or descending)]
        Filter: Optional[List[Tuple]] = None,  # [(prop_name, prop_type, condition, value)]
        and_filter: Optional[List[Tuple]] = None,  # [(prop_name, prop_type, condition, value)]
        or_filter: Optional[List[Tuple]] = None,  # [(prop_name, prop_type, condition, value)]
    ):
        # the conditions are the same as in the Notion GUI querying
        querying_data = {}
        if sort is not None:
            querying_data.update(constants.SORT(sorting_info=sort))
        if Filter is not None:
            querying_data.update(constants.FILTER(filtering_info=Filter))
        if and_filter is not None:
            querying_data.update(constants.AND_FILTER(filtering_info=and_filter))
        if or_filter is not None:
            querying_data.update(constants.OR_FILTER(filtering_info=or_filter))

        res = requests.post(
            url=constants.QUERY_DB_URL(db_id),
            data=parse_into_json(querying_data),
            headers=constants.HEADERS(Query.TOKEN),
        )
        data = (
            parse_into_json(res.json(), json_indent)
            if in_json is True
            else parse_into_dict(parse_into_json(res.json(), json_indent))
        )
        if print_data is True:
            print(data)
        return data
python
# Handcrafted _version.py to fix
# https://github.com/conda-forge/ambertools-feedstock/issues/35

import json

version_json = """
{
 "date": "2020-02-26T10:02:00+0100",
 "dirty": false,
 "error": null,
 "full-revisionid": "aa15556ab201b53f99cf36394470c341526b69ed",
 "version": "3.2.0+27"
}
"""  # END VERSION_JSON


def get_versions():
    return json.loads(version_json)
python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models
import autoslug.fields


class Migration(migrations.Migration):

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Constituency',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', autoslug.fields.AutoSlugField(populate_from=b'name', unique_with=(b'county__name',), editable=False)),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'Constituency',
                'verbose_name_plural': 'Constituencies',
            },
        ),
        migrations.CreateModel(
            name='County',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', autoslug.fields.AutoSlugField(populate_from=b'name', unique=True, editable=False)),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'County',
                'verbose_name_plural': 'Counties',
            },
        ),
        migrations.CreateModel(
            name='District',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', autoslug.fields.AutoSlugField(populate_from=b'name', unique_with=(b'province__name',), editable=False)),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'District',
                'verbose_name_plural': 'Districts',
            },
        ),
        migrations.CreateModel(
            name='Division',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', autoslug.fields.AutoSlugField(populate_from=b'name', unique_with=(b'district__name',), editable=False)),
                ('district', models.ForeignKey(verbose_name='District', to='places.District')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'Division',
                'verbose_name_plural': 'Divisions',
            },
        ),
        migrations.CreateModel(
            name='Location',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', autoslug.fields.AutoSlugField(populate_from=b'name', unique_with=(b'division__name',), editable=False)),
                ('division', models.ForeignKey(verbose_name='Division', to='places.Division')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'Location',
                'verbose_name_plural': 'Locations',
            },
        ),
        migrations.CreateModel(
            name='Province',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', autoslug.fields.AutoSlugField(populate_from=b'name', unique=True, editable=False)),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'Province',
                'verbose_name_plural': 'Provinces',
            },
        ),
        migrations.CreateModel(
            name='SubLocation',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('name', models.CharField(max_length=255, verbose_name='Name')),
                ('slug', autoslug.fields.AutoSlugField(populate_from=b'name', unique_with=(b'location__name',), editable=False)),
                ('location', models.ForeignKey(verbose_name='Location', to='places.Location')),
            ],
            options={
                'ordering': ['name'],
                'verbose_name': 'Sub Location',
                'verbose_name_plural': 'Sub Locations',
            },
        ),
        migrations.AddField(
            model_name='district',
            name='province',
            field=models.ForeignKey(verbose_name='Province', to='places.Province'),
        ),
        migrations.AddField(
            model_name='constituency',
            name='county',
            field=models.ForeignKey(verbose_name='County', to='places.County'),
        ),
    ]
python
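For reference, a hedged sketch of the model definition implied by the County operations in the migration above, using django-autoslug's AutoSlugField. This is an illustration reconstructed from the migration, not the project's actual models.py:

from autoslug import AutoSlugField
from django.db import models

class County(models.Model):
    created_on = models.DateTimeField(auto_now_add=True, verbose_name='Created on')
    updated_on = models.DateTimeField(auto_now=True, verbose_name='Updated on')
    name = models.CharField(max_length=255, verbose_name='Name')
    slug = AutoSlugField(populate_from='name', unique=True, editable=False)

    class Meta:
        ordering = ['name']
        verbose_name = 'County'
        verbose_name_plural = 'Counties'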
from django.contrib import admin

from .models import ObjectViewed, UserSession

admin.site.register(ObjectViewed)
admin.site.register(UserSession)
python
from collections import defaultdict from logging import getLogger from typing import Dict, Mapping from ordered_set import OrderedSet from squares.tyrell.spec import Type, TyrellSpec logger = getLogger('squares.conditions') class ConditionTable: def __init__(self) -> None: self.graphs = defaultdict(lambda: defaultdict(OrderedSet)) def append(self, t: Type, origin: str, destination: str): self.graphs[t][origin].append(destination) def dfs(self, t: Type, key: str, visited: OrderedSet[str] = None) -> OrderedSet[str]: if visited is None: visited = OrderedSet() if key not in visited: visited.add(key) for neighbour in self.graphs[t][key]: self.dfs(t, neighbour, visited) return visited - OrderedSet([key]) def compile(self, spec: TyrellSpec) -> Mapping[int, OrderedSet[int]]: return ConditionTableJIT(self, spec) class ConditionTableJIT: def __init__(self, base_conditions: ConditionTable, spec: TyrellSpec) -> None: self.base_conditions = base_conditions self.spec: TyrellSpec = spec self.compiled: Dict[int, OrderedSet[int]] = {} def dfs(self, key: int) -> OrderedSet[str]: if key not in self.compiled.keys(): self.compiled[key] = OrderedSet() production = self.spec.get_production(key) if production and production.is_enum(): for neighbour in self.base_conditions.graphs[production.lhs][production.rhs]: n_production = self.spec.get_enum_production(production.lhs, neighbour) if n_production: tmp = self.dfs(n_production.id) self.compiled[key].update(tmp) else: logger.warning('Unknown production "%s" in type %s', neighbour, production.lhs) return self.compiled[key] | {key} def __getitem__(self, item: int) -> OrderedSet[int]: if item not in self.compiled: self.dfs(item) return self.compiled[item] - {item}
python
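A minimal usage sketch of the DFS-based reachability in ConditionTable above, using a plain string as a stand-in for the Tyrell Type key (any hashable works with the nested defaultdict); the edge values are illustrative:

ct = ConditionTable()
ct.append('Col', 'a', 'b')  # enabling 'a' transitively enables 'b'
ct.append('Col', 'b', 'c')
ct.append('Col', 'c', 'a')  # cycle: 'a', 'b', 'c' are mutually reachable

print(ct.dfs('Col', 'a'))   # OrderedSet(['b', 'c']) -- the key itself is excluded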
""" When running in term-mode (import `pwn` rather than `pwnlib`, stdout is a TTY and not running in a REPL), we can do proper indentation where lines too long to fit on a screen are split into multiple individually indented lines. Too see the difference try running with:: $ python indented.py and $ python -i indented.py Also notice that `pause()` can react on any key when in `term_mode`. """ from pwn import * context.log_level = 'info' log.indented('A' * 100) log.indented('B' * 100) log.indented('C' * 100) pause()
python
#!/usr/bin/env python import datetime import os import cv2 import time import rospy import numpy as np from bolt_msgs.msg import Control from std_msgs.msg import Int32 from sensor_msgs.msg import Image import sys sys.path.append('../neural_net/') import const from image_converter import ImageConverter from drive_run import DriveRun from config import Config from image_process import ImageProcess class NeuralControl: def __init__(self, weight_file_name, default_speed): rospy.init_node('run_neural') self.ic = ImageConverter() self.image_process = ImageProcess() self.rate = rospy.Rate(30) self.drive= DriveRun(weight_file_name) rospy.Subscriber('/bolt/front_camera/image_raw', Image, self.controller_cb) self.image = None self.image_processed = False self.config = Config() self.default_speed = default_speed def controller_cb(self, image): img = self.ic.imgmsg_to_opencv(image) cropped = img[const.CROP_Y1:const.CROP_Y2, const.CROP_X1:const.CROP_X2] img = cv2.resize(cropped,(const.IMAGE_WIDTH, const.IMAGE_HEIGHT)) self.image = self.image_process.process(img) if self.config.net_model_type == const.NET_TYPE_LSTM_FC6 \ or self.config.net_model_type == const.NET_TYPE_LSTM_FC7: self.image = np.array(self.image).reshape(1, self.config.image_size[1], self.config.image_size[0], self.config.image_size[2]) self.image_processed = True def main(): if len(sys.argv) != 3: exit('Usage:\n$ rosrun run_neural run_neural.py weight_file_name default_speed(0~1)') neural_control = NeuralControl(sys.argv[1], float(sys.argv[2])) print('\nStart running. Vroom. Vroom. Vroooooom......') joy_pub = rospy.Publisher('/bolt', Control, queue_size = 10) joy_data = Control() while not rospy.is_shutdown(): if neural_control.image_processed is False: continue prediction = neural_control.drive.run(neural_control.image) joy_data.steer = prediction joy_data.throttle = neural_control.default_speed joy_pub.publish(joy_data) ## print out sys.stdout.write('steer: ' + str(joy_data.steer) +' throttle: ' + str(joy_data.throttle) + '\r') sys.stdout.flush() ## ready for processing a new input image neural_control.image_processed = False neural_control.rate.sleep() if __name__ == "__main__": try: main() except KeyboardInterrupt: print ('\nShutdown requested. Exiting...')
python
'''
MIT License

Copyright (c) 2019 Arshdeep Bahga and Vijay Madisetti

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''

import boto3
from boto3.dynamodb.conditions import Key, Attr

AWS_KEY = "<enter>"
AWS_SECRET = "<enter>"
REGION = "us-east-1"

dynamodb = boto3.resource('dynamodb',
                          aws_access_key_id=AWS_KEY,
                          aws_secret_access_key=AWS_SECRET,
                          region_name=REGION)

client = boto3.client('dynamodb',
                      aws_access_key_id=AWS_KEY,
                      aws_secret_access_key=AWS_SECRET,
                      region_name=REGION)

table = dynamodb.Table('customers')

# Describe table
response = client.describe_table(TableName='customers')
print(response)

# Scan table
response = table.scan()
items = response['Items']
for item in items:
    print(item)

# Scan table with filter
response = table.scan(FilterExpression=Attr('country').eq('India'))
items = response['Items']
for item in items:
    print(item)

# Scan table with filters
response = table.scan(
    FilterExpression=Attr('createdAt').between('2012-03-26T00:00:00-00:00',
                                               '2013-03-26T00:00:00-00:00'))
items = response['Items']
for item in items:
    print(item)

# Query table with partition key
response = table.query(
    KeyConditionExpression=Key('customerID').eq('1623072020799'))
items = response['Items']
for item in items:
    print(item)
python
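One caveat worth showing for the scans above: scan returns at most 1 MB of data per call, so complete results require paging with LastEvaluatedKey (a documented boto3 pattern). A short sketch:

response = table.scan(FilterExpression=Attr('country').eq('India'))
items = response['Items']
# keep scanning until DynamoDB stops returning a continuation key
while 'LastEvaluatedKey' in response:
    response = table.scan(FilterExpression=Attr('country').eq('India'),
                          ExclusiveStartKey=response['LastEvaluatedKey'])
    items.extend(response['Items'])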
import io
import json

import numpy as np
import torch
from PIL import Image
from torchvision import models
import torchvision.transforms as transforms

imagenet_class_index = json.load(open('imagenet_class_index.json'))
model = models.densenet121(pretrained=True)
model.eval()


def transform_image(image_path):
    transforms_compose = transforms.Compose([transforms.Resize(255),
                                             transforms.CenterCrop(224),
                                             transforms.ToTensor(),
                                             transforms.Normalize(
                                                 [0.485, 0.456, 0.406],
                                                 [0.229, 0.224, 0.225])])
    # Force three channels so grayscale/RGBA files don't break Normalize,
    # which expects exactly three channels.
    image = Image.open(image_path).convert('RGB')
    return transforms_compose(image).unsqueeze(0)


def get_prediction(image_path):
    tensor = transform_image(image_path=image_path)
    # Inference only: no_grad avoids building the autograd graph.
    with torch.no_grad():
        outputs = model(tensor)
    results = torch.softmax(outputs, dim=1).numpy()[0]
    pred_idx = np.argmax(results)
    pred_prob = np.max(results)
    class_id, class_name = imagenet_class_index[str(pred_idx)]
    return {'class_id': class_id, 'class_name': class_name,
            'class_prob': pred_prob}
python
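A minimal usage sketch of the prediction helper above; the image path is illustrative and the file must exist alongside imagenet_class_index.json:

if __name__ == '__main__':
    result = get_prediction('cat.jpg')  # hypothetical sample image
    print(result['class_name'], round(float(result['class_prob']), 4))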
""" Configure file for hypoDD interface """ import os import numpy as np class Config(object): def __init__(self): # 1. format input self.fsta_in = 'input/HYPO.sta' self.fsta_out = 'input/station.dat' self.fpha_in = 'input/merge.pha' self.fpha_out = 'input/phase.dat' self.dep_corr = 5 # avoid air quake # 2. format output self.out_ctlg = 'output/indonesia.ctlg' self.out_pha = 'output/indonesia.pha' self.out_pha_all = 'output/indonesia_all.pha'
python
# NOTE: imports shown here for self-containment; in the source they belong
# at the top of the module that defines the enclosing class.
import matplotlib.colors as mcolors
import matplotlib.patches as patches
import matplotlib.pyplot as plt


def plot(self, game):
    """
    matplotlib plot representation of the resource game
    """
    # Create figure and axes
    fig, ax = plt.subplots()
    # `game` is assumed here to be the strategy profile expected by
    # player_cover (the body previously referenced an undefined name).
    pc = self.player_cover(game)
    # dict keys aren't indexable in Python 3, so materialise a list
    colors = list(mcolors.cnames.keys())
    for i in range(self.r_m):
        width = 10
        height = len(pc[i]) * 10 + 4
        x, y = (15 * i, 0)
        rect = patches.Rectangle((x, y), width, height, facecolor='none')
        for j in range(len(pc[i])):
            r = 4
            color = colors[pc[i][j]]
            circ = patches.Circle((x + 5, 3 + r + (r + 1) * 2 * j), r,
                                  color=color, ec=color)
            ax.add_patch(circ)
        ax.add_patch(rect)
    axwidth = 15 * self.r_m + 5
    ax.set_xlim((-5, axwidth))
    ax.set_ylim((-5, max(10 * self.n + 4, axwidth * .7)))
    plt.show()
python
# -*- coding: utf-8 -*- """ Created on Thu Jan 28 15:08:07 2021 @author: saadl """ import inspect import itertools import os import sys import unittest import numpy as np from tqdm import tqdm currentdir = os.path.dirname(os.path.abspath( inspect.getfile(inspect.currentframe()))) parentdir = os.path.dirname(currentdir) sys.path.insert(0, parentdir) import time_ordering def test_kappa(list_times, matrix_kappa, m_max=None): d = len(list_times) for i, j in itertools.product(range(d), range(d)): if (m_max is not None) and (m_max < len(list_times[i])): stop_index = m_max else: stop_index = len(list_times[i]) for m in tqdm(range(stop_index)): kappa_m = matrix_kappa[j][i][m] if kappa_m > -1 and kappa_m <= len(list_times[j])-2: t_im = list_times[i][m] if not ((list_times[j][kappa_m] < t_im) & (list_times[j][kappa_m+1] >= t_im)): return False return True def test_varkappa(list_times, matrix_varpi, h_max=None): d = len(list_times) for i, j in itertools.product(range(d), range(d)): if ((h_max is not None) and (h_max >= 1) and (h_max < len(matrix_varpi[i][j]))): stop_index = h_max else: stop_index = len(matrix_varpi[i][j]) for h in tqdm(range(1, stop_index)): varpi_h = matrix_varpi[i][j][h] t_jn = list_times[j][h-1] if varpi_h >= 1: if not ((list_times[i][varpi_h] > t_jn) & (list_times[i][varpi_h-1] <= t_jn)): return False return True # # matrix_kappa[j][i][10]=1 # Introduce an error for sanity check # test_kappa(matrix_kappa,list_times,i,j,m_max=None) # # matrix_varkappa[i][j][10]=1 # Introduce an error for sanity check # test_varkappa(matrix_varkappa,matrix_kappa,list_times,i,j,m_max=None) if __name__ == '__main__': unittest.main()
python
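For readers reconstructing what the tested matrices encode: matrix_kappa[j][i][m] is the index of the last event of process j strictly before the m-th event of process i. A hedged brute-force reference consistent with the assertion in test_kappa, assuming each list_times[j] is sorted ascending (numpy is already imported above):

def naive_kappa(list_times, i, j, m):
    # last index k with list_times[j][k] < t_im, so that
    # list_times[j][k] < t_im <= list_times[j][k + 1], as asserted above
    t_im = list_times[i][m]
    return int(np.searchsorted(list_times[j], t_im, side='left')) - 1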
__copyright__ = \ """ Copyright &copyright © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation. All rights reserved. This software is covered by US patents and copyright. This source code is to be used for academic research purposes only, and no commercial use is allowed. For any questions, please contact Edward J. Delp ([email protected]) at Purdue University. Last Modified: 10/02/2019 """ __license__ = "CC BY-NC-SA 4.0" __authors__ = "Javier Ribera, David Guera, Yuhao Chen, Edward J. Delp" __version__ = "1.6.0" import os import argparse import ast import math from tqdm import tqdm import numpy as np import pandas as pd from . import metrics from . import get_image_size # Parse command-line arguments parser = argparse.ArgumentParser( description='Compute metrics from results and GT.', formatter_class=argparse.ArgumentDefaultsHelpFormatter) required_args = parser.add_argument_group('MANDATORY arguments') optional_args = parser._action_groups.pop() required_args.add_argument('results', help='Input CSV file with the estimated locations.') required_args.add_argument('gt', help='Input CSV file with the groundtruthed locations.') required_args.add_argument('metrics', help='Output CSV file with the metrics ' '(MAE, AHD, Precision, Recall...)') required_args.add_argument('--dataset', type=str, required=True, help='Dataset directory with the images. ' 'This is used only to get the image diagonal, ' 'as the worst estimate for the AHD.') optional_args.add_argument('--radii', type=str, default=range(0, 15 + 1), metavar='Rs', help='Detections at dist <= R to a GT pt are True Positives.') args = parser.parse_args() # Prepare Judges that will compute P/R as fct of r and th judges = [metrics.Judge(r=r) for r in args.radii] df_results = pd.read_csv(args.results) df_gt = pd.read_csv(args.gt) df_metrics = pd.DataFrame(columns=['r', 'precision', 'recall', 'fscore', 'MAHD', 'MAPE', 'ME', 'MPE', 'MAE', 'MSE', 'RMSE', 'r', 'R2']) for j, judge in enumerate(tqdm(judges)): for idx, row_result in df_results.iterrows(): filename = row_result['filename'] row_gt = df_gt[df_gt['filename'] == filename].iloc()[0] w, h = get_image_size.get_image_size(os.path.join(args.dataset, filename)) diagonal = math.sqrt(w**2 + h**2) judge.feed_count(row_result['count'], row_gt['count']) judge.feed_points(ast.literal_eval(row_result['locations']), ast.literal_eval(row_gt['locations']), max_ahd=diagonal) df = pd.DataFrame(data=[[judge.r, judge.precision, judge.recall, judge.fscore, judge.mahd, judge.mape, judge.me, judge.mpe, judge.mae, judge.mse, judge.rmse, judge.pearson_corr \ if not np.isnan(judge.pearson_corr) else 1, judge.coeff_of_determination]], columns=['r', 'precision', 'recall', 'fscore', 'MAHD', 'MAPE', 'ME', 'MPE', 'MAE', 'MSE', 'RMSE', 'r', 'R2'], index=[j]) df.index.name = 'idx' df_metrics = df_metrics.append(df) # Write CSV of metrics to disk df_metrics.to_csv(args.metrics) """ Copyright &copyright © (c) 2019 The Board of Trustees of Purdue University and the Purdue Research Foundation. All rights reserved. This software is covered by US patents and copyright. This source code is to be used for academic research purposes only, and no commercial use is allowed. For any questions, please contact Edward J. Delp ([email protected]) at Purdue University. Last Modified: 10/02/2019 """
python
# sorting algorithm -> mergesort
# About mergesort: Best case O(n log n), Average case O(n log n), Worst case O(n log n)
# @author unobatbayar
# Thanks to HackerRank's mergesort tutorial

title = 'Welcome to Mergesort Algorithm!'
print(title + '\n' + 'Enter unsorted data set: ')
user_input = input()
array = user_input.split()


def merge_halves(array, start, middle, end):
    """Merge the sorted runs array[start..middle] and array[middle+1..end]."""
    left = array[start:middle + 1]
    right = array[middle + 1:end + 1]
    i = j = 0
    k = start
    while i < len(left) and j < len(right):
        if left[i] <= right[j]:
            array[k] = left[i]
            i += 1
        else:
            array[k] = right[j]
            j += 1
        k += 1
    array[k:end + 1] = left[i:] + right[j:]  # copy whichever run remains


def merge_sort(array, start, end):
    if start >= end:
        return
    middle = (start + end) // 2
    merge_sort(array, start, middle)
    merge_sort(array, middle + 1, end)
    merge_halves(array, start, middle, end)


merge_sort(array, 0, len(array) - 1)
print(array)
python
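Note that input().split() yields strings, so the sort above is lexicographic ('10' sorts before '9'). A hedged variant for numeric input, reusing the user_input already read:

numbers = [int(x) for x in user_input.split()]
merge_sort(numbers, 0, len(numbers) - 1)
print(numbers)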
import numpy as np
import pandas as pd
from tqdm import tqdm

from neo4j_engine import Neo4JEngine
from utils import parse_data, extract_gpa_data, merge_gpa_data

# not official courses, but need to be taken into account
special_prereqs = ['THREE YEARS OF HIGH SCHOOL MATHEMATICS',
                   'ONE YEAR OF HIGH SCHOOL CHEMISTRY']

# used to insert a class node
insert_command = 'CREATE (c: Class {courseId: "%s", courseTitle: "%s", creditHours: %d, description: "%s", GPA: %f})'

# used to create the OR and AND nodes, as well as the relevant relationships
and_insert_command = 'MATCH (c: Class {courseId: "%s"}) CREATE (c)<-[r: HAS]-(a: AND {courseId: "%s"})'
or_insert_command = 'MATCH (a: AND {courseId: "%s"}) CREATE (a)<-[r:HAS]-(o: OR {courseId: "%s", prereqId: %d})'
prereq_rel_command = 'MATCH (o: OR {courseId: "%s", prereqId: %d}), (c: Class {courseId: "%s"}) CREATE (o)<-[r:PREREQ]-(c)'


def extract_prereqs(prerequisite):
    """
    Extracts rough prerequisites based on tokenization, then converts into JSON format.
    Each key-value pair represents an OR combination.
    :param prerequisite: A raw string
    :return: A JSON-ized dictionary of lists.
    """
    # Replacement mapping for prereq strings
    PREREQ_REPLACE_MAPPING = {
        ': ': '',
        ' OR': ',',
        ' AND': ';',
        'ONE OF': '',
    }
    if type(prerequisite) == pd.core.series.Series:
        prerequisite = str(prerequisite.to_numpy())
    prereq_dict = {}
    prerequisite = prerequisite.strip().upper()
    if 'PREREQUISITE' not in prerequisite:
        return {'req1': []}
    if 'PREREQUISITES' in prerequisite:
        prerequisite = prerequisite[prerequisite.find('PREREQUISITES') + 14:]
    else:
        prerequisite = prerequisite[prerequisite.find('PREREQUISITE') + 13:]
    prerequisite = prerequisite.strip()
    for key, value in PREREQ_REPLACE_MAPPING.items():
        prerequisite = prerequisite.replace(key, value)
    # only keep the first sentence of the prerequisite description
    prerequisite = prerequisite.split(".")[0]
    # Splitting AND values based on semicolons and OR values after that based on commas
    # Also removes empty values
    split_values = [list(filter(lambda x: x != '', map(lambda x: x.strip(), string.split(","))))
                    for string in prerequisite.split(";")]
    # Adding each requisite to the JSON dictionary
    for i, value in enumerate(split_values):
        prereq_dict['req' + str(i + 1)] = value
    return prereq_dict


def clean_entries(data):
    """
    Guarantees that each prerequisite set has at least 1 by validating class list.
    Ignores "no prereq" classes.
    :param data: a Pandas dataframe
    :return: a cleaned pandas dataframe.
    """
    print('\nValidating entries...')
    valid_rows = np.array([True] * len(data))
    pbar = tqdm(total=len(data))
    # row_idx tracks the positional row index; the inner while loop uses its
    # own cursor so invalid rows are flagged at the correct position.
    for row_idx, (_, row) in enumerate(data.iterrows()):
        prereqs = row['calculated_prereqs']
        if len(prereqs) == 1 and len(prereqs['req1']) == 0:
            pbar.update(1)
            continue
        for or_req in prereqs:
            j = 0
            while j < len(prereqs[or_req]):
                match = data['courseId'].loc[[substring in prereqs[or_req][j]
                                              for substring in data['courseId']]]
                if len(match) > 0:
                    prereqs[or_req][j] = str(match.to_numpy()[0])
                    j += 1
                elif prereqs[or_req][j] in special_prereqs:
                    j += 1
                else:
                    del prereqs[or_req][j]
            if len(prereqs[or_req]) == 0:
                valid_rows[row_idx] = False
                break
        pbar.update(1)
    out_data = data.loc[valid_rows].copy()

    def remove_quotes(desc):
        return desc.replace('"', '').replace("'", '')

    out_data['description'] = out_data['description'].apply(remove_quotes)
    print('\nFinished cleaning entries')
    return out_data


def insert_to_database(file_path: str, engine: Neo4JEngine):
    """
    Inserts all class data into Neo4J database.
    Takes the latest class definition to generate prereqs.
    :param file_path: directory containing CSVs scraped from UIUC's courses site.
    :param engine: a Neo4J engine used to insert data
    """
    df = parse_data(file_path)
    # replaces NaNs with empty strings for classes w/o prereqs
    df['prereqs'].loc[[type(val) == float for val in df['prereqs']]] = ''
    df['calculated_prereqs'] = df['prereqs'].apply(extract_prereqs)
    # keeps only the rows that aren't identical to another class.
    df = df.loc[~((df['calculated_prereqs'] == {'req1': []}) & (df['prereqs'].str.contains('Same as')))]
    df = clean_entries(df)
    gpa_df = extract_gpa_data(file_path)
    df = merge_gpa_data(df, gpa_df)

    print('\nInserting class nodes to Neo4J...')
    pbar = tqdm(total=len(df))
    # inserts all the class nodes
    for row in df.to_numpy():
        to_insert = tuple(list(row[:-3]) + [row[-1]])
        exec_command = insert_command % to_insert
        try:
            engine.insert_node(exec_command)
        except Exception as e:
            print(exec_command)
            print('\n\n')
            print(e)
            break
        pbar.update(1)

    print('\nInserting special nodes to Neo4J...')
    pbar = tqdm(total=len(special_prereqs) + 1)
    # create special nodes
    engine.insert_node('CREATE (c: Class {courseId: "NOPREREQS"})')
    pbar.update(1)
    for val in special_prereqs:
        engine.insert_node('CREATE (c: Class {courseId: "%s"})' % val)
        pbar.update(1)

    print('\nInserting relationship nodes to Neo4J...')
    pbar = tqdm(total=len(df))
    # insert all relationship nodes
    for _, row in df.iterrows():
        calculated_prereqs = row['calculated_prereqs']
        and_exec_command = and_insert_command % (row['courseId'], row['courseId'])
        engine.raw_operation(and_exec_command)
        if len(calculated_prereqs) == 1 and len(calculated_prereqs['req1']) == 0:
            or_exec_command = or_insert_command % (row['courseId'], row['courseId'], 0)
            prereq_exec_command = prereq_rel_command % (row['courseId'], 0, "NOPREREQS")
            engine.raw_operation(or_exec_command)
            engine.raw_operation(prereq_exec_command)
        else:
            for i, or_prereq in enumerate(calculated_prereqs):
                or_exec_command = or_insert_command % (row['courseId'], row['courseId'], i)
                engine.raw_operation(or_exec_command)
                for prereq in calculated_prereqs[or_prereq]:
                    prereq_exec_command = prereq_rel_command % (row['courseId'], i, prereq)
                    engine.raw_operation(prereq_exec_command)
        pbar.update(1)
    print('\nFinished uploading nodes and relationships to Neo4J')


if __name__ == '__main__':
    f = open('../server_info')
    f.readline()
    uri, username, password = f.readline().strip().split(',')
    f.close()
    file_path = '../data'
    e = Neo4JEngine(uri, username, password)
    insert_to_database(file_path, e)
    del e
python
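A worked example of the tokenization above may help; the course names are illustrative:

# extract_prereqs('Prerequisite: CS 225 and one of MATH 225 or MATH 415.')
# upper-cases the text, strips the 'PREREQUISITE' prefix, maps ' AND' -> ';'
# and ' OR' -> ',', drops 'ONE OF', truncates at the first period, and yields:
#   {'req1': ['CS 225'], 'req2': ['MATH 225', 'MATH 415']}
# i.e. CS 225 is required, plus at least one of the two MATH courses.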
import json import datetime from collections import defaultdict from itertools import groupby from odoo import api, fields, models, _ from odoo.exceptions import AccessError, UserError from odoo.tools import date_utils, float_compare, float_round, float_is_zero class ReportBomStructure(models.AbstractModel): _inherit = 'report.mrp.report_bom_structure' @api.model def _get_report_values(self, docids, data=None): docs = [] if docids is None and data.get('docids', False): docids = data.get('docids') for bom_id in docids: bom = self.env['mrp.bom'].browse(bom_id) candidates = bom.product_id or bom.product_tmpl_id.product_variant_ids quantity = float(data.get('quantity', 1)) for product_variant_id in candidates: if data and data.get('childs'): doc = self._get_pdf_line(bom_id, product_id=product_variant_id, qty=quantity, child_bom_ids=json.loads(data.get('childs'))) else: doc = self._get_pdf_line(bom_id, product_id=product_variant_id, qty=quantity, unfolded=True) doc['report_type'] = 'pdf' doc['report_structure'] = data and data.get('report_type') or 'all' docs.append(doc) if not candidates: if data and data.get('childs'): doc = self._get_pdf_line(bom_id, qty=quantity, child_bom_ids=json.loads(data.get('childs'))) else: doc = self._get_pdf_line(bom_id, qty=quantity, unfolded=True) doc['report_type'] = 'pdf' doc['report_structure'] = data and data.get('report_type') or 'all' docs.append(doc) return { 'doc_ids': docids, 'doc_model': 'mrp.bom', 'docs': docs, } class MrpProduction(models.Model): """ Manufacturing Orders """ _inherit = 'mrp.production' parent_id = fields.Many2one(comodel_name='mrp.production') children_ids = fields.One2many(comodel_name='mrp.production', inverse_name='parent_id') user_rev = fields.Many2one('res.users', string='Revisó', required=False) date_rev = fields.Datetime(string='Fecha revisó') user_apr = fields.Many2one('res.users', string='Aprobó', required=False) date_apr = fields.Datetime(string='Fecha aprobó') user_con = fields.Many2one('res.users', string='Confirmó', required=False) date_con = fields.Datetime(string='Fecha confirmó') user_ter = fields.Many2one('res.users', string='Terminó', required=False) date_ter = fields.Datetime(string='Fecha terminó') state = fields.Selection([ ('draft', 'Elaboración'), ('review', 'Revisión'), ('approv', 'Aprobación'), ('confirmed', 'Confirmed'), ('progress', 'In Progress'), ('to_close', 'To Close'), ('done', 'Done'), ('cancel', 'Cancelled')], string='State', compute='_compute_state', copy=False, index=True, readonly=True, store=True, tracking=True, help=" * Draft: The MO is not confirmed yet.\n" " * Confirmed: The MO is confirmed, the stock rules and the reordering of the components are trigerred.\n" " * In Progress: The production has started (on the MO or on the WO).\n" " * To Close: The production is done, the MO has to be closed.\n" " * Done: The MO is closed, the stock moves are posted. 
\n" " * Cancelled: The MO has been cancelled, can't be confirmed anymore.") def to_draft(self): self._check_company() for mrp in self: mrp.write({'state': 'draft'}) (mrp.move_raw_ids | mrp.move_finished_ids).to_draft_production_stock_move() mrp.write({'user_rev': False}) mrp.write({'user_apr': False}) mrp.write({'date_rev': False}) mrp.write({'date_apr': False}) self._onchange_move_raw() return True def to_review(self): self._check_company() for mrp in self: mrp.write({'state': 'review'}) mrp.write({'user_rev': self.env.uid}) mrp.write({'date_rev': datetime.datetime.now()}) return True def to_approv(self): self._check_company() for mrp in self: mrp.write({'state': 'approv'}) mrp.write({'user_apr': self.env.uid}) mrp.write({'date_apr': datetime.datetime.now()}) return True def action_confirm(self): self._check_company() for mrp in self: mrp.write({'date_con': datetime.datetime.now()}) for production in self: production.write({'user_con': self.env.uid}) if not production.move_raw_ids: raise UserError(_("Add some materials to consume before marking this MO as to do.")) for move_raw in production.move_raw_ids: move_raw.write({ 'unit_factor': move_raw.product_uom_qty / production.product_qty, }) production._generate_finished_moves() production.move_raw_ids._adjust_procure_method() (production.move_raw_ids | production.move_finished_ids)._action_confirm() for picking in self.env['stock.picking'].search([['origin', '=', production.name]]): if picking.location_dest_id and picking.location_dest_id.name and 'Pre-Producción' in picking.location_dest_id.name: picking.action_assign() # Doing action assign on created stock picking return True def action_print_bom(self): data = dict(quantity=self.product_qty, docids=[self.bom_id.id], no_price=True, report_type='bom_structure') report = self.env.ref('mrp.action_report_bom_structure').with_context(discard_logo_check=True) report.name = 'Estructura de materiales - {}'.format(self.name) return report.report_action(self.bom_id, data) @api.model def create(self, values): if values.get('origin', False): parent = self.env['mrp.production'].search([['name', '=', values['origin']]]) if parent: prods = self.env['mrp.production'].search([['name', 'like', values['origin'] + '.']]) if len(prods) == 0: index = '0' else: index = max(list(map(lambda prod: prod.name.split('.')[-1], prods))) values['name'] = parent.name + '.' 
+ str(int(index) + 1) values['parent_id'] = parent.id if not values.get('name', False) or values['name'] == _('New'): picking_type_id = values.get('picking_type_id') or self._get_default_picking_type() picking_type_id = self.env['stock.picking.type'].browse(picking_type_id) if picking_type_id: values['name'] = picking_type_id.sequence_id.next_by_id() else: values['name'] = self.env['ir.sequence'].next_by_code('mrp.production') or _('New') if not values.get('procurement_group_id'): procurement_group_vals = self._prepare_procurement_group_vals(values) values['procurement_group_id'] = self.env["procurement.group"].create(procurement_group_vals).id production = super(MrpProduction, self).create(values) production.move_raw_ids.write({ 'group_id': production.procurement_group_id.id, 'reference': production.name, # set reference when MO name is different than 'New' }) # Trigger move_raw creation when importing a file if 'import_file' in self.env.context: production._onchange_move_raw() return production class MrpBomLineOver(models.Model): _inherit = 'mrp.bom.line' def _get_default_product_uom_id(self): return self.env['uom.uom'].search([], limit=1, order='id').id product_qty_display = fields.Float('Cantidad', default=1.0, digits='Unit of Measure', required=False) product_uom_id_display = fields.Many2one( 'uom.uom', 'Unidad de medida', default=_get_default_product_uom_id, required=True, help="Unit of Measure (Unit of Measure) is the unit of measurement for the inventory control", domain="[('category_id', '=', product_uom_category_id)]") @api.model_create_multi def create(self, vals_list): for values in vals_list: if 'product_id' in values and 'product_uom_id' not in values: values['product_uom_id'] = self.env['product.product'].browse(values['product_id']).uom_id.id mrp_bom_line = super(MrpBomLineOver, self).create(vals_list) mrp_bom_line.onchange_product_uom_id_display() mrp_bom_line.onchange_product_qty_display() return mrp_bom_line @api.onchange('product_uom_id_display') def onchange_product_uom_id_display(self): for mbl in self: res = {} if not mbl.product_uom_id_display or not mbl.product_id: return res if mbl.product_uom_id_display.category_id != mbl.product_id.uom_id.category_id: mbl.product_uom_id_display = self.product_id.uom_id.id res['warning'] = {'title': _('Warning'), 'message': _('The Product Unit of Measure you chose has a different category than in the product form.')} return res @api.onchange('product_id') def onchange_product_id_display(self): for mbl in self: if mbl.product_id: mbl.product_uom_id_display = mbl.product_id.uom_id.id @api.onchange('product_qty_display', 'product_uom_id_display') def onchange_product_qty_display(self): for mbl in self: if mbl.product_qty_display and mbl.product_uom_id_display: mbl.product_qty = mbl.product_qty_display * mbl.product_uom_id_display.factor_inv * mbl.product_id.uom_id.factor class MrpProductProduce(models.TransientModel): _inherit = "mrp.product.produce" def do_produce(self): """ Save the current wizard and go back to the MO. 
""" for line in self.raw_workorder_line_ids: for line_lot in line.lot_id.quant_ids: if line_lot.location_id == self.move_raw_ids.location_id: if line_lot.quantity < line.qty_done: raise UserError(_('No hay existencias suficientes en el lote ' + line_lot.lot_id.name + ' en la ubicación ' + line_lot.location_id.complete_name + '.')) self.ensure_one() self._record_production() self._check_company() for mrp in self.production_id: mrp.write({'user_ter': self.env.uid}) mrp.write({'date_ter': datetime.datetime.now()}) return {'type': 'ir.actions.act_window_close'}
python
# Common shapes for the aafigure package.
#
# (C) 2009 Chris Liechti <[email protected]>
#
# This is open source software under the BSD license. See LICENSE.txt for more
# details.
#
# This is intentionally not a doc comment, to make it easier to include the
# module in Sphinx ``.. automodule::``

import math


def point(object):
    """return a Point instance.
       - if object is already a Point instance it's returned as is
       - complex numbers are converted to Points
       - a tuple with two elements (x,y)
    """
    if isinstance(object, Point):
        return object
    if type(object) is complex:
        return Point(object.real, object.imag)
    if type(object) is tuple and len(object) == 2:
        return Point(object[0], object[1])
    raise ValueError('can not convert %r to a Point' % (object,))


def group(list_of_shapes):
    """return a group if the number of shapes is greater than one"""
    if len(list_of_shapes) > 1:
        return [Group(list_of_shapes)]
    else:
        return list_of_shapes


class Point:
    """A single point. This class primary use is to represent coordinates
       for the other shapes.
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return 'Point(%r, %r)' % (self.x, self.y)

    def distance(self, other):
        return math.sqrt((self.x - other.x)**2 + (self.y - other.y)**2)

    def midpoint(self, other):
        return Point((self.x + other.x) / 2, (self.y + other.y) / 2)


class Line:
    """Line with starting and ending point. Both ends can have arrows"""
    def __init__(self, start, end, thick=False):
        self.thick = thick
        self.start = point(start)
        self.end = point(end)

    def __repr__(self):
        return 'Line(%r, %r)' % (self.start, self.end)


class Rectangle:
    """Rectangle with two edge coordinates."""
    def __init__(self, p1, p2):
        self.p1 = point(p1)
        self.p2 = point(p2)

    def __repr__(self):
        return 'Rectangle(%r, %r)' % (self.p1, self.p2)


class Circle:
    """Circle with center coordinates and radius."""
    def __init__(self, center, radius):
        self.center = point(center)
        self.radius = radius

    def __repr__(self):
        return 'Circle(%r, %r)' % (self.center, self.radius)


class Label:
    """A text label at a position"""
    def __init__(self, position, text):
        self.position = position
        self.text = text

    def __repr__(self):
        return 'Label(%r, %r)' % (self.position, self.text)


class Group:
    """A group of shapes"""
    def __init__(self, shapes=None):
        if shapes is None:
            shapes = []
        self.shapes = shapes

    def __repr__(self):
        return 'Group(%r)' % (self.shapes,)


class Arc:
    """A smooth arc between two points"""
    def __init__(self, start, start_angle, end, end_angle,
                 start_curve=True, end_curve=True):
        self.start = point(start)
        self.end = point(end)
        self.start_angle = start_angle
        self.end_angle = end_angle
        self.start_curve = start_curve
        self.end_curve = end_curve

    def __repr__(self):
        return 'Arc(%r, %r, %r, %r, %r, %r)' % (
            self.start, self.start_angle, self.end, self.end_angle,
            self.start_curve, self.end_curve)

    def start_angle_rad(self):
        return self.start_angle * math.pi / 180

    def end_angle_rad(self):
        return self.end_angle * math.pi / 180

    def __tension(self):
        return self.start.distance(self.end) / 3

    # assumptions: x increases going right, y increases going down
    def start_control_point(self):
        if self.start_curve:
            dd = self.__tension()
            angle = self.start_angle_rad()
            return Point(self.start.x + dd * math.cos(angle),
                         self.start.y - dd * math.sin(angle))
        else:
            return self.start

    def end_control_point(self):
        if self.end_curve:
            dd = self.__tension()
            angle = self.end_angle_rad()
            return Point(self.end.x + dd * math.cos(angle),
                         self.end.y - dd * math.sin(angle))
        else:
            return self.end
python
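A small numeric check of the Bézier control-point logic above (hedged; recall that y grows downward in this coordinate system, so a 90° angle points "up"):

a = Arc(Point(0, 0), 90, Point(10, 10), 0)
# tension = distance((0, 0), (10, 10)) / 3 ~= 4.714
print(a.start_control_point())  # Point(~0.0, ~-4.714)
print(a.end_control_point())    # Point(~14.714, 10.0)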
import json from collections import OrderedDict from keycloak.admin import KeycloakAdminBase __all__ = ('Users',) class Users(KeycloakAdminBase): _paths = { 'collection': '/auth/admin/realms/{realm}/users' } _realm_name = None def __init__(self, realm_name, *args, **kwargs): self._realm_name = realm_name super(Users, self).__init__(*args, **kwargs) def create(self, username, **kwargs): """ Create a user in Keycloak http://www.keycloak.org/docs-api/3.4/rest-api/index.html#_users_resource :param str username: :param object credentials: (optional) :param str first_name: (optional) :param str last_name: (optional) :param str email: (optional) :param boolean enabled: (optional) """ payload = OrderedDict(username=username) if 'credentials' in kwargs: payload['credentials'] = [kwargs['credentials']] if 'first_name' in kwargs: payload['firstName'] = kwargs['first_name'] if 'last_name' in kwargs: payload['lastName'] = kwargs['last_name'] if 'email' in kwargs: payload['email'] = kwargs['email'] if 'enabled' in kwargs: payload['enabled'] = kwargs['enabled'] return self._client.post( url=self._client.get_full_url( self.get_path('collection', realm=self._realm_name) ), data=json.dumps(payload) )
python
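A hedged usage sketch of the create() wrapper above. How the underlying admin client is obtained depends on the surrounding library; the client keyword and admin_client value shown here are assumptions, as is the credential payload shape:

users = Users(realm_name='example-realm', client=admin_client)
users.create(username='jdoe',
             email='[email protected]',
             first_name='John',
             last_name='Doe',
             enabled=True,
             credentials={'type': 'password', 'value': 's3cret'})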
import requests import json from .helper import Helper class Tasks(Helper): def __init__(self, base_url, org_pk, teams_pk, access_token, _csrf_token, headers, pagination): super().__init__(base_url, org_pk, teams_pk, access_token, _csrf_token, headers, pagination) def empty_tasks_trash(self, project_id): """ Set delete all not-completed archived tasks in project """ route = 'v1/tasks/empty-trash/{0}/'.format(project_id) response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, None) return self.process_response(response) def get_task_labels_list(self, page=1): """ Get the list of tasks labels """ route = 'v1/tasks/label/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page) response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None) return self.process_response(response, True) def create_task_label(self, data): """ Create a new task label Keywords arguments: data -- data of the new label to be created: { "creator": orguser_pk, "team": team_pk, "title": "label title", "description": "new task label" } """ route = 'v1/tasks/label/list/{0}/'.format(self.org_pk) response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, json.dumps(data)) return self.process_response(response) def get_task_label_details(self, label_pk): """ Get the task label details Keywords arguments: label_pk -- pk of the task label """ route = 'v1/tasks/label/{0}/'.format(label_pk) response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None) return self.process_response(response) def update_task_label_details(self, label_pk, data): """ Update the task label details Keywords arguments: label_pk -- pk of the task label data -- content of the update: { "creator": orguser_pk, "team": team_pk, "title": "new title", "description": "description updated" } """ route = 'v1/tasks/label/{0}/'.format(label_pk) response = self.process_request(requests, 'PATCH', self.base_url, route, self.headers, None, json.dumps(data)) return self.process_response(response) def delete_task_label(self, label_pk): """ Delete the task label details Keywords arguments: label_pk -- pk of the task label """ route = 'v1/tasks/label/{0}/'.format(label_pk) response = self.process_request(requests, 'DELETE', self.base_url, route, self.headers, None, None) return self.process_response(response) def get_tasks_list(self, page=1): """ Get the tasks list """ route = 'v1/tasks/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page) response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None) return self.process_response(response, True) def create_task(self, data): """ Create a new task Keywords arguments: data -- data of the new task to be created: { "creator": orguser_pk, "created_at": "string", "labels": [ label_pk, ... 
], "title": "string", "due_date": "string", "description": "string" } """ route = 'v1/tasks/list/{0}/'.format(self.org_pk) response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, json.dumps(data)) return self.process_response(response) def get_tasks_lists_list(self, page=1): """ Get the list of tasks list """ route = 'v1/tasks/lists/list/{0}/?page_size={1}&page={2}'.format(self.org_pk, self.pagination, page) response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None) return self.process_response(response, True) def create_tasks_list(self, data): """ Create a new list of tasks Keywords arguments: data -- data of the new list of tasks to be created: { "author": orguser_pk, "title": "new list", "tasks": [ task_pk, ... ], "followers": [ orguser_pk, ... ] } """ route = 'v1/tasks/lists/list/{0}/'.format(self.org_pk) response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, json.dumps(data)) return self.process_response(response) def get_tasks_list_details(self, list_pk): """ Get the list of tasks details Keywords arguments: list_pk -- the pk of list of tasks """ route = 'v1/tasks/lists/{0}/'.format(list_pk) response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None) return self.process_response(response) def update_tasks_list_details(self, list_pk, data): """ Update the list of tasks details Keywords arguments: list_pk -- the pk of list of tasks data -- content of the update: { "author": orguser_pk, "title": "new list", "tasks": [ task_pk, ... ], "followers": [ orguser_pk, ... ] } """ route = 'v1/tasks/lists/{0}/'.format(list_pk) response = self.process_request(requests, 'PATCH', self.base_url, route, self.headers, None, json.dumps(data)) return self.process_response(response) def delete_tasks_list(self, list_pk): """ Delete the list of tasks Keywords arguments: list_pk -- the pk of list of tasks """ route = 'v1/tasks/lists/{0}/'.format(list_pk) response = self.process_request(requests, 'DELETE', self.base_url, route, self.headers, None, None) return self.process_response(response) def log_tasks(self): """ Set all tasks to is_logged True """ route = 'v1/tasks/log-tasks/{0}/'.format(self.org_pk) response = self.process_request(requests, 'POST', self.base_url, route, self.headers, None, None) return self.process_response(response) def get_tasks_timeline(self): route = 'v1/tasks/timeline/{0}/'.format(self.org_pk) response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None) return self.process_response(response) def get_task_details(self, pk): """ Get task details Keywords arguments: pk -- the pk of the task """ route = 'v1/tasks/{0}/'.format(pk) response = self.process_request(requests, 'GET', self.base_url, route, self.headers, None, None) return self.process_response(response) def update_task_details(self, pk, data): """ Update task details Keywords arguments: pk -- the pk of the task data -- content of the update: { "creator": orguser_pk, "created_at": "string", "estimate": 0, "is_logged": true, "labels": [ "string" ], "title": "string", "due_date": "string", "completed_at": "string", "description": "string", "is_completed": true } """ route = 'v1/tasks/{0}/'.format(pk) response = self.process_request(requests, 'PATCH', self.base_url, route, self.headers, None, json.dumps(data)) return self.process_response(response) def delete_task(self, pk): """ Delete task Keywords arguments: pk -- the pk of the task """ route = 
'v1/tasks/{0}/'.format(pk) response = self.process_request(requests, 'DELETE', self.base_url, route, self.headers, None, None) return self.process_response(response)
python
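A hedged usage sketch of the Tasks wrapper above; base_url, the pks, tokens and headers are placeholders that must come from an authenticated session:

tasks = Tasks(base_url, org_pk, teams_pk, access_token, csrf_token, headers, 50)
label = tasks.create_task_label({'team': team_pk,
                                 'title': 'urgent',
                                 'description': 'needs attention today'})
labels = tasks.get_task_labels_list(page=1)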
from .util import * class __THMTeam(object): def get_teams(self) -> list: """ Returns all teams :return: List containing all teams """ return http_get(self.session, '/api/all-teams')
python
import dht11
import RPi.GPIO as GPIO
import time
from datetime import date, datetime
from pathlib import Path
import math
import pickle
import numpy as np

sleep_time_high = 0.5
model_filename = r'/home/pi/code/raspi/4/models/zing_brightness_v0.pkl'

# motor pins
motor_in1 = 11
motor_in2 = 13
motor_in3 = 15
motor_in4 = 35

led_pin = 12
ir_pin = 16
ultrasonic_trig_pin = 38
ultrasonic_echo_pin = 37
internal_ldr_pin = 32
external_ldr_pin = 29
dht11_pin = 40

ir_key = 'IR'
ultrasonic_key = 'Ultrasonic'
internal_ldr_key = 'internal LDR'
external_ldr_key = 'external LDR'
temperature_key = 'DHT 11 temperature'
humidity_key = 'DHT 11 humidity'

half_of_speed_of_sound = 343000 / 2  # mm/sec
ultrasonic_trigger_interval = 0.00001  # sec
far_away_threshold = 200  # mm
sensor_stabilise_time = 0.5
#min_ai_luminosity = 80
#max_ai_luminosity = 90
pwm_frequency = 1000  # hertz.
dimming_interval = 5
brightening_interval = 2
luminosity_steps = 100
ldr_max = 700
ldr_min = 90

# --------------------
# motor params
step_sleep = 0.004  # 4 ms per step (time.sleep takes seconds)
# For motor 28BYJ-48 and driver ULN2003
step_sequence = [
    [1, 0, 0, 1],
    [1, 0, 0, 0],
    [1, 1, 0, 0],
    [0, 1, 0, 0],
    [0, 1, 1, 0],
    [0, 0, 1, 0],
    [0, 0, 1, 1],
    [0, 0, 0, 1]
]
# --------------------

GPIO.setmode(GPIO.BOARD)
GPIO.setup(led_pin, GPIO.OUT)
GPIO.setup(ir_pin, GPIO.IN)
GPIO.setup(ultrasonic_echo_pin, GPIO.IN)
GPIO.setup(ultrasonic_trig_pin, GPIO.OUT)
# LDR pin setup occurs inside the ldr() method
GPIO.setup(motor_in1, GPIO.OUT)
GPIO.setup(motor_in2, GPIO.OUT)
GPIO.setup(motor_in3, GPIO.OUT)
GPIO.setup(motor_in4, GPIO.OUT)
GPIO.output(motor_in1, GPIO.LOW)
GPIO.output(motor_in2, GPIO.LOW)
GPIO.output(motor_in3, GPIO.LOW)
GPIO.output(motor_in4, GPIO.LOW)
motor_pins = [motor_in1, motor_in2, motor_in3, motor_in4]

brightness_model = pickle.load(open(model_filename, 'rb'))

log_file_loc = "/home/pi/log/"
sensor_status_path = '/home/pi/code/raspi/4/persist_sensor_status.txt'


def main():
    pwm = GPIO.PWM(led_pin, pwm_frequency)
    pwm.start(0)
    brightness = 100
    logfile = None
    dht11_sensor = dht11.DHT11(pin=dht11_pin)
    prev_temperature = 26.8
    prev_humidity = 78.0
    reset_motor()
    try:
        logfile = initialise_log()
        print("Timestamp\tIR Status\tUltrasonic Status\tInternal Incident Radiation\tExternal Incident Radiation\tTemperature\tHumidity\tHeadcount\tBrightness Level")
        while True:
            ir_output = GPIO.input(ir_pin)
            ultrasonic_data = get_distance()
            internal_ldr_data = ldr(internal_ldr_pin)
            external_ldr_data = ldr(external_ldr_pin)
            temperature, humidity = measure_temperature_humidity(dht11_sensor)
            # DHT11 returns zeros on a failed read; reuse the last good values
            if temperature == 0:
                temperature = prev_temperature
            if humidity == 0:
                humidity = prev_humidity
            prev_temperature = temperature
            prev_humidity = humidity
            sensor_data = {ir_key: ir_output,
                           ultrasonic_key: ultrasonic_data,
                           internal_ldr_key: internal_ldr_data,
                           external_ldr_key: external_ldr_data,
                           temperature_key: temperature,
                           humidity_key: humidity}
            output = decide(sensor_data)
            headcount = 0
            if output == 100:
                headcount = 1
            print(f"{datetime.now().strftime('%H:%M:%S')}\t{ir_output}\t{ultrasonic_data}\t{internal_ldr_data}\t{external_ldr_data}\t{temperature}\t{humidity}\t{headcount}\t{output}")
            logfile.write(f"{datetime.now().strftime('%H:%M:%S')}\t{ir_output}\t{ultrasonic_data}\t{internal_ldr_data}\t{external_ldr_data}\t{temperature}\t{humidity}\t{headcount}\t{output}\n")
            prev_brightness = brightness
            brightness = output
            dim_led(pwm, brightness, prev_brightness)
    except KeyboardInterrupt:
        pass
    finally:
        GPIO.cleanup()
        if logfile:
            logfile.close()


def reset_motor():
    print("~~~~ resetting windows blinds to 0° ...")
    motor_angular_displacement = 0
    with open(sensor_status_path, 'r') as fileHandler:
        motor_angular_displacement = int(fileHandler.read())
    if motor_angular_displacement > 0:
        with open(sensor_status_path, 'w') as fileHandler:
            fileHandler.write('0')
        run_motor(motor_angular_displacement, False)


def decide(sensor_data):
    rotate_motor(sensor_data[internal_ldr_key])
    output = compute_intensity_and_postprocess(sensor_data)
    return output


def compute_intensity_and_postprocess(sensor_data):
    output = predict_brightness(sensor_data)
    return output


def rotate_motor(external_luminosity):
    motor_angular_displacement = int((90 * external_luminosity) / 100)
    with open(sensor_status_path, 'r') as fileHandler:
        prev_motor_angular_displacement = int(fileHandler.read())
    diff = abs(motor_angular_displacement - prev_motor_angular_displacement)
    if diff >= 10:
        run_motor(diff, motor_angular_displacement > prev_motor_angular_displacement)
        with open(sensor_status_path, 'w') as fileHandler:
            fileHandler.write(str(motor_angular_displacement))


def measure_temperature_humidity(dht11_sensor):
    result = dht11_sensor.read()
    humidity, temperature = result.humidity, result.temperature
    return temperature, humidity


def ldr(ldr_pin):
    # discharge the capacitor, then time how long the pin takes to read HIGH
    GPIO.setup(ldr_pin, GPIO.OUT)
    GPIO.output(ldr_pin, GPIO.LOW)
    time.sleep(0.1)
    GPIO.setup(ldr_pin, GPIO.IN)
    t0 = time.time_ns()
    while GPIO.input(ldr_pin) == GPIO.LOW:
        pass
    t1 = time.time_ns()
    diff = math.log(t1 - t0)
    diff = diff * diff
    scaled_value = ((diff - ldr_max) * 100) / (ldr_min - ldr_max)
    if scaled_value > 100:
        scaled_value = 100
    elif scaled_value < 25:
        scaled_value = 25
    scaled_value = (scaled_value - 25) * 100 / 75
    scaled_value = round(scaled_value, 2)
    return scaled_value


def motor_cleanup():
    GPIO.output(motor_in1, GPIO.LOW)
    GPIO.output(motor_in2, GPIO.LOW)
    GPIO.output(motor_in3, GPIO.LOW)
    GPIO.output(motor_in4, GPIO.LOW)


def run_motor(angle, direction):
    motor_step_counter = 0
    # 4096 steps is 360° <=> 5.625*(1/64) per step
    step_count = int(angle * 4096 / 360)
    try:
        for i in range(step_count):
            for pin in range(0, len(motor_pins)):
                GPIO.output(motor_pins[pin], step_sequence[motor_step_counter][pin])
            if direction == True:  # anticlockwise
                motor_step_counter = (motor_step_counter - 1) % 8
            elif direction == False:  # clockwise
                motor_step_counter = (motor_step_counter + 1) % 8
            else:
                print("direction must be True / False only. Other value was provided.")
                motor_cleanup()
            time.sleep(step_sleep)
    except KeyboardInterrupt:
        pass
    finally:
        motor_cleanup()


def initialise_log():
    today = date.today()
    d = today.strftime("%Y-%m-%d")
    logfileName = log_file_loc + d + ".log"
    f = Path(logfileName)
    fileExists = f.exists()
    logfile = open(logfileName, "a")
    if not fileExists:
        logfile.write("Timestamp\tIR Status\tUltrasonic Status\tInternal Incident Radiation\tExternal Incident Radiation\tTemperature\tHumidity\tHeadcount\tBrightness Level\n")
    return logfile


def normalise_brightness(level):
    if level > 100:
        level = 100
    elif level == 0:
        level = 10
    elif level < 0:
        level = 0
    return level


def dim_led(pwm, brightness, prev_brightness):
    if brightness == prev_brightness:
        time.sleep(sleep_time_high)
        return
    brightness = int(round(normalise_brightness(brightness), 0))
    prev_brightness = int(round(normalise_brightness(prev_brightness), 0))
    transition_interval = brightening_interval
    if brightness < prev_brightness:
        transition_interval = dimming_interval
    delta = brightness - prev_brightness
    stay_interval = transition_interval * 1.0 / luminosity_steps
    step = int(delta * 1.0 / luminosity_steps)
    if delta != 0:
        if step == 0:
            if delta < 0:
                step = -1
            else:
                step = 1
            stay_interval = step * 1.0 / delta
    # ramp the duty cycle towards the target, one step at a time
    brightness += step
    if brightness > 100:
        brightness = 101
    for i in range(prev_brightness, brightness, step):
        pwm.ChangeDutyCycle(i)
        time.sleep(stay_interval)


def get_distance():
    # Initialise distance and pin
    distance = -1
    GPIO.output(ultrasonic_trig_pin, False)
    time.sleep(sensor_stabilise_time)
    GPIO.output(ultrasonic_trig_pin, True)
    time.sleep(ultrasonic_trigger_interval)
    GPIO.output(ultrasonic_trig_pin, False)
    while GPIO.input(ultrasonic_echo_pin) == 0:
        t_init = time.time()
    while GPIO.input(ultrasonic_echo_pin) == 1:
        t_final = time.time()
        distance = 0
    if distance == 0:
        time_taken = t_final - t_init
        distance = round(time_taken * half_of_speed_of_sound, 2)
    return distance


def compute_led_intensity(inputs):
    # NOTE: call_model is not defined in this file; decide() uses
    # predict_brightness() instead, so this helper appears to be unused.
    if ir_key in inputs:
        inputs[ir_key] = not inputs[ir_key]
    # When the ultrasonic_key sensor doesn't work, we set the default value to be 25000 mm
    # This is set to 25000 mm assuming no object is detected by the sensor
    if ultrasonic_key not in inputs:
        inputs[ultrasonic_key] = 25000
    brightness_level = call_model(inputs)
    return brightness_level


def predict_brightness(inputs):
    output = 10
    preprocessed_sensor_data = preprocess_sensor_data_for_brightness(inputs)
    brightness_level = brightness_model.predict(preprocessed_sensor_data)
    if brightness_level[0] <= 1:
        output = 10
    else:
        output = brightness_level[0] * 20
    output = int(round(output))
    if output > 100:
        output = 100
    elif output < 0:
        output = 0
    return output


def preprocess_sensor_data_for_brightness(inputs):
    if ir_key not in inputs:
        inputs[ir_key] = 1
    if internal_ldr_key not in inputs:
        inputs[internal_ldr_key] = 50
    if ultrasonic_key not in inputs:
        inputs[ultrasonic_key] = 500
    external_luminosity = inputs[internal_ldr_key]
    if external_luminosity <= 10:
        external_luminosity_level = 0  # something like pitch black night
    elif external_luminosity <= 20:
        external_luminosity_level = 1  # 4 - 6 AM
    elif external_luminosity <= 40:
        external_luminosity_level = 2  # 6 - 8 AM
    elif external_luminosity <= 60:
        external_luminosity_level = 3  # 8 - 10 AM
    elif external_luminosity <= 80:
        external_luminosity_level = 4  # 10 - 12 A/PM
    else:
        external_luminosity_level = 5  # 12 - 2 PM
    distance = inputs[ultrasonic_key]  # in millimeters
    if distance <= 200:
        distance_level = 0
    elif distance <= 300:
        distance_level = 1
    elif distance <= 400:
        distance_level = 2
    elif distance <= 500:
        distance_level = 3
    elif distance <= 600:
        distance_level = 4
    else:
        distance_level = 5
    sensor_data = [external_luminosity_level, distance_level, inputs[ir_key]]
    sensor_data_arr = np.array(sensor_data)
    sensor_data_arr = sensor_data_arr.reshape(1, -1)
    return sensor_data_arr


main()
python
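To make the pickled model's input concrete, a worked example of the bucketing in preprocess_sensor_data_for_brightness above (the sensor readings are illustrative):

features = preprocess_sensor_data_for_brightness({internal_ldr_key: 85,   # > 80  -> luminosity level 5
                                                  ultrasonic_key: 350,    # 300-400 mm -> distance level 2
                                                  ir_key: 1})
print(features)  # [[5 2 1]]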
import datetime from decimal import Decimal import pytest from leasing.enums import ContactType, InvoiceState, InvoiceType from leasing.models import Invoice, ReceivableType from leasing.models.invoice import InvoiceSet @pytest.mark.django_db def test_create_credit_invoice_full(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal('123.45'), billed_amount=Decimal('123.45'), outstanding_amount=Decimal('123.45'), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) invoice.create_credit_invoice() assert invoice.outstanding_amount == Decimal(0) credit_note = Invoice.objects.get(credited_invoice=invoice) assert credit_note.type == InvoiceType.CREDIT_NOTE assert credit_note.lease == lease assert credit_note.recipient == contact assert credit_note.rows.all().count() == 1 assert credit_note.billing_period_start_date == billing_period_start_date assert credit_note.billing_period_end_date == billing_period_end_date assert credit_note.billed_amount == Decimal(0) credit_note_row = credit_note.rows.first() assert credit_note_row.amount == pytest.approx(Decimal('123.45')) assert credit_note_row.receivable_type == receivable_type assert Invoice.objects.get(pk=invoice.id).state == InvoiceState.REFUNDED @pytest.mark.django_db def test_create_credit_invoice_fails(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( type=InvoiceType.CREDIT_NOTE, lease=lease, total_amount=Decimal('123.45'), billed_amount=Decimal('123.45'), outstanding_amount=Decimal('123.45'), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) with pytest.raises(RuntimeError) as e: invoice.create_credit_invoice() assert str(e.value) == 'Can not credit invoice with the type "credit_note". Only type "charge" allowed.' 
with pytest.raises(Invoice.DoesNotExist): Invoice.objects.get(credited_invoice=invoice) @pytest.mark.django_db def test_create_credit_invoice_full_two_rows(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal('193.45'), billed_amount=Decimal('193.45'), outstanding_amount=Decimal('193.45'), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) invoice_row_factory( invoice=invoice, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(70), ) invoice.create_credit_invoice() credit_note = Invoice.objects.get(credited_invoice=invoice) assert credit_note.type == InvoiceType.CREDIT_NOTE assert credit_note.lease == lease assert credit_note.recipient == contact assert credit_note.rows.all().count() == 2 assert credit_note.billing_period_start_date == billing_period_start_date assert credit_note.billing_period_end_date == billing_period_end_date assert credit_note.billed_amount == Decimal(0) credit_note_row = credit_note.rows.filter(receivable_type=receivable_type).first() assert credit_note_row.amount == pytest.approx(Decimal('123.45')) assert credit_note_row.receivable_type == receivable_type credit_note_row2 = credit_note.rows.filter(receivable_type=receivable_type2).first() assert credit_note_row2.amount == pytest.approx(Decimal(70)) assert credit_note_row2.receivable_type == receivable_type2 assert Invoice.objects.get(pk=invoice.id).state == InvoiceState.REFUNDED @pytest.mark.django_db def test_create_credit_invoice_one_row_full(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal('193.45'), billed_amount=Decimal('193.45'), outstanding_amount=Decimal('193.45'), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) invoice_row2 = invoice_row_factory( invoice=invoice, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(70), ) 
invoice.create_credit_invoice(row_ids=[invoice_row2.id]) credit_note = Invoice.objects.get(credited_invoice=invoice) assert credit_note.type == InvoiceType.CREDIT_NOTE assert credit_note.lease == lease assert credit_note.recipient == contact assert credit_note.rows.all().count() == 1 assert credit_note.billing_period_start_date == billing_period_start_date assert credit_note.billing_period_end_date == billing_period_end_date assert credit_note.billed_amount == Decimal(0) credit_note_row = credit_note.rows.first() assert credit_note_row.amount == pytest.approx(Decimal(70)) assert credit_note_row.receivable_type == receivable_type2 assert Invoice.objects.get(pk=invoice.id).state == InvoiceState.OPEN @pytest.mark.django_db def test_create_credit_invoice_one_row_partly(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal('193.45'), billed_amount=Decimal('193.45'), outstanding_amount=Decimal('193.45'), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) invoice_row2 = invoice_row_factory( invoice=invoice, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(70), ) invoice.create_credit_invoice(row_ids=[invoice_row2.id], amount=20) assert invoice.outstanding_amount == Decimal('173.45') credit_note = Invoice.objects.get(credited_invoice=invoice) assert credit_note.type == InvoiceType.CREDIT_NOTE assert credit_note.lease == lease assert credit_note.recipient == contact assert credit_note.rows.all().count() == 1 assert credit_note.billing_period_start_date == billing_period_start_date assert credit_note.billing_period_end_date == billing_period_end_date assert credit_note.billed_amount == Decimal(0) credit_note_row = credit_note.rows.first() assert credit_note_row.amount == pytest.approx(Decimal(20)) assert credit_note_row.receivable_type == receivable_type2 assert Invoice.objects.get(pk=invoice.id).state == InvoiceState.OPEN @pytest.mark.django_db def test_create_credit_invoice_one_row_too_much(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal('193.45'), billed_amount=Decimal('193.45'), outstanding_amount=Decimal('193.45'), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) 
receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) invoice_row2 = invoice_row_factory( invoice=invoice, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(70), ) with pytest.raises(RuntimeError) as e: invoice.create_credit_invoice(row_ids=[invoice_row2.id], amount=200) assert str(e.value) == 'Cannot credit more than invoice row amount' @pytest.mark.django_db def test_create_credit_invoice_full_one_receivable_type(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal('193.45'), billed_amount=Decimal('193.45'), outstanding_amount=Decimal('193.45'), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) invoice_row_factory( invoice=invoice, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(70), ) invoice.create_credit_invoice(receivable_type=receivable_type2) assert invoice.outstanding_amount == Decimal('123.45') credit_note = Invoice.objects.get(credited_invoice=invoice) assert credit_note.type == InvoiceType.CREDIT_NOTE assert credit_note.lease == lease assert credit_note.recipient == contact assert credit_note.rows.all().count() == 1 assert credit_note.billing_period_start_date == billing_period_start_date assert credit_note.billing_period_end_date == billing_period_end_date assert credit_note.billed_amount == Decimal(0) credit_note_row = credit_note.rows.first() assert credit_note_row.amount == pytest.approx(Decimal(70)) assert credit_note_row.receivable_type == receivable_type2 assert Invoice.objects.get(pk=invoice.id).state == InvoiceState.OPEN @pytest.mark.django_db def test_create_credit_invoiceset_fails(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_set_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice_set = invoice_set_factory( lease=lease, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) invoice = invoice_factory( type=InvoiceType.CREDIT_NOTE, lease=lease, total_amount=Decimal('193.45'), billed_amount=Decimal('193.45'), outstanding_amount=Decimal('193.45'), recipient=contact, 
billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) invoice_row_factory( invoice=invoice, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(70), ) invoice2 = invoice_factory( type=InvoiceType.CREDIT_NOTE, lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) invoice_row_factory( invoice=invoice2, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(150), ) invoice_row_factory( invoice=invoice2, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(50), ) with pytest.raises(RuntimeError) as e: invoice_set.create_credit_invoiceset() assert str(e.value) == 'No refundable invoices found (no invoices with the type "charge" found)' assert InvoiceSet.objects.count() == 1 @pytest.mark.django_db def test_create_credit_invoiceset_full(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_set_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice_set = invoice_set_factory( lease=lease, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) invoice = invoice_factory( lease=lease, total_amount=Decimal('193.45'), billed_amount=Decimal('193.45'), outstanding_amount=Decimal('193.45'), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal('123.45'), ) invoice_row_factory( invoice=invoice, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(70), ) invoice2 = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) invoice_row_factory( invoice=invoice2, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(150), ) invoice_row_factory( invoice=invoice2, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, 
billing_period_end_date=billing_period_end_date, amount=Decimal(50), ) invoice_set.create_credit_invoiceset() assert InvoiceSet.objects.count() == 2 credit_note_invoiceset = InvoiceSet.objects.first() assert credit_note_invoiceset.lease == lease assert credit_note_invoiceset.billing_period_start_date == billing_period_start_date assert credit_note_invoiceset.billing_period_end_date == billing_period_end_date credit_note1 = Invoice.objects.get(credited_invoice=invoice) assert credit_note1.type == InvoiceType.CREDIT_NOTE assert credit_note1.lease == lease assert credit_note1.recipient == contact assert credit_note1.rows.count() == 2 assert credit_note1.billing_period_start_date == billing_period_start_date assert credit_note1.billing_period_end_date == billing_period_end_date assert credit_note1.billed_amount == Decimal(0) credit_note_row1 = credit_note1.rows.filter(receivable_type=receivable_type).first() assert credit_note_row1.amount == pytest.approx(Decimal('123.45')) credit_note_row2 = credit_note1.rows.filter(receivable_type=receivable_type2).first() assert credit_note_row2.amount == pytest.approx(Decimal(70)) credit_note2 = Invoice.objects.get(credited_invoice=invoice2) assert credit_note2.type == InvoiceType.CREDIT_NOTE assert credit_note2.lease == lease assert credit_note2.recipient == contact assert credit_note2.rows.count() == 2 assert credit_note2.billing_period_start_date == billing_period_start_date assert credit_note2.billing_period_end_date == billing_period_end_date assert credit_note2.billed_amount == Decimal(0) credit_note_row3 = credit_note2.rows.filter(receivable_type=receivable_type).first() assert credit_note_row3.amount == pytest.approx(Decimal(150)) credit_note_row4 = credit_note2.rows.filter(receivable_type=receivable_type2).first() assert credit_note_row4.amount == pytest.approx(Decimal(50)) assert Invoice.objects.get(pk=invoice.id).state == InvoiceState.REFUNDED assert Invoice.objects.get(pk=invoice2.id).state == InvoiceState.REFUNDED @pytest.mark.django_db def test_create_credit_invoiceset_receivable_type(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_set_factory, tenant_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) contact2 = contact_factory(first_name="First name2", last_name="Last name2", type=ContactType.PERSON) tenant1 = tenant_factory(lease=lease, share_numerator=1, share_denominator=2) tenant2 = tenant_factory(lease=lease, share_numerator=1, share_denominator=2) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice_set = invoice_set_factory( lease=lease, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) invoice = invoice_factory( lease=lease, total_amount=Decimal(170), billed_amount=Decimal(170), outstanding_amount=Decimal(170), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, tenant=tenant1, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(100), ) invoice_row_factory( 
invoice=invoice, tenant=tenant1, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(70), ) invoice2 = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) invoice_row_factory( invoice=invoice2, tenant=tenant2, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(150), ) invoice_row_factory( invoice=invoice2, tenant=tenant2, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(50), ) invoice_set.create_credit_invoiceset(receivable_type=receivable_type) assert InvoiceSet.objects.count() == 2 credit_note_invoiceset = InvoiceSet.objects.first() assert credit_note_invoiceset.lease == lease assert credit_note_invoiceset.billing_period_start_date == billing_period_start_date assert credit_note_invoiceset.billing_period_end_date == billing_period_end_date credit_note1 = Invoice.objects.get(credited_invoice=invoice) assert credit_note1.type == InvoiceType.CREDIT_NOTE assert credit_note1.lease == lease assert credit_note1.recipient == contact assert credit_note1.rows.count() == 1 assert credit_note1.billing_period_start_date == billing_period_start_date assert credit_note1.billing_period_end_date == billing_period_end_date assert credit_note1.billed_amount == Decimal(0) credit_note_row1 = credit_note1.rows.filter(receivable_type=receivable_type).first() assert credit_note_row1.amount == pytest.approx(Decimal(100)) credit_note2 = Invoice.objects.get(credited_invoice=invoice2) assert credit_note2.type == InvoiceType.CREDIT_NOTE assert credit_note2.lease == lease assert credit_note2.recipient == contact2 assert credit_note2.rows.count() == 1 assert credit_note2.billing_period_start_date == billing_period_start_date assert credit_note2.billing_period_end_date == billing_period_end_date assert credit_note2.billed_amount == Decimal(0) credit_note_row2 = credit_note2.rows.filter(receivable_type=receivable_type).first() assert credit_note_row2.amount == pytest.approx(Decimal(150)) @pytest.mark.django_db def test_create_credit_invoiceset_receivable_type_partly(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_set_factory, tenant_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) contact2 = contact_factory(first_name="First name2", last_name="Last name2", type=ContactType.PERSON) tenant1 = tenant_factory(lease=lease, share_numerator=3, share_denominator=6) tenant2 = tenant_factory(lease=lease, share_numerator=1, share_denominator=6) tenant3 = tenant_factory(lease=lease, share_numerator=2, share_denominator=6) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice_set = invoice_set_factory( lease=lease, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) invoice = invoice_factory( lease=lease, total_amount=Decimal(400), billed_amount=Decimal(400), outstanding_amount=Decimal(400), 
recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, tenant=tenant1, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(300), ) invoice_row_factory( invoice=invoice, tenant=tenant1, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(100), ) invoice2 = invoice_factory( lease=lease, total_amount=Decimal(400), billed_amount=Decimal(400), outstanding_amount=Decimal(400), recipient=contact2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) invoice_row_factory( invoice=invoice2, tenant=tenant2, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(100), ) invoice_row_factory( invoice=invoice2, tenant=tenant2, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(50), ) invoice_row_factory( invoice=invoice2, tenant=tenant3, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice_row_factory( invoice=invoice2, tenant=tenant3, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(50), ) invoice_set.create_credit_invoiceset_for_amount(receivable_type=receivable_type, amount=200) assert InvoiceSet.objects.count() == 2 credit_note_invoiceset = InvoiceSet.objects.first() assert credit_note_invoiceset.lease == lease assert credit_note_invoiceset.billing_period_start_date == billing_period_start_date assert credit_note_invoiceset.billing_period_end_date == billing_period_end_date credit_note1 = Invoice.objects.get(credited_invoice=invoice) assert credit_note1.type == InvoiceType.CREDIT_NOTE assert credit_note1.lease == lease assert credit_note1.recipient == contact assert credit_note1.rows.count() == 1 assert credit_note1.billing_period_start_date == billing_period_start_date assert credit_note1.billing_period_end_date == billing_period_end_date assert credit_note1.billed_amount == Decimal(0) credit_note_row1 = credit_note1.rows.filter(receivable_type=receivable_type).first() assert credit_note_row1.amount == pytest.approx(Decimal(100)) credit_note2 = Invoice.objects.get(credited_invoice=invoice2) assert credit_note2.type == InvoiceType.CREDIT_NOTE assert credit_note2.lease == lease assert credit_note2.recipient == contact2 assert credit_note2.rows.count() == 2 assert credit_note2.rows.filter(tenant=tenant2).count() == 1 assert credit_note2.rows.filter(tenant=tenant3).count() == 1 assert credit_note2.billing_period_start_date == billing_period_start_date assert credit_note2.billing_period_end_date == billing_period_end_date assert credit_note2.billed_amount == Decimal(0) credit_note_row2 = credit_note2.rows.filter(tenant=tenant2).first() assert credit_note_row2.amount == pytest.approx(Decimal('33.33')) credit_note_row3 = credit_note2.rows.filter(tenant=tenant3).first() assert credit_note_row3.amount == 
pytest.approx(Decimal('66.67')) @pytest.mark.django_db def test_create_credit_invoiceset_receivable_type_partly_no_tenants(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_set_factory): lease = lease_factory( type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) contact2 = contact_factory(first_name="First name2", last_name="Last name2", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice_set = invoice_set_factory( lease=lease, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) invoice = invoice_factory( lease=lease, total_amount=Decimal(300), billed_amount=Decimal(300), outstanding_amount=Decimal(300), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) receivable_type = ReceivableType.objects.get(pk=1) receivable_type2 = ReceivableType.objects.get(pk=2) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice_row_factory( invoice=invoice, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(100), ) invoice2 = invoice_factory( lease=lease, total_amount=Decimal(300), billed_amount=Decimal(300), outstanding_amount=Decimal(300), recipient=contact2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, invoiceset=invoice_set, ) invoice_row_factory( invoice=invoice2, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(100), ) invoice_row_factory( invoice=invoice2, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(50), ) invoice_row_factory( invoice=invoice2, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(100), ) invoice_row_factory( invoice=invoice2, receivable_type=receivable_type2, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(50), ) invoice_set.create_credit_invoiceset_for_amount(receivable_type=receivable_type, amount=200) assert InvoiceSet.objects.count() == 2 credit_note_invoiceset = InvoiceSet.objects.first() assert credit_note_invoiceset.lease == lease assert credit_note_invoiceset.billing_period_start_date == billing_period_start_date assert credit_note_invoiceset.billing_period_end_date == billing_period_end_date credit_note1 = Invoice.objects.get(credited_invoice=invoice) assert credit_note1.type == InvoiceType.CREDIT_NOTE assert credit_note1.lease == lease assert credit_note1.recipient == contact assert credit_note1.rows.count() == 1 assert credit_note1.billing_period_start_date == billing_period_start_date assert credit_note1.billing_period_end_date == billing_period_end_date assert credit_note1.billed_amount == Decimal(0) credit_note_row1 = credit_note1.rows.filter(receivable_type=receivable_type).first() 
assert credit_note_row1.amount == pytest.approx(Decimal('66.67')) credit_note2 = Invoice.objects.get(credited_invoice=invoice2) assert credit_note2.type == InvoiceType.CREDIT_NOTE assert credit_note2.lease == lease assert credit_note2.recipient == contact2 assert credit_note2.rows.count() == 2 assert credit_note2.rows.filter(receivable_type=receivable_type).count() == 2 assert credit_note2.billing_period_start_date == billing_period_start_date assert credit_note2.billing_period_end_date == billing_period_end_date assert credit_note2.billed_amount == Decimal(0) credit_note_row2 = credit_note2.rows.first() assert credit_note_row2.amount == pytest.approx(Decimal('66.66')) credit_note_row3 = credit_note2.rows.last() assert credit_note_row3.amount == pytest.approx(Decimal('66.66')) @pytest.mark.django_db def test_create_credit_invoice_refunded_in_parts(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice.create_credit_invoice(amount=100) assert invoice.outstanding_amount == Decimal(100) invoice.create_credit_invoice(amount=100) assert invoice.outstanding_amount == Decimal(0) credit_notes = Invoice.objects.filter(credited_invoice=invoice) assert credit_notes.count() == 2 assert Invoice.objects.get(pk=invoice.id).state == InvoiceState.REFUNDED @pytest.mark.django_db def test_create_credit_invoice_too_much(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) with pytest.raises(RuntimeError) as e: invoice.create_credit_invoice(amount=205) assert str(e.value) == 'Cannot credit more than invoice row amount' @pytest.mark.django_db def test_create_credit_invoice_too_much_already_credited(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First 
name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice.create_credit_invoice(amount=100) assert invoice.outstanding_amount == Decimal(100) with pytest.raises(RuntimeError) as e: invoice.create_credit_invoice(amount=105) assert str(e.value) == 'Cannot credit more than total amount minus already credited amount' @pytest.mark.django_db def test_create_credit_invoice_full_already_credited_partly(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice.create_credit_invoice(amount=50) assert invoice.outstanding_amount == Decimal(150) credit_note = invoice.create_credit_invoice() assert credit_note.total_amount == Decimal(150) @pytest.mark.django_db def test_outstanding_amount_after_partial_payment(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_payment_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice_payment_factory( invoice=invoice, paid_amount=Decimal(100), paid_date=datetime.date(year=2018, month=1, day=1) ) invoice.update_amounts() assert invoice.outstanding_amount == Decimal(100) assert invoice.state == InvoiceState.OPEN @pytest.mark.django_db def test_outstanding_amount_after_one_full_payment(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, 
invoice_payment_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice_payment_factory( invoice=invoice, paid_amount=Decimal(200), paid_date=datetime.date(year=2018, month=1, day=1) ) invoice.update_amounts() assert invoice.outstanding_amount == Decimal(0) assert invoice.state == InvoiceState.PAID @pytest.mark.django_db def test_outstanding_amount_after_multiple_payments_partial(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_payment_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice_payment_factory( invoice=invoice, paid_amount=Decimal(20), paid_date=datetime.date(year=2018, month=1, day=1) ) invoice_payment_factory( invoice=invoice, paid_amount=Decimal(30), paid_date=datetime.date(year=2018, month=1, day=1) ) invoice.update_amounts() assert invoice.outstanding_amount == Decimal(150) assert invoice.state == InvoiceState.OPEN @pytest.mark.django_db def test_outstanding_amount_after_multiple_payments_full(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_payment_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=Decimal(200), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice_payment_factory( invoice=invoice, paid_amount=Decimal(100), 
paid_date=datetime.date(year=2018, month=1, day=1) ) invoice_payment_factory( invoice=invoice, paid_amount=Decimal(100), paid_date=datetime.date(year=2018, month=1, day=1) ) invoice.update_amounts() assert invoice.outstanding_amount == Decimal(0) assert invoice.state == InvoiceState.PAID @pytest.mark.django_db def test_outstanding_amount_with_collection_charge(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_payment_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=None, collection_charge=Decimal(5), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice.update_amounts() assert invoice.outstanding_amount == Decimal(205) assert invoice.state == InvoiceState.OPEN @pytest.mark.django_db def test_outstanding_amount_with_collection_charge_one_payment(django_db_setup, lease_factory, contact_factory, invoice_factory, invoice_row_factory, invoice_payment_factory): lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1, ) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=7, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(200), billed_amount=Decimal(200), outstanding_amount=None, collection_charge=Decimal(5), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date ) receivable_type = ReceivableType.objects.get(pk=1) invoice_row_factory( invoice=invoice, receivable_type=receivable_type, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date, amount=Decimal(200), ) invoice_payment_factory( invoice=invoice, paid_amount=Decimal(100), paid_date=datetime.date(year=2018, month=1, day=1) ) invoice.update_amounts() assert invoice.outstanding_amount == Decimal(105) assert invoice.state == InvoiceState.OPEN @pytest.mark.django_db def test_calculate_penalty_amount(django_db_setup, lease_factory, contact_factory, invoice_factory): calculation_date = datetime.date(year=2018, month=9, day=6) lease = lease_factory(type_id=1, municipality_id=1, district_id=5, notice_period_id=1) contact = contact_factory(first_name="First name", last_name="Last name", type=ContactType.PERSON) billing_period_start_date = datetime.date(year=2017, month=1, day=1) billing_period_end_date = datetime.date(year=2017, month=12, day=31) invoice = invoice_factory( lease=lease, total_amount=Decimal(500), billed_amount=Decimal(500), outstanding_amount=Decimal(100), due_date=datetime.date(year=2017, month=1, day=1), recipient=contact, billing_period_start_date=billing_period_start_date, billing_period_end_date=billing_period_end_date ) penalty_interest_data = 
invoice.calculate_penalty_interest(calculation_date=calculation_date) assert penalty_interest_data['interest_start_date'] == datetime.date(year=2017, month=1, day=2) assert penalty_interest_data['interest_end_date'] == calculation_date assert penalty_interest_data['total_interest_amount'].compare(Decimal('11.76')) == 0 assert len(penalty_interest_data['interest_periods']) == 4 @pytest.mark.django_db def test_is_same_recipient_and_tenants(django_db_setup, invoices_test_data): assert invoices_test_data['invoice1'].is_same_recipient_and_tenants(invoices_test_data['invoice2']) @pytest.mark.django_db def test_is_same_recipient_and_tenants_dict(django_db_setup, invoices_test_data): invoice_keys = [ 'type', 'lease', 'recipient', 'due_date', 'billing_period_start_date', 'billing_period_end_date', 'total_amount', 'billed_amount', 'state' ] invoice2_dict = {} for key in invoice_keys: invoice2_dict[key] = getattr(invoices_test_data['invoice2'], key) invoice2_dict['rows'] = [] invoice_row_keys = ['tenant', 'receivable_type', 'billing_period_start_date', 'billing_period_end_date', 'amount'] for row in invoices_test_data['invoice2'].rows.all(): invoice_row_dict = {} for key in invoice_row_keys: invoice_row_dict[key] = getattr(row, key) invoice2_dict['rows'].append(invoice_row_dict) assert invoices_test_data['invoice1'].is_same_recipient_and_tenants(invoice2_dict) @pytest.mark.django_db def test_is_same_recipient_and_tenants2(django_db_setup, invoices_test_data): invoice_row = invoices_test_data['invoice2'].rows.first() invoice_row.tenant = invoices_test_data['tenant2'] invoice_row.save() assert invoices_test_data['invoice1'].is_same_recipient_and_tenants(invoices_test_data['invoice2']) is False @pytest.mark.django_db def test_is_same_recipient_and_tenants3(django_db_setup, invoices_test_data, contact_factory): contact3 = contact_factory(first_name="First name 3", last_name="Last name 3", type=ContactType.PERSON) invoice1 = invoices_test_data['invoice1'] invoice1.recipient = contact3 invoice1.save() assert invoices_test_data['invoice1'].is_same_recipient_and_tenants(invoices_test_data['invoice2']) is False @pytest.mark.django_db def test_is_same_recipient_and_tenants4(django_db_setup, invoices_test_data, contact_factory): assert invoices_test_data['invoice1'].is_same_recipient_and_tenants(invoices_test_data['invoice2']) invoices_test_data['invoice1'].rows.all().delete() assert invoices_test_data['invoice1'].is_same_recipient_and_tenants(invoices_test_data['invoice2']) is False invoices_test_data['invoice2'].rows.all().delete() assert invoices_test_data['invoice1'].is_same_recipient_and_tenants(invoices_test_data['invoice2'])
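# Hedged summary of the crediting API exercised by the tests above (the call
# signatures come straight from the tests; nothing here is new behaviour):
#
#     invoice.create_credit_invoice()                         # full credit
#     invoice.create_credit_invoice(amount=x)                 # partial credit
#     invoice.create_credit_invoice(row_ids=[...])            # whole rows
#     invoice.create_credit_invoice(row_ids=[...], amount=x)  # part of a row
#     invoice.create_credit_invoice(receivable_type=rt)       # one receivable type
#     invoice_set.create_credit_invoiceset()                  # every invoice in a set
#     invoice_set.create_credit_invoiceset_for_amount(
#         receivable_type=rt, amount=x)                       # pro-rata split
#
# Invariants asserted throughout: a full credit sets the invoice state to
# InvoiceState.REFUNDED and its outstanding_amount to 0; a partial credit
# leaves it OPEN and only reduces outstanding_amount; crediting a credit note,
# more than a row's amount, or more than the uncredited remainder raises
# RuntimeError.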
import json

try:
    # Python 2: ``basestring`` covers both str and unicode.
    string_types = basestring  # noqa: F821
except NameError:
    # Python 3: every string is unicode.
    string_types = str


def is_string_or_unicode(s):
    """
    Determine whether or not this object is a string or unicode.

    :param s: object
    :return: bool
    """
    return isinstance(s, string_types)


def is_json(s):
    """
    Determine whether or not this object can be converted into JSON.

    :param s: object
    :return: bool
    """
    if is_string_or_unicode(s):
        try:
            json.loads(s)
            return True
        except ValueError:
            # Not valid JSON; fall through and report False.
            pass
    return False
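# Hedged usage sketch for the helpers above (the inputs are made-up examples,
# not part of the original module):
if __name__ == "__main__":
    assert is_json('{"a": 1}') is True      # valid JSON object
    assert is_json("[1, 2, 3]") is True     # valid JSON array
    assert is_json("not json") is False     # plain text fails json.loads
    assert is_json(42) is False             # non-strings are rejected up front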
from errno import ENOENT


class InvalidArchiveError(Exception):
    """Raised when libarchive can't open a file"""

    def __init__(self, fn, msg, *args, **kw):
        msg = ("Error with archive %s. You probably need to delete and re-download "
               "or re-create this file. Message from libarchive was:\n\n%s"
               % (fn, msg))
        self.errno = ENOENT
        super(InvalidArchiveError, self).__init__(msg)
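# Hedged usage sketch (assumed caller code, not part of the original module):
# a hypothetical extraction helper wraps the low-level failure in
# InvalidArchiveError so users get an actionable message.
def safe_extract(path):
    try:
        raise OSError("truncated archive")  # stand-in for a libarchive failure
    except OSError as exc:
        raise InvalidArchiveError(path, str(exc))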
import sqlalchemy as sql
import sqlalchemy.sql.functions as db_func
from schools3.config.data import db_tables
from sqlalchemy.dialects.postgresql import aggregate_order_by


def get_student_data(grade_bounds):
    metadata = sql.MetaData()  # note: currently unused
    all_snapshots = db_tables.clean_all_snapshots_table
    hs_grade_gpa = get_students_grade_gpa().cte('hs_grade_gpa')
    inv_table = db_tables.clean_intervention_table
    get_ordered_array = lambda c, o: db_func.array_agg(aggregate_order_by(c, o))

    # per-student yearly rates, averaged over the distinct school years seen
    discipline_incidents_rate = \
        db_func.sum(all_snapshots.c.discipline_incidents) / \
        db_func.count(sql.distinct(all_snapshots.c.school_year))
    absenteeism_rate = \
        db_func.sum(all_snapshots.c.days_absent) / \
        db_func.count(sql.distinct(all_snapshots.c.school_year))
    unexcused_absenteeism_rate = \
        db_func.sum(all_snapshots.c.days_absent_unexcused) / \
        db_func.count(sql.distinct(all_snapshots.c.school_year))

    basic_info = sql.select([
        all_snapshots.c.student_lookup,
        db_func.max(all_snapshots.c.gender).label('gender'),
        db_func.max(all_snapshots.c.ethnicity).label('ethnicity'),
        discipline_incidents_rate.label('discipline_incidents_rate'),
        absenteeism_rate.label('absenteeism_rate'),
        unexcused_absenteeism_rate.label('unexcused_absenteeism_rate'),
        db_func.array_agg(sql.distinct(all_snapshots.c.disability)).label('disabilities'),
        db_func.array_agg(sql.distinct(all_snapshots.c.disadvantagement)).label('disadvantagements'),
        db_func.array_agg(sql.distinct(all_snapshots.c.limited_english)).label('limited_english'),
        db_func.array_agg(sql.distinct(all_snapshots.c.special_ed)).label('special_ed'),
        db_func.max(all_snapshots.c.graduation_date).label('graduation_date'),
        get_ordered_array(all_snapshots.c.school_code, all_snapshots.c.grade).label('school_codes'),
        get_ordered_array(all_snapshots.c.school_name, all_snapshots.c.grade).label('school_names'),
        get_ordered_array(all_snapshots.c.grade, all_snapshots.c.grade).label('snapshots_grades'),
        get_ordered_array(all_snapshots.c.school_year, all_snapshots.c.grade).label('snapshots_school_years')
    ]).where(
        sql.and_(
            all_snapshots.c.grade >= grade_bounds[0],
            all_snapshots.c.grade <= grade_bounds[1]
        )
    ).group_by(
        all_snapshots.c.student_lookup
    ).cte('basic_info')

    hs_gpa_info = sql.select([
        hs_grade_gpa.c.student_lookup,
        get_ordered_array(hs_grade_gpa.c.gpa, hs_grade_gpa.c.grade).label('gpas'),
        get_ordered_array(hs_grade_gpa.c.grade, hs_grade_gpa.c.grade).label('hs_grades'),
        get_ordered_array(hs_grade_gpa.c.school_year, hs_grade_gpa.c.grade).label('hs_school_years'),
        get_ordered_array(hs_grade_gpa.c.num_classes, hs_grade_gpa.c.grade).label('num_classes')
    ]).where(
        sql.and_(
            hs_grade_gpa.c.grade >= grade_bounds[0],
            hs_grade_gpa.c.grade <= grade_bounds[1]
        )
    ).group_by(
        hs_grade_gpa.c.student_lookup
    ).cte('hs_gpa_info')

    inv_info = sql.select([
        inv_table.c.student_lookup,
        get_ordered_array(inv_table.c.inv_group, inv_table.c.grade).label('inv_groups'),
        get_ordered_array(inv_table.c.membership_code, inv_table.c.grade).label('membership_codes'),
        get_ordered_array(inv_table.c.grade, inv_table.c.grade).label('inv_grades'),
        get_ordered_array(inv_table.c.school_year, inv_table.c.grade).label('inv_school_years'),
    ]).where(
        sql.and_(
            inv_table.c.grade >= grade_bounds[0],
            inv_table.c.grade <= grade_bounds[1]
        )
    ).group_by(
        inv_table.c.student_lookup
    ).cte('inv_info')

    labels = db_tables.sketch_temp_labels_table

    # left-join all CTEs on student_lookup; after the first join the combined
    # selectable prefixes column names with the original table name
    to_join = [basic_info, hs_gpa_info, inv_info, labels]
    joined = to_join[0]
    for i in range(1, len(to_join)):
        if i == 1:
            on_clause = (joined.c.student_lookup == to_join[i].c.student_lookup)
        else:
            on_clause = (joined.c[to_join[0].name + '_student_lookup'] ==
                         to_join[i].c.student_lookup)
        joined = sql.join(
            left=joined,
            right=to_join[i],
            onclause=on_clause,
            isouter=True
        )

    # keep only the first of the duplicated student_lookup columns
    cs = []
    added_student_lookup = False
    for c in joined.c:
        if c.name == 'student_lookup':
            if not added_student_lookup:
                cs.append(c)
                added_student_lookup = True
        else:
            cs.append(c)

    return sql.select(cs).select_from(joined)


def get_query_with_students(query, student_lookup_query):
    s = student_lookup_query.cte('s')
    student_lookups = sql.select([s.c.student_lookup]).cte('s_lookup')
    q = query.cte('query')
    joined = sql.join(
        student_lookups, q,
        onclause=(student_lookups.c.student_lookup == q.c.student_lookup),
    )
    return sql.select(
        [student_lookups.c.student_lookup] +
        [c for c in q.c if c.name != 'student_lookup']
    ).select_from(joined)


def get_students_grade_gpa():
    '''
    Returns a query for a table that adds a "grade" column to the
    high_school_gpa table
    '''
    high_school_gpa = db_tables.clean_high_school_gpa_table
    all_snapshots = db_tables.clean_all_snapshots_table

    left = sql.select([
        all_snapshots.c.student_lookup,
        all_snapshots.c.grade,
        all_snapshots.c.school_year
    ]).where(
        sql.and_(
            all_snapshots.c.grade >= 9,
            all_snapshots.c.grade <= 12
        )
    ).alias('a')
    right = high_school_gpa.alias('b')

    joined = sql.join(
        left=left,
        right=right,
        onclause=sql.and_(
            left.c.student_lookup == right.c.student_lookup,
            left.c.school_year == right.c.school_year,
        )
    )
    return sql.select([
        joined.c.a_student_lookup,
        joined.c.a_grade,
        joined.c.a_school_year,
        joined.c.b_gpa,
        joined.c.b_num_classes
    ]).select_from(joined).group_by(*list(joined.c))


def get_snapshot_students(cols=[], hs_only=True):
    assert isinstance(cols, list), 'cols must be a list'
    all_snapshots = db_tables.clean_all_snapshots_table
    select_cols = [
        all_snapshots.c.student_lookup,
        all_snapshots.c.school_year,
        all_snapshots.c.grade
    ] + cols
    if hs_only:
        return sql.select(select_cols).where(all_snapshots.c.grade >= 9)
    return sql.select(select_cols)


def get_labels():
    labels_table = db_tables.sketch_temp_labels_table
    return sql.select([sql.distinct(labels_table.c.label)])


def get_students_with_label(label):
    labels_table = db_tables.sketch_temp_labels_table
    return sql.select(
        [labels_table.c.student_lookup]
    ).where(labels_table.c.label == label)
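# Hedged usage sketch (the 'graduated' label is a made-up example): the
# helpers above build SQLAlchemy selectables without executing them, so a
# query can be compiled to PostgreSQL SQL for inspection before it is run.
if __name__ == "__main__":
    from sqlalchemy.dialects import postgresql

    query = get_students_with_label('graduated')
    print(query.compile(dialect=postgresql.dialect()))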
import sys
sys.path.append("../")
from appJar import gui

with gui("FRAME DEMO", "250x150", bg='yellow') as app:
    with app.frame("LEFT", row=0, column=0, bg='blue', sticky='NEW', stretch='COLUMN'):
        app.label("Label on the left 1", bg='red')
        app.label("Label on the left 2", bg='orange')
        app.label("Label on the left 3", bg='yellow')
    with app.frame("RIGHT", row=0, column=1, bg='green', fg='white'):
        for x in range(5):
            app.radio("RADIO", "Choice " + str(x))
""" Copyright 2018 Skyscanner Ltd Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import boto3 from pycfmodel.model.resources.properties.policy import Policy class ManagedPolicyTransformer(object): """ Go through managed policie ARNs, fetch them and add them as regular policies so that they can be checked by the rules. """ def __init__(self, cf_model): self.cf_model = cf_model self.iam_client = boto3.client("iam") def transform_managed_policies(self): self.parse_fetch_update( self.cf_model.resources.get("AWS::IAM::Role", []), ) self.parse_fetch_update( self.cf_model.resources.get("AWS::IAM::Group", []), ) def parse_fetch_update(self, resources): for resource in resources: for managed_policy_arn in resource.managed_policy_arns: managed_policy = self.iam_client.get_policy( PolicyArn=managed_policy_arn, ) version_id = managed_policy.get("Policy", {}).get("DefaultVersionId") if not version_id: continue policy_version = self.iam_client.get_policy_version( PolicyArn=managed_policy_arn, VersionId=version_id, ) policy_document_json = { "PolicyDocument": policy_version["PolicyVersion"]["Document"], "PolicyName": "AutoTransformedManagedPolicy{}".format(version_id), } policy_document = Policy(policy_document_json) resource.policies.append(policy_document)
from mojo.roboFont import CurrentGlyph
from plum import Plum

Plum(CurrentGlyph()).toggle()
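# Context note (an assumption, not part of the script): this reads like a
# RoboFont menu script -- ``CurrentGlyph()`` returns the glyph open in the
# active editor, and ``Plum`` is presumably the extension's controller, whose
# ``toggle()`` switches its display on or off for that glyph.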
from collections import OrderedDict from sympy import symbols, Range from sympy import Tuple from sympde.topology import Mapping from sympde.topology import ScalarFunction from sympde.topology import SymbolicExpr from sympde.topology.space import element_of from sympde.topology.derivatives import _logical_partial_derivatives from psydac.pyccel.ast.core import IndexedVariable from psydac.pyccel.ast.core import For from psydac.pyccel.ast.core import Assign from psydac.pyccel.ast.core import Slice from psydac.pyccel.ast.core import FunctionDef from .basic import SplBasic from .utilities import build_pythran_types_header, variables from .utilities import build_pyccel_types_decorator from .utilities import rationalize_eval_mapping from .utilities import compute_atoms_expr_mapping from .utilities import compute_atoms_expr_field #============================================================================== # TODO move it def _create_loop(indices, ranges, body): dim = len(indices) for i in range(dim-1,-1,-1): rx = ranges[i] x = indices[i] start = rx.start end = rx.stop rx = Range(start, end) body = [For(x, rx, body)] return body #============================================================================== # NOTE: this is used in module 'psydac.api.ast.glt' class EvalArrayField(SplBasic): def __new__(cls, space, fields, boundary=None, name=None, boundary_basis=None, mapping=None, is_rational_mapping=None,backend=None): if not isinstance(fields, (tuple, list, Tuple)): raise TypeError('> Expecting an iterable') obj = SplBasic.__new__(cls, space, name=name, prefix='eval_field', mapping=mapping, is_rational_mapping=is_rational_mapping) obj._space = space obj._fields = Tuple(*fields) obj._boundary = boundary obj._boundary_basis = boundary_basis obj._backend = backend obj._func = obj._initialize() return obj @property def space(self): return self._space @property def fields(self): return self._fields @property def map_stmts(self): return self._map_stmts @property def boundary_basis(self): return self._boundary_basis @property def backend(self): return self._backend def build_arguments(self, data): other = data return self.basic_args + other def _initialize(self): space = self.space dim = space.ldim mapping = self.mapping field_atoms = self.fields.atoms(ScalarFunction) fields_str = sorted([SymbolicExpr(f).name for f in self.fields]) # ... declarations degrees = variables( 'p1:%s'%(dim+1), 'int') orders = variables( 'k1:%s'%(dim+1), 'int') indices_basis = variables( 'jl1:%s'%(dim+1), 'int') indices_quad = variables( 'g1:%s'%(dim+1), 'int') basis = variables('basis1:%s'%(dim+1), dtype='real', rank=3, cls=IndexedVariable) fields_coeffs = variables(['coeff_{}'.format(f) for f in field_atoms], dtype='real', rank=dim, cls=IndexedVariable) fields_val = variables(['{}_values'.format(f) for f in fields_str], dtype='real', rank=dim, cls=IndexedVariable) spans = variables( 'spans1:%s'%(dim+1), dtype = 'int', rank = 1, cls = IndexedVariable ) i_spans = variables( 'i_span1:%s'%(dim+1), 'int') # ... # ... ranges # we add the degree because of the padding ranges_basis = [Range(i_spans[i], i_spans[i]+degrees[i]+1) for i in range(dim)] ranges_quad = [Range(orders[i]) for i in range(dim)] # ... # ... basic arguments self._basic_args = (orders) # ... # ... body = [] updates = [] # ... # ... 
Nj = element_of(space, name='Nj') init_basis = OrderedDict() init_map = OrderedDict() inits, updates, map_stmts, fields = compute_atoms_expr_field(self.fields, indices_quad, indices_basis, basis, Nj, mapping=mapping) self._fields = fields for init in inits: basis_name = str(init.lhs) init_basis[basis_name] = init for stmt in map_stmts: init_map[str(stmt.lhs)] = stmt init_basis = OrderedDict(sorted(init_basis.items())) body += list(init_basis.values()) body += updates self._map_stmts = init_map # ... # put the body in tests for loops body = _create_loop(indices_basis, ranges_basis, body) # put the body in for loops of quadrature points assign_spans = [] for x, i_span, span in zip(indices_quad, i_spans, spans): assign_spans += [Assign(i_span, span[x])] body = assign_spans + body body = _create_loop(indices_quad, ranges_quad, body) # initialization of the matrix init_vals = [f[[Slice(None,None)]*dim] for f in fields_val] init_vals = [Assign(e, 0.0) for e in init_vals] body = init_vals + body func_args = self.build_arguments(degrees + spans + basis + fields_coeffs + fields_val) decorators = {} header = None if self.backend['name'] == 'pyccel': decorators = {'types': build_pyccel_types_decorator(func_args)} elif self.backend['name'] == 'numba': decorators = {'jit':[]} elif self.backend['name'] == 'pythran': header = build_pythran_types_header(self.name, func_args) return FunctionDef(self.name, list(func_args), [], body, decorators=decorators,header=header) #============================================================================== # NOTE: this is used in module 'psydac.api.ast.glt' class EvalArrayMapping(SplBasic): def __new__(cls, space, mapping, name=None, nderiv=1, is_rational_mapping=None, backend=None): if not isinstance(mapping, Mapping): raise TypeError('> Expecting a Mapping object') obj = SplBasic.__new__(cls, mapping, name=name, prefix='eval_mapping', mapping=mapping, is_rational_mapping=is_rational_mapping) obj._space = space obj._backend = backend dim = mapping.ldim # ... lcoords = ['x1', 'x2', 'x3'][:dim] obj._lcoords = symbols(lcoords) # ... # ... ops = _logical_partial_derivatives[:dim] M = mapping components = [M[i] for i in range(0, dim)] d_elements = {} d_elements[0] = list(components) if nderiv > 0: ls = [d(M[i]) for d in ops for i in range(0, dim)] d_elements[1] = ls if nderiv > 1: ls = [d1(d2(M[i])) for e,d1 in enumerate(ops) for d2 in ops[:e+1] for i in range(0, dim)] d_elements[2] = ls if nderiv > 2: raise NotImplementedError('TODO') elements = [i for l in d_elements.values() for i in l] obj._elements = tuple(elements) obj._d_elements = d_elements obj._components = tuple(components) obj._nderiv = nderiv # ... obj._func = obj._initialize() return obj @property def space(self): return self._space @property def nderiv(self): return self._nderiv @property def lcoords(self): return self._lcoords @property def elements(self): return self._elements @property def d_elements(self): return self._d_elements @property def components(self): return self._components @property def mapping_coeffs(self): return self._mapping_coeffs @property def mapping_values(self): return self._mapping_values @property def backend(self): return self._backend @property def weights(self): return self._weights def build_arguments(self, data): other = data return self.basic_args + other def _initialize(self): space = self.space dim = space.ldim mapping_atoms = [SymbolicExpr(f).name for f in self.components] mapping_str = [SymbolicExpr(f).name for f in self.elements ] # ... 
declarations degrees = variables( 'p1:%s'%(dim+1), 'int') orders = variables( 'k1:%s'%(dim+1), 'int') indices_basis = variables( 'jl1:%s'%(dim+1), 'int') indices_quad = variables( 'g1:%s'%(dim+1), 'int') basis = variables('basis1:%s'%(dim+1), dtype='real', rank=3, cls=IndexedVariable) mapping_coeffs = variables(['coeff_{}'.format(f) for f in mapping_atoms], dtype='real', rank=dim, cls=IndexedVariable) mapping_values = variables(['{}_values'.format(f) for f in mapping_str], dtype='real', rank=dim, cls=IndexedVariable) spans = variables( 'spans1:%s'%(dim+1), dtype = 'int', rank = 1, cls = IndexedVariable ) i_spans = variables( 'i_span1:%s'%(dim+1), 'int') # ... needed for area weights = variables('quad_w1:%s'%(dim+1), dtype='real', rank=1, cls=IndexedVariable) self._weights = weights # ... weights_elements = [] if self.is_rational_mapping: # TODO check if 'w' exist already weights_pts = element_of(self.space, name='w') weights_elements = [weights_pts] # ... nderiv = self.nderiv ops = _logical_partial_derivatives[:dim] if nderiv > 0: weights_elements += [d(weights_pts) for d in ops] if nderiv > 1: weights_elements += [d1(d2(weights_pts)) for e,d1 in enumerate(ops) for d2 in ops[:e+1]] if nderiv > 2: raise NotImplementedError('TODO') # ... mapping_weights_str = [SymbolicExpr(f).name for f in weights_elements] mapping_wvalues = variables(['{}_values'.format(f) for f in mapping_weights_str], dtype='real', rank=dim, cls=IndexedVariable) mapping_coeffs = mapping_coeffs + (IndexedVariable('coeff_w', dtype='real', rank=dim),) mapping_values = mapping_values + tuple(mapping_wvalues) weights_elements = tuple(weights_elements) # ... # ... ranges # we add the degree because of the padding ranges_basis = [Range(i_spans[i], i_spans[i]+degrees[i]+1) for i in range(dim)] ranges_quad = [Range(orders[i]) for i in range(dim)] # ... # ... basic arguments self._basic_args = (orders) # ... # ... self._mapping_coeffs = mapping_coeffs self._mapping_values = mapping_values # ... # ... Nj = element_of(space, name='Nj') body = [] init_basis = OrderedDict() atomic_exprs = self.elements + weights_elements inits, updates = compute_atoms_expr_mapping(atomic_exprs, indices_quad, indices_basis, basis, Nj) for init in inits: basis_name = str(init.lhs) init_basis[basis_name] = init init_basis = OrderedDict(sorted(init_basis.items())) body += list(init_basis.values()) body += updates # ... # put the body in tests for loops body = _create_loop(indices_basis, ranges_basis, body) if self.is_rational_mapping: stmts = rationalize_eval_mapping(self.mapping, self.nderiv, self.space, indices_quad) body += stmts assign_spans = [] for x, i_span, span in zip(indices_quad, i_spans, spans): assign_spans += [Assign(i_span, span[x])] body = assign_spans + body # put the body in for loops of quadrature points body = _create_loop(indices_quad, ranges_quad, body) # initialization of the matrix init_vals = [f[[Slice(None,None)]*dim] for f in mapping_values] init_vals = [Assign(e, 0.0) for e in init_vals] body = init_vals + body func_args = self.build_arguments(degrees + spans + basis + mapping_coeffs + mapping_values) decorators = {} header = None if self.backend['name'] == 'pyccel': decorators = {'types': build_pyccel_types_decorator(func_args)} elif self.backend['name'] == 'numba': decorators = {'jit':[]} elif self.backend['name'] == 'pythran': header = build_pythran_types_header(self.name, func_args) return FunctionDef(self.name, list(func_args), [], body, decorators=decorators,header=header)
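# --- illustration (not part of the module above) -----------------------------
# A plain-Python analogue of `_create_loop`, added as a sketch of the loop
# nesting it produces: the loop over indices[0] ends up outermost, because the
# body is wrapped in `For` nodes from i = dim-1 down to 0. `create_loop_py` is
# a hypothetical helper that executes the loops instead of building a pyccel
# AST.
def create_loop_py(indices, ranges, body):
    def run(level, env):
        if level == len(indices):
            body(env)
            return
        start, stop = ranges[level]
        for value in range(start, stop):
            env[indices[level]] = value
            run(level + 1, env)
    run(0, {})

# g1 varies slowest (outermost), g2 fastest -- same ordering as _create_loop.
create_loop_py(['g1', 'g2'], [(0, 2), (0, 3)],
               lambda env: print(env['g1'], env['g2']))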
python
import unittest from unittest import mock from stapy.sta.post import Post from stapy.sta.entity import Entity from stapy.sta.request import Request import stapy.sta.entities as ent class PostMock(object): def __init__(self, json_data, status_code): self.json_data = json_data self.status_code = status_code self.ok = status_code < 400 self.headers = {"location": "(1)"} def json(self): return self.json_data class TestAbstractRequestMethods(unittest.TestCase): def test_get_entity(self): self.assertEqual(Post.get_entity(Entity.Datastream), ent.Datastream) def test_cast_params(self): self.assertEqual({"Locations": 123}, Post.cast_params(location_id=123)) self.assertEqual({}, Post.cast_params(value=None)) with self.assertRaises(Exception): Post.cast_params(10) with self.assertRaises(Exception): Post.cast_params(xyz_id=10) @mock.patch("requests.post") def test_send_request(self, mocked_post): mocked_post.side_effect = Exception() with self.assertRaises(ValueError): Post.send_request(Request.POST, "", "") mocked_post.side_effect = None mocked_post.return_value = PostMock({"message": "test"}, 404) self.assertEqual(Post.send_request(Request.POST, "", ""), -1) mocked_post.return_value = PostMock({}, 404) self.assertEqual(Post.send_request(Request.POST, "", ""), -1) with self.assertRaises(Exception): Post.send_request(Request.DELETE, "", "") if __name__ == "__main__": unittest.main()
python
# Create your views here. from django.shortcuts import render, get_object_or_404 from django.http import HttpResponse, HttpResponseRedirect, JsonResponse from django.forms import ModelForm, modelformset_factory from django.urls import reverse from .models import Tweets, StreamFilters from Mining.twitter_miner import Twitter # Create twitter miner instance twitter = Twitter() twitter.connect_twitter() class FilterForm(ModelForm): class Meta: model = StreamFilters fields = ['tracks', 'locations', 'languages'] def index(request): tweet_list = Tweets.objects.order_by('-created_at')[:20] context = { 'tweet_list': tweet_list, } twitter.disconnet_from_stream() return render(request, 'news/index.html', context) def overview(request): # Load data with ajax if request.method == 'POST': # post_text = request.POST.get('text') # print(post_text) if not twitter.streaming: twitter.connect_to_stream(1) new = twitter.get_new_tweets() data = [{'tweet_id': item.tweet_id,'text': item.text, 'user_location': item.user_location, 'retweet': item.retweeted_status_id} for item in new] return JsonResponse({"list": data}) # Load page return render(request, 'news/stream.html') def tweet(request, tweetid): entry = get_object_or_404(Tweets, tweet_id=tweetid) return render(request, 'news/tweet.html', {'tweet': entry}) def filterView(request): form = modelformset_factory(StreamFilters, form=FilterForm) if request.method == 'POST': formset = form(request.POST, request.FILES) if formset.is_valid(): formset.save() return HttpResponseRedirect(reverse('news:index')) else: formset = form() return render(request, 'news/filter.html', {'formset': formset})
python
import math

# Roots of a*x**2 + b*x + c = 0 via the quadratic formula.
def quadratic(a, b, c):
    dt = b * b - 4 * a * c
    if dt < 0:
        print('This equation has no real solution')
        return None
    return (math.sqrt(dt) - b) / (2 * a), (-math.sqrt(dt) - b) / (2 * a)

print(quadratic(1, 3, 2))  # -> (-1.0, -2.0)
python
import sqlite3 conn = sqlite3.connect(":memory:") cur = conn.cursor() cur.execute("create table stocks (symbol text, shares integer, price real)") conn.commit()
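# --- illustration (not part of the snippet above) -----------------------------
# A minimal continuation sketch: parameterized insert and a query against the
# same in-memory table. The row values are made up for illustration.
cur.execute("insert into stocks values (?, ?, ?)", ("IBM", 100, 45.0))
conn.commit()
cur.execute("select symbol, shares * price from stocks")
print(cur.fetchall())  # -> [('IBM', 4500.0)]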
python
# -*- coding: utf-8 -*- """ How do plugins work? There are a few patterns we use to "register" plugins with the core app. Entry Points 1. Plugins can use entry_points in the setup, pointing to "pioreactor.plugins" 2. Automations are defined by a subclassing the respective XXXAutomationContrib. There is a hook in this parent class that will add the subclass to XXXController, hence the Controller will know about it and be able to run it (as the module is loaded in pioreactor.__init__.py) 3. command-line additions, like background jobs, are found by searching the plugin's namespace for functions prepended with `click_`. Adding to ~/.pioreactor/plugins 1. Scripts placed in ~/.pioreactor/plugins are automagically loaded. The authors can add metadata to their file with the following variables at the highest level in the file: __plugin_name__ __plugin_author__ __plugin_summary__ __plugin_version__ __plugin_homepage__ """ from __future__ import annotations import glob import importlib import os import pathlib import sys from importlib.metadata import entry_points from importlib.metadata import metadata from typing import Any from msgspec import Struct from .install_plugin import click_install_plugin from .list_plugins import click_list_plugins from .uninstall_plugin import click_uninstall_plugin from pioreactor.whoami import is_testing_env class Plugin(Struct): module: Any description: str version: str homepage: str author: str source: str def get_plugins() -> dict[str, Plugin]: """ This function is really time consuming... """ # get entry point plugins # Users can use Python's entry point system to create rich plugins, see # example here: https://github.com/Pioreactor/pioreactor-air-bubbler eps = entry_points() pioreactor_plugins: tuple = eps.get("pioreactor.plugins", tuple()) plugins: dict[str, Plugin] = {} for plugin in pioreactor_plugins: try: md = metadata(plugin.name) plugins[md["Name"]] = Plugin( plugin.load(), md["Summary"], md["Version"], md["Home-page"], md["Author"], "entry_points", ) except Exception as e: print(f"{plugin.name} plugin load error: {e}") # get file-based plugins. # Users can put .py files into the MODULE_DIR folder below. # The below code will load it into Python, and treat it like any other plugin. # The authors can add metadata to their file with the following variables at the # highest level in the file: # __plugin_name__ # __plugin_author__ # __plugin_summary__ # __plugin_version__ # __plugin_homepage__ BLANK = "Unknown" # The directory containing your modules needs to be on the search path. if is_testing_env(): MODULE_DIR = "plugins_dev" else: MODULE_DIR = "/home/pioreactor/.pioreactor/plugins" sys.path.append(MODULE_DIR) # Get the stem names (file name, without directory and '.py') of any # python files in your directory, load each module by name and run # the required function. py_files = glob.glob(os.path.join(MODULE_DIR, "*.py")) for py_file in py_files: module_name = pathlib.Path(py_file).stem module = importlib.import_module(module_name) plugins[getattr(module, "__plugin_name__", module_name)] = Plugin( module, getattr(module, "__plugin_summary__", BLANK), getattr(module, "__plugin_version__", BLANK), getattr(module, "__plugin_homepage__", BLANK), getattr(module, "__plugin_author__", BLANK), "plugins_folder", ) return plugins __all__ = ( "click_uninstall_plugin", "click_install_plugin", "click_list_plugins", "get_plugins", )
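# --- illustration (not part of the module above) ------------------------------
# A minimal sketch of a file-based plugin as described in the docstring: a
# single .py file dropped into ~/.pioreactor/plugins. Only the __plugin_*__
# metadata variables and the `click_` name prefix come from the docstring;
# everything else here is hypothetical.
__plugin_name__ = "my-example-plugin"
__plugin_author__ = "Jane Doe"
__plugin_summary__ = "Demonstrates the metadata picked up by get_plugins()"
__plugin_version__ = "0.1.0"
__plugin_homepage__ = "https://example.com/my-example-plugin"

def click_example():
    # Hypothetical command-line hook: per the docstring, functions whose
    # names start with `click_` are discovered as CLI additions.
    pass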
python
from django.contrib.auth.decorators import login_required from django.http import HttpResponse from django.urls import include, path from django.contrib.auth import views as auth_views from django.contrib import admin urlpatterns = [ path('', lambda request: HttpResponse("Hello World", content_type="text/plain")), path('login', auth_views.LoginView.as_view(template_name='admin/login.html')), path('admin/', admin.site.urls), path('profile', login_required(lambda request: HttpResponse(request.user.username, content_type="text/plain"))), ]
python
""" Bulky data structures for assertion in pyteomics test suites. """ import numpy as np from copy import deepcopy import sys from pyteomics.auxiliary import basestring # http://stackoverflow.com/q/14246983/1258041 class ComparableArray(np.ndarray): def __eq__(self, other): if not isinstance(other, np.ndarray): return False other = np.asarray(other, dtype=np.float) return self.shape == other.shape and np.allclose(self, other) def makeCA(arr): if not isinstance(arr, np.ndarray): arr = np.array(arr) return ComparableArray(arr.shape, arr.dtype, arr) pepxml_results = [ {'spectrum': 'pps_sl20060731_18mix_25ul_r1_1154456409.0100.0100.1', 'end_scan': 100, 'start_scan': 100, 'index': 1, 'assumed_charge': 1, 'precursor_neutral_mass': 860.392, 'search_hit': [{ 'num_missed_cleavages': 0, 'tot_num_ions': 12, 'is_rejected': False, 'search_score': { 'deltacn': 0.081, 'sprank': 1.0, 'deltacnstar': 0.0, 'spscore': 894.0, 'xcorr': 1.553}, 'hit_rank': 1, 'num_matched_ions': 11, 'num_tot_proteins': 1, 'peptide': 'SLNGEWR', 'massdiff': -0.5, 'analysis_result': [{'analysis': 'peptideprophet', 'peptideprophet_result': {'all_ntt_prob': [0.0422, 0.509, 0.96], 'parameter': {'fval': 1.4723, 'massd': -0.5, 'nmc': 0.0, 'ntt': 2.0}, 'probability': 0.96}}], 'modifications': [], 'modified_peptide': 'SLNGEWR', 'proteins': [{'num_tol_term': 2, 'protein': 'sp|P00722|BGAL_ECOLI', 'peptide_prev_aa': 'R', 'protein_descr': 'BETA-GALACTOSIDASE (EC 3.2.1.23) ' '(LACTASE) - Escherichia coli.', 'peptide_next_aa': 'F'}], 'calc_neutral_pep_mass': 860.892}]}, {'precursor_neutral_mass': 677.392, 'spectrum': 'pps_sl20060731_18mix_25ul_r1_1154456409.0040.0040.1', 'start_scan': 40, 'assumed_charge': 1, 'index': 2, 'end_scan': 40, 'search_hit': [{'tot_num_ions': 10, 'num_missed_cleavages': 1, 'is_rejected': False, 'hit_rank': 1, 'num_matched_ions': 8, 'search_score': { 'sprank': 1.0, 'deltacn': 0.165, 'deltacnstar': 0.0, 'spscore': 427.0, 'xcorr': 1.644}, 'num_tot_proteins': 1, 'peptide': 'GKKFAK', 'massdiff': -0.5, 'analysis_result': [{'analysis': 'peptideprophet', 'peptideprophet_result': { 'all_ntt_prob': [0.0491, 0.548, 0.9656], 'parameter': { 'fval': 2.0779, 'massd': -0.5, 'nmc': 1.0, 'ntt': 1.0}, 'probability': 0.548}}], 'modifications': [], 'modified_peptide': 'GKKFAK', 'proteins': [{'num_tol_term': 1, 'protein': 'gi|3212198|gb|AAC22319.1|', 'peptide_prev_aa': 'N', 'protein_descr': 'hemoglobin-binding protein ' '[Haemophilus influenzae Rd]', 'peptide_next_aa': 'I'}], 'calc_neutral_pep_mass': 677.892}]}, {'assumed_charge': 2, 'end_scan': 1366, 'index': 29, 'precursor_neutral_mass': 718.4136, 'retention_time_sec': 38.426123, 'search_hit': [{'calc_neutral_pep_mass': 718.4126, 'search_score': { 'expect': 0.0, 'homologyscore': 46.61, 'identityscore': 25.38, 'star': 0.0, 'ionscore': 36.45}, 'hit_rank': 1, 'is_rejected': False, 'massdiff': 0.0011, 'modifications': [], 'modified_peptide': 'VGQFIR', 'num_matched_ions': 5, 'num_missed_cleavages': 0, 'num_tot_proteins': 1, 'peptide': 'VGQFIR', 'analysis_result': [{'analysis': 'peptideprophet', 'peptideprophet_result': {'all_ntt_prob': [0., 0.5741, 0.7264], 'parameter': { 'fval': 0.6052, 'massd': 0.001, 'nmc': 0.0, 'ntt': 2.0}, 'probability': 0.7264}}], 'proteins': [{'num_tol_term': 2, 'peptide_next_aa': 'L', 'peptide_prev_aa': 'K', 'protein': 'IPI00200898', 'protein_descr': None}], 'tot_num_ions': 10}], 'spectrum': 'MASCOT', 'start_scan': 1366}, {'assumed_charge': 2, 'end_scan': 6862, 'index': 49, 'precursor_neutral_mass': 1404.7476, 'search_hit': [{'search_score': { 'bscore': 2.0, 'expect': 
0.012, 'nextscore': 14.6, 'hyperscore': 23.5, 'yscore': 8.7}, 'calc_neutral_pep_mass': 1404.7435, 'hit_rank': 1, 'is_rejected': False, 'massdiff': 0.004, 'modifications': [{'mass': 1.0079, 'position': 0}, {'mass': 147.0354, 'position': 10}, {'mass': 17.0031, 'position': 13}], 'modified_peptide': 'EVPLNTIIFM[147]GR', 'num_matched_ions': 8, 'num_missed_cleavages': 0, 'num_tot_proteins': 2, 'peptide': 'EVPLNTIIFMGR', 'proteins': [{'num_tol_term': 2, 'peptide_next_aa': 'V', 'peptide_prev_aa': 'R', 'protein': 'sp|P01008|ANT3_HUMAN', 'protein_descr': 'Antithrombin-III OS=Homo sapiens GN=SERPINC1 PE=1 SV=1'}, {'num_tol_term': 2, 'protein': 'tr|Q8TCE1|Q8TCE1_HUMAN', 'protein_descr': 'SERPINC1 protein OS=Homo sapiens GN=SERPINC1 PE=2 SV=1'}], 'tot_num_ions': 22}], 'spectrum': 'X!Tandem', 'start_scan': 6862}, {'assumed_charge': 3, 'end_scan': 23, 'index': 3, 'precursor_neutral_mass': 3254.044921875, 'search_hit': [{'calc_neutral_pep_mass': 3254.04711914062, 'search_score': { 'expect': 13690.946579388728, 'pvalue': 59.52585469299447}, 'hit_rank': 1, 'is_rejected': False, 'massdiff': -0.002197265625, 'modifications': [{'mass': 166.99803, 'position': 6}, {'mass': 166.99803, 'position': 7}, {'mass': 166.99803, 'position': 9}, {'mass': 160.03019, 'position': 15}, {'mass': 160.03019, 'position': 21}], 'modified_peptide': 'DQQFDS[166]S[166]SS[166]MALEDCGEETNCQSDFK', 'num_matched_ions': 3, 'num_tot_proteins': 1, 'peptide': 'DQQFDSSSSMALEDCGEETNCQSDFK', 'proteins': [{'num_tol_term': 0, 'peptide_next_aa': 'I', 'peptide_prev_aa': 'R', 'protein': 'BL_ORD_ID:125453', 'protein_descr': 'sp|O43149|ZZEF1_HUMAN Zinc finger ZZ-type and EF-hand domain-containing protein 1 OS=Homo sapiens GN=ZZEF1 PE=1 SV=6:reversed'}], 'tot_num_ions': 50}, {'calc_neutral_pep_mass': 3254.04711914062, 'search_score': {'expect': 14837.682803311733, 'pvalue': 64.51166436222492}, 'hit_rank': 2, 'is_rejected': False, 'massdiff': -0.002197265625, 'modifications': [{'mass': 243.02933, 'position': 6}, {'mass': 170.10596, 'position': 8}, {'mass': 181.01368, 'position': 11}, {'mass': 181.01368, 'position': 13}, {'mass': 181.01368, 'position': 18}, {'mass': 181.01368, 'position': 21}, {'mass': 160.03019, 'position': 1}, {'mass': 160.03019, 'position': 4}], 'modified_peptide': 'CENCNY[243]PK[170]EGT[181]HT[181]NQHET[181]LHT[181]SR', 'num_matched_ions': 6, 'num_tot_proteins': 2, 'peptide': 'CENCNYPKEGTHTNQHETLHTSR', 'proteins': [{'num_tol_term': 0, 'peptide_next_aa': 'S', 'peptide_prev_aa': 'R', 'protein': 'BL_ORD_ID:144314', 'protein_descr': 'tr|Q6ZND3|Q6ZND3_HUMAN Zinc finger protein 184 OS=Homo sapiens GN=ZNF184 PE=2 SV=1:reversed'}, {'protein': 'BL_ORD_ID:154629', 'protein_descr': 'sp|Q99676|ZN184_HUMAN Zinc finger protein 184 OS=Homo sapiens GN=ZNF184 PE=1 SV=4:reversed'}], 'tot_num_ions': 44}], 'spectrum': '"Cmpd 24, +MSn(1085.6886), 1.2 min.23.23.3"', 'start_scan': 23}] mzid_spectra = {(False, False): [{'id': 'SEQ_spec1', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=1'}, {'id': 'SEQ_spec2a', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=2'}, {'id': 'SEQ_spec3a', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=3'}, {'id': 'SEQ_spec10', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=10'}, {'id': 'SEQ_spec11a', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=11'}, {'id': 'SEQ_spec12', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=12'}, {'id': 'SEQ_spec13', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=13'}, {'id': 
'SEQ_spec15', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=15'}, {'id': 'SEQ_spec20', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=20'}, {'id': 'Mas_spec2b', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=2'}, {'id': 'Mas_spec3b', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=3'}, {'id': 'Mas_spec4', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=4'}, {'id': 'Mas_spec6', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=6'}, {'id': 'Mas_spec11b', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=11'}, {'id': 'Mas_spec12', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=12'}, {'id': 'Mas_spec35', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=35'}, {'id': 'Mas_spec36b1', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=36'}, {'id': 'Mas_spec40', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=40'}], (False, True): [{'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=1'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=2'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=3'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=10'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=11'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=12'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=13'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=15'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=20'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=2'}, {'FileFormat': 
'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=3'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=4'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=6'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=11'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=12'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=35'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=36'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=40'}], (True, False): [{'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec1_pep1'}], 'ProteinScape:IntensityCoverage': 0.3919545603809718, 'ProteinScape:SequestMetaScore': 7.59488518903425, 'calculatedMassToCharge': 1507.695, 'chargeState': 1, 'experimentalMassToCharge': 1507.696, 'id': 'SEQ_spec1_pep1', 'passThreshold': True, 'peptide_ref': 'prot1_pep1', 'rank': 1}], 'id': 'SEQ_spec1', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=1'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec2a_pep1'}], 'ProteinScape:IntensityCoverage': 0.5070386909133888, 'ProteinScape:SequestMetaScore': 10.8810331335713, 'calculatedMassToCharge': 1920.9224, 'chargeState': 1, 'experimentalMassToCharge': 1920.923, 'id': 'SEQ_spec2a_pep1', 'passThreshold': True, 'peptide_ref': 'prot1_pep2', 'rank': 1}], 'id': 'SEQ_spec2a', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=2'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec3a_pep1'}], 'ProteinScape:IntensityCoverage': 0.43376827663349576, 'ProteinScape:SequestMetaScore': 6.1021771936508955, 'calculatedMassToCharge': 864.4752, 'chargeState': 1, 'experimentalMassToCharge': 864.474, 'id': 'SEQ_spec3a_pep1', 'passThreshold': True, 'peptide_ref': 'prot1_pep3', 'rank': 1}], 'id': 'SEQ_spec3a', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=3'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 
'PE1_SEQ_spec10_pep1'}], 'ProteinScape:IntensityCoverage': 0.16164593872706742, 'ProteinScape:SequestMetaScore': 5.635013787097159, 'calculatedMassToCharge': 1832.862115, 'chargeState': 1, 'experimentalMassToCharge': 1832.863, 'id': 'SEQ_spec10_pep1', 'passThreshold': True, 'peptide_ref': 'prot1_pep4', 'rank': 1}], 'id': 'SEQ_spec10', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=10'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec11a_pep1'}], 'ProteinScape:IntensityCoverage': 0.6146634530945828, 'ProteinScape:SequestMetaScore': 10.17510605321669, 'calculatedMassToCharge': 911.4144, 'chargeState': 1, 'experimentalMassToCharge': 911.413, 'id': 'SEQ_spec11a_pep1', 'passThreshold': True, 'peptide_ref': 'prot2_pep1', 'rank': 1}, {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec11a_pep2'}], 'ProteinScape:IntensityCoverage': 0.2517734933944088, 'ProteinScape:SequestMetaScore': 6.005532583410669, 'calculatedMassToCharge': 1365.722015, 'chargeState': 1, 'experimentalMassToCharge': 1365.721, 'id': 'SEQ_spec11a_pep2', 'passThreshold': True, 'peptide_ref': 'prot3_pep1', 'rank': 2}], 'id': 'SEQ_spec11a', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=11'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec12_pep1'}], 'ProteinScape:IntensityCoverage': 0.4884754815768041, 'ProteinScape:SequestMetaScore': 12.042955809241318, 'calculatedMassToCharge': 2255.9515, 'chargeState': 1, 'experimentalMassToCharge': 2255.95, 'id': 'SEQ_spec12_pep1', 'passThreshold': True, 'peptide_ref': 'prot3_pep2', 'rank': 1}, {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec12_pep2'}], 'ProteinScape:IntensityCoverage': 0.554279316913958, 'ProteinScape:SequestMetaScore': 3.1184106313104283, 'calculatedMassToCharge': 3941.036315, 'chargeState': 1, 'experimentalMassToCharge': 3941.081, 'id': 'SEQ_spec12_pep2', 'passThreshold': True, 'peptide_ref': 'prot2_pep2', 'rank': 2}], 'id': 'SEQ_spec12', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=12'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec13_pep1'}], 'ProteinScape:IntensityCoverage': 0.39717937427768873, 'ProteinScape:SequestMetaScore': 4.159878401845841, 'calculatedMassToCharge': 911.4144, 'chargeState': 1, 'experimentalMassToCharge': 911.415, 'id': 'SEQ_spec13_pep1', 'passThreshold': True, 'peptide_ref': 'prot2_pep1', 'rank': 1}, {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec13_pep2'}], 'ProteinScape:IntensityCoverage': 0.136423966822031, 'ProteinScape:SequestMetaScore': 5.725397508852668, 'calculatedMassToCharge': 2192.932715, 'chargeState': 1, 'experimentalMassToCharge': 2192.9, 'id': 'SEQ_spec13_pep2', 'passThreshold': True, 'peptide_ref': 'prot3_pep3', 'rank': 2}], 'id': 'SEQ_spec13', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=13'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec15_pep1'}], 'ProteinScape:IntensityCoverage': 0.2854129700126088, 'ProteinScape:SequestMetaScore': 6.181682868401155, 'calculatedMassToCharge': 1469.8071, 'chargeState': 1, 'experimentalMassToCharge': 1469.806, 'id': 'SEQ_spec15_pep1', 'passThreshold': True, 'peptide_ref': 'prot4_pep1', 'rank': 1}], 'id': 'SEQ_spec15', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=15'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_SEQ_spec20_pep1'}], 
'ProteinScape:IntensityCoverage': 0.29049959198538566, 'ProteinScape:SequestMetaScore': 6.669916225794168, 'calculatedMassToCharge': 1225.6059, 'chargeState': 1, 'experimentalMassToCharge': 1225.604, 'id': 'SEQ_spec20_pep1', 'passThreshold': True, 'peptide_ref': 'prot4_pep2', 'rank': 1}], 'id': 'SEQ_spec20', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=20'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec2b_pep1'}], 'calculatedMassToCharge': 2035.0745, 'chargeState': 1, 'experimentalMassToCharge': 2035.075, 'id': 'Mas_spec2b_pep1', 'passThreshold': True, 'peptide_ref': 'prot5_pep1', 'rank': 1}], 'id': 'Mas_spec2b', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=2'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec3b_pep1'}], 'calculatedMassToCharge': 1834.8856, 'chargeState': 1, 'experimentalMassToCharge': 1834.884, 'id': 'Mas_spec3b_pep1', 'passThreshold': True, 'peptide_ref': 'prot5_pep2', 'rank': 1}], 'id': 'Mas_spec3b', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=3'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec4_pep1'}], 'calculatedMassToCharge': 1097.5049, 'chargeState': 1, 'experimentalMassToCharge': 1097.503, 'id': 'Mas_spec4_pep1', 'passThreshold': True, 'peptide_ref': 'prot5_pep3', 'rank': 1}], 'id': 'Mas_spec4', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=4'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec6_pep1'}], 'calculatedMassToCharge': 975.4457, 'chargeState': 1, 'experimentalMassToCharge': 975.446, 'id': 'Mas_spec6_pep1', 'passThreshold': True, 'peptide_ref': 'prot6_pep1', 'rank': 1}, {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec6_pep2'}], 'calculatedMassToCharge': 912.3993, 'chargeState': 1, 'experimentalMassToCharge': 912.29, 'id': 'Mas_spec6_pep2', 'passThreshold': True, 'peptide_ref': 'prot7_pep1', 'rank': 1}], 'id': 'Mas_spec6', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=6'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec11b_pep1'}], 'ProteinScape:IntensityCoverage': 0.0, 'ProteinScape:MascotScore': 33.82, 'calculatedMassToCharge': 1365.722015, 'chargeState': 1, 'experimentalMassToCharge': 1365.721, 'id': 'Mas_spec11b_pep1', 'passThreshold': True, 'peptide_ref': 'prot3_pep1', 'rank': 1}], 'id': 'Mas_spec11b', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=11'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec12_pep1'}], 'ProteinScape:IntensityCoverage': 0.0, 'ProteinScape:MascotScore': 39.0, 'calculatedMassToCharge': 2256.9515, 'chargeState': 1, 'experimentalMassToCharge': 2256.952, 'id': 'Mas_spec12_pep1', 'passThreshold': True, 'peptide_ref': 'prot3_pep2', 'rank': 1}], 'id': 'Mas_spec12', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=12'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec35_pep1'}], 'calculatedMassToCharge': 2261.0939, 'chargeState': 1, 'experimentalMassToCharge': 2261.092, 'id': 'Mas_spec35_pep1', 'passThreshold': True, 'peptide_ref': 'prot6_pep2', 'rank': 1}], 'id': 'Mas_spec35', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=35'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec36b1_pep1'}], 
'calculatedMassToCharge': 1115.6168, 'chargeState': 1, 'experimentalMassToCharge': 1115.617, 'id': 'Mas_spec36b1_pep1', 'passThreshold': True, 'peptide_ref': 'prot7_pep2', 'rank': 1}], 'id': 'Mas_spec36b1', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=36'}, {'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'peptideEvidence_ref': 'PE1_Mas_spec40_pep1'}], 'calculatedMassToCharge': 2035.0017, 'chargeState': 1, 'experimentalMassToCharge': 2035.002, 'id': 'Mas_spec40_pep1', 'passThreshold': True, 'peptide_ref': 'prot7_pep3', 'rank': 1}], 'id': 'Mas_spec40', 'spectraData_ref': 'LCMALDI_spectra', 'spectrumID': 'databasekey=40'}], (True, True): [{'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'AGTQIENIDEDFR', 'Seq': 'MVDYHAANQSYQYGPSSAGNGAGGGGSMGDYMAQEDDWDRDLLLDPAWEKQQRKTFTAWCNSHLRKAGTQIENIDEDFRDGLKLMLLLEVISGERLPKPERGKMRVHKINNVNKALDFIASKGVKLVSIGAEEIVDGNAKMTLGMIWTIILRFAIQDISVEETSAKEGLLLWCQRKTAPYKNVNVQNFHISWKDGLAFNALIHRHRPELIEYDKLRKDDPVTNLNNAFEVAEKYLDIPKMLDAEDIVNTARPDEKAIMTYVSSFYHAFSGAQKAETAANRICKVLAVNQENEHLMEDYEKLASDLLEWIRRTIPWLEDRVPQKTIQEMQQKLEDFRDYRRVHKPPKVQEKCQLEINFNTLQTKLRLSNRPAFMPSEGKMVSDINNGWQHLEQAEKGYEEWLLNEIRRLERLDHLAEKFRQKASIHEAWTDGKEAMLKHRDYETATLSDIKALIRKHEAFESDLAAHQDRVEQIAAIAQELNELDYYDSHNVNTRCQKICDQWDALGSLTHSRREALEKTEKQLEAIDQLHLEYAKRAAPFNNWMESAMEDLQDMFIVHTIEEIEGLISAHDQFKSTLPDADREREAILAIHKEAQRIAESNHIKLSGSNPYTTVTPQIINSKWEKVQQLVPKRDHALLEEQSKQQSNEHLRRQFASQANVVGPWIQTKMEEIGRISIEMNGTLEDQLSHLKQYERSIVDYKPNLDLLEQQHQLIQEALIFDNKHTNYTMEHIRVGWEQLLTTIARTINEVENQILTRDAKGISQEQMQEFRASFNHFDKDHGGALGPEEFKACLISLGYDVENDRQGEAEFNRIMSLVDPNHSGLVTFQAFIDFMSRETTDTDTADQVIASFKVLAGDKNFITAEELRRELPPDQAEYCIARMAPYQGPDAVPGALDYKSFSTALYGESDL', 'accession': 'IPI00013808.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 79, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'D', 'pre': 'K', 'protein description': 'IPI:IPI00013808.1|SWISS-PROT:O43707|TREMBL:Q96BG6|ENSEMBL:ENSP00000252699|REFSEQ:NP_004915|H-INV:HIT000032172|VEGA:OTTHUMP00000076071;OTTHUMP00000174445 Tax_Id=9606 Gene_Symbol=ACTN4 Alpha-actinin-4', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 67, 'version': '3.15'}], 'PeptideSequence': 'AGTQIENIDEDFR', 'ProteinScape:IntensityCoverage': 0.3919545603809718, 'ProteinScape:SequestMetaScore': 7.59488518903425, 'calculatedMassToCharge': 1507.695, 'chargeState': 1, 'experimentalMassToCharge': 1507.696, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=1'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'AGTQIENIDEDFRDGLK', 'Seq': 
'MVDYHAANQSYQYGPSSAGNGAGGGGSMGDYMAQEDDWDRDLLLDPAWEKQQRKTFTAWCNSHLRKAGTQIENIDEDFRDGLKLMLLLEVISGERLPKPERGKMRVHKINNVNKALDFIASKGVKLVSIGAEEIVDGNAKMTLGMIWTIILRFAIQDISVEETSAKEGLLLWCQRKTAPYKNVNVQNFHISWKDGLAFNALIHRHRPELIEYDKLRKDDPVTNLNNAFEVAEKYLDIPKMLDAEDIVNTARPDEKAIMTYVSSFYHAFSGAQKAETAANRICKVLAVNQENEHLMEDYEKLASDLLEWIRRTIPWLEDRVPQKTIQEMQQKLEDFRDYRRVHKPPKVQEKCQLEINFNTLQTKLRLSNRPAFMPSEGKMVSDINNGWQHLEQAEKGYEEWLLNEIRRLERLDHLAEKFRQKASIHEAWTDGKEAMLKHRDYETATLSDIKALIRKHEAFESDLAAHQDRVEQIAAIAQELNELDYYDSHNVNTRCQKICDQWDALGSLTHSRREALEKTEKQLEAIDQLHLEYAKRAAPFNNWMESAMEDLQDMFIVHTIEEIEGLISAHDQFKSTLPDADREREAILAIHKEAQRIAESNHIKLSGSNPYTTVTPQIINSKWEKVQQLVPKRDHALLEEQSKQQSNEHLRRQFASQANVVGPWIQTKMEEIGRISIEMNGTLEDQLSHLKQYERSIVDYKPNLDLLEQQHQLIQEALIFDNKHTNYTMEHIRVGWEQLLTTIARTINEVENQILTRDAKGISQEQMQEFRASFNHFDKDHGGALGPEEFKACLISLGYDVENDRQGEAEFNRIMSLVDPNHSGLVTFQAFIDFMSRETTDTDTADQVIASFKVLAGDKNFITAEELRRELPPDQAEYCIARMAPYQGPDAVPGALDYKSFSTALYGESDL', 'accession': 'IPI00013808.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 83, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'L', 'pre': 'K', 'protein description': 'IPI:IPI00013808.1|SWISS-PROT:O43707|TREMBL:Q96BG6|ENSEMBL:ENSP00000252699|REFSEQ:NP_004915|H-INV:HIT000032172|VEGA:OTTHUMP00000076071;OTTHUMP00000174445 Tax_Id=9606 Gene_Symbol=ACTN4 Alpha-actinin-4', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 67, 'version': '3.15'}], 'PeptideSequence': 'AGTQIENIDEDFRDGLK', 'ProteinScape:IntensityCoverage': 0.5070386909133888, 'ProteinScape:SequestMetaScore': 10.8810331335713, 'calculatedMassToCharge': 1920.9224, 'chargeState': 1, 'experimentalMassToCharge': 1920.923, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=2'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'ALDFIASK', 'Seq': 'MVDYHAANQSYQYGPSSAGNGAGGGGSMGDYMAQEDDWDRDLLLDPAWEKQQRKTFTAWCNSHLRKAGTQIENIDEDFRDGLKLMLLLEVISGERLPKPERGKMRVHKINNVNKALDFIASKGVKLVSIGAEEIVDGNAKMTLGMIWTIILRFAIQDISVEETSAKEGLLLWCQRKTAPYKNVNVQNFHISWKDGLAFNALIHRHRPELIEYDKLRKDDPVTNLNNAFEVAEKYLDIPKMLDAEDIVNTARPDEKAIMTYVSSFYHAFSGAQKAETAANRICKVLAVNQENEHLMEDYEKLASDLLEWIRRTIPWLEDRVPQKTIQEMQQKLEDFRDYRRVHKPPKVQEKCQLEINFNTLQTKLRLSNRPAFMPSEGKMVSDINNGWQHLEQAEKGYEEWLLNEIRRLERLDHLAEKFRQKASIHEAWTDGKEAMLKHRDYETATLSDIKALIRKHEAFESDLAAHQDRVEQIAAIAQELNELDYYDSHNVNTRCQKICDQWDALGSLTHSRREALEKTEKQLEAIDQLHLEYAKRAAPFNNWMESAMEDLQDMFIVHTIEEIEGLISAHDQFKSTLPDADREREAILAIHKEAQRIAESNHIKLSGSNPYTTVTPQIINSKWEKVQQLVPKRDHALLEEQSKQQSNEHLRRQFASQANVVGPWIQTKMEEIGRISIEMNGTLEDQLSHLKQYERSIVDYKPNLDLLEQQHQLIQEALIFDNKHTNYTMEHIRVGWEQLLTTIARTINEVENQILTRDAKGISQEQMQEFRASFNHFDKDHGGALGPEEFKACLISLGYDVENDRQGEAEFNRIMSLVDPNHSGLVTFQAFIDFMSRETTDTDTADQVIASFKVLAGDKNFITAEELRRELPPDQAEYCIARMAPYQGPDAVPGALDYKSFSTALYGESDL', 'accession': 'IPI00013808.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 122, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB 
type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'G', 'pre': 'K', 'protein description': 'IPI:IPI00013808.1|SWISS-PROT:O43707|TREMBL:Q96BG6|ENSEMBL:ENSP00000252699|REFSEQ:NP_004915|H-INV:HIT000032172|VEGA:OTTHUMP00000076071;OTTHUMP00000174445 Tax_Id=9606 Gene_Symbol=ACTN4 Alpha-actinin-4', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 115, 'version': '3.15'}], 'PeptideSequence': 'ALDFIASK', 'ProteinScape:IntensityCoverage': 0.43376827663349576, 'ProteinScape:SequestMetaScore': 6.1021771936508955, 'calculatedMassToCharge': 864.4752, 'chargeState': 1, 'experimentalMassToCharge': 864.474, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=3'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [{'Modification': [{'location': 1, "name": "Oxidation"}], 'PeptideEvidenceRef': [ {'DatabaseName': {'database IPI_human': ''}, 'Modification': [ {'location': 1, "name": "Oxidation"}], 'PeptideSequence': 'MLDAEDIVNTARPDEK', 'Seq': 'MVDYHAANQSYQYGPSSAGNGAGGGGSMGDYMAQEDDWDRDLLLDPAWEKQQRKTFTAWCNSHLRKAGTQIENIDEDFRDGLKLMLLLEVISGERLPKPERGKMRVHKINNVNKALDFIASKGVKLVSIGAEEIVDGNAKMTLGMIWTIILRFAIQDISVEETSAKEGLLLWCQRKTAPYKNVNVQNFHISWKDGLAFNALIHRHRPELIEYDKLRKDDPVTNLNNAFEVAEKYLDIPKMLDAEDIVNTARPDEKAIMTYVSSFYHAFSGAQKAETAANRICKVLAVNQENEHLMEDYEKLASDLLEWIRRTIPWLEDRVPQKTIQEMQQKLEDFRDYRRVHKPPKVQEKCQLEINFNTLQTKLRLSNRPAFMPSEGKMVSDINNGWQHLEQAEKGYEEWLLNEIRRLERLDHLAEKFRQKASIHEAWTDGKEAMLKHRDYETATLSDIKALIRKHEAFESDLAAHQDRVEQIAAIAQELNELDYYDSHNVNTRCQKICDQWDALGSLTHSRREALEKTEKQLEAIDQLHLEYAKRAAPFNNWMESAMEDLQDMFIVHTIEEIEGLISAHDQFKSTLPDADREREAILAIHKEAQRIAESNHIKLSGSNPYTTVTPQIINSKWEKVQQLVPKRDHALLEEQSKQQSNEHLRRQFASQANVVGPWIQTKMEEIGRISIEMNGTLEDQLSHLKQYERSIVDYKPNLDLLEQQHQLIQEALIFDNKHTNYTMEHIRVGWEQLLTTIARTINEVENQILTRDAKGISQEQMQEFRASFNHFDKDHGGALGPEEFKACLISLGYDVENDRQGEAEFNRIMSLVDPNHSGLVTFQAFIDFMSRETTDTDTADQVIASFKVLAGDKNFITAEELRRELPPDQAEYCIARMAPYQGPDAVPGALDYKSFSTALYGESDL', 'accession': 'IPI00013808.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 255, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'A', 'pre': 'K', 'protein description': 'IPI:IPI00013808.1|SWISS-PROT:O43707|TREMBL:Q96BG6|ENSEMBL:ENSP00000252699|REFSEQ:NP_004915|H-INV:HIT000032172|VEGA:OTTHUMP00000076071;OTTHUMP00000174445 Tax_Id=9606 Gene_Symbol=ACTN4 Alpha-actinin-4', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 240, 'version': '3.15'}], 'PeptideSequence': 'MLDAEDIVNTARPDEK', 'ProteinScape:IntensityCoverage': 0.16164593872706742, 'ProteinScape:SequestMetaScore': 5.635013787097159, 'calculatedMassToCharge': 1832.862115, 'chargeState': 1, 'experimentalMassToCharge': 1832.863, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=10'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'SYTSGPGSR', 'Seq': 
'SIRVTQKSYKVSTSGPRAFSSRSYTSGPGSRISSSSFSRVGSSNFRGGLGGGYGGASGMGGITAVTVNQSLLSPLVLEVDPNIQAVRTQEKEQIKTLNNKFASFIDKVRFLEQQNKMLETKWSLLQQQKTARSNMDNMFESYINNLRRQLETLGQEKLKLEAELGNMQGLVEDFKNKYEDEINKRTEMENEFVLIKKDVDEAYMNKVELESRLEGLTDEINFLRQLYEEEIRELQSQISDTSVVLSMDNSRSLDMDSIIAEVKAQYEDIANRSRAEAESMYQIKYEELQSLAGKHGDDLRRTKTEISEMNRNISRLQAEIEGLKGQRASLEAAIADAEQRGELAIKDANAKLSELEAALQRAKQDMARQLREYQELMNVKLALDIEIATYRKLLEGEESRLESGMQNMSIHTKTTGGYAGGLSSAYGGLTSPGLSYSLGSSFGSGAGSSSFSRTSSSRAVVVKKIETRDGKLVSESSDVLPK', 'accession': 'IPI00554648.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 31, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'I', 'pre': 'R', 'protein description': '>IPI:IPI00554648.1|SWISS-PROT:P05787 Tax_Id=9606 Keratin, type II cytoskeletal 8', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 23, 'version': '3.15'}], 'PeptideSequence': 'SYTSGPGSR', 'ProteinScape:IntensityCoverage': 0.6146634530945828, 'ProteinScape:SequestMetaScore': 10.17510605321669, 'calculatedMassToCharge': 911.4144, 'chargeState': 1, 'experimentalMassToCharge': 911.413, 'passThreshold': True, 'rank': 1}, {'Modification': [{'location': 11, "name": "Oxidation"}], 'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'Modification': [{'location': 11, "name": "Oxidation"}], 'PeptideSequence': 'TLTLVDTGIGMTK', 'Seq': 'PEEVHHGEEEVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNASDALDKIRYESLTDPSKLDSGKELKIDIIPNPQERTLTLVDTGIGMTKADLINNLGTIAKSGTKAFMEALQAGADISMIGQFGVGFYSAYLVAEKVVVITKHNDDEQYAWESSAGGSFTVRADHGEPIGRGTKVILHLKEDQTEYLEERRVKEVVKKHSQFIGYPITLYLEKEREKEISDDEAEEEKGEKEEEDKDDEEKPKIEDVGSDEEDDSGKDKKKKTKKIKEKYIDQEELNKTKPIWTRNPDDITQEEYGEFYKSLTNDWEDHLAVKHFSVEGQLEFRALLFIPRRAPFDLFENKKKKNNIKLYVRRVFIMDSCDELIPEYLNFIRGVVDSEDLPLNISREMLQQSKILKVIRKNIVKKCLELFSELAEDKENYKKFYEAFSKNLKLGIHEDSTNRRRLSELLRYHTSQSGDEMTSLSEYVSRMKETQKSIYYITGESKEQVANSAFVERVRKRGFEVVYMTEPIDEYCVQQLKEFDGKSLVSVTKEGLELPEDEEEKKKMEESKAKFENLCKLMKEILDKKVEKVTISNRLVSSPCCIVTSTYGWTANMERIMKAQALRDNSTMGYMMAKKHLEINPDHPIVETLRQKAEADKNDKAVKDLVVLLFETALLSSGFSLEDPQTHSNRIYRMIKLGLGIDEDEVAAEEPNAAVPDEIPPLEGDEDASRMEEVD', 'accession': 'IPI00414676.5', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 94, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'A', 'pre': 'R', 'protein description': '>IPI:IPI00414676.5|SWISS-PROT:P08238|TREMBL:Q5T9W7;Q6PK50;Q9H6X9|ENSEMBL:ENSP00000325875|REFSEQ:NP_031381|H-INV:HIT000008644;HIT000032091;HIT000034201;HIT000035963;HIT000036733;HIT000049765;HIT000057726|VEGA:OTTHUMP00000016517;OTTHUMP00000016518;OTTHUMP00000016519 Tax_Id=9606 Heat shock protein HSP 90-beta', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 82, 'version': '3.15'}], 'PeptideSequence': 'TLTLVDTGIGMTK', 'ProteinScape:IntensityCoverage': 0.2517734933944088, 'ProteinScape:SequestMetaScore': 6.005532583410669, 'calculatedMassToCharge': 1365.722015, 'chargeState': 1, 'experimentalMassToCharge': 1365.721, 'passThreshold': True, 'rank': 2}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=11'}, 
{'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'HNDDEQYAWESSAGGSFTVR', 'Seq': 'PEEVHHGEEEVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNASDALDKIRYESLTDPSKLDSGKELKIDIIPNPQERTLTLVDTGIGMTKADLINNLGTIAKSGTKAFMEALQAGADISMIGQFGVGFYSAYLVAEKVVVITKHNDDEQYAWESSAGGSFTVRADHGEPIGRGTKVILHLKEDQTEYLEERRVKEVVKKHSQFIGYPITLYLEKEREKEISDDEAEEEKGEKEEEDKDDEEKPKIEDVGSDEEDDSGKDKKKKTKKIKEKYIDQEELNKTKPIWTRNPDDITQEEYGEFYKSLTNDWEDHLAVKHFSVEGQLEFRALLFIPRRAPFDLFENKKKKNNIKLYVRRVFIMDSCDELIPEYLNFIRGVVDSEDLPLNISREMLQQSKILKVIRKNIVKKCLELFSELAEDKENYKKFYEAFSKNLKLGIHEDSTNRRRLSELLRYHTSQSGDEMTSLSEYVSRMKETQKSIYYITGESKEQVANSAFVERVRKRGFEVVYMTEPIDEYCVQQLKEFDGKSLVSVTKEGLELPEDEEEKKKMEESKAKFENLCKLMKEILDKKVEKVTISNRLVSSPCCIVTSTYGWTANMERIMKAQALRDNSTMGYMMAKKHLEINPDHPIVETLRQKAEADKNDKAVKDLVVLLFETALLSSGFSLEDPQTHSNRIYRMIKLGLGIDEDEVAAEEPNAAVPDEIPPLEGDEDASRMEEVD', 'accession': 'IPI00414676.5', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 167, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'A', 'pre': 'K', 'protein description': '>IPI:IPI00414676.5|SWISS-PROT:P08238|TREMBL:Q5T9W7;Q6PK50;Q9H6X9|ENSEMBL:ENSP00000325875|REFSEQ:NP_031381|H-INV:HIT000008644;HIT000032091;HIT000034201;HIT000035963;HIT000036733;HIT000049765;HIT000057726|VEGA:OTTHUMP00000016517;OTTHUMP00000016518;OTTHUMP00000016519 Tax_Id=9606 Heat shock protein HSP 90-beta', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 148, 'version': '3.15'}], 'PeptideSequence': 'HNDDEQYAWESSAGGSFTVR', 'ProteinScape:IntensityCoverage': 0.4884754815768041, 'ProteinScape:SequestMetaScore': 12.042955809241318, 'calculatedMassToCharge': 2255.9515, 'chargeState': 1, 'experimentalMassToCharge': 2255.95, 'passThreshold': True, 'rank': 1}, {'Modification': [{'location': 13, "name": "Oxidation"}], 'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'Modification': [{'location': 13, "name": "Oxidation"}], 'PeptideSequence': 'GGLGGGYGGASGMGGITAVTVNQSLLSPLVLEVDPNIQAVR', 'Seq': 'SIRVTQKSYKVSTSGPRAFSSRSYTSGPGSRISSSSFSRVGSSNFRGGLGGGYGGASGMGGITAVTVNQSLLSPLVLEVDPNIQAVRTQEKEQIKTLNNKFASFIDKVRFLEQQNKMLETKWSLLQQQKTARSNMDNMFESYINNLRRQLETLGQEKLKLEAELGNMQGLVEDFKNKYEDEINKRTEMENEFVLIKKDVDEAYMNKVELESRLEGLTDEINFLRQLYEEEIRELQSQISDTSVVLSMDNSRSLDMDSIIAEVKAQYEDIANRSRAEAESMYQIKYEELQSLAGKHGDDLRRTKTEISEMNRNISRLQAEIEGLKGQRASLEAAIADAEQRGELAIKDANAKLSELEAALQRAKQDMARQLREYQELMNVKLALDIEIATYRKLLEGEESRLESGMQNMSIHTKTTGGYAGGLSSAYGGLTSPGLSYSLGSSFGSGAGSSSFSRTSSSRAVVVKKIETRDGKLVSESSDVLPK', 'accession': 'IPI00554648.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 87, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'T', 'pre': 'R', 'protein description': '>IPI:IPI00554648.1|SWISS-PROT:P05787 Tax_Id=9606 Keratin, type II cytoskeletal 8', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 47, 'version': '3.15'}], 'PeptideSequence': 'GGLGGGYGGASGMGGITAVTVNQSLLSPLVLEVDPNIQAVR', 'ProteinScape:IntensityCoverage': 0.554279316913958, 
'ProteinScape:SequestMetaScore': 3.1184106313104283, 'calculatedMassToCharge': 3941.036315, 'chargeState': 1, 'experimentalMassToCharge': 3941.081, 'passThreshold': True, 'rank': 2}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=12'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'SYTSGPGSR', 'Seq': 'SIRVTQKSYKVSTSGPRAFSSRSYTSGPGSRISSSSFSRVGSSNFRGGLGGGYGGASGMGGITAVTVNQSLLSPLVLEVDPNIQAVRTQEKEQIKTLNNKFASFIDKVRFLEQQNKMLETKWSLLQQQKTARSNMDNMFESYINNLRRQLETLGQEKLKLEAELGNMQGLVEDFKNKYEDEINKRTEMENEFVLIKKDVDEAYMNKVELESRLEGLTDEINFLRQLYEEEIRELQSQISDTSVVLSMDNSRSLDMDSIIAEVKAQYEDIANRSRAEAESMYQIKYEELQSLAGKHGDDLRRTKTEISEMNRNISRLQAEIEGLKGQRASLEAAIADAEQRGELAIKDANAKLSELEAALQRAKQDMARQLREYQELMNVKLALDIEIATYRKLLEGEESRLESGMQNMSIHTKTTGGYAGGLSSAYGGLTSPGLSYSLGSSFGSGAGSSSFSRTSSSRAVVVKKIETRDGKLVSESSDVLPK', 'accession': 'IPI00554648.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 31, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'I', 'pre': 'R', 'protein description': '>IPI:IPI00554648.1|SWISS-PROT:P05787 Tax_Id=9606 Keratin, type II cytoskeletal 8', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 23, 'version': '3.15'}], 'PeptideSequence': 'SYTSGPGSR', 'ProteinScape:IntensityCoverage': 0.39717937427768873, 'ProteinScape:SequestMetaScore': 4.159878401845841, 'calculatedMassToCharge': 911.4144, 'chargeState': 1, 'experimentalMassToCharge': 911.415, 'passThreshold': True, 'rank': 1}, {'Modification': [{'location': 10, "name": "Oxidation"}], 'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'Modification': [{'location': 10, "name": "Oxidation"}], 'PeptideSequence': 'YHTSQSGDEMTSLSEYVSR', 'Seq': 'PEEVHHGEEEVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNASDALDKIRYESLTDPSKLDSGKELKIDIIPNPQERTLTLVDTGIGMTKADLINNLGTIAKSGTKAFMEALQAGADISMIGQFGVGFYSAYLVAEKVVVITKHNDDEQYAWESSAGGSFTVRADHGEPIGRGTKVILHLKEDQTEYLEERRVKEVVKKHSQFIGYPITLYLEKEREKEISDDEAEEEKGEKEEEDKDDEEKPKIEDVGSDEEDDSGKDKKKKTKKIKEKYIDQEELNKTKPIWTRNPDDITQEEYGEFYKSLTNDWEDHLAVKHFSVEGQLEFRALLFIPRRAPFDLFENKKKKNNIKLYVRRVFIMDSCDELIPEYLNFIRGVVDSEDLPLNISREMLQQSKILKVIRKNIVKKCLELFSELAEDKENYKKFYEAFSKNLKLGIHEDSTNRRRLSELLRYHTSQSGDEMTSLSEYVSRMKETQKSIYYITGESKEQVANSAFVERVRKRGFEVVYMTEPIDEYCVQQLKEFDGKSLVSVTKEGLELPEDEEEKKKMEESKAKFENLCKLMKEILDKKVEKVTISNRLVSSPCCIVTSTYGWTANMERIMKAQALRDNSTMGYMMAKKHLEINPDHPIVETLRQKAEADKNDKAVKDLVVLLFETALLSSGFSLEDPQTHSNRIYRMIKLGLGIDEDEVAAEEPNAAVPDEIPPLEGDEDASRMEEVD', 'accession': 'IPI00414676.5', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 474, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'M', 'pre': 'R', 'protein description': 
'>IPI:IPI00414676.5|SWISS-PROT:P08238|TREMBL:Q5T9W7;Q6PK50;Q9H6X9|ENSEMBL:ENSP00000325875|REFSEQ:NP_031381|H-INV:HIT000008644;HIT000032091;HIT000034201;HIT000035963;HIT000036733;HIT000049765;HIT000057726|VEGA:OTTHUMP00000016517;OTTHUMP00000016518;OTTHUMP00000016519 Tax_Id=9606 Heat shock protein HSP 90-beta', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 456, 'version': '3.15'}], 'PeptideSequence': 'YHTSQSGDEMTSLSEYVSR', 'ProteinScape:IntensityCoverage': 0.136423966822031, 'ProteinScape:SequestMetaScore': 5.725397508852668, 'calculatedMassToCharge': 2192.932715, 'chargeState': 1, 'experimentalMassToCharge': 2192.9, 'passThreshold': True, 'rank': 2}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=13'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'ENDILKMPIAAVR', 'Seq': 'YQQKTRHDAFPRIQLALRAGAKSKEDLFSEPKLYEDKEQRISSELPEEALDDSSPSDTSLIAVEREISSNLRHSSTNYEEHLRKLKKDKYSEAVDPSGKPLGLLVILGNQYMREDEIATEERMPENKSTDISTYIKEFDGKEIAFPTENSYFPARGDKSKAVTAKWKRFTPFPLQTDDNSAAKLTEVFKIQLTTVGKLGKCSEGGENKEDKCTDNLCLNPPDELTEHREAPVTDTKPIMPEQIIITTKEISDSQLENRDETAPIALDLIALVSVNSLTGETNRKMEEYKSKKRRTTRLEETGFHILHNREVNMDNGECVFPIHYDAAQEMQPHTDSSKVMKASEFQKDIILMELIEKFDHNVKLLSKDCVEVQNSMANLELDKESEEQAVGDGSCLTRGTLVIKAQNGKNTGENDSTKETREYLRKKMDEAEDYGLVLKDDTYIEVWDVAVQITYLLKRSIEWPESECIDIEPFVHKAKSYINVEKETESHKLVQHKYKCRPLFRGENKDVTKRLLYGKILLEDKLSSASGLKSTELWDLDEDYEVTWRQEILLNKNKKVAPDEKETYIVLQNNLYPMQPVVMPSIPARMMSLENDILKMPIAAVRVKVVDITINKFLMGEINADDMEFPTSGFLEQHSDFGGLANGESQNDKPTEGQAREIEPIAKEEYVIDLEWGLRITQPTSEYGYDGELVVYVTKGMGKQTEPESLSYVCVVEGFAKAQLPERQITGKEDSGFIRRQQRLLEDDFSMKKKIFEGLDPILSEKKLDMQFRKFSMKWQYLKDILPADFDKEKEIFREFHKIEIKVSFGHKGMESKAALYIKFYETFERELKQFLSAEPRYRDDACEGLQLEV', 'accession': 'SHD00382470.3', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 191, 'isDecoy': True, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'V', 'pre': 'L', 'protein description': '>SHD:SHD00382470.3|SWISS-PROT:P07900-2|TREMBL:Q86SX1|ENSEMBL:ENSP00000335153|REFSEQ:NP_001017963|VEGA:OTTHUMP00000041671 Tax_Id=9606 Gene_Symbol=HSP90AA1 heat shock protein 90kDa alpha (cytosolic), class A member 1 isoform 1', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 183, 'version': '3.15'}], 'PeptideSequence': 'ENDILKMPIAAVR', 'ProteinScape:IntensityCoverage': 0.2854129700126088, 'ProteinScape:SequestMetaScore': 6.181682868401155, 'calculatedMassToCharge': 1469.8071, 'chargeState': 1, 'experimentalMassToCharge': 1469.806, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=15'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'LLEDDFSMKK', 'Seq': 
'YQQKTRHDAFPRIQLALRAGAKSKEDLFSEPKLYEDKEQRISSELPEEALDDSSPSDTSLIAVEREISSNLRHSSTNYEEHLRKLKKDKYSEAVDPSGKPLGLLVILGNQYMREDEIATEERMPENKSTDISTYIKEFDGKEIAFPTENSYFPARGDKSKAVTAKWKRFTPFPLQTDDNSAAKLTEVFKIQLTTVGKLGKCSEGGENKEDKCTDNLCLNPPDELTEHREAPVTDTKPIMPEQIIITTKEISDSQLENRDETAPIALDLIALVSVNSLTGETNRKMEEYKSKKRRTTRLEETGFHILHNREVNMDNGECVFPIHYDAAQEMQPHTDSSKVMKASEFQKDIILMELIEKFDHNVKLLSKDCVEVQNSMANLELDKESEEQAVGDGSCLTRGTLVIKAQNGKNTGENDSTKETREYLRKKMDEAEDYGLVLKDDTYIEVWDVAVQITYLLKRSIEWPESECIDIEPFVHKAKSYINVEKETESHKLVQHKYKCRPLFRGENKDVTKRLLYGKILLEDKLSSASGLKSTELWDLDEDYEVTWRQEILLNKNKKVAPDEKETYIVLQNNLYPMQPVVMPSIPARMMSLENDILKMPIAAVRVKVVDITINKFLMGEINADDMEFPTSGFLEQHSDFGGLANGESQNDKPTEGQAREIEPIAKEEYVIDLEWGLRITQPTSEYGYDGELVVYVTKGMGKQTEPESLSYVCVVEGFAKAQLPERQITGKEDSGFIRRQQRLLEDDFSMKKKIFEGLDPILSEKKLDMQFRKFSMKWQYLKDILPADFDKEKEIFREFHKIEIKVSFGHKGMESKAALYIKFYETFERELKQFLSAEPRYRDDACEGLQLEV', 'accession': 'SHD00382470.3', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 234, 'isDecoy': True, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'K', 'pre': 'R', 'protein description': '>SHD:SHD00382470.3|SWISS-PROT:P07900-2|TREMBL:Q86SX1|ENSEMBL:ENSP00000335153|REFSEQ:NP_001017963|VEGA:OTTHUMP00000041671 Tax_Id=9606 Gene_Symbol=HSP90AA1 heat shock protein 90kDa alpha (cytosolic), class A member 1 isoform 1', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 223, 'version': '3.15'}], 'PeptideSequence': 'LLEDDFSMKK', 'ProteinScape:IntensityCoverage': 0.29049959198538566, 'ProteinScape:SequestMetaScore': 6.669916225794168, 'calculatedMassToCharge': 1225.6059, 'chargeState': 1, 'experimentalMassToCharge': 1225.604, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=20'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'DGHNLISLLEVLSGDSLPR', 'Seq': 
'MKIVPDERDRVQKKTFTKWVNKHLIKAQRHISDLYEDLRDGHNLISLLEVLSGDSLPREKGRMRFHKLQNVQIALDYLRHRQVKLVNIRNDDIADGNPKLTLGLIWTIILHFQISDIQVSGQSEDMTAKEKLLLWSQRMVEGYQGLRCDNFTSSWRDGRLFNAIIHRHKPLLIDMNKVYRQTNLENLDQAFSVAERDLGVTRLLDPEDVDVPQPDEKSIITYVSSLYDAMPRVPDVQDGVRANELQLRWQEYRELVLLLLQWMRHHTAAFEERRFPSSFEEIEILWSQFLKFKEMELPAKEADKNRSKGIYQSLEGAVQAGQLKVPPGYHPLDVEKEWGKLHVAILEREKQLRSEFERLECLQRIVTKLQMEAGLCEEQLNQADALLQSDVRLLAAGKVPQRAGEVERDLDKADSMIRLLFNDVQTLKDGRHPQGEQMYRRVYRLHERLVAIRTEYNLRLKAGVAAPATQVAQVTLQSVQRRPELEDSTLRYLQDLLAWVEENQHRVDGAEWGVDLPSVEAQLGSHRGLHQSIEEFRAKIERARSDEGQLSPATRGAYRDCLGRLDLQYAKLLNSSKARLRSLESLHSFVAAATKELMWLNEKEEEEVGFDWSDRNTNMTAKKESYSALMRELELKEKKIKELQNAGDRLLREDHPARPTVESFQAALQTQWSWMLQLCCCIEAHLKENAAYFQFFSDVREAEGQLQKLQEALRRKYSCDRSATVTRLEDLLQDAQDEKEQLNEYKGHLSGLAKRAKAVVQLKPRHPAHPMRGRLPLLAVCDYKQVEVTVHKGDECQLVGPAQPSHWKVLSSSGSEAAVPSVCFLVPPPNQEAQEAVTRLEAQHQALVTLWHQLHVDMKSLLAWQSLRRDVQLIRSWSLATFRTLKPEEQRQALHSLELHYQAFLRDSQDAGGFGPEDRLMAEREYGSCSHHYQQLLQSLEQGAQEESRCQRCISELKDIRLQLEACETRTVHRLRLPLDKEPARECAQRIAEQQKAQAEVEGLGKGVARLSAEAEKVLALPEPSPAAPTLRSELELTLGKLEQVRSLSAIYLEKLKTISLVIRGTQGAEEVLRAHEEQLKEAQAVPATLPELEATKASLKKLRAQAEAQQPTFDALRDELRGAQEVGERLQQRHGERDVEVERWRERVAQLLERWQAVLAQTDVRQRELEQLGRQLRYYRESADPLGAWLQDARRRQEQIQAMPLADSQAVREQLRQEQALLEEIERHGEKVEECQRFAKQYINAIKDYELQLVTYKAQLEPVASPAKKPKVQSGSESVIQEYVDLRTHYSELTTLTSQYIKFISETLRRMEEEERLAEQQRAEERERLAEVEAALEKQRQLAEAHAQAKAQAEREAKELQQRMQEEVVRREEAAVDAQQQKRSIQEELQQLRQSSEAEIQAKARQAEAAERSRLRIEEEIRVVRLQLEATERQRGGAEGELQALRARAEEAEAQKRQAQEEAERLRRQVQDESQRKRQAEVELASRVKAEAEAAREKQRALQALEELRLQAEEAERRLRQAEVERARQVQVALETAQRSAEAELQSKRASFAEKTAQLERSLQEEHVAVAQLREEAERRAQQQAEAERAREEAERELERWQLKANEALRLRLQAEEVAQQKSLAQAEAEKQKEEAEREARRRGKAEEQAVRQRELAEQELEKQRQLAEGTAQQRLAAEQELIRLRAETEQGEQQRQLLEEELARLQREAAAATQKRQELEAELAKVRAEMEVLLASKARAEEESRSTSEKSKQRLEAEAGRFRELAEEAARLRALAEEAKRQRQLAEEDAARQRAEAERVLAEKLAAIGEATRLKTEAEIALKEKEAENERLRRLAEDEAFQRRRLEEQAAQHKADIEERLAQLRKASDSELERQKGLVEDTLRQRRQVEEEILALKASFEKAAAGKAELELELGRIRSNAEDTLRSKEQAELEAARQRQLAAEEERRRREAEERVQKSLAAEEEAARQRKAALEEVERLKAKVEEARRLRERAEQESARQLQLAQEAAQKRLQAEEKAHAFAVQQKEQELQQTLQQEQSVLDQLRGEAEAARRAAEEAEEARVQAEREAAQSRRQVEEAERLKQSAEEQAQARAQAQAAAEKLRKEAEQEAARRAQAEQAALRQKQAADAEMEKHKKFAEQTLRQKAQVEQELTTLRLQLEETDHQKNLLDEELQRLKAEATEAARQRSQVEEELFSVRVQMEELSKLKARIEAENRALILRDKDNTQRFLQEEAEKMKQVAEEAARLSVAAQEAARLRQLAEEDLAQQRALAEKMLKEKMQAVQEATRLKAEAELLQQQKELAQEQARRLQEDKEQMAQQLAEETQGFQRTLEAERQRQLEMSAEAERLKLRVAEMSRAQARAEEDAQRFRKQAEEIGEKLHRTELATQEKVTLVQTLEIQRQQSDHDAERLREAIAELEREKEKLQQEAKLLQLKSEEMQTVQQEQLLQETQALQQSFLSEKDSLLQRERFIEQEKAKLEQLFQDEVAKAQQLREEQQRQQQQMEQERQRLVASMEEARRRQHEAEEGVRRKQEELQQLEQQRRQQEELLAEENQRLREQLQLLEEQHRAALAHSEEVTASQVAATKTLPNGRDALDGPAAEAEPEHSFDGLRRKVSAQRLQEAGILSAEELQRLAQGHTTVDELARREDVRHYLQGRSSIAGLLLKATNEKLSVYAALQRQLLSPGTALILLEAQAASGFLLDPVRNRRLTVNEAVKEGVVGPELHHKLLSAERAVTGYKDPYTGQQISLFQAMQKGLIVREHGIRLLEAQIATGGVIDPVHSHRVPVDVAYRRGYFDEEMNRVLADPSDDTKGFFDPNTHENLTYLQLLERCVEDPETGLCLLPLTDKAAKGGELVYTDSEARDVFEKATVSAPFGKFQGKTVTIWEIINSEYFTAEQRRDLLRQFRTGRITVEKIIKIIITVVEEQEQKGRLCFEGLRSLVPAAELLESRVIDRELYQQLQRGERSVRDVAEVDTVRRALRGANVIAGVWLEEAGQKLSIYNALKKDLLPSDMAVALLEAQAGTGHIIDPATSARLTVDEAVRAGLVGPEFHEKLLSAEKAVTGYRDPYTGQSVSLFQALKKGLIPREQGLRLLDAQLSTGGIVDPSKSHRVPLDVACARGCLDEETSRALSAPRADAKAYSDPSTGEPATYGELQQRCRPDQLTGLSLLPLSEKAARARQEELYSELQARETFEKTPVEVPVGGFKGRTVTVWELISSEYFTAEQRQELLRQFRTGKVTVEKVIKILITIVEEVETLRQERLSFSGLRAPVPASELLASGVLSRAQFEQLKDGKTTVKDLSELGSVRTLLQGSGCLAGIYLEDTKEKVSIYEAMRRGLLRATTAALLLEAQAATGFLVDPVRNQRLYVHEAVKAGVVGPELHEQLLSAEKAVTGYRDPYSGSTISLFQAMQKGLVLRQHGIRLLEAQIATGGIIDPVHSHRVPVDVAYQRGYFSEEMNRVLADPSDDTKGFFDPNTHENLTYRQLLERCVEDPETGLRLLPLKGAEKAEVVETTQVYTEEETRRAFEETQIDIPGGGSHGGSTMS
LWEVMQSDLIPEEQRAQLMADFQAGRVTKERMIIIIIEIIEKTEIIRQQGLASYDYVRRRLTAEDLFEARIISLETYNLLREGTRSLREALEAESAWCYLYGTGSVAGVYLPGSRQTLSIYQALKKGLLSAEVARLLLEAQAATGFLLDPVKGERLTVDEAVRKGLVGPELHDRLLSAERAVTGYRDPYTEQTISLFQAMKKELIPTEEALRLLDAQLATGGIVDPRLGFHLPLEVAYQRGYLNKDTHDQLSEPSEVRSYVDPSTDERLSYTQLLRRCRRDDGTGQLLLPLSDARKLTFRGLRKQITMEELVRSQVMDEATALQLREGLTSIEEVTKNLQKFLEGTSCIAGVFVDATKERLSVYQAMKKGIIRPGTAFELLEAQAATGYVIDPIKGLKLTVEEAVRMGIVGPEFKDKLLSAERAVTGYKDPYSGKLISLFQAMKKGLILKDHGIRLLEAQIATGGIIDPEESHRLPVEVAYKRGLFDEEMNEILTDPSDDTKGFFDPNTEENLTYLQLMERCITDPQTGLCLLPLKEKKRERKTSSKSSVRKRRVVIVDPETGKEMSVYEAYRKGLIDHQTYLELSEQECEWEEITISSSDGVVKSMIIDRRSGRQYDIDDAIAKNLIDRSALDQYRAGTLSITEFADMLSGNAGGFRSRSSSVGSSSSYPISPAVSRTQLASWSDPTEETGPVAGILDTETLEKVSITEAMHRNLVDNITGQRLLEAQACTGGIIDPSTGERFPVTDAVNKGLVDKIMVDRINLAQKAFCGFEDPRTKTKMSAAQALKKGWLYYEAGQRFLEVQYLTGGLIEPDTPGRVPLDEALQRGTVDARTAQKLRDVGAYSKYLTCPKTKLKISYKDALDRSMVEEGTGLRLLEAAAQSTKGYYSPYSVSGSGSTAGSRTGSRTGSRAGSRRGSFDATGSGFSMTFSSSSYSSSGYGRRYASGSSASLGGPESAVA', 'accession': 'IPI00398776.3', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 59, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'E', 'pre': 'R', 'protein description': '>IPI:IPI00398776.3|TREMBL:Q6S379;Q96IE3|REFSEQ:NP_958783 Tax_Id=9606 plectin 1 isoform 7', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 40, 'version': '3.15'}], 'PeptideSequence': 'DGHNLISLLEVLSGDSLPR', 'calculatedMassToCharge': 2035.0745, 'chargeState': 1, 'experimentalMassToCharge': 2035.075, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=2'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'QTNLENLDQAFSVAER', 'Seq': 
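# same plectin 1 sequence (IPI00398776.3), repeated for the QTNLENLDQAFSVAER evidence: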
'MKIVPDERDRVQKKTFTKWVNKHLIKAQRHISDLYEDLRDGHNLISLLEVLSGDSLPREKGRMRFHKLQNVQIALDYLRHRQVKLVNIRNDDIADGNPKLTLGLIWTIILHFQISDIQVSGQSEDMTAKEKLLLWSQRMVEGYQGLRCDNFTSSWRDGRLFNAIIHRHKPLLIDMNKVYRQTNLENLDQAFSVAERDLGVTRLLDPEDVDVPQPDEKSIITYVSSLYDAMPRVPDVQDGVRANELQLRWQEYRELVLLLLQWMRHHTAAFEERRFPSSFEEIEILWSQFLKFKEMELPAKEADKNRSKGIYQSLEGAVQAGQLKVPPGYHPLDVEKEWGKLHVAILEREKQLRSEFERLECLQRIVTKLQMEAGLCEEQLNQADALLQSDVRLLAAGKVPQRAGEVERDLDKADSMIRLLFNDVQTLKDGRHPQGEQMYRRVYRLHERLVAIRTEYNLRLKAGVAAPATQVAQVTLQSVQRRPELEDSTLRYLQDLLAWVEENQHRVDGAEWGVDLPSVEAQLGSHRGLHQSIEEFRAKIERARSDEGQLSPATRGAYRDCLGRLDLQYAKLLNSSKARLRSLESLHSFVAAATKELMWLNEKEEEEVGFDWSDRNTNMTAKKESYSALMRELELKEKKIKELQNAGDRLLREDHPARPTVESFQAALQTQWSWMLQLCCCIEAHLKENAAYFQFFSDVREAEGQLQKLQEALRRKYSCDRSATVTRLEDLLQDAQDEKEQLNEYKGHLSGLAKRAKAVVQLKPRHPAHPMRGRLPLLAVCDYKQVEVTVHKGDECQLVGPAQPSHWKVLSSSGSEAAVPSVCFLVPPPNQEAQEAVTRLEAQHQALVTLWHQLHVDMKSLLAWQSLRRDVQLIRSWSLATFRTLKPEEQRQALHSLELHYQAFLRDSQDAGGFGPEDRLMAEREYGSCSHHYQQLLQSLEQGAQEESRCQRCISELKDIRLQLEACETRTVHRLRLPLDKEPARECAQRIAEQQKAQAEVEGLGKGVARLSAEAEKVLALPEPSPAAPTLRSELELTLGKLEQVRSLSAIYLEKLKTISLVIRGTQGAEEVLRAHEEQLKEAQAVPATLPELEATKASLKKLRAQAEAQQPTFDALRDELRGAQEVGERLQQRHGERDVEVERWRERVAQLLERWQAVLAQTDVRQRELEQLGRQLRYYRESADPLGAWLQDARRRQEQIQAMPLADSQAVREQLRQEQALLEEIERHGEKVEECQRFAKQYINAIKDYELQLVTYKAQLEPVASPAKKPKVQSGSESVIQEYVDLRTHYSELTTLTSQYIKFISETLRRMEEEERLAEQQRAEERERLAEVEAALEKQRQLAEAHAQAKAQAEREAKELQQRMQEEVVRREEAAVDAQQQKRSIQEELQQLRQSSEAEIQAKARQAEAAERSRLRIEEEIRVVRLQLEATERQRGGAEGELQALRARAEEAEAQKRQAQEEAERLRRQVQDESQRKRQAEVELASRVKAEAEAAREKQRALQALEELRLQAEEAERRLRQAEVERARQVQVALETAQRSAEAELQSKRASFAEKTAQLERSLQEEHVAVAQLREEAERRAQQQAEAERAREEAERELERWQLKANEALRLRLQAEEVAQQKSLAQAEAEKQKEEAEREARRRGKAEEQAVRQRELAEQELEKQRQLAEGTAQQRLAAEQELIRLRAETEQGEQQRQLLEEELARLQREAAAATQKRQELEAELAKVRAEMEVLLASKARAEEESRSTSEKSKQRLEAEAGRFRELAEEAARLRALAEEAKRQRQLAEEDAARQRAEAERVLAEKLAAIGEATRLKTEAEIALKEKEAENERLRRLAEDEAFQRRRLEEQAAQHKADIEERLAQLRKASDSELERQKGLVEDTLRQRRQVEEEILALKASFEKAAAGKAELELELGRIRSNAEDTLRSKEQAELEAARQRQLAAEEERRRREAEERVQKSLAAEEEAARQRKAALEEVERLKAKVEEARRLRERAEQESARQLQLAQEAAQKRLQAEEKAHAFAVQQKEQELQQTLQQEQSVLDQLRGEAEAARRAAEEAEEARVQAEREAAQSRRQVEEAERLKQSAEEQAQARAQAQAAAEKLRKEAEQEAARRAQAEQAALRQKQAADAEMEKHKKFAEQTLRQKAQVEQELTTLRLQLEETDHQKNLLDEELQRLKAEATEAARQRSQVEEELFSVRVQMEELSKLKARIEAENRALILRDKDNTQRFLQEEAEKMKQVAEEAARLSVAAQEAARLRQLAEEDLAQQRALAEKMLKEKMQAVQEATRLKAEAELLQQQKELAQEQARRLQEDKEQMAQQLAEETQGFQRTLEAERQRQLEMSAEAERLKLRVAEMSRAQARAEEDAQRFRKQAEEIGEKLHRTELATQEKVTLVQTLEIQRQQSDHDAERLREAIAELEREKEKLQQEAKLLQLKSEEMQTVQQEQLLQETQALQQSFLSEKDSLLQRERFIEQEKAKLEQLFQDEVAKAQQLREEQQRQQQQMEQERQRLVASMEEARRRQHEAEEGVRRKQEELQQLEQQRRQQEELLAEENQRLREQLQLLEEQHRAALAHSEEVTASQVAATKTLPNGRDALDGPAAEAEPEHSFDGLRRKVSAQRLQEAGILSAEELQRLAQGHTTVDELARREDVRHYLQGRSSIAGLLLKATNEKLSVYAALQRQLLSPGTALILLEAQAASGFLLDPVRNRRLTVNEAVKEGVVGPELHHKLLSAERAVTGYKDPYTGQQISLFQAMQKGLIVREHGIRLLEAQIATGGVIDPVHSHRVPVDVAYRRGYFDEEMNRVLADPSDDTKGFFDPNTHENLTYLQLLERCVEDPETGLCLLPLTDKAAKGGELVYTDSEARDVFEKATVSAPFGKFQGKTVTIWEIINSEYFTAEQRRDLLRQFRTGRITVEKIIKIIITVVEEQEQKGRLCFEGLRSLVPAAELLESRVIDRELYQQLQRGERSVRDVAEVDTVRRALRGANVIAGVWLEEAGQKLSIYNALKKDLLPSDMAVALLEAQAGTGHIIDPATSARLTVDEAVRAGLVGPEFHEKLLSAEKAVTGYRDPYTGQSVSLFQALKKGLIPREQGLRLLDAQLSTGGIVDPSKSHRVPLDVACARGCLDEETSRALSAPRADAKAYSDPSTGEPATYGELQQRCRPDQLTGLSLLPLSEKAARARQEELYSELQARETFEKTPVEVPVGGFKGRTVTVWELISSEYFTAEQRQELLRQFRTGKVTVEKVIKILITIVEEVETLRQERLSFSGLRAPVPASELLASGVLSRAQFEQLKDGKTTVKDLSELGSVRTLLQGSGCLAGIYLEDTKEKVSIYEAMRRGLLRATTAALLLEAQAATGFLVDPVRNQRLYVHEAVKAGVVGPELHEQLLSAEKAVTGYRDPYSGSTISLFQAMQKGLVLRQHGIRLLEAQIATGGIIDPVHSHRVPVDVAYQRGYFSEEMNRVLADPSDDTKGFFDPNTHENLTYRQLLERCVEDPETGLRLLPLKGAEKAEVVETTQVYTEEETRRAFEETQIDIPGGGSHGGSTMS
LWEVMQSDLIPEEQRAQLMADFQAGRVTKERMIIIIIEIIEKTEIIRQQGLASYDYVRRRLTAEDLFEARIISLETYNLLREGTRSLREALEAESAWCYLYGTGSVAGVYLPGSRQTLSIYQALKKGLLSAEVARLLLEAQAATGFLLDPVKGERLTVDEAVRKGLVGPELHDRLLSAERAVTGYRDPYTEQTISLFQAMKKELIPTEEALRLLDAQLATGGIVDPRLGFHLPLEVAYQRGYLNKDTHDQLSEPSEVRSYVDPSTDERLSYTQLLRRCRRDDGTGQLLLPLSDARKLTFRGLRKQITMEELVRSQVMDEATALQLREGLTSIEEVTKNLQKFLEGTSCIAGVFVDATKERLSVYQAMKKGIIRPGTAFELLEAQAATGYVIDPIKGLKLTVEEAVRMGIVGPEFKDKLLSAERAVTGYKDPYSGKLISLFQAMKKGLILKDHGIRLLEAQIATGGIIDPEESHRLPVEVAYKRGLFDEEMNEILTDPSDDTKGFFDPNTEENLTYLQLMERCITDPQTGLCLLPLKEKKRERKTSSKSSVRKRRVVIVDPETGKEMSVYEAYRKGLIDHQTYLELSEQECEWEEITISSSDGVVKSMIIDRRSGRQYDIDDAIAKNLIDRSALDQYRAGTLSITEFADMLSGNAGGFRSRSSSVGSSSSYPISPAVSRTQLASWSDPTEETGPVAGILDTETLEKVSITEAMHRNLVDNITGQRLLEAQACTGGIIDPSTGERFPVTDAVNKGLVDKIMVDRINLAQKAFCGFEDPRTKTKMSAAQALKKGWLYYEAGQRFLEVQYLTGGLIEPDTPGRVPLDEALQRGTVDARTAQKLRDVGAYSKYLTCPKTKLKISYKDALDRSMVEEGTGLRLLEAAAQSTKGYYSPYSVSGSGSTAGSRTGSRTGSRAGSRRGSFDATGSGFSMTFSSSSYSSSGYGRRYASGSSASLGGPESAVA', 'accession': 'IPI00398776.3', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 197, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'D', 'pre': 'R', 'protein description': '>IPI:IPI00398776.3|TREMBL:Q6S379;Q96IE3|REFSEQ:NP_958783 Tax_Id=9606 plectin 1 isoform 7', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 181, 'version': '3.15'}], 'PeptideSequence': 'QTNLENLDQAFSVAER', 'calculatedMassToCharge': 1834.8856, 'chargeState': 1, 'experimentalMassToCharge': 1834.884, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=3'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'HHTAAFEER', 'Seq': 
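# same plectin 1 sequence (IPI00398776.3), repeated for the HHTAAFEER evidence: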
'MKIVPDERDRVQKKTFTKWVNKHLIKAQRHISDLYEDLRDGHNLISLLEVLSGDSLPREKGRMRFHKLQNVQIALDYLRHRQVKLVNIRNDDIADGNPKLTLGLIWTIILHFQISDIQVSGQSEDMTAKEKLLLWSQRMVEGYQGLRCDNFTSSWRDGRLFNAIIHRHKPLLIDMNKVYRQTNLENLDQAFSVAERDLGVTRLLDPEDVDVPQPDEKSIITYVSSLYDAMPRVPDVQDGVRANELQLRWQEYRELVLLLLQWMRHHTAAFEERRFPSSFEEIEILWSQFLKFKEMELPAKEADKNRSKGIYQSLEGAVQAGQLKVPPGYHPLDVEKEWGKLHVAILEREKQLRSEFERLECLQRIVTKLQMEAGLCEEQLNQADALLQSDVRLLAAGKVPQRAGEVERDLDKADSMIRLLFNDVQTLKDGRHPQGEQMYRRVYRLHERLVAIRTEYNLRLKAGVAAPATQVAQVTLQSVQRRPELEDSTLRYLQDLLAWVEENQHRVDGAEWGVDLPSVEAQLGSHRGLHQSIEEFRAKIERARSDEGQLSPATRGAYRDCLGRLDLQYAKLLNSSKARLRSLESLHSFVAAATKELMWLNEKEEEEVGFDWSDRNTNMTAKKESYSALMRELELKEKKIKELQNAGDRLLREDHPARPTVESFQAALQTQWSWMLQLCCCIEAHLKENAAYFQFFSDVREAEGQLQKLQEALRRKYSCDRSATVTRLEDLLQDAQDEKEQLNEYKGHLSGLAKRAKAVVQLKPRHPAHPMRGRLPLLAVCDYKQVEVTVHKGDECQLVGPAQPSHWKVLSSSGSEAAVPSVCFLVPPPNQEAQEAVTRLEAQHQALVTLWHQLHVDMKSLLAWQSLRRDVQLIRSWSLATFRTLKPEEQRQALHSLELHYQAFLRDSQDAGGFGPEDRLMAEREYGSCSHHYQQLLQSLEQGAQEESRCQRCISELKDIRLQLEACETRTVHRLRLPLDKEPARECAQRIAEQQKAQAEVEGLGKGVARLSAEAEKVLALPEPSPAAPTLRSELELTLGKLEQVRSLSAIYLEKLKTISLVIRGTQGAEEVLRAHEEQLKEAQAVPATLPELEATKASLKKLRAQAEAQQPTFDALRDELRGAQEVGERLQQRHGERDVEVERWRERVAQLLERWQAVLAQTDVRQRELEQLGRQLRYYRESADPLGAWLQDARRRQEQIQAMPLADSQAVREQLRQEQALLEEIERHGEKVEECQRFAKQYINAIKDYELQLVTYKAQLEPVASPAKKPKVQSGSESVIQEYVDLRTHYSELTTLTSQYIKFISETLRRMEEEERLAEQQRAEERERLAEVEAALEKQRQLAEAHAQAKAQAEREAKELQQRMQEEVVRREEAAVDAQQQKRSIQEELQQLRQSSEAEIQAKARQAEAAERSRLRIEEEIRVVRLQLEATERQRGGAEGELQALRARAEEAEAQKRQAQEEAERLRRQVQDESQRKRQAEVELASRVKAEAEAAREKQRALQALEELRLQAEEAERRLRQAEVERARQVQVALETAQRSAEAELQSKRASFAEKTAQLERSLQEEHVAVAQLREEAERRAQQQAEAERAREEAERELERWQLKANEALRLRLQAEEVAQQKSLAQAEAEKQKEEAEREARRRGKAEEQAVRQRELAEQELEKQRQLAEGTAQQRLAAEQELIRLRAETEQGEQQRQLLEEELARLQREAAAATQKRQELEAELAKVRAEMEVLLASKARAEEESRSTSEKSKQRLEAEAGRFRELAEEAARLRALAEEAKRQRQLAEEDAARQRAEAERVLAEKLAAIGEATRLKTEAEIALKEKEAENERLRRLAEDEAFQRRRLEEQAAQHKADIEERLAQLRKASDSELERQKGLVEDTLRQRRQVEEEILALKASFEKAAAGKAELELELGRIRSNAEDTLRSKEQAELEAARQRQLAAEEERRRREAEERVQKSLAAEEEAARQRKAALEEVERLKAKVEEARRLRERAEQESARQLQLAQEAAQKRLQAEEKAHAFAVQQKEQELQQTLQQEQSVLDQLRGEAEAARRAAEEAEEARVQAEREAAQSRRQVEEAERLKQSAEEQAQARAQAQAAAEKLRKEAEQEAARRAQAEQAALRQKQAADAEMEKHKKFAEQTLRQKAQVEQELTTLRLQLEETDHQKNLLDEELQRLKAEATEAARQRSQVEEELFSVRVQMEELSKLKARIEAENRALILRDKDNTQRFLQEEAEKMKQVAEEAARLSVAAQEAARLRQLAEEDLAQQRALAEKMLKEKMQAVQEATRLKAEAELLQQQKELAQEQARRLQEDKEQMAQQLAEETQGFQRTLEAERQRQLEMSAEAERLKLRVAEMSRAQARAEEDAQRFRKQAEEIGEKLHRTELATQEKVTLVQTLEIQRQQSDHDAERLREAIAELEREKEKLQQEAKLLQLKSEEMQTVQQEQLLQETQALQQSFLSEKDSLLQRERFIEQEKAKLEQLFQDEVAKAQQLREEQQRQQQQMEQERQRLVASMEEARRRQHEAEEGVRRKQEELQQLEQQRRQQEELLAEENQRLREQLQLLEEQHRAALAHSEEVTASQVAATKTLPNGRDALDGPAAEAEPEHSFDGLRRKVSAQRLQEAGILSAEELQRLAQGHTTVDELARREDVRHYLQGRSSIAGLLLKATNEKLSVYAALQRQLLSPGTALILLEAQAASGFLLDPVRNRRLTVNEAVKEGVVGPELHHKLLSAERAVTGYKDPYTGQQISLFQAMQKGLIVREHGIRLLEAQIATGGVIDPVHSHRVPVDVAYRRGYFDEEMNRVLADPSDDTKGFFDPNTHENLTYLQLLERCVEDPETGLCLLPLTDKAAKGGELVYTDSEARDVFEKATVSAPFGKFQGKTVTIWEIINSEYFTAEQRRDLLRQFRTGRITVEKIIKIIITVVEEQEQKGRLCFEGLRSLVPAAELLESRVIDRELYQQLQRGERSVRDVAEVDTVRRALRGANVIAGVWLEEAGQKLSIYNALKKDLLPSDMAVALLEAQAGTGHIIDPATSARLTVDEAVRAGLVGPEFHEKLLSAEKAVTGYRDPYTGQSVSLFQALKKGLIPREQGLRLLDAQLSTGGIVDPSKSHRVPLDVACARGCLDEETSRALSAPRADAKAYSDPSTGEPATYGELQQRCRPDQLTGLSLLPLSEKAARARQEELYSELQARETFEKTPVEVPVGGFKGRTVTVWELISSEYFTAEQRQELLRQFRTGKVTVEKVIKILITIVEEVETLRQERLSFSGLRAPVPASELLASGVLSRAQFEQLKDGKTTVKDLSELGSVRTLLQGSGCLAGIYLEDTKEKVSIYEAMRRGLLRATTAALLLEAQAATGFLVDPVRNQRLYVHEAVKAGVVGPELHEQLLSAEKAVTGYRDPYSGSTISLFQAMQKGLVLRQHGIRLLEAQIATGGIIDPVHSHRVPVDVAYQRGYFSEEMNRVLADPSDDTKGFFDPNTHENLTYRQLLERCVEDPETGLRLLPLKGAEKAEVVETTQVYTEEETRRAFEETQIDIPGGGSHGGSTMS
LWEVMQSDLIPEEQRAQLMADFQAGRVTKERMIIIIIEIIEKTEIIRQQGLASYDYVRRRLTAEDLFEARIISLETYNLLREGTRSLREALEAESAWCYLYGTGSVAGVYLPGSRQTLSIYQALKKGLLSAEVARLLLEAQAATGFLLDPVKGERLTVDEAVRKGLVGPELHDRLLSAERAVTGYRDPYTEQTISLFQAMKKELIPTEEALRLLDAQLATGGIVDPRLGFHLPLEVAYQRGYLNKDTHDQLSEPSEVRSYVDPSTDERLSYTQLLRRCRRDDGTGQLLLPLSDARKLTFRGLRKQITMEELVRSQVMDEATALQLREGLTSIEEVTKNLQKFLEGTSCIAGVFVDATKERLSVYQAMKKGIIRPGTAFELLEAQAATGYVIDPIKGLKLTVEEAVRMGIVGPEFKDKLLSAERAVTGYKDPYSGKLISLFQAMKKGLILKDHGIRLLEAQIATGGIIDPEESHRLPVEVAYKRGLFDEEMNEILTDPSDDTKGFFDPNTEENLTYLQLMERCITDPQTGLCLLPLKEKKRERKTSSKSSVRKRRVVIVDPETGKEMSVYEAYRKGLIDHQTYLELSEQECEWEEITISSSDGVVKSMIIDRRSGRQYDIDDAIAKNLIDRSALDQYRAGTLSITEFADMLSGNAGGFRSRSSSVGSSSSYPISPAVSRTQLASWSDPTEETGPVAGILDTETLEKVSITEAMHRNLVDNITGQRLLEAQACTGGIIDPSTGERFPVTDAVNKGLVDKIMVDRINLAQKAFCGFEDPRTKTKMSAAQALKKGWLYYEAGQRFLEVQYLTGGLIEPDTPGRVPLDEALQRGTVDARTAQKLRDVGAYSKYLTCPKTKLKISYKDALDRSMVEEGTGLRLLEAAAQSTKGYYSPYSVSGSGSTAGSRTGSRTGSRAGSRRGSFDATGSGFSMTFSSSSYSSSGYGRRYASGSSASLGGPESAVA', 'accession': 'IPI00398776.3', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 274, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'R', 'pre': 'R', 'protein description': '>IPI:IPI00398776.3|TREMBL:Q6S379;Q96IE3|REFSEQ:NP_958783 Tax_Id=9606 plectin 1 isoform 7', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 265, 'version': '3.15'}], 'PeptideSequence': 'HHTAAFEER', 'calculatedMassToCharge': 1097.5049, 'chargeState': 1, 'experimentalMassToCharge': 1097.503, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=4'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'STFSTNYR', 'Seq': 'MSFTTRSTFSTNYRSLGSVQAPSYGARPVSSAASVYAGAGGSGSRISVSRSTSFRGGMGSGGLATGIAGGLAGMGGIQNEKETMQSLNDRLASYLDRVRSLETENRRLESKIREHLEKKGPQVRDWSHYFKIIEDLRAQIFANTVDNARIVLQIDNARLAADDFRVKYETELAMRQSVENDIHGLRKVIDDTNITRLQLETEIEALKEELLFMKKNHEEEVKGLQAQIASSGLTVEVDAPKSQDLAKIMADIRAQYDELARKNREELDKYWSQQIEESTTVVTTQSAEVGAAETTLTELRRTVQSLEIDLDSMRNLKASLENSLREVEARYALQMEQLNGILLHLESELAQTRAEGQRQAQEYEALLNIKVKLEAEIATYRRLLEDGEDFNLGDALDSSNSMQTIQKTTTRRIVDGKVVSETNDTKVLRH', 'accession': 'IPI00554788.5', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 15, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'S', 'pre': 'R', 'protein description': '>IPI:IPI00554788.5|SWISS-PROT:P05783|ENSEMBL:ENSP00000373487;ENSP00000373489|REFSEQ:NP_000215;NP_954657|H-INV:HIT000280941|VEGA:OTTHUMP00000167632 Tax_Id=9606 Gene_Symbol=KRT18 Keratin, type I cytoskeletal 18', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 7, 'version': '3.15'}], 'PeptideSequence': 'STFSTNYR', 'calculatedMassToCharge': 975.4457, 'chargeState': 1, 'experimentalMassToCharge': 975.446, 'passThreshold': True, 'rank': 1}, {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'MYGPMGTR', 'Seq': 
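# shuffled decoy sequence (SHD00644576.1, FLNA) backing the MYGPMGTR evidence: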
'PSKYTHAFFHGGTVLFTYVDGLIAESLQLVTQSPPAAGVAGMYDGTSPTTTDIADFEGGPVKEDDEQKLLSSCPRVKGSEPKSPDHGACPSKGLNCKEAHAACCAVFYDYGVEVELVSALSATGAVVHKTASCTLDVTYPEDTIDGDPTAGRKTKKCKGSKATPPDYVRGEKVEIAARGPNDKLYYFEVVSREKPLGAFKCGNQKVNIVYEGVVVVGELEAKILDEVQGTTDGVVGSKDTTQINVIVFNLPCKTGSFVCNRAYKCDKPMDAEGPVVAKLNPGRAALVLHHPEQDKGLNLTGGVHNDTPDGVQKGLDITPTSVAPPMKQQVHNVVMRKKDTLDSKAQIATPETAFVNADAEPVSPWSELIVSEKLGENALGTLQPFHQAVDTTVAKPYRAPLAPWGAVVVNEQFVPAVSVNIGAERESEYILQTAGKCIADDLEGITETNKAVTTREDGPSISYIPSGGDAVSYDTQNRVGTTSLSPWKVDRGVSLSVGALKLLRIVWNDFCADTPVCVAGEVSYSTVGRGVSAGVAAPNPPVKAMVSRPIGTVSRIGGSDTRAGMKFSMHTGGIHTTKSKDDGESGSSYGGDTTVIEYGVEGGSTTVQELAIAPVGAYNLSAQPFGVKGRNEETKKTYSFQVAFVFGVLIFPESPRSREIQNPPAPIVYFVKSGDAQDKGGKEKAGTALIPFMVDPASGLKMYGPMGTRNQNDLGQESQGGTPSKTSEDDVSYNETKQLRATGGAGEETSDADSPKRLSEHGASASPMVQMGIGVINLEYGAPSVTPTVEPCRVTEYVSSSDSEGYAGTRVSAGVDQIDVKPLITLQIYPRSLYQVQENKCLATQAAIVYSSADVVPVVVLNKSGHGKFKVDRTDRKQHEVHYGNKCDDTNGDKNIGGNPSTVLESKDCCGISCVPLKVKTGKGVGPVTMSPPMGPAVHGGDIVHSLREVVVVSAVGKSDVVRGAGVAESKVISKRVAVTMADFSYVEKGIGRKGFSETDFGHKGPGDGRVVGSSRKVSSFFVIFQPAGGWKVIEVILHDAIIGGDGQIGGTANPFVGYTLKEASGVDYADVPVIGVLAYYVAELYRDQHGTDQGPGPEGNEITADPGVGTGAEDGRASGYGAGIVTTKTTDSTGCPHRPVQAERLGRKLRSEPVMGVKPGVGGTHCNEDINEKTGIALDVVRAPPAFQPSISGVHTKSSATDVGPDVFDTGGVALVCQPVVGSVVSPYYCPPFGAGVIECYPMSTDPADSSRRNFKPPQRVVTTTEAGDDSGAYVYRYFGPGADMKLPKDSSDVVGNGDCESDPPKLSGNVPQHPEQYSTPAESMRGDPAVSDSLVMANVEFPKCEPDQNYGGHDGKVAPFFDRENEQVDVGLPNDSKGPCVRGVFPLFHYIGSVAPRAVIKCPTTLGPIGQTGGFPGFKSIAYLLIGTGCFQKLLKYVITELVDNCDGCRAKYGSDSTGSFVMEAKGLPIQGFKGKSGTDKCNHEKVDIELTTSGKRPRPPANQAKIEDVLPNAVTDVKECTAVYVDREIELDEIDPHTAQGHRYKRGPKEPIAWSIELSPSGRYMVELLVEISVAFVPPIPIFGTEVAPVGAPAISTSGSPGPAIGRVTLGQKAALPTEPEPSNSYARVDRQEEKRTPEGCGEAAKAHHPPFGINPLAQVIEDQDAYHFDHVGARIGLERGTSCDGESEAIEGPVLYAAPAKAAPSGEVHIARQDVKDPLAGGELHTVLDRCDAVSYESSKSQPLHTITFSKPEPGKSDAMITPPVIEPIVFIWIQRAHFPNGETSLGSSRAFAGKRVSKTYGGSDLRECWIGGGPKKNLKPCQPGEGTGKPITGEPLGMVIVRTLPRWELVKHFLVAPGTVIGTTGHADHVSRPGKPVLIPTERANWPVPRHYTELHDGREVKCTREGVKFHRLDTVNAAVHKNYRHPSSKSSCLWHGVVTKEQAGQISIREFVEDVSPGSIIVDIVNLVAVQGPSSTGSGLGVVHAYPLTYYTVKDIVVALKLSSASFIMVNKKAVVTVCFSDVQSGFKKNVASPPGVLSVSIGLDTYEGDPGIATNWWPSTDEKPCGNVLEDSGPDENHSNPNTAWEFTGLGDTKEGRPNTAFVESEMVLLPVAYVLEQKHPGQGQVGNTMLKDGICEGSTSHVGAVDPSEVQPLGFPGRLITKKFLETFIFCYPSEPIGKGKKVDGKLTSSYSSELAERELVLALAVGIGDAVYYYAFGAGPKIHVFTGDGGGVKNGILVLEGPFHGDEPDLGPFNPGPTQGVERGYSGGEKLATPTVTTYVPELVGLVVVGGTDGIGVHAAVVERWGTYDGKVSPQSVYTTKPGPLSGSDDIHLNVKTVKYLHDEGYTFTIPPHGEKVVEVVEAEQVGRQSDGTTYVTQNNAQAIPVKLRRTAPYGELGHVKLDQVGEEYFGIKFHVNGCDGPGTGPENATYVEQTSGVPPRTPVFGFSVEGTLIQSPQIALNKGRHAVTTIIKQARPFLGGVKGTTDQTPDAFVPHKQTEPLVAACKYVGGWYNHVVPKGKAGEGQGQHRPAVKRANPSDEQAMQSMKKPSPPTPQGEEVSTSVKDLEPESYFQQNVYVNVPAHIGKKLPGIECNQND', 'accession': 'SHD00644576.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 710, 'isDecoy': True, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'N', 'pre': 'K', 'protein description': '>SHD:SHD00644576.1|TREMBL:Q5HY54;Q86TQ3;Q96C61|ENSEMBL:ENSP00000358863|VEGA:OTTHUMP00000064890 Tax_Id=9606 Gene_Symbol=FLNA Filamin A, alpha', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 702, 'version': '3.15'}], 'PeptideSequence': 'MYGPMGTR', 'calculatedMassToCharge': 912.3993, 'chargeState': 1, 'experimentalMassToCharge': 912.29, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 
'spectrumID': 'databasekey=6'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [{'Modification': [{'location': 11, "name": "Oxidation"}], 'PeptideEvidenceRef': [ {'DatabaseName': {'database IPI_human': ''}, 'Modification': [ {'location': 11, "name": "Oxidation"}], 'PeptideSequence': 'TLTLVDTGIGMTK', 'Seq': 'PEEVHHGEEEVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNASDALDKIRYESLTDPSKLDSGKELKIDIIPNPQERTLTLVDTGIGMTKADLINNLGTIAKSGTKAFMEALQAGADISMIGQFGVGFYSAYLVAEKVVVITKHNDDEQYAWESSAGGSFTVRADHGEPIGRGTKVILHLKEDQTEYLEERRVKEVVKKHSQFIGYPITLYLEKEREKEISDDEAEEEKGEKEEEDKDDEEKPKIEDVGSDEEDDSGKDKKKKTKKIKEKYIDQEELNKTKPIWTRNPDDITQEEYGEFYKSLTNDWEDHLAVKHFSVEGQLEFRALLFIPRRAPFDLFENKKKKNNIKLYVRRVFIMDSCDELIPEYLNFIRGVVDSEDLPLNISREMLQQSKILKVIRKNIVKKCLELFSELAEDKENYKKFYEAFSKNLKLGIHEDSTNRRRLSELLRYHTSQSGDEMTSLSEYVSRMKETQKSIYYITGESKEQVANSAFVERVRKRGFEVVYMTEPIDEYCVQQLKEFDGKSLVSVTKEGLELPEDEEEKKKMEESKAKFENLCKLMKEILDKKVEKVTISNRLVSSPCCIVTSTYGWTANMERIMKAQALRDNSTMGYMMAKKHLEINPDHPIVETLRQKAEADKNDKAVKDLVVLLFETALLSSGFSLEDPQTHSNRIYRMIKLGLGIDEDEVAAEEPNAAVPDEIPPLEGDEDASRMEEVD', 'accession': 'IPI00414676.5', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 50, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'A', 'pre': 'R', 'protein description': '>IPI:IPI00414676.5|SWISS-PROT:P08238|TREMBL:Q5T9W7;Q6PK50;Q9H6X9|ENSEMBL:ENSP00000325875|REFSEQ:NP_031381|H-INV:HIT000008644;HIT000032091;HIT000034201;HIT000035963;HIT000036733;HIT000049765;HIT000057726|VEGA:OTTHUMP00000016517;OTTHUMP00000016518;OTTHUMP00000016519 Tax_Id=9606 Heat shock protein HSP 90-beta', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 41, 'version': '3.15'}], 'PeptideSequence': 'TLTLVDTGIGMTK', 'ProteinScape:IntensityCoverage': 0.0, 'ProteinScape:MascotScore': 33.82, 'calculatedMassToCharge': 1365.722015, 'chargeState': 1, 'experimentalMassToCharge': 1365.721, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=11'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'HNDDEQYAWESSAGGSFTVR', 'Seq': 'PEEVHHGEEEVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNASDALDKIRYESLTDPSKLDSGKELKIDIIPNPQERTLTLVDTGIGMTKADLINNLGTIAKSGTKAFMEALQAGADISMIGQFGVGFYSAYLVAEKVVVITKHNDDEQYAWESSAGGSFTVRADHGEPIGRGTKVILHLKEDQTEYLEERRVKEVVKKHSQFIGYPITLYLEKEREKEISDDEAEEEKGEKEEEDKDDEEKPKIEDVGSDEEDDSGKDKKKKTKKIKEKYIDQEELNKTKPIWTRNPDDITQEEYGEFYKSLTNDWEDHLAVKHFSVEGQLEFRALLFIPRRAPFDLFENKKKKNNIKLYVRRVFIMDSCDELIPEYLNFIRGVVDSEDLPLNISREMLQQSKILKVIRKNIVKKCLELFSELAEDKENYKKFYEAFSKNLKLGIHEDSTNRRRLSELLRYHTSQSGDEMTSLSEYVSRMKETQKSIYYITGESKEQVANSAFVERVRKRGFEVVYMTEPIDEYCVQQLKEFDGKSLVSVTKEGLELPEDEEEKKKMEESKAKFENLCKLMKEILDKKVEKVTISNRLVSSPCCIVTSTYGWTANMERIMKAQALRDNSTMGYMMAKKHLEINPDHPIVETLRQKAEADKNDKAVKDLVVLLFETALLSSGFSLEDPQTHSNRIYRMIKLGLGIDEDEVAAEEPNAAVPDEIPPLEGDEDASRMEEVD', 'accession': 'IPI00414676.5', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 50, 'isDecoy': False, 'location': 
'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'A', 'pre': 'K', 'protein description': '>IPI:IPI00414676.5|SWISS-PROT:P08238|TREMBL:Q5T9W7;Q6PK50;Q9H6X9|ENSEMBL:ENSP00000325875|REFSEQ:NP_031381|H-INV:HIT000008644;HIT000032091;HIT000034201;HIT000035963;HIT000036733;HIT000049765;HIT000057726|VEGA:OTTHUMP00000016517;OTTHUMP00000016518;OTTHUMP00000016519 Tax_Id=9606 Heat shock protein HSP 90-beta', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 41, 'version': '3.15'}], 'PeptideSequence': 'HNDDEQYAWESSAGGSFTVR', 'ProteinScape:IntensityCoverage': 0.0, 'ProteinScape:MascotScore': 39.0, 'calculatedMassToCharge': 2256.9515, 'chargeState': 1, 'experimentalMassToCharge': 2256.952, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=12'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'GGMGSGGLATGIAGGLAGMGGIQNEK', 'Seq': 'MSFTTRSTFSTNYRSLGSVQAPSYGARPVSSAASVYAGAGGSGSRISVSRSTSFRGGMGSGGLATGIAGGLAGMGGIQNEKETMQSLNDRLASYLDRVRSLETENRRLESKIREHLEKKGPQVRDWSHYFKIIEDLRAQIFANTVDNARIVLQIDNARLAADDFRVKYETELAMRQSVENDIHGLRKVIDDTNITRLQLETEIEALKEELLFMKKNHEEEVKGLQAQIASSGLTVEVDAPKSQDLAKIMADIRAQYDELARKNREELDKYWSQQIEESTTVVTTQSAEVGAAETTLTELRRTVQSLEIDLDSMRNLKASLENSLREVEARYALQMEQLNGILLHLESELAQTRAEGQRQAQEYEALLNIKVKLEAEIATYRRLLEDGEDFNLGDALDSSNSMQTIQKTTTRRIVDGKVVSETNDTKVLRH', 'accession': 'IPI00554788.5', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 81, 'isDecoy': False, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'E', 'pre': 'R', 'protein description': '>IPI:IPI00554788.5|SWISS-PROT:P05783|ENSEMBL:ENSP00000373487;ENSP00000373489|REFSEQ:NP_000215;NP_954657|H-INV:HIT000280941|VEGA:OTTHUMP00000167632 Tax_Id=9606 Gene_Symbol=KRT18 Keratin, type I cytoskeletal 18', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 55, 'version': '3.15'}], 'PeptideSequence': 'GGMGSGGLATGIAGGLAGMGGIQNEK', 'calculatedMassToCharge': 2261.0939, 'chargeState': 1, 'experimentalMassToCharge': 2261.092, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=35'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'LRSEPVMGVK', 'Seq': 
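# same FLNA decoy sequence (SHD00644576.1), repeated for the LRSEPVMGVK evidence: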
'PSKYTHAFFHGGTVLFTYVDGLIAESLQLVTQSPPAAGVAGMYDGTSPTTTDIADFEGGPVKEDDEQKLLSSCPRVKGSEPKSPDHGACPSKGLNCKEAHAACCAVFYDYGVEVELVSALSATGAVVHKTASCTLDVTYPEDTIDGDPTAGRKTKKCKGSKATPPDYVRGEKVEIAARGPNDKLYYFEVVSREKPLGAFKCGNQKVNIVYEGVVVVGELEAKILDEVQGTTDGVVGSKDTTQINVIVFNLPCKTGSFVCNRAYKCDKPMDAEGPVVAKLNPGRAALVLHHPEQDKGLNLTGGVHNDTPDGVQKGLDITPTSVAPPMKQQVHNVVMRKKDTLDSKAQIATPETAFVNADAEPVSPWSELIVSEKLGENALGTLQPFHQAVDTTVAKPYRAPLAPWGAVVVNEQFVPAVSVNIGAERESEYILQTAGKCIADDLEGITETNKAVTTREDGPSISYIPSGGDAVSYDTQNRVGTTSLSPWKVDRGVSLSVGALKLLRIVWNDFCADTPVCVAGEVSYSTVGRGVSAGVAAPNPPVKAMVSRPIGTVSRIGGSDTRAGMKFSMHTGGIHTTKSKDDGESGSSYGGDTTVIEYGVEGGSTTVQELAIAPVGAYNLSAQPFGVKGRNEETKKTYSFQVAFVFGVLIFPESPRSREIQNPPAPIVYFVKSGDAQDKGGKEKAGTALIPFMVDPASGLKMYGPMGTRNQNDLGQESQGGTPSKTSEDDVSYNETKQLRATGGAGEETSDADSPKRLSEHGASASPMVQMGIGVINLEYGAPSVTPTVEPCRVTEYVSSSDSEGYAGTRVSAGVDQIDVKPLITLQIYPRSLYQVQENKCLATQAAIVYSSADVVPVVVLNKSGHGKFKVDRTDRKQHEVHYGNKCDDTNGDKNIGGNPSTVLESKDCCGISCVPLKVKTGKGVGPVTMSPPMGPAVHGGDIVHSLREVVVVSAVGKSDVVRGAGVAESKVISKRVAVTMADFSYVEKGIGRKGFSETDFGHKGPGDGRVVGSSRKVSSFFVIFQPAGGWKVIEVILHDAIIGGDGQIGGTANPFVGYTLKEASGVDYADVPVIGVLAYYVAELYRDQHGTDQGPGPEGNEITADPGVGTGAEDGRASGYGAGIVTTKTTDSTGCPHRPVQAERLGRKLRSEPVMGVKPGVGGTHCNEDINEKTGIALDVVRAPPAFQPSISGVHTKSSATDVGPDVFDTGGVALVCQPVVGSVVSPYYCPPFGAGVIECYPMSTDPADSSRRNFKPPQRVVTTTEAGDDSGAYVYRYFGPGADMKLPKDSSDVVGNGDCESDPPKLSGNVPQHPEQYSTPAESMRGDPAVSDSLVMANVEFPKCEPDQNYGGHDGKVAPFFDRENEQVDVGLPNDSKGPCVRGVFPLFHYIGSVAPRAVIKCPTTLGPIGQTGGFPGFKSIAYLLIGTGCFQKLLKYVITELVDNCDGCRAKYGSDSTGSFVMEAKGLPIQGFKGKSGTDKCNHEKVDIELTTSGKRPRPPANQAKIEDVLPNAVTDVKECTAVYVDREIELDEIDPHTAQGHRYKRGPKEPIAWSIELSPSGRYMVELLVEISVAFVPPIPIFGTEVAPVGAPAISTSGSPGPAIGRVTLGQKAALPTEPEPSNSYARVDRQEEKRTPEGCGEAAKAHHPPFGINPLAQVIEDQDAYHFDHVGARIGLERGTSCDGESEAIEGPVLYAAPAKAAPSGEVHIARQDVKDPLAGGELHTVLDRCDAVSYESSKSQPLHTITFSKPEPGKSDAMITPPVIEPIVFIWIQRAHFPNGETSLGSSRAFAGKRVSKTYGGSDLRECWIGGGPKKNLKPCQPGEGTGKPITGEPLGMVIVRTLPRWELVKHFLVAPGTVIGTTGHADHVSRPGKPVLIPTERANWPVPRHYTELHDGREVKCTREGVKFHRLDTVNAAVHKNYRHPSSKSSCLWHGVVTKEQAGQISIREFVEDVSPGSIIVDIVNLVAVQGPSSTGSGLGVVHAYPLTYYTVKDIVVALKLSSASFIMVNKKAVVTVCFSDVQSGFKKNVASPPGVLSVSIGLDTYEGDPGIATNWWPSTDEKPCGNVLEDSGPDENHSNPNTAWEFTGLGDTKEGRPNTAFVESEMVLLPVAYVLEQKHPGQGQVGNTMLKDGICEGSTSHVGAVDPSEVQPLGFPGRLITKKFLETFIFCYPSEPIGKGKKVDGKLTSSYSSELAERELVLALAVGIGDAVYYYAFGAGPKIHVFTGDGGGVKNGILVLEGPFHGDEPDLGPFNPGPTQGVERGYSGGEKLATPTVTTYVPELVGLVVVGGTDGIGVHAAVVERWGTYDGKVSPQSVYTTKPGPLSGSDDIHLNVKTVKYLHDEGYTFTIPPHGEKVVEVVEAEQVGRQSDGTTYVTQNNAQAIPVKLRRTAPYGELGHVKLDQVGEEYFGIKFHVNGCDGPGTGPENATYVEQTSGVPPRTPVFGFSVEGTLIQSPQIALNKGRHAVTTIIKQARPFLGGVKGTTDQTPDAFVPHKQTEPLVAACKYVGGWYNHVVPKGKAGEGQGQHRPAVKRANPSDEQAMQSMKKPSPPTPQGEEVSTSVKDLEPESYFQQNVYVNVPAHIGKKLPGIECNQND', 'accession': 'SHD00644576.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 1159, 'isDecoy': True, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'P', 'pre': 'K', 'protein description': '>SHD:SHD00644576.1|TREMBL:Q5HY54;Q86TQ3;Q96C61|ENSEMBL:ENSP00000358863|VEGA:OTTHUMP00000064890 Tax_Id=9606 Gene_Symbol=FLNA Filamin A, alpha', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 1150, 'version': '3.15'}], 'PeptideSequence': 'LRSEPVMGVK', 'calculatedMassToCharge': 1115.6168, 'chargeState': 1, 'experimentalMassToCharge': 1115.617, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 
'spectrumID': 'databasekey=36'}, {'FileFormat': 'Proteinscape spectra', 'SpectrumIDFormat': 'spectrum from database nativeID format', 'SpectrumIdentificationItem': [ {'PeptideEvidenceRef': [{'DatabaseName': {'database IPI_human': ''}, 'PeptideSequence': 'QSDGTTYVTQNNAQAIPVK', 'Seq': 'PSKYTHAFFHGGTVLFTYVDGLIAESLQLVTQSPPAAGVAGMYDGTSPTTTDIADFEGGPVKEDDEQKLLSSCPRVKGSEPKSPDHGACPSKGLNCKEAHAACCAVFYDYGVEVELVSALSATGAVVHKTASCTLDVTYPEDTIDGDPTAGRKTKKCKGSKATPPDYVRGEKVEIAARGPNDKLYYFEVVSREKPLGAFKCGNQKVNIVYEGVVVVGELEAKILDEVQGTTDGVVGSKDTTQINVIVFNLPCKTGSFVCNRAYKCDKPMDAEGPVVAKLNPGRAALVLHHPEQDKGLNLTGGVHNDTPDGVQKGLDITPTSVAPPMKQQVHNVVMRKKDTLDSKAQIATPETAFVNADAEPVSPWSELIVSEKLGENALGTLQPFHQAVDTTVAKPYRAPLAPWGAVVVNEQFVPAVSVNIGAERESEYILQTAGKCIADDLEGITETNKAVTTREDGPSISYIPSGGDAVSYDTQNRVGTTSLSPWKVDRGVSLSVGALKLLRIVWNDFCADTPVCVAGEVSYSTVGRGVSAGVAAPNPPVKAMVSRPIGTVSRIGGSDTRAGMKFSMHTGGIHTTKSKDDGESGSSYGGDTTVIEYGVEGGSTTVQELAIAPVGAYNLSAQPFGVKGRNEETKKTYSFQVAFVFGVLIFPESPRSREIQNPPAPIVYFVKSGDAQDKGGKEKAGTALIPFMVDPASGLKMYGPMGTRNQNDLGQESQGGTPSKTSEDDVSYNETKQLRATGGAGEETSDADSPKRLSEHGASASPMVQMGIGVINLEYGAPSVTPTVEPCRVTEYVSSSDSEGYAGTRVSAGVDQIDVKPLITLQIYPRSLYQVQENKCLATQAAIVYSSADVVPVVVLNKSGHGKFKVDRTDRKQHEVHYGNKCDDTNGDKNIGGNPSTVLESKDCCGISCVPLKVKTGKGVGPVTMSPPMGPAVHGGDIVHSLREVVVVSAVGKSDVVRGAGVAESKVISKRVAVTMADFSYVEKGIGRKGFSETDFGHKGPGDGRVVGSSRKVSSFFVIFQPAGGWKVIEVILHDAIIGGDGQIGGTANPFVGYTLKEASGVDYADVPVIGVLAYYVAELYRDQHGTDQGPGPEGNEITADPGVGTGAEDGRASGYGAGIVTTKTTDSTGCPHRPVQAERLGRKLRSEPVMGVKPGVGGTHCNEDINEKTGIALDVVRAPPAFQPSISGVHTKSSATDVGPDVFDTGGVALVCQPVVGSVVSPYYCPPFGAGVIECYPMSTDPADSSRRNFKPPQRVVTTTEAGDDSGAYVYRYFGPGADMKLPKDSSDVVGNGDCESDPPKLSGNVPQHPEQYSTPAESMRGDPAVSDSLVMANVEFPKCEPDQNYGGHDGKVAPFFDRENEQVDVGLPNDSKGPCVRGVFPLFHYIGSVAPRAVIKCPTTLGPIGQTGGFPGFKSIAYLLIGTGCFQKLLKYVITELVDNCDGCRAKYGSDSTGSFVMEAKGLPIQGFKGKSGTDKCNHEKVDIELTTSGKRPRPPANQAKIEDVLPNAVTDVKECTAVYVDREIELDEIDPHTAQGHRYKRGPKEPIAWSIELSPSGRYMVELLVEISVAFVPPIPIFGTEVAPVGAPAISTSGSPGPAIGRVTLGQKAALPTEPEPSNSYARVDRQEEKRTPEGCGEAAKAHHPPFGINPLAQVIEDQDAYHFDHVGARIGLERGTSCDGESEAIEGPVLYAAPAKAAPSGEVHIARQDVKDPLAGGELHTVLDRCDAVSYESSKSQPLHTITFSKPEPGKSDAMITPPVIEPIVFIWIQRAHFPNGETSLGSSRAFAGKRVSKTYGGSDLRECWIGGGPKKNLKPCQPGEGTGKPITGEPLGMVIVRTLPRWELVKHFLVAPGTVIGTTGHADHVSRPGKPVLIPTERANWPVPRHYTELHDGREVKCTREGVKFHRLDTVNAAVHKNYRHPSSKSSCLWHGVVTKEQAGQISIREFVEDVSPGSIIVDIVNLVAVQGPSSTGSGLGVVHAYPLTYYTVKDIVVALKLSSASFIMVNKKAVVTVCFSDVQSGFKKNVASPPGVLSVSIGLDTYEGDPGIATNWWPSTDEKPCGNVLEDSGPDENHSNPNTAWEFTGLGDTKEGRPNTAFVESEMVLLPVAYVLEQKHPGQGQVGNTMLKDGICEGSTSHVGAVDPSEVQPLGFPGRLITKKFLETFIFCYPSEPIGKGKKVDGKLTSSYSSELAERELVLALAVGIGDAVYYYAFGAGPKIHVFTGDGGGVKNGILVLEGPFHGDEPDLGPFNPGPTQGVERGYSGGEKLATPTVTTYVPELVGLVVVGGTDGIGVHAAVVERWGTYDGKVSPQSVYTTKPGPLSGSDDIHLNVKTVKYLHDEGYTFTIPPHGEKVVEVVEAEQVGRQSDGTTYVTQNNAQAIPVKLRRTAPYGELGHVKLDQVGEEYFGIKFHVNGCDGPGTGPENATYVEQTSGVPPRTPVFGFSVEGTLIQSPQIALNKGRHAVTTIIKQARPFLGGVKGTTDQTPDAFVPHKQTEPLVAACKYVGGWYNHVVPKGKAGEGQGQHRPAVKRANPSDEQAMQSMKKPSPPTPQGEEVSTSVKDLEPESYFQQNVYVNVPAHIGKKLPGIECNQND', 'accession': 'SHD00644576.1', 'decoy DB accession regexp': '^SHD', 'decoy DB generation algorithm': 'PeakQuant.DecoyDatabaseBuilder', 'end': 2397, 'isDecoy': True, 'location': 'file://www.medizinisches-proteom-center.de/DBServer/ipi.HUMAN/3.15/ipi.HUMAN_decoy.fasta', "decoy DB from IPI_human": '', "DB composition target+decoy": '', "decoy DB type shuffle": '', 'numDatabaseSequences': 58099, 'post': 'L', 'pre': 'R', 'protein description': '>SHD:SHD00644576.1|TREMBL:Q5HY54;Q86TQ3;Q96C61|ENSEMBL:ENSP00000358863|VEGA:OTTHUMP00000064890 Tax_Id=9606 Gene_Symbol=FLNA Filamin A, alpha', 'releaseDate': '2006-02-22T09:30:47Z', 'start': 2378, 'version': 
'3.15'}], 'PeptideSequence': 'QSDGTTYVTQNNAQAIPVK', 'calculatedMassToCharge': 2035.0017, 'chargeState': 1, 'experimentalMassToCharge': 2035.002, 'passThreshold': True, 'rank': 1}], 'location': 'proteinscape://www.medizinisches-proteom-center.de/PSServer/Project/Sample/Separation_1D_LC/Fraction_X', 'spectrumID': 'databasekey=40'}]} mzml_mz_array = np.load('test_mzml_mz.npy') mzml_int_array = np.load('test_mzml_intensity.npy') mzml_spectra = [{'MSn spectrum': '', 'base peak intensity': 1471973.875, 'base peak m/z': 810.415283203125, 'count': 2, 'defaultArrayLength': 19914, 'highest observed m/z': 2000.0099466203771, 'id': 'controllerType=0 controllerNumber=1 scan=1', 'index': 0, 'intensity array': makeCA(mzml_int_array), 'lowest observed m/z': 200.00018816645022, 'm/z array': makeCA(mzml_mz_array), 'ms level': 1, 'positive scan': '', 'profile spectrum': '', 'scanList': {'count': 1, 'no combination': '', 'scan': [{'[Thermo Trailer Extra]Monoisotopic M/Z:': 810.4152221679688, 'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]', 'instrumentConfigurationRef': 'IC1', 'preset scan configuration': 1.0, 'scan start time': 0.004935, 'scanWindowList': {'count': 1, 'scanWindow': [{'scan window lower limit': 200.0, 'scan window upper limit': 2000.0}]}}]}, 'total ion current': 15245068.0}, {'MSn spectrum': '', 'base peak intensity': 1471973.875, 'base peak m/z': 810.415283203125, 'count': 2, 'defaultArrayLength': 19914, 'highest observed m/z': 2000.0099466203771, 'id': 'controllerType=0 controllerNumber=1 scan=2', 'index': 1, 'intensity array': makeCA(mzml_int_array), 'lowest observed m/z': 200.00018816645022, 'm/z array': makeCA(mzml_mz_array), 'ms level': 1, 'positive scan': '', 'profile spectrum': '', 'scanList': {'count': 1, 'no combination': '', 'scan': [{'[Thermo Trailer Extra]Monoisotopic M/Z:': 810.4152221679688, 'filter string': 'FTMS + p ESI Full ms [200.00-2000.00]', 'instrumentConfigurationRef': 'IC1', 'preset scan configuration': 1.0, 'scan start time': 0.005935, 'scanWindowList': {'count': 1, 'scanWindow': [{'scan window lower limit': 200.0, 'scan window upper limit': 2000.0}]}}]}, 'total ion current': 15245068.0}] mgf_int = [np.array([73., 44., 67., 291., 54., 49.]), np.array([237., 128., 108., 1007., 974., 79.])] mgf_mz = [np.array([846.6, 846.8, 847.6, 1640.1, 1640.6, 1895.5]), np.array([345.1, 370.2, 460.2, 1673.3, 1674., 1675.3])] mgf_ch = [np.ma.masked_equal([0] * 6, 0), np.array([3., 2., 1., 1., 1., 1.])] mgf_spectra_long = [{'intensity array': makeCA(mgf_int[0]), 'm/z array': makeCA(mgf_mz[0]), 'charge array': makeCA(mgf_ch[0]), 'params': {'charge': [2], 'com': 'Based on http://www.matrixscience.com/help/data_file_help.html', 'it_mods': 'Oxidation (M)', 'itol': '1', 'itolu': 'Da', 'mass': 'Monoisotopic', 'mods': 'Carbamidomethyl (C)', 'pepmass': (983.6, None), 'title': 'Spectrum 1', 'useremail': '[email protected]', 'username': 'Lou Scene'}}, {'intensity array': makeCA(mgf_int[1]), 'm/z array': makeCA(mgf_mz[1]), 'charge array': makeCA(mgf_ch[1]), 'params': {'charge': [2, 3], 'com': 'Based on http://www.matrixscience.com/help/data_file_help.html', 'it_mods': 'Oxidation (M)', 'itol': '1', 'itolu': 'Da', 'mass': 'Monoisotopic', 'mods': 'Carbamidomethyl (C)', 'pepmass': (1084.9, 1234.0), 'rtinseconds': 25.0, 'scans': '3', 'title': 'Spectrum 2', 'useremail': '[email protected]', 'username': 'Lou Scene'}}] mgf_spectra_short = [{'intensity array': makeCA(mgf_int[0]), 'charge array': makeCA(mgf_ch[0]), 'm/z array': makeCA(mgf_mz[0]), 'params': {'pepmass': (983.6, None), 'title': 
'Spectrum 1', 'charge': [2]}}, {'intensity array': makeCA(mgf_int[1]), 'm/z array': makeCA(mgf_mz[1]), 'charge array': makeCA(mgf_ch[1]), 'params': {'pepmass': (1084.9, 1234.0), 'rtinseconds': 25.0, 'scans': '3', 'title': 'Spectrum 2'}}] mgf_spectra_short_no_charges = deepcopy(mgf_spectra_short) for s in mgf_spectra_short_no_charges: del s['charge array'] mgf_spectra_long_no_charges = deepcopy(mgf_spectra_long) for s in mgf_spectra_long_no_charges: del s['charge array'] mgf_spectra_lists = deepcopy(mgf_spectra_long) for s in mgf_spectra_lists: for key in ['m/z array', 'intensity array', 'charge array']: s[key] = list(s[key]) mgf_annotated_int = [np.array([0.013, 0.049, 0.059, 0.012, 0.454, 0.002, 0.956, 0.098]), np.array([0.597, 0.091, 0.063, 0.177, 0.165, 0.038, 0.043, 0.026, 0.213, 0.288, 0.177])] mgf_annotated_mz = [np.array([138.0, 153.5, 375.1, 484.2, 662.8, 698.3, 1130.2, 1395.4]), np.array([156.0, 157.1, 162.5, 211.1, 227.1, 228.1, 418.1, 698.3, 835.3, 949.4, 1112.7])] mgf_annotated_ions = [np.array(["b1+1", "b3+2", "y3+1", "y7+2", "y10+2", "y11+2", "y8+1", "y11+1"]), np.array(["y1+1", "b1+1", "y3+2", "y4+2", "y2+1", "b2+1", "b6+2", "y6+1", "b6+1", "b7+1", "b8+1"])] mgf_spectra_annotated_long = [{'intensity array': makeCA(mgf_annotated_int[0]), 'm/z array': makeCA(mgf_annotated_mz[0]), 'ion array': mgf_annotated_ions[0], 'params': {'charge': [2], 'pepmass': (766.84948, None), 'min_mz': '69.5367320', 'max_mz': '1395.63277', 'title': 'HAPPYNEWYEAR||2'}}, {'intensity array': makeCA(mgf_annotated_int[1]), 'm/z array': makeCA(mgf_annotated_mz[1]), 'ion array': mgf_annotated_ions[1], 'params': {'charge': [3], 'pepmass': (511.56875, None), 'title': 'RAEYWENYPPAH||3', 'min_mz': '69.5367320', 'max_mz': '1395.63277', }}] def decode_dict(d, encoding='utf-8'): """Recursively decode all strings in a dict""" out = {} if isinstance(d, basestring): return d.decode(encoding) if not isinstance(d, dict): return d for k, v in d.items(): newk = k.decode(encoding) if isinstance(v, basestring): out[newk] = v.decode(encoding) elif isinstance(v, dict): out[newk] = decode_dict(v, encoding) elif isinstance(v, list): out[newk] = [decode_dict(i) for i in v] else: out[newk] = v return out mgf_spectra_long_decoded = [decode_dict(s) for s in mgf_spectra_long ] if sys.version_info.major == 2 else mgf_spectra_long mgf_spectra_short_decoded = [decode_dict(s) for s in mgf_spectra_short ] if sys.version_info.major == 2 else mgf_spectra_short tandem_spectra = [{'act': '0', 'expect': 1.5e-07, 'fI': 48232.2, 'id': '11745', 'maxI': 4823220.0, 'mh': 800.418907, 'protein': [{'expect': -989.8, 'file': {'URL': '/home/lab006/fasta/uniprot_sprot.fasta', 'type': 'peptide'}, 'id': '11745.1', 'label': 'sp|P35579|MYH9_HUMAN Myosin-9 OS=Homo sapiens GN=MYH9 PE=1 SV=4', 'note': 'sp|P35579|MYH9_HUMAN Myosin-9 OS=Homo sapiens GN=MYH9 PE=1 SV=4', 'peptide': {'b_ions': 2, 'b_score': 8.2, 'delta': 0.0, 'end': 14, 'expect': 1.5e-07, 'hyperscore': 16.4, 'id': '11745.1.1', 'mh': 800.4189, 'missed_cleavages': 0, 'nextscore': 16.4, 'post': 'NFIN', 'pre': 'AADK', 'seq': 'YLYVDK', 'start': 9, 'y_ions': 3, 'y_score': 11.8}, 'sumI': 8.29, 'uid': '249282'}, {'expect': -784.7, 'file': {'URL': '/home/lab006/fasta/uniprot_sprot.fasta', 'type': 'peptide'}, 'id': '11745.2', 'label': 'sp|Q258K2|MYH9_CANFA Myosin-9 OS=Canis familiaris GN=MYH9 PE=2 SV=1', 'note': 'sp|Q258K2|MYH9_CANFA Myosin-9 OS=Canis familiaris GN=MYH9 PE=2 SV=1', 'peptide': {'b_ions': 2, 'b_score': 8.2, 'delta': 0.0, 'end': 14, 'expect': 1.5e-07, 'hyperscore': 16.4, 'id': '11745.2.1', 
'mh': 800.4189, 'missed_cleavages': 0, 'nextscore': 16.4, 'post': 'NFIN', 'pre': 'AADK', 'seq': 'YLYVDK', 'start': 9, 'y_ions': 3, 'y_score': 11.8}, 'sumI': 8.24, 'uid': '249280'}, {'expect': -660.7, 'file': {'URL': '/home/lab006/fasta/uniprot_sprot.fasta', 'type': 'peptide'}, 'id': '11745.3', 'label': 'sp|Q8VDD5|MYH9_MOUSE Myosin-9 OS=Mus musculus GN=Myh9 PE=1 SV=4', 'note': 'sp|Q8VDD5|MYH9_MOUSE Myosin-9 OS=Mus musculus GN=Myh9 PE=1 SV=4', 'peptide': {'b_ions': 2, 'b_score': 8.2, 'delta': 0.0, 'end': 14, 'expect': 1.5e-07, 'hyperscore': 16.4, 'id': '11745.3.1', 'mh': 800.4189, 'missed_cleavages': 0, 'nextscore': 16.4, 'post': 'NFIN', 'pre': 'AADK', 'seq': 'YLYVDK', 'start': 9, 'y_ions': 3, 'y_score': 11.8}, 'sumI': 8.19, 'uid': '249283'}, {'expect': -654.3, 'file': {'URL': '/home/lab006/fasta/uniprot_sprot.fasta', 'type': 'peptide'}, 'id': '11745.4', 'label': 'sp|Q62812|MYH9_RAT Myosin-9 OS=Rattus norvegicus GN=Myh9 PE=1 SV=3', 'note': 'sp|Q62812|MYH9_RAT Myosin-9 OS=Rattus norvegicus GN=Myh9 PE=1 SV=3', 'peptide': {'b_ions': 2, 'b_score': 8.2, 'delta': 0.0, 'end': 14, 'expect': 1.5e-07, 'hyperscore': 16.4, 'id': '11745.4.1', 'mh': 800.4189, 'missed_cleavages': 0, 'nextscore': 16.4, 'post': 'NFIN', 'pre': 'AADK', 'seq': 'YLYVDK', 'start': 9, 'y_ions': 3, 'y_score': 11.8}, 'sumI': 8.15, 'uid': '249284'}, {'expect': -463.3, 'file': {'URL': '/home/lab006/fasta/uniprot_sprot.fasta', 'type': 'peptide'}, 'id': '11745.5', 'label': 'sp|P14105|MYH9_CHICK Myosin-9 OS=Gallus gallus GN=MYH9 PE=2 SV=1', 'note': 'sp|P14105|MYH9_CHICK Myosin-9 OS=Gallus gallus GN=MYH9 PE=2 SV=1', 'peptide': {'b_ions': 2, 'b_score': 8.2, 'delta': 0.0, 'end': 14, 'expect': 1.5e-07, 'hyperscore': 16.4, 'id': '11745.5.1', 'mh': 800.4189, 'missed_cleavages': 0, 'nextscore': 16.4, 'post': 'NIIN', 'pre': 'DADK', 'seq': 'YLYVDK', 'start': 9, 'y_ions': 3, 'y_score': 11.8}, 'sumI': 8.05, 'uid': '249281'}], 'rt': 42.0, 'sumI': 6.93, 'support': {'fragment ion mass spectrum': {'M+H': 800.419, 'Xdata': {'units': 'MASSTOCHARGERATIO', 'values': makeCA(np.array( [174.759, 249.16, 262.135, 277.155, 361.208, 378.203, 401.716, 440.218, 472.624, 495.27, 504.276, 524.271, 526.277, 637.355]))}, 'Ydata': {'units': 'UNKNOWN', 'values': makeCA(np.array( [2., 22., 3., 13., 4., 1., 5., 2., 2., 2., 10., 100., 4., 6.]))}, 'charge': 2, 'id': '11745', 'label': '11745.spectrum', 'note': 'YLYVDK'}, 'supporting data': {'b ion histogram': {'Xdata': {'units': 'number of ions', 'values': makeCA( np.array([0, 1, 2, 3]))}, 'Ydata': {'units': 'counts', 'values': makeCA( np.array([346, 64, 21, 0]))}, 'label': '11745.b'}, 'convolution survival function': {'Xdata': {'units': 'score', 'values': makeCA( np.array( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]))}, 'Ydata': {'units': 'counts', 'values': makeCA( np.array( [351, 351, 351, 309, 99, 20, 1, 0, 0, 0, 80, 0, 43, 0]))}, 'label': '11745.convolute'}, 'hyperscore expectation function': {'Xdata': {'units': 'score', 'values': makeCA( np.array( [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]))}, 'Ydata': {'units': 'counts', 'values': makeCA( np.array( [350, 350, 350, 308, 98, 19, 0, 81, 0, 0, 80, 3, 0, 47, 4, 0, 10, 0]))}, 'a0': 4.84111, 'a1': -0.712472, 'label': '11745.hyper'}, 'y ion histogram': {'Xdata': {'units': 'number of ions', 'values': makeCA( np.array([0, 1, 2, 3, 4]))}, 'Ydata': {'units': 'counts', 'values': makeCA( np.array([2, 381, 38, 10, 0]))}, 'label': '11745.y'}}}, 'z': 2}] features = [{'FWHM': 8.53579425811768, 'charge': 1, 'convexhull': [{'nr': 0, 'pt': [{'x': 4020.447, 'y': 
489.778198242188}, {'x': 4026.53226, 'y': 489.778198242188}, {'x': 4026.53226, 'y': 489.778747558594}, {'x': 4020.447, 'y': 489.778747558594}]}, {'nr': 1, 'pt': [{'x': 4019.229, 'y': 490.779388427734}, {'x': 4026.53226, 'y': 490.779388427734}, {'x': 4026.53226, 'y': 490.780151367188}, {'x': 4019.229, 'y': 490.780151367188}]}], 'id': 'f_16832770593554750385', 'intensity': 666996000.0, 'label': 0.0, 'overallquality': 0.791454, 'position': [{'dim': 0, 'position': 4022.03514673128}, {'dim': 1, 'position': 489.778444938208}], 'quality': [{'dim': 0, 'quality': 0}, {'dim': 1, 'quality': 0}], 'score_correlation': 0.991616770766787, 'score_fit': 0.631694771390403, 'spectrum_index': 4, 'spectrum_native_id': 'controllerType=0 controllerNumber=1 scan=29899'}, {'FWHM': 7.54492568969727, 'charge': 3, 'convexhull': [{'nr': 0, 'pt': [{'x': 4020.447, 'y': 639.657653808594}, {'x': 4026.53226, 'y': 639.657653808594}, {'x': 4026.53226, 'y': 639.658386230469}, {'x': 4020.447, 'y': 639.658386230469}]}, {'nr': 1, 'pt': [{'x': 4020.447, 'y': 639.992004394531}, {'x': 4026.53226, 'y': 639.992004394531}, {'x': 4026.53226, 'y': 639.99267578125}, {'x': 4020.447, 'y': 639.99267578125}]}, {'nr': 2, 'pt': [{'x': 4020.447, 'y': 640.325988769531}, {'x': 4026.53226, 'y': 640.325988769531}, {'x': 4026.53226, 'y': 640.326721191406}, {'x': 4020.447, 'y': 640.326721191406}]}, {'nr': 3, 'pt': [{'x': 4020.447, 'y': 640.659912109375}, {'x': 4026.53226, 'y': 640.659912109375}, {'x': 4026.53226, 'y': 640.660400390625}, {'x': 4020.447, 'y': 640.660400390625}]}, {'nr': 4, 'pt': [{'x': 4020.447, 'y': 640.993469238281}, {'x': 4026.53226, 'y': 640.993469238281}, {'x': 4026.53226, 'y': 640.995056152344}, {'x': 4020.447, 'y': 640.995056152344}]}, {'nr': 5, 'pt': [{'x': 4020.447, 'y': 641.328308105469}, {'x': 4026.53226, 'y': 641.328308105469}, {'x': 4026.53226, 'y': 641.331237792969}, {'x': 4020.447, 'y': 641.331237792969}]}], 'id': 'f_189396504510444007', 'intensity': 345095000.0, 'label': 82.0, 'overallquality': 0.945634, 'position': [{'dim': 0, 'position': 4022.97774692935}, {'dim': 1, 'position': 639.656521928634}], 'quality': [{'dim': 0, 'quality': 0}, {'dim': 1, 'quality': 0}], 'score_correlation': 0.994121041144976, 'score_fit': 0.899512445523661, 'spectrum_index': 5, 'spectrum_native_id': 'controllerType=0 controllerNumber=1 scan=29910'}] pairs = [{'to': 3333.35149999998, 'from': 3329.2416}, {'to': 3328.47130000002, 'from': 3324.2217}, {'to': 3338.57140000002, 'from': 3334.7217}] mzxml_spectra = [ {'polarity': '+', 'id': '19', 'basePeakIntensity': 120053.0, 'highMz': 1800.0, 'msLevel': 1, 'totIonCurrent': 16675500.0, 'peaksCount': 1313, 'num': '19', 'basePeakMz': 445.347, 'lowMz': 400.0, 'intensity array': makeCA( [11411.0, 24104.0, 26457.0, 21981.0, 6883.0, 12824.0, 21671.0, 21360.0, 15150.0, 5142.0, 19249.0, 9117.0, 11344.0, 6406.0, 13941.0, 18443.0, 10493.0, 18646.0, 16825.0, 16874.0, 15641.0, 21131.0, 22189.0, 5183.0, 16021.0, 13994.0, 7778.0, 19195.0, 14369.0, 15403.0, 21316.0, 41381.0, 39128.0, 34936.0, 29871.0, 18967.0, 20278.0, 18010.0, 14258.0, 19970.0, 12478.0, 10118.0, 16659.0, 13724.0, 12649.0, 6376.0, 24213.0, 7070.0, 120053.0, 58799.0, 61671.0, 54718.0, 28383.0, 23848.0, 7399.0, 6297.0, 14041.0, 15005.0, 8792.0, 11082.0, 12722.0, 7377.0, 11877.0, 21741.0, 12004.0, 29042.0, 16966.0, 19835.0, 18742.0, 9041.0, 27173.0, 21811.0, 11173.0, 16283.0, 16579.0, 12884.0, 10654.0, 13874.0, 16176.0, 12204.0, 14599.0, 10778.0, 20339.0, 14452.0, 15418.0, 21373.0, 21895.0, 8022.0, 20560.0, 18443.0, 12916.0, 11245.0, 22588.0, 11455.0, 
751.0, 8924.0, 21950.0, 19053.0, 14923.0, 10394.0, 14730.0, 17218.0, 19043.0, 27353.0, 12905.0, 21255.0, 13775.0, 6148.0, 31961.0, 36355.0, 27402.0, 18733.0, 10916.0, 24126.0, 13597.0, 27047.0, 11050.0, 9832.0, 10881.0, 8122.0, 13121.0, 20521.0, 12938.0, 13500.0, 20867.0, 17165.0, 28059.0, 21600.0, 35282.0, 17374.0, 25068.0, 16965.0, 11691.0, 25549.0, 15092.0, 16639.0, 12203.0, 15932.0, 15245.0, 10202.0, 24397.0, 18726.0, 20938.0, 18502.0, 11599.0, 24470.0, 27960.0, 28876.0, 17897.0, 18927.0, 17035.0, 13465.0, 16730.0, 4832.0, 14885.0, 12357.0, 15071.0, 23074.0, 8629.0, 29741.0, 5200.0, 26115.0, 17271.0, 10191.0, 14820.0, 15604.0, 14918.0, 15566.0, 14833.0, 16632.0, 21396.0, 7266.0, 5278.0, 23519.0, 28788.0, 11377.0, 17790.0, 15389.0, 14530.0, 14805.0, 10846.0, 19968.0, 29468.0, 24502.0, 20271.0, 9337.0, 15921.0, 14262.0, 20108.0, 9629.0, 20790.0, 9363.0, 17728.0, 22333.0, 23232.0, 15757.0, 21556.0, 19224.0, 15883.0, 28759.0, 14968.0, 22996.0, 15180.0, 14335.0, 10336.0, 17455.0, 17417.0, 15072.0, 14887.0, 20360.0, 10668.0, 23430.0, 23263.0, 11970.0, 14183.0, 28379.0, 24917.0, 9903.0, 24070.0, 17788.0, 20761.0, 18110.0, 26427.0, 28324.0, 15582.0, 23429.0, 25959.0, 14056.0, 28299.0, 20705.0, 8229.0, 24308.0, 15135.0, 19872.0, 23567.0, 11376.0, 14067.0, 5692.0, 18327.0, 32557.0, 13156.0, 25174.0, 21468.0, 25614.0, 12090.0, 35738.0, 8162.0, 2230.0, 18652.0, 22763.0, 16874.0, 21453.0, 15238.0, 17615.0, 5577.0, 25976.0, 19623.0, 16849.0, 20520.0, 26799.0, 20745.0, 13276.0, 14254.0, 25777.0, 19437.0, 18389.0, 31165.0, 20444.0, 20972.0, 26061.0, 18309.0, 17448.0, 13604.0, 33785.0, 32109.0, 25213.0, 29280.0, 15551.0, 14529.0, 18876.0, 15021.0, 16654.0, 8115.0, 14176.0, 3845.0, 28960.0, 23626.0, 27749.0, 22988.0, 13845.0, 39584.0, 18559.0, 24574.0, 30149.0, 19535.0, 25441.0, 25216.0, 19921.0, 26871.0, 22542.0, 15680.0, 29222.0, 18918.0, 21871.0, 20877.0, 29709.0, 28149.0, 33227.0, 18236.0, 35461.0, 7444.0, 20046.0, 25515.0, 16744.0, 10234.0, 25093.0, 32090.0, 25907.0, 22234.0, 23100.0, 25098.0, 17946.0, 24657.0, 24994.0, 19046.0, 17935.0, 33363.0, 24092.0, 26359.0, 29935.0, 23926.0, 11813.0, 22773.0, 18145.0, 21137.0, 24471.0, 26509.0, 6985.0, 30560.0, 5656.0, 24421.0, 26001.0, 19333.0, 15914.0, 33926.0, 4729.0, 15490.0, 23467.0, 22454.0, 14445.0, 17729.0, 14513.0, 16846.0, 26771.0, 22038.0, 19101.0, 15749.0, 25512.0, 18934.0, 15818.0, 34068.0, 12938.0, 20035.0, 13928.0, 13751.0, 36530.0, 31069.0, 15567.0, 17604.0, 20921.0, 21113.0, 16819.0, 18289.0, 20724.0, 36578.0, 17466.0, 23061.0, 19330.0, 21268.0, 20903.0, 18340.0, 26527.0, 18200.0, 30517.0, 12496.0, 15373.0, 10099.0, 26572.0, 15534.0, 14725.0, 24366.0, 14791.0, 24245.0, 2347.0, 23615.0, 19999.0, 21966.0, 25961.0, 21287.0, 20494.0, 16319.0, 11968.0, 21764.0, 29111.0, 20500.0, 19182.0, 12419.0, 6752.0, 35981.0, 11359.0, 33828.0, 17990.0, 19480.0, 17326.0, 26179.0, 19991.0, 35022.0, 21962.0, 18293.0, 11745.0, 21443.0, 18498.0, 16856.0, 13911.0, 21427.0, 27797.0, 13135.0, 11573.0, 20013.0, 21824.0, 40721.0, 8876.0, 11736.0, 17404.0, 12242.0, 20668.0, 22629.0, 14415.0, 24468.0, 20045.0, 21601.0, 13611.0, 20452.0, 9472.0, 28138.0, 25649.0, 17563.0, 11449.0, 31620.0, 33606.0, 12953.0, 17304.0, 15832.0, 16587.0, 18759.0, 18818.0, 27442.0, 21765.0, 19069.0, 28825.0, 14218.0, 18246.0, 16948.0, 34438.0, 15412.0, 16274.0, 17358.0, 25344.0, 12208.0, 27550.0, 13035.0, 18310.0, 23319.0, 29225.0, 14338.0, 22462.0, 7620.0, 20663.0, 19790.0, 30480.0, 10846.0, 17997.0, 17069.0, 20419.0, 15792.0, 26581.0, 24764.0, 28308.0, 9438.0, 36253.0, 28219.0, 27562.0, 
32185.0, 10668.0, 17971.0, 9369.0, 16038.0, 7169.0, 21699.0, 21310.0, 11058.0, 15625.0, 13411.0, 17925.0, 20257.0, 19349.0, 31367.0, 24044.0, 21245.0, 26513.0, 26599.0, 24617.0, 11083.0, 24603.0, 12578.0, 14642.0, 19505.0, 20384.0, 21082.0, 13038.0, 8482.0, 23670.0, 18882.0, 24236.0, 25244.0, 22897.0, 12033.0, 23165.0, 10023.0, 22689.0, 21589.0, 17377.0, 15767.0, 15941.0, 24538.0, 15693.0, 22880.0, 24012.0, 11444.0, 32171.0, 10975.0, 17465.0, 16607.0, 17831.0, 22604.0, 14539.0, 15862.0, 10581.0, 23862.0, 28905.0, 25011.0, 36643.0, 25362.0, 8760.0, 12123.0, 12017.0, 12345.0, 16356.0, 17289.0, 18242.0, 16203.0, 27144.0, 17031.0, 13800.0, 14856.0, 22373.0, 9468.0, 26171.0, 15812.0, 12296.0, 13598.0, 24062.0, 24733.0, 27368.0, 14258.0, 20907.0, 34741.0, 9031.0, 11547.0, 16652.0, 23376.0, 22739.0, 14860.0, 21003.0, 12140.0, 12299.0, 17659.0, 13463.0, 11638.0, 11103.0, 14331.0, 9036.0, 14708.0, 13808.0, 9478.0, 18252.0, 7318.0, 13317.0, 11962.0, 18399.0, 15630.0, 26869.0, 9493.0, 19661.0, 9151.0, 17478.0, 15717.0, 11947.0, 25870.0, 10619.0, 4967.0, 4407.0, 23679.0, 13463.0, 28370.0, 21746.0, 10257.0, 18819.0, 18331.0, 15616.0, 15391.0, 11121.0, 9006.0, 28670.0, 14547.0, 12729.0, 24116.0, 18969.0, 14256.0, 12762.0, 22671.0, 34569.0, 16841.0, 16448.0, 11357.0, 11932.0, 10505.0, 21017.0, 13939.0, 10841.0, 18196.0, 13169.0, 10237.0, 11095.0, 15895.0, 13967.0, 13244.0, 16045.0, 15984.0, 14962.0, 9562.0, 29133.0, 3777.0, 19409.0, 17706.0, 16988.0, 7733.0, 21684.0, 5061.0, 6130.0, 17908.0, 25642.0, 13197.0, 12499.0, 13419.0, 10540.0, 12168.0, 16621.0, 15579.0, 16498.0, 6945.0, 13174.0, 12525.0, 11536.0, 13709.0, 17849.0, 9068.0, 23164.0, 16403.0, 9277.0, 33817.0, 32299.0, 10936.0, 8196.0, 9499.0, 14882.0, 25389.0, 8486.0, 15582.0, 8486.0, 8900.0, 9528.0, 6881.0, 17379.0, 10573.0, 20301.0, 19891.0, 9075.0, 14453.0, 26268.0, 11892.0, 14169.0, 15331.0, 23524.0, 8599.0, 13800.0, 19973.0, 17331.0, 13295.0, 9814.0, 7919.0, 5806.0, 10066.0, 12183.0, 7033.0, 20926.0, 19987.0, 20325.0, 4084.0, 7169.0, 6286.0, 16727.0, 5308.0, 15225.0, 8333.0, 7509.0, 16330.0, 18430.0, 9696.0, 10567.0, 10294.0, 13527.0, 17464.0, 4806.0, 9731.0, 14552.0, 7373.0, 14384.0, 13841.0, 18365.0, 13729.0, 8981.0, 8211.0, 18784.0, 16519.0, 9166.0, 8857.0, 4515.0, 13507.0, 4007.0, 11951.0, 5867.0, 19044.0, 10793.0, 5736.0, 14061.0, 19776.0, 1852.0, 7836.0, 3839.0, 3497.0, 12939.0, 400.0, 17525.0, 9941.0, 10136.0, 7386.0, 2874.0, 11984.0, 9659.0, 13837.0, 14899.0, 16949.0, 11096.0, 16434.0, 3696.0, 10241.0, 8483.0, 14962.0, 3763.0, 13840.0, 4172.0, 8208.0, 11448.0, 16043.0, 1414.0, 7910.0, 3867.0, 9856.0, 8235.0, 12281.0, 5712.0, 12212.0, 11185.0, 6827.0, 14356.0, 8187.0, 8840.0, 11619.0, 10035.0, 14740.0, 12464.0, 5509.0, 22634.0, 12178.0, 7228.0, 15923.0, 4476.0, 4031.0, 3449.0, 11040.0, 5726.0, 9838.0, 18725.0, 4204.0, 53477.0, 16037.0, 10616.0, 5125.0, 10235.0, 27880.0, 9318.0, 16184.0, 12630.0, 12914.0, 6321.0, 2221.0, 7615.0, 13992.0, 11813.0, 5618.0, 3515.0, 11687.0, 2.0, 9343.0, 5264.0, 17692.0, 5618.0, 9575.0, 2029.0, 13811.0, 13912.0, 5854.0, 2278.0, 9210.0, 8293.0, 5614.0, 2890.0, 14638.0, 8567.0, 8570.0, 9787.0, 17110.0, 7276.0, 13879.0, 7860.0, 18351.0, 6592.0, 8735.0, 6256.0, 4716.0, 5843.0, 7464.0, 5733.0, 10935.0, 9816.0, 2096.0, 2324.0, 6874.0, 11377.0, 12525.0, 13453.0, 4436.0, 9483.0, 5155.0, 6423.0, 5625.0, 12663.0, 7164.0, 4484.0, 6059.0, 9746.0, 6337.0, 15404.0, 4587.0, 11491.0, 6498.0, 6004.0, 20370.0, 8741.0, 6085.0, 12448.0, 10631.0, 8891.0, 11267.0, 13932.0, 9184.0, 10788.0, 2770.0, 8854.0, 6306.0, 8784.0, 1670.0, 
6179.0, 5763.0, 11338.0, 8038.0, 9710.0, 4552.0, 6810.0, 7162.0, 3152.0, 8581.0, 14447.0, 5790.0, 3117.0, 6933.0, 8781.0, 10867.0, 5000.0, 9507.0, 4926.0, 5738.0, 3467.0, 8971.0, 6728.0, 3417.0, 4001.0, 13179.0, 4545.0, 7287.0, 13181.0, 2307.0, 12618.0, 1.0, 5258.0, 7972.0, 10163.0, 8529.0, 7788.0, 3281.0, 3374.0, 4801.0, 7489.0, 2099.0, 3978.0, 6641.0, 9788.0, 10189.0, 7099.0, 9885.0, 5638.0, 8278.0, 10031.0, 7038.0, 10246.0, 10104.0, 10057.0, 6767.0, 7945.0, 4618.0, 3428.0, 5641.0, 2037.0, 1582.0, 5013.0, 9966.0, 8718.0, 5153.0, 3545.0, 6190.0, 3095.0, 3809.0, 7869.0, 293.0, 3450.0, 5198.0, 4633.0, 2466.0, 2263.0, 6963.0, 6210.0, 2847.0, 1888.0, 4740.0, 4613.0, 4702.0, 4492.0, 12312.0, 4014.0, 1.0, 4880.0, 4372.0, 9673.0, 5895.0, 8190.0, 5008.0, 11133.0, 3957.0, 5351.0, 4171.0, 9522.0, 2626.0, 2856.0, 5869.0, 8243.0, 6736.0, 1661.0, 5160.0, 2544.0, 1735.0, 1772.0, 6673.0, 2560.0, 693.0, 4590.0, 6434.0, 3894.0, 3634.0, 11300.0, 4903.0, 2021.0, 5122.0, 1705.0, 2315.0, 9875.0, 6988.0, 5342.0, 2985.0, 1296.0, 786.0, 330.0, 3855.0, 6084.0, 695.0, 3100.0, 955.0, 3332.0, 2108.0, 3055.0, 6827.0, 9644.0, 2350.0, 3803.0, 7983.0, 3374.0, 4991.0, 4201.0, 9586.0, 1606.0, 9359.0, 3386.0, 6139.0, 3641.0, 1365.0, 5385.0, 8636.0, 3568.0, 7654.0, 3020.0, 2700.0, 6707.0, 1364.0, 5598.0, 1235.0, 8451.0, 6638.0, 3447.0, 2149.0, 2724.0, 1684.0, 2775.0, 3842.0, 4948.0, 1292.0, 4620.0, 9864.0, 3501.0, 2737.0, 2424.0, 1691.0, 2409.0, 1350.0, 3366.0, 2743.0, 1163.0, 1488.0, 4977.0, 2517.0, 3052.0, 2825.0, 2760.0, 640.0, 2051.0, 1832.0, 2580.0, 5121.0, 4174.0, 3054.0, 5413.0, 3292.0, 2288.0, 2462.0, 3282.0, 8386.0, 3307.0, 4024.0, 2277.0, 3530.0, 1931.0, 2213.0, 939.0, 2600.0, 5895.0, 2109.0, 5930.0, 392.0, 2401.0, 5965.0, 1602.0, 6670.0, 3591.0, 2930.0, 2464.0, 4300.0, 5849.0, 3491.0, 393.0, 1652.0, 2978.0, 1126.0, 1246.0, 7694.0, 2327.0, 2113.0, 2263.0, 4199.0, 4334.0, 1676.0, 4168.0, 4340.0, 740.0, 5077.0, 1669.0, 1868.0, 1663.0, 836.0, 5071.0, 2316.0, 6424.0, 3388.0, 2212.0, 3921.0, 880.0, 3232.0, 6874.0, 2166.0, 1034.0, 4562.0, 1104.0, 1175.0, 2570.0, 899.0, 2255.0, 5060.0, 671.0, 2382.0, 2179.0, 1032.0, 4165.0, 3924.0, 1548.0, 3790.0, 851.0, 2603.0, 472.0, 1848.0, 2210.0, 1252.0, 3452.0, 743.0, 1546.0, 1548.0, 4476.0, 886.0, 824.0, 1849.0, 4487.0, 2980.0, 1864.0, 2509.0, 1128.0, 2915.0, 4321.0, 6325.0, 2719.0, 1025.0, 6508.0, 3149.0, 4839.0, 1738.0, 4961.0, 361.0, 1765.0, 3128.0, 372.0, 1065.0, 1253.0, 3452.0, 3177.0, 745.0, 1382.0, 2388.0, 3679.0, 3528.0, 1196.0, 1869.0, 2909.0, 3715.0, 5387.0, 953.0, 1265.0, 1484.0, 2505.0, 619.0, 312.0, 2589.0, 6526.0, 1264.0, 1269.0, 3158.0, 4040.0, 1537.0, 3303.0, 1479.0, 1373.0, 3826.0, 2270.0, 2706.0, 1421.0, 2156.0, 4042.0, 5246.0, 1138.0, 1019.0, 1073.0, 884.0, 633.0, 1937.0, 5526.0, 3592.0, 2725.0, 1890.0, 1922.0, 2358.0, 546.0, 5221.0, 649.0, 465.0, 671.0, 1101.0, 3990.0, 890.0, 3254.0, 1686.0, 1074.0, 894.0, 1431.0, 5398.0, 1122.0, 5231.0, 3673.0, 2565.0, 636.0, 642.0, 2411.0, 5724.0, 817.0, 1528.0, 1087.0, 2405.0, 776.0, 2796.0, 3874.0, 933.0, 10114.0, 2131.0, 3491.0, 710.0, 1991.0, 1256.0, 1673.0, 616.0, 513.0, 2674.0, 1551.0, 4945.0, 993.0, 3750.0, 407.0, 4520.0, 834.0, 3829.0, 1575.0, 382.0, 2086.0, 1848.0, 1175.0, 1855.0, 932.0, 828.0, 897.0, 3686.0]), 'm/z array': makeCA( [400.38958740234375, 401.03533935546875, 402.035888671875, 403.2169189453125, 403.97320556640625, 404.91033935546875, 405.83642578125, 407.06207275390625, 407.87646484375, 408.66229248046875, 409.37652587890625, 410.37713623046875, 411.50885009765625, 412.57891845703125, 413.4959716796875, 
414.520263671875, 415.25408935546875, 415.918212890625, 416.7078857421875, 417.9366455078125, 418.97564697265625, 419.6207275390625, 420.6142578125, 421.38037109375, 422.5335693359375, 423.6138916015625, 424.50970458984375, 425.468505859375, 426.224365234375, 427.05621337890625, 428.4556884765625, 429.41375732421875, 430.16998291015625, 431.1475830078125, 432.0792236328125, 432.94671630859375, 433.82623291015625, 434.9476318359375, 435.899169921875, 436.917236328125, 438.03265380859375, 439.1148681640625, 440.152099609375, 440.96136474609375, 441.72412109375, 442.4854736328125, 443.546630859375, 444.3160400390625, 445.3466796875, 446.29937744140625, 447.34368896484375, 448.51068115234375, 449.63824462890625, 450.67681884765625, 451.4376220703125, 452.040283203125, 452.69329833984375, 453.514892578125, 454.34765625, 455.23687744140625, 456.094970703125, 456.83660888671875, 457.56396484375, 458.7027587890625, 459.7601318359375, 460.78106689453125, 461.95208740234375, 462.71435546875, 463.43890380859375, 464.15802001953125, 465.26104736328125, 466.5059814453125, 467.46826171875, 468.418212890625, 469.4296875, 470.56182861328125, 471.5120849609375, 472.4197998046875, 473.44354248046875, 474.4901123046875, 475.31768798828125, 476.254638671875, 477.11016845703125, 478.36065673828125, 479.27020263671875, 480.54595947265625, 481.48443603515625, 482.56103515625, 483.2381591796875, 484.52655029296875, 485.4844970703125, 486.3204345703125, 487.4210205078125, 488.37890625, 489.0980224609375, 489.71588134765625, 490.71881103515625, 492.0147705078125, 493.04107666015625, 494.34246826171875, 495.52935791015625, 496.4515380859375, 497.218505859375, 498.20782470703125, 499.23138427734375, 500.26983642578125, 501.19921875, 502.0230712890625, 502.9676513671875, 504.03082275390625, 505.01971435546875, 505.96734619140625, 506.61187744140625, 507.59283447265625, 508.44256591796875, 509.37042236328125, 510.18560791015625, 510.84991455078125, 511.90777587890625, 512.7205810546875, 513.6148681640625, 514.3619384765625, 515.236083984375, 516.13232421875, 517.062744140625, 518.3779296875, 519.432373046875, 520.388671875, 521.2822265625, 522.173583984375, 523.1622314453125, 524.162841796875, 524.95166015625, 525.93212890625, 527.1358642578125, 527.83203125, 528.657958984375, 529.42138671875, 530.356689453125, 531.1588134765625, 531.86474609375, 532.654052734375, 533.808837890625, 534.8798828125, 535.730712890625, 536.622314453125, 537.31787109375, 538.481689453125, 539.50146484375, 540.3681640625, 541.459228515625, 542.43408203125, 543.39501953125, 544.351318359375, 544.9697265625, 545.6025390625, 546.28076171875, 547.1396484375, 548.26806640625, 549.33984375, 550.1533203125, 551.049560546875, 551.99755859375, 552.945068359375, 553.783935546875, 554.453125, 555.311279296875, 556.22900390625, 557.625732421875, 558.461181640625, 559.496337890625, 560.4454345703125, 561.088134765625, 561.8837890625, 562.8387451171875, 563.7255859375, 565.1561279296875, 566.068603515625, 567.09228515625, 568.2957763671875, 569.251953125, 569.9794921875, 571.216064453125, 572.399169921875, 573.3642578125, 574.1414794921875, 575.16162109375, 576.0498046875, 577.20849609375, 578.1102294921875, 579.08349609375, 580.354736328125, 580.9705810546875, 582.02392578125, 582.858642578125, 583.697021484375, 584.751708984375, 585.736083984375, 586.722412109375, 587.48779296875, 588.52685546875, 589.371826171875, 590.213623046875, 591.238525390625, 592.108154296875, 593.032470703125, 593.7459716796875, 594.427490234375, 595.29833984375, 
596.341064453125, 597.212646484375, 598.0889892578125, 599.399658203125, 600.26123046875, 601.076171875, 602.169921875, 603.362060546875, 604.254150390625, 605.0965576171875, 606.388427734375, 607.4422607421875, 608.5830078125, 609.69775390625, 610.7020263671875, 611.5001220703125, 612.1220703125, 613.044677734375, 613.8404541015625, 614.84814453125, 615.8154296875, 616.649658203125, 617.3739013671875, 618.20458984375, 619.2890625, 620.2357177734375, 621.212646484375, 622.00048828125, 622.8720703125, 623.511962890625, 624.38818359375, 625.419677734375, 626.416015625, 627.5302734375, 628.47265625, 629.5888671875, 630.49609375, 631.2301025390625, 631.945556640625, 632.5703125, 633.6016845703125, 634.5078125, 635.372314453125, 636.2647705078125, 637.4208984375, 638.0455322265625, 638.9873046875, 640.164794921875, 641.2568359375, 642.148193359375, 643.3486328125, 644.196533203125, 645.092041015625, 645.87744140625, 646.763427734375, 647.722900390625, 648.896240234375, 649.9566650390625, 651.0927734375, 652.0440673828125, 653.2078857421875, 654.2161865234375, 655.0166015625, 655.835693359375, 656.9476318359375, 658.0146484375, 659.3863525390625, 660.5687255859375, 661.540283203125, 662.5528564453125, 663.302734375, 664.231689453125, 665.039794921875, 665.76318359375, 666.485107421875, 667.159423828125, 668.114501953125, 669.1845703125, 670.24853515625, 671.191650390625, 672.0020751953125, 672.87109375, 674.0721435546875, 675.0921630859375, 676.335205078125, 677.490966796875, 678.546630859375, 679.611083984375, 680.4100341796875, 681.339111328125, 682.6435546875, 683.556884765625, 684.397216796875, 685.374267578125, 686.227783203125, 687.2574462890625, 688.130615234375, 689.1865234375, 690.2244873046875, 691.4127197265625, 692.466552734375, 693.337158203125, 694.10302734375, 695.171875, 696.17041015625, 696.811279296875, 697.655517578125, 698.604248046875, 699.7451171875, 700.957763671875, 701.9703369140625, 703.026123046875, 704.0335693359375, 704.848876953125, 705.968017578125, 706.94970703125, 707.863037109375, 708.7841796875, 709.7867431640625, 710.8990478515625, 711.891845703125, 713.140869140625, 713.886474609375, 714.630859375, 715.511962890625, 716.5302734375, 717.387939453125, 718.404541015625, 719.1859130859375, 719.99853515625, 720.786865234375, 721.42138671875, 722.247802734375, 723.229736328125, 724.130126953125, 725.0079345703125, 725.6214599609375, 726.467041015625, 727.396240234375, 728.22216796875, 729.223876953125, 730.02197265625, 730.7550048828125, 731.358154296875, 732.147216796875, 733.08056640625, 733.789306640625, 734.8394775390625, 736.1195068359375, 737.3280029296875, 738.341796875, 739.2176513671875, 740.0177001953125, 740.974853515625, 741.93212890625, 742.6605224609375, 743.4564208984375, 744.5606689453125, 745.465576171875, 746.3536376953125, 747.201416015625, 748.1258544921875, 748.8831787109375, 749.83056640625, 750.6607666015625, 751.9267578125, 753.1162109375, 754.1434326171875, 755.36669921875, 756.35107421875, 757.1273193359375, 758.007080078125, 758.7608642578125, 759.865478515625, 760.9664306640625, 761.7222900390625, 762.766357421875, 763.765869140625, 764.5450439453125, 765.3704833984375, 766.18017578125, 767.0062255859375, 767.79833984375, 768.83837890625, 769.461181640625, 770.11962890625, 771.2366943359375, 772.277099609375, 773.2481689453125, 774.138671875, 775.2012939453125, 776.0504150390625, 776.871337890625, 777.86083984375, 779.0703125, 780.060791015625, 781.0340576171875, 782.0849609375, 782.773681640625, 783.5970458984375, 784.5537109375, 
785.3486328125, 786.3221435546875, 787.1483154296875, 788.158203125, 788.9156494140625, 789.9228515625, 791.00927734375, 791.859619140625, 792.6927490234375, 793.48681640625, 794.3616943359375, 795.26318359375, 796.22314453125, 797.01318359375, 797.885009765625, 799.123779296875, 800.2498779296875, 801.010498046875, 801.75146484375, 802.5615234375, 803.5667724609375, 804.52294921875, 805.369140625, 806.0634765625, 806.6678466796875, 807.335693359375, 808.247314453125, 809.06005859375, 810.025634765625, 810.9266357421875, 811.94140625, 812.888671875, 813.6966552734375, 814.395751953125, 815.400146484375, 816.6763916015625, 817.5902099609375, 818.432373046875, 819.2447509765625, 820.334228515625, 821.349609375, 822.0946044921875, 822.8134765625, 823.5904541015625, 824.466552734375, 825.4178466796875, 826.455322265625, 827.565673828125, 828.312255859375, 829.205078125, 830.0302734375, 830.920654296875, 831.8514404296875, 832.850830078125, 833.6767578125, 834.501220703125, 835.38671875, 836.358642578125, 837.1220703125, 837.958740234375, 838.961669921875, 839.9578857421875, 841.068115234375, 842.001953125, 843.1912841796875, 844.4072265625, 845.22265625, 846.176513671875, 847.0936279296875, 848.0589599609375, 848.9915771484375, 849.801513671875, 850.8953857421875, 851.943359375, 852.8096923828125, 853.85595703125, 855.0648193359375, 856.042236328125, 856.8214111328125, 857.915771484375, 858.9195556640625, 860.012451171875, 861.17333984375, 862.082763671875, 863.0733642578125, 863.9952392578125, 864.8193359375, 865.499755859375, 866.1728515625, 867.16259765625, 867.9429931640625, 868.8642578125, 869.75146484375, 870.7010498046875, 871.594482421875, 872.203369140625, 873.178466796875, 874.146728515625, 874.9632568359375, 876.011474609375, 877.1478271484375, 878.137451171875, 879.0302734375, 879.885986328125, 880.9954833984375, 881.829833984375, 882.77783203125, 883.58349609375, 884.70068359375, 885.7152099609375, 886.5029296875, 887.2774658203125, 888.166259765625, 889.111328125, 889.98486328125, 891.231201171875, 892.1761474609375, 893.028564453125, 893.94873046875, 894.856201171875, 895.86328125, 896.7916259765625, 897.7933349609375, 898.693115234375, 899.7535400390625, 900.71630859375, 901.667724609375, 903.014404296875, 904.119873046875, 904.83935546875, 905.889404296875, 906.8662109375, 907.9351806640625, 909.0986328125, 909.96923828125, 910.7926025390625, 912.05322265625, 912.8499755859375, 913.7193603515625, 914.7706298828125, 915.96484375, 917.104736328125, 918.2379150390625, 919.1361083984375, 919.8939208984375, 921.032470703125, 921.9166259765625, 922.7454833984375, 923.697265625, 924.7960205078125, 925.979248046875, 926.9443359375, 927.721435546875, 928.7205810546875, 929.767822265625, 930.7706298828125, 931.7349853515625, 932.7294921875, 933.8270263671875, 934.766357421875, 935.697265625, 936.5841064453125, 937.658447265625, 938.6866455078125, 940.0623779296875, 941.23486328125, 942.1427001953125, 943.04833984375, 943.7071533203125, 944.809326171875, 945.9200439453125, 947.064453125, 948.1424560546875, 949.1114501953125, 950.0234375, 950.919189453125, 951.90576171875, 952.79345703125, 953.675048828125, 954.4881591796875, 955.31640625, 956.2119140625, 956.946533203125, 957.9564208984375, 958.8848876953125, 960.013671875, 960.8348388671875, 961.733154296875, 963.04541015625, 964.576416015625, 965.685791015625, 966.8388671875, 967.9644775390625, 969.043212890625, 969.78857421875, 970.57080078125, 971.774169921875, 972.5782470703125, 973.530517578125, 974.415283203125, 975.2567138671875, 
975.9061279296875, 976.678466796875, 977.737060546875, 978.7734375, 979.6895751953125, 980.69287109375, 981.6878662109375, 982.834228515625, 983.8946533203125, 984.76953125, 985.744140625, 986.6802978515625, 987.607421875, 988.8516845703125, 989.6602783203125, 990.83740234375, 992.0177001953125, 992.8641357421875, 993.79345703125, 994.74462890625, 996.4727783203125, 997.5208740234375, 998.2164306640625, 998.922119140625, 999.7427978515625, 1000.5955810546875, 1001.52685546875, 1002.6962890625, 1003.7646484375, 1004.7752685546875, 1006.0716552734375, 1006.9635009765625, 1007.8824462890625, 1008.68310546875, 1009.7298583984375, 1010.65673828125, 1011.7733154296875, 1012.6976318359375, 1013.6849365234375, 1014.634521484375, 1015.474853515625, 1016.2716064453125, 1017.0416259765625, 1018.36962890625, 1019.0325927734375, 1019.911865234375, 1020.7095947265625, 1021.3858642578125, 1021.9937744140625, 1022.7115478515625, 1023.47314453125, 1024.47021484375, 1025.56298828125, 1026.45849609375, 1027.4775390625, 1028.62255859375, 1029.66650390625, 1030.740234375, 1031.78076171875, 1032.7509765625, 1033.580810546875, 1034.82080078125, 1035.89501953125, 1036.65380859375, 1037.5478515625, 1038.529296875, 1039.6845703125, 1040.740478515625, 1041.713623046875, 1042.80419921875, 1043.5556640625, 1044.6923828125, 1045.724609375, 1046.6884765625, 1047.94970703125, 1049.199951171875, 1050.1494140625, 1051.01123046875, 1051.83642578125, 1053.063232421875, 1053.821044921875, 1054.839599609375, 1055.8935546875, 1056.59033203125, 1057.628662109375, 1058.71142578125, 1059.498046875, 1060.646728515625, 1061.85888671875, 1062.8408203125, 1063.971923828125, 1065.1044921875, 1066.3037109375, 1067.3388671875, 1068.47216796875, 1069.58935546875, 1070.874755859375, 1071.87255859375, 1072.61669921875, 1073.59423828125, 1074.499755859375, 1075.6572265625, 1076.328369140625, 1077.55322265625, 1078.5400390625, 1079.72216796875, 1080.673095703125, 1081.66552734375, 1082.6494140625, 1083.61962890625, 1084.7607421875, 1085.62548828125, 1086.58935546875, 1087.58935546875, 1088.59619140625, 1089.525634765625, 1090.396240234375, 1091.36181640625, 1092.49755859375, 1093.876708984375, 1094.72021484375, 1096.005859375, 1096.900634765625, 1097.75146484375, 1098.71533203125, 1099.52587890625, 1100.7333984375, 1101.50341796875, 1102.308349609375, 1103.593994140625, 1104.68115234375, 1105.702392578125, 1107.000732421875, 1107.818359375, 1108.44287109375, 1109.4775390625, 1110.138671875, 1111.1884765625, 1112.01904296875, 1112.9482421875, 1113.81103515625, 1114.8447265625, 1115.92236328125, 1116.7392578125, 1117.732421875, 1119.251708984375, 1119.99755859375, 1120.70849609375, 1121.7509765625, 1122.537353515625, 1123.3759765625, 1123.98681640625, 1124.924560546875, 1125.86083984375, 1126.73876953125, 1127.935546875, 1128.745849609375, 1129.50634765625, 1130.5107421875, 1131.557861328125, 1132.85107421875, 1134.09375, 1135.086181640625, 1136.333251953125, 1137.503662109375, 1138.17236328125, 1138.973876953125, 1139.9248046875, 1140.574951171875, 1141.69287109375, 1142.561767578125, 1143.27685546875, 1144.14404296875, 1145.25537109375, 1145.96337890625, 1146.803955078125, 1147.511962890625, 1148.37158203125, 1149.5185546875, 1150.5634765625, 1151.501953125, 1152.17138671875, 1152.93994140625, 1153.87109375, 1154.857421875, 1155.7646484375, 1156.84619140625, 1157.49462890625, 1158.392578125, 1159.5654296875, 1160.536865234375, 1161.6904296875, 1162.526123046875, 1163.4267578125, 1164.4580078125, 1165.7216796875, 1166.79833984375, 
1167.888427734375, 1168.54345703125, 1169.4482421875, 1170.4443359375, 1171.52099609375, 1172.925537109375, 1173.585205078125, 1174.659423828125, 1176.258544921875, 1177.59423828125, 1178.89794921875, 1179.583740234375, 1180.365234375, 1181.583984375, 1182.658203125, 1183.61279296875, 1184.55322265625, 1185.21923828125, 1185.9619140625, 1186.689697265625, 1187.899658203125, 1188.697265625, 1189.4404296875, 1190.21142578125, 1191.803466796875, 1192.5, 1193.730224609375, 1194.675537109375, 1195.63720703125, 1196.69970703125, 1197.807373046875, 1198.7177734375, 1199.99267578125, 1201.32275390625, 1202.562744140625, 1203.42626953125, 1204.72802734375, 1205.5234375, 1206.78466796875, 1207.78125, 1208.93798828125, 1210.1318359375, 1211.028076171875, 1212.47265625, 1213.38818359375, 1214.44287109375, 1215.6640625, 1216.549072265625, 1217.72119140625, 1218.56103515625, 1219.66259765625, 1220.84130859375, 1221.638671875, 1222.54736328125, 1223.291259765625, 1224.15966796875, 1225.0556640625, 1226.285400390625, 1227.32958984375, 1228.735107421875, 1229.45458984375, 1230.4892578125, 1231.423828125, 1232.59423828125, 1233.65185546875, 1234.494140625, 1235.459228515625, 1236.769287109375, 1237.62158203125, 1238.386962890625, 1239.53857421875, 1240.73388671875, 1241.74853515625, 1242.87939453125, 1243.6806640625, 1244.5419921875, 1245.47705078125, 1246.611083984375, 1247.74072265625, 1248.61669921875, 1249.65625, 1251.15625, 1252.2275390625, 1253.28173828125, 1254.02734375, 1254.83154296875, 1256.08203125, 1256.70263671875, 1257.339111328125, 1258.02197265625, 1259.06884765625, 1260.0478515625, 1260.677490234375, 1261.44482421875, 1262.48828125, 1263.2939453125, 1264.525390625, 1265.42578125, 1266.28076171875, 1267.702392578125, 1268.50341796875, 1269.289794921875, 1270.760498046875, 1271.70849609375, 1272.588134765625, 1273.46435546875, 1274.454833984375, 1275.37744140625, 1276.61181640625, 1277.50390625, 1278.83349609375, 1280.004638671875, 1280.65771484375, 1281.583740234375, 1282.4130859375, 1283.975341796875, 1286.34912109375, 1287.2783203125, 1288.082763671875, 1289.128662109375, 1290.34912109375, 1291.50390625, 1292.42236328125, 1293.6240234375, 1294.3994140625, 1295.2666015625, 1295.93310546875, 1296.673583984375, 1297.292724609375, 1298.5595703125, 1300.0537109375, 1300.9287109375, 1301.671142578125, 1303.00048828125, 1304.3251953125, 1305.2900390625, 1306.359130859375, 1307.34033203125, 1308.115234375, 1309.553955078125, 1311.09423828125, 1312.6630859375, 1313.563720703125, 1314.6728515625, 1315.946044921875, 1317.196044921875, 1318.2314453125, 1319.547119140625, 1320.51806640625, 1321.36669921875, 1322.475830078125, 1324.06591796875, 1325.066162109375, 1326.1767578125, 1327.192138671875, 1327.84423828125, 1329.00732421875, 1330.0234375, 1330.87841796875, 1332.33642578125, 1333.59912109375, 1334.4501953125, 1335.6083984375, 1336.414306640625, 1337.505126953125, 1338.644287109375, 1339.3544921875, 1340.593017578125, 1341.7080078125, 1342.484375, 1343.54541015625, 1344.77490234375, 1345.6474609375, 1346.45068359375, 1347.565185546875, 1348.23876953125, 1349.42822265625, 1350.6728515625, 1351.409423828125, 1352.23779296875, 1353.0283203125, 1353.880126953125, 1354.533203125, 1355.537109375, 1356.57568359375, 1357.65673828125, 1358.765625, 1360.82275390625, 1361.900146484375, 1363.05224609375, 1364.3701171875, 1365.10302734375, 1365.755126953125, 1366.70556640625, 1367.60107421875, 1368.658203125, 1369.33935546875, 1370.2607421875, 1371.950927734375, 1373.420654296875, 1374.450439453125, 
1375.58544921875, 1376.37353515625, 1377.73291015625, 1378.774658203125, 1379.80029296875, 1380.8291015625, 1381.52490234375, 1382.53271484375, 1383.57470703125, 1384.41259765625, 1385.621826171875, 1386.67822265625, 1387.771728515625, 1388.51513671875, 1389.171142578125, 1389.843505859375, 1390.7734375, 1392.29345703125, 1393.70751953125, 1394.69287109375, 1395.5009765625, 1396.59228515625, 1397.198486328125, 1398.34033203125, 1399.917236328125, 1400.81494140625, 1401.78857421875, 1402.5810546875, 1403.457275390625, 1404.945068359375, 1405.990234375, 1406.9208984375, 1407.742919921875, 1408.49267578125, 1409.36328125, 1410.3154296875, 1411.47900390625, 1412.48193359375, 1413.56103515625, 1414.64013671875, 1415.38916015625, 1416.151123046875, 1416.9501953125, 1418.3662109375, 1419.610107421875, 1420.81787109375, 1422.225341796875, 1423.06787109375, 1424.39892578125, 1425.3291015625, 1426.81103515625, 1427.83984375, 1429.290283203125, 1430.195556640625, 1431.437255859375, 1432.69287109375, 1434.609619140625, 1436.118896484375, 1437.706787109375, 1438.375732421875, 1439.245361328125, 1440.454833984375, 1442.134765625, 1442.849365234375, 1443.953857421875, 1445.473388671875, 1446.18505859375, 1447.553955078125, 1448.31103515625, 1449.299072265625, 1450.066650390625, 1450.80224609375, 1451.525634765625, 1452.308837890625, 1453.209716796875, 1454.205078125, 1455.103515625, 1456.060791015625, 1457.433837890625, 1459.093994140625, 1460.364990234375, 1461.049072265625, 1463.0107421875, 1464.96484375, 1465.69140625, 1466.324951171875, 1467.36328125, 1470.156982421875, 1471.43701171875, 1472.296630859375, 1473.17431640625, 1474.522216796875, 1475.568359375, 1476.2578125, 1478.016357421875, 1479.24072265625, 1479.89453125, 1481.129150390625, 1482.328125, 1483.418212890625, 1484.348388671875, 1485.339599609375, 1487.158447265625, 1489.0185546875, 1489.97509765625, 1491.116455078125, 1493.62109375, 1494.3095703125, 1495.67138671875, 1496.8056640625, 1497.778564453125, 1499.4267578125, 1500.58740234375, 1501.5986328125, 1502.515380859375, 1503.150634765625, 1505.52978515625, 1506.650390625, 1509.39501953125, 1510.064697265625, 1511.25390625, 1512.375244140625, 1514.4970703125, 1515.572265625, 1516.365966796875, 1517.261474609375, 1518.243408203125, 1519.978271484375, 1521.0517578125, 1521.935791015625, 1523.373046875, 1525.430908203125, 1526.421630859375, 1527.80859375, 1528.66845703125, 1529.704833984375, 1530.9765625, 1532.154296875, 1533.34228515625, 1534.33837890625, 1535.78955078125, 1536.61962890625, 1537.38330078125, 1538.264404296875, 1539.772216796875, 1541.060546875, 1543.270263671875, 1544.21630859375, 1545.323974609375, 1546.343994140625, 1548.144287109375, 1550.567138671875, 1552.367431640625, 1553.1787109375, 1554.52197265625, 1555.35400390625, 1556.703125, 1558.220703125, 1558.984375, 1560.05126953125, 1561.304443359375, 1562.48583984375, 1563.30126953125, 1564.437744140625, 1565.80419921875, 1566.59033203125, 1569.40380859375, 1571.77490234375, 1574.384521484375, 1575.582763671875, 1576.427734375, 1577.588134765625, 1578.650390625, 1580.301513671875, 1581.45458984375, 1582.23974609375, 1583.840087890625, 1585.37548828125, 1586.391357421875, 1588.023193359375, 1589.372802734375, 1591.751953125, 1592.68408203125, 1593.472412109375, 1594.313232421875, 1595.52685546875, 1597.152587890625, 1597.790283203125, 1600.117431640625, 1601.466796875, 1602.48681640625, 1603.661865234375, 1604.74169921875, 1605.48486328125, 1606.282958984375, 1607.375, 1608.64697265625, 1609.382568359375, 
1610.311279296875, 1611.2880859375, 1613.010009765625, 1614.29541015625, 1615.360107421875, 1616.46337890625, 1617.11572265625, 1618.2783203125, 1620.237060546875, 1620.877685546875, 1621.755126953125, 1623.65576171875, 1624.597900390625, 1627.211181640625, 1629.283935546875, 1630.5380859375, 1631.3447265625, 1633.7392578125, 1635.309814453125, 1636.988037109375, 1638.052001953125, 1638.941162109375, 1641.2333984375, 1643.456787109375, 1645.15478515625, 1646.756103515625, 1647.59521484375, 1648.4482421875, 1649.614013671875, 1650.31689453125, 1651.225341796875, 1653.120361328125, 1654.56396484375, 1656.172607421875, 1659.06787109375, 1660.4921875, 1662.562744140625, 1666.490234375, 1667.990966796875, 1668.6669921875, 1669.895263671875, 1673.319580078125, 1674.264892578125, 1676.18798828125, 1677.0263671875, 1681.38916015625, 1684.42578125, 1685.05517578125, 1685.8115234375, 1687.7568359375, 1689.33251953125, 1691.2744140625, 1692.242919921875, 1699.79736328125, 1703.167236328125, 1704.076416015625, 1704.9755859375, 1706.415771484375, 1708.31298828125, 1711.287353515625, 1714.760498046875, 1716.608642578125, 1717.6083984375, 1719.567626953125, 1720.648193359375, 1723.835205078125, 1726.537353515625, 1727.319091796875, 1728.208984375, 1729.417724609375, 1730.475830078125, 1732.616455078125, 1734.271728515625, 1736.541259765625, 1737.337158203125, 1738.282958984375, 1738.98193359375, 1740.037353515625, 1741.724853515625, 1743.254638671875, 1745.993408203125, 1750.390625, 1751.103271484375, 1754.5107421875, 1756.341064453125, 1758.35205078125, 1760.322021484375, 1761.417724609375, 1763.494873046875, 1766.391357421875, 1767.47119140625, 1769.859130859375, 1771.068359375, 1772.699951171875, 1773.4228515625, 1774.158935546875, 1775.810302734375, 1777.126220703125, 1778.25439453125, 1779.4228515625, 1783.1669921875, 1783.91943359375, 1789.88671875, 1791.3388671875, 1793.0791015625, 1795.557373046875]), 'retentionTime': 5.8905}, {'polarity': '+', 'collisionEnergy': 35.0, 'id': '20', 'basePeakIntensity': 301045.0, 'highMz': 905.0, 'msLevel': 2, 'totIonCurrent': 764637.0, 'peaksCount': 43, 'precursorMz': [{'precursorMz': 445.35, 'precursorIntensity': 120053.0}], 'num': '20', 'basePeakMz': 428.905, 'lowMz': 110.0, 'intensity array': makeCA( [3071.0, 1259.0, 564.0, 2371.0, 1646.0, 1546.0, 1093.0, 1498.0, 1110.0, 2013.0, 1535.0, 1973.0, 28317.0, 4071.0, 792.0, 2456.0, 3167.0, 1673.0, 216505.0, 30083.0, 2.0, 1192.0, 1273.0, 2070.0, 3120.0, 11655.0, 2124.0, 821.0, 825.0, 4734.0, 3214.0, 1235.0, 6617.0, 4802.0, 3320.0, 301045.0, 101500.0, 666.0, 1892.0, 1301.0, 1923.0, 683.0, 1880.0]), 'm/z array': makeCA( [223.08883666992188, 244.08282470703125, 270.891845703125, 277.880859375, 281.1331787109375, 293.664794921875, 311.64837646484375, 312.763916015625, 329.0174560546875, 333.06805419921875, 336.62493896484375, 338.9378662109375, 340.9237060546875, 341.9869384765625, 348.98486328125, 351.067138671875, 354.82891845703125, 357.0274658203125, 358.66326904296875, 359.61871337890625, 360.2332763671875, 370.48370361328125, 382.07147216796875, 383.66082763671875, 385.33001708984375, 386.373291015625, 388.41363525390625, 398.84710693359375, 400.7999267578125, 401.9385986328125, 410.0867919921875, 420.408447265625, 426.13665771484375, 426.94586181640625, 428.072509765625, 428.90478515625, 429.922607421875, 430.8460693359375, 438.67962646484375, 443.957275390625, 444.7640380859375, 446.65692138671875, 531.078369140625]), 'retentionTime': 5.9446666666666665} ] ms1_spectra = [ {'intensity array': makeCA([0., 20.0522, 
29.26406, 30.04175, 20.19221, 11.58895, 0.]), 'm/z array': makeCA([82.51263, 82.51282, 82.51301, 82.51321, 82.5134, 82.51359, 82.51378]), 'params': {'BPI': '585566', 'BPM': '544.2904', 'RTime': 0.987225, 'TIC': '3728760', 'scan': ('1', '1')}}, {'intensity array': makeCA([0., 31.2197, 37.46051, 44.36585, 49.12939, 44.33195, 35.1637, 33.48032, 0.]), 'm/z array': makeCA([82.6435, 82.6437, 82.64389, 82.64408, 82.64427, 82.64447, 82.64466, 82.64485, 82.64504]), 'params': {'BPI': '713524', 'BPM': '544.2904', 'RTime': 1.32083, 'TIC': '2694200', 'scan': ('2', '2')}}] ms1_spectra_lists = [{'intensity array': [0., 20.0522, 29.26406, 30.04175, 20.19221, 11.58895, 0.], 'm/z array': [82.51263, 82.51282, 82.51301, 82.51321, 82.5134, 82.51359, 82.51378], 'params': {'BPI': '585566', 'BPM': '544.2904', 'RTime': 0.987225, 'TIC': '3728760', 'scan': ('1', '1')}}, {'intensity array': [0., 31.2197, 37.46051, 44.36585, 49.12939, 44.33195, 35.1637, 33.48032, 0.], 'm/z array': [82.6435, 82.6437, 82.64389, 82.64408, 82.64427, 82.64447, 82.64466, 82.64485, 82.64504], 'params': {'BPI': '713524', 'BPM': '544.2904', 'RTime': 1.32083, 'TIC': '2694200', 'scan': ('2', '2')}}] ms1_header = {'CreationDate': 'Sat Jun 03 15:25:10 2017', 'Extractor version': 'Xcalibur', 'Extractor': 'ProteoWizard', 'Source file': 'Set 1. B2 at 193 nm RT.RAW'} ms2_spectra = [{'intensity array': makeCA([73., 44., 67., 291., 54., 49.]), 'm/z array': makeCA([846.6, 846.8, 847.6, 1640.1, 1640.6, 1895.5]), 'params': {'charge': [2.0], 'neutral mass': [1966.193], 'precursor m/z': 983.6, 'scan': ('0', '0')}}, {'intensity array': makeCA([237., 128., 108., 1007., 974., 79.]), 'm/z array': makeCA([345.1, 370.2, 460.2, 1673.3, 1674., 1675.3]), 'params': {'RTime': 25.0, 'precursor m/z': 1084.9, 'scan': ('1', '1')}}] ms2_spectra_lists = [{'intensity array': [73., 44., 67., 291., 54., 49.], 'm/z array': [846.6, 846.8, 847.6, 1640.1, 1640.6, 1895.5], 'params': {'charge': [2.0], 'neutral mass': [1966.193], 'precursor m/z': 983.6, 'scan': ('0', '0')}}, {'intensity array': [237., 128., 108., 1007., 974., 79.], 'm/z array': [345.1, 370.2, 460.2, 1673.3, 1674., 1675.3], 'params': {'RTime': 25.0, 'precursor m/z': 1084.9, 'scan': ('1', '1')}}] ms2_header = {'CreationDate': 'Wed Apr 24 17:06:23 2019', 'Extractor': 'ProteoWizard', 'Extractor version': 'pwiz_2.1.2575 (TPP v4.5 RAPTURE rev 2, Build 201208012328 (linux))', 'Source file': 'test.mgf'} protxml_results = [{'group_number': 1, 'probability': 1.0, 'protein': [{'confidence': 1.0, 'group_sibling_id': 'a', 'n_indistinguishable_proteins': 1, 'pct_spectrum_ids': 0.018, 'peptide': [{'calc_neutral_pep_mass': 2094.0307, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9995, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 4.38, 'n_sibling_peptides_bin': 3, 'nsp_adjusted_probability': 0.9995, 'peptide_group_designator': 'a', 'peptide_sequence': 'NIPIMSTASVEIDDAIYSR', 'weight': 1.0}, {'calc_neutral_pep_mass': 1538.794, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9995, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'mod_aminoacid_mass': [{'mass': '111.032030', 'position': '1'}], 'modified_peptide': 'Q[111]DVIITAIDNVEAR', 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 4.38, 'n_sibling_peptides_bin': 3, 'nsp_adjusted_probability': 0.9995, 'peptide_sequence': 'QDVIITAIDNVEAR', 'weight': 1.0},
{'calc_neutral_pep_mass': 2094.0303, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9995, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 4.38, 'n_sibling_peptides_bin': 3, 'nsp_adjusted_probability': 0.9995, 'peptide_group_designator': 'a', 'peptide_sequence': 'NIPIMSTASVEIDDAIYSR', 'weight': 1.0}, {'calc_neutral_pep_mass': 2212.2752, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9981, 'initial_probability': 0.996, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 4.38, 'n_sibling_peptides_bin': 3, 'nsp_adjusted_probability': 0.9981, 'peptide_sequence': 'IIPAIATTTATVSGIVAIEMIK', 'weight': 1.0}, {'calc_neutral_pep_mass': 1126.5658, 'charge': 2, 'exp_tot_instances': 0.66, 'fpkm_adjusted_probability': 0.8017, 'initial_probability': 0.6598, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 4.71, 'n_sibling_peptides_bin': 3, 'nsp_adjusted_probability': 0.8017, 'peptide_sequence': 'TVFFESIER', 'weight': 1.0}, {'calc_neutral_pep_mass': 961.5233, 'charge': 2, 'exp_tot_instances': 0.47, 'fpkm_adjusted_probability': 0.695, 'initial_probability': 0.4723, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 4.9, 'n_sibling_peptides_bin': 4, 'nsp_adjusted_probability': 0.695, 'peptide_sequence': 'NAIFQIEK', 'weight': 1.0}, {'calc_neutral_pep_mass': 945.5131, 'charge': 2, 'exp_tot_instances': 0.25, 'fpkm_adjusted_probability': 0.249, 'initial_probability': 0.249, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 5.13, 'n_sibling_peptides_bin': 4, 'nsp_adjusted_probability': 0.249, 'peptide_sequence': 'AIISNEATK', 'weight': 1.0}], 'percent_coverage': 7.7, 'probability': 1.0, 'prot_length': 1052, 'protein_description': 'Ubiquitin-like modifier-activating enzyme 6 OS=Homo sapiens GN=UBA6 PE=1 SV=1', 'protein_name': 'sp|A0AVT1|UBA6_HUMAN', 'raw_intensity': '0.000', 'total_number_distinct_peptides': 7, 'total_number_peptides': 7, 'unique_stripped_peptides': ['AIISNEATK', 'IIPAIATTTATVSGIVAIEMIK', 'NAIFQIEK', 'NIPIMSTASVEIDDAIYSR', 'QDVIITAIDNVEAR', 'TVFFESIER']}]}, {'group_number': 2, 'probability': 0.999, 'protein': [{'confidence': 1.0, 'group_sibling_id': 'a', 'n_indistinguishable_proteins': 1, 'pct_spectrum_ids': 0.093, 'peptide': [{'calc_neutral_pep_mass': 1519.9086, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'a', 'peptide_sequence': 'AVPIAIAIISVSNPR', 'weight': 1.0}, {'calc_neutral_pep_mass': 1166.5316, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_sequence': 'FGGSGSQVDSAR', 'weight': 1.0}, {'calc_neutral_pep_mass': 1958.9486, 'charge': 2, 
'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'b', 'peptide_sequence': 'IVGSQEEIASWGHEYVR', 'weight': 1.0}, {'calc_neutral_pep_mass': 2116.0047, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'c', 'peptide_sequence': 'MNIASSFVNGFVNAAFGQDK', 'weight': 1.0}, {'calc_neutral_pep_mass': 1451.8096, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.9989, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'd', 'peptide_sequence': 'VGQAVDVVGQAGKPK', 'weight': 1.0}, {'calc_neutral_pep_mass': 2456.3566, 'charge': 3, 'exp_tot_instances': 2.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 2, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'e', 'peptide_sequence': 'AEIATEEFIPVTPIIEGFVIIR', 'weight': 1.0}, {'calc_neutral_pep_mass': 2217.1027, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_sequence': 'APVQPQQSPAAAPGGTDEKPSGK', 'weight': 1.0}, {'calc_neutral_pep_mass': 1519.9086, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'a', 'peptide_sequence': 'AVPIAIAIISVSNPR', 'weight': 1.0}, {'calc_neutral_pep_mass': 2460.2245, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.9989, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_sequence': 'DKAPVQPQQSPAAAPGGTDEKPSGK', 'weight': 1.0}, {'calc_neutral_pep_mass': 1486.6874, 'charge': 3, 'exp_tot_instances': 2.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'mod_aminoacid_mass': [{'mass': '228.056870', 'position': '6'}], 'modified_peptide': 'GTITICPYHSDR', 'n_enzymatic_termini': 2, 'n_instances': 2, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_sequence': 'GTITICPYHSDR', 'weight': 1.0}, {'calc_neutral_pep_mass': 1958.9486, 'charge': 3, 'exp_tot_instances': 1.0, 
'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'b', 'peptide_sequence': 'IVGSQEEIASWGHEYVR', 'weight': 1.0}, {'calc_neutral_pep_mass': 2116.0047, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'c', 'peptide_sequence': 'MNIASSFVNGFVNAAFGQDK', 'weight': 1.0}, {'calc_neutral_pep_mass': 2078.0909, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_sequence': 'TITGFQTHTTPVIIAHGER', 'weight': 1.0}, {'calc_neutral_pep_mass': 1451.8096, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_group_designator': 'd', 'peptide_sequence': 'VGQAVDVVGQAGKPK', 'weight': 1.0}, {'calc_neutral_pep_mass': 1712.8477, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9997, 'initial_probability': 0.999, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9997, 'peptide_sequence': 'VPDDIYKTHIENNR', 'weight': 1.0}, {'calc_neutral_pep_mass': 834.4235, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9996, 'initial_probability': 0.9988, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9996, 'peptide_sequence': 'YGEPTIR', 'weight': 1.0}, {'calc_neutral_pep_mass': 2000.0765, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9996, 'initial_probability': 0.9986, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.8, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9996, 'peptide_sequence': 'MIVTFDEEIRPIPVSVR', 'weight': 1.0}, {'calc_neutral_pep_mass': 2584.4516, 'charge': 3, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9994, 'initial_probability': 0.9979, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.81, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9994, 'peptide_sequence': 'AEIATEEFIPVTPIIEGFVIIRK', 'weight': 1.0}, {'calc_neutral_pep_mass': 1540.8031, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9992, 'initial_probability': 0.9973, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'mod_aminoacid_mass': [{'mass': '228.056870', 'position': '7'}], 
'modified_peptide': 'SGAIIACGIVNSGVR', 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.81, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9992, 'peptide_sequence': 'SGAIIACGIVNSGVR', 'weight': 1.0}, {'calc_neutral_pep_mass': 1279.5972, 'charge': 2, 'exp_tot_instances': 1.0, 'fpkm_adjusted_probability': 0.9988, 'initial_probability': 0.9959, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.81, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9988, 'peptide_sequence': 'YIYSSEDYIK', 'weight': 1.0}, {'calc_neutral_pep_mass': 2520.3227, 'charge': 3, 'exp_tot_instances': 0.99, 'fpkm_adjusted_probability': 0.9975, 'initial_probability': 0.9917, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'mod_aminoacid_mass': [{'mass': '111.032030', 'position': '1'}], 'modified_peptide': 'E[111]WQEIDDAEKVQREPIITIVK', 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.81, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9975, 'peptide_sequence': 'EWQEIDDAEKVQREPIITIVK', 'weight': 1.0}, {'calc_neutral_pep_mass': 2456.3566, 'charge': 2, 'exp_tot_instances': 0.99, 'fpkm_adjusted_probability': 0.9969, 'initial_probability': 0.9896, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.81, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9969, 'peptide_group_designator': 'e', 'peptide_sequence': 'AEIATEEFIPVTPIIEGFVIIR', 'weight': 1.0}, {'calc_neutral_pep_mass': 1294.7972, 'charge': 3, 'exp_tot_instances': 0.98, 'fpkm_adjusted_probability': 0.995, 'initial_probability': 0.9832, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.82, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.995, 'peptide_sequence': 'VQREPIITIVK', 'weight': 1.0}, {'calc_neutral_pep_mass': 1015.5913, 'charge': 2, 'exp_tot_instances': 0.86, 'fpkm_adjusted_probability': 0.9544, 'initial_probability': 0.8603, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.94, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9544, 'peptide_sequence': 'INIIDTISK', 'weight': 1.0}, {'calc_neutral_pep_mass': 911.5691, 'charge': 2, 'exp_tot_instances': 0.86, 'fpkm_adjusted_probability': 0.9526, 'initial_probability': 0.8555, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 24.95, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9526, 'peptide_sequence': 'EPIITIVK', 'weight': 1.0}, {'calc_neutral_pep_mass': 973.479, 'charge': 2, 'exp_tot_instances': 0.8, 'fpkm_adjusted_probability': 0.9297, 'initial_probability': 0.7956, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': True, 'n_enzymatic_termini': 2, 'n_instances': 1, 'n_sibling_peptides': 25.01, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.9297, 'peptide_sequence': 'EIDIMEPK', 'weight': 1.0}, {'calc_neutral_pep_mass': 889.448, 'charge': 2, 'exp_tot_instances': 0.65, 'fpkm_adjusted_probability': 0.8644, 'initial_probability': 0.6523, 'is_contributing_evidence': True, 'is_nondegenerate_evidence': False, 'mod_aminoacid_mass': [{'mass': '228.056870', 'position': '1'}], 'modified_peptide': 'CAIGVFR', 'n_enzymatic_termini': 2, 
'n_instances': 1, 'n_sibling_peptides': 25.45, 'n_sibling_peptides_bin': 8, 'nsp_adjusted_probability': 0.8644, 'peptide_parent_protein': [ {'protein_name': 'DECOY_sp|A0A5B9|TRBC2_HUMAN'}], 'peptide_sequence': 'CAIGVFR', 'weight': 0.54}], 'percent_coverage': 29.3, 'probability': 1.0, 'prot_length': 908, 'protein_description': '26S proteasome non-ATPase regulatory subunit 2 OS=Homo sapiens GN=PSMD2 PE=1 SV=3', 'protein_name': 'DECOY_sp|Q13200|PSMD2_HUMAN', 'raw_intensity': '0.000', 'total_number_distinct_peptides': 29, 'total_number_peptides': 29, 'unique_stripped_peptides': ['AEIATEEFIPVTPIIEGFVIIR', 'AEIATEEFIPVTPIIEGFVIIRK', 'APVQPQQSPAAAPGGTDEKPSGK', 'AVPIAIAIISVSNPR', 'CAIGVFR', 'DKAPVQPQQSPAAAPGGTDEKPSGK', 'EIDIMEPK', 'EPIITIVK', 'EWQEIDDAEKVQREPIITIVK', 'FGGSGSQVDSAR', 'GTITICPYHSDR', 'INIIDTISK', 'IVGSQEEIASWGHEYVR', 'MIVTFDEEIRPIPVSVR', 'MNIASSFVNGFVNAAFGQDK', 'SGAIIACGIVNSGVR', 'TITGFQTHTTPVIIAHGER', 'VGQAVDVVGQAGKPK', 'VPDDIYKTHIENNR', 'VQREPIITIVK', 'YGEPTIR', 'YIYSSEDYIK']}]}] transitions = [ [{'Precursor': {'charge state': 2.0, 'isolation window target m/z': 862.9467}, 'Prediction': {'contactRef': 'CS', 'linear ion trap': '', 'peak intensity': 10000.0, 'peak intensity rank': 1.0, 'peak targeting suitability rank': 1.0, 'softwareRef': 'MaRiMba', 'transition purported from an MS/MS spectrum on a different, specified instrument': ''}, 'Product': {'ConfigurationList': {'Configuration': [{'ValidationStatus': [{'4000 QTRAP': '', 'peak intensity': 4072.0, 'peak intensity rank': 2.0, 'peak targeting suitability rank': 1.0, 'transition optimized on specified instrument': ''}], 'collision energy': 26.0, 'collision gas': 'argon', 'collision gas pressure': 12.0, 'cone voltage': 1200.0, 'contactRef': 'CS', 'declustering potential': 64.0, 'dwell time': 0.12, 'instrumentRef': 'QTRAP', 'interchannel delay': 0.01, 'tube lens voltage': 23.0}]}, 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion', 'product interpretation rank': 1.0, 'product ion m/z delta': 0.03, 'product ion series ordinal': 8.0}, {'name': 'frag: b ion - H2O', 'product interpretation rank': 2.0, 'product ion m/z delta': -0.43, 'product ion series ordinal': 9.0}]}, 'charge state': 1.0, 'isolation window target m/z': 1040.57}, 'RetentionTime': [{'local retention time': 40.02, 'retention time window lower offset': 3.0, 'retention time window upper offset': 3.0, 'softwareRef': 'Skyline0.5'}], 'id': 'ADTHFLLNIYDQLR-M1-T1', 'peptideRef': 'ADTHFLLNIYDQLR-M1'}, {'IntermediateProduct': [{'ConfigurationList': {'Configuration': [{'collision energy': 26.0, 'contactRef': 'CS', 'instrumentRef': 'QTRAP'}]}, 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion', 'product interpretation rank': 1.0, 'product ion m/z delta': 0.03, 'product ion series ordinal': 8.0}]}, 'charge state': 1.0, 'isolation window target m/z': 1040.57}], 'Precursor': {'charge state': 2.0, 'isolation window lower offset': 1.0, 'isolation window target m/z': 862.9467, 'isolation window upper offset': 1.0}, 'Product': {'ConfigurationList': {'Configuration': [{'collision energy': 20.4, 'contactRef': 'CS', 'dwell time': 0.12, 'instrumentRef': 'QTRAP'}]}, 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion', 'product interpretation rank': 1.0, 'product ion m/z delta': 0.03, 'product ion series ordinal': 4.0}]}, 'charge state': 1.0, 'isolation window target m/z': 543.2}, 'RetentionTime': [{'local retention time': 40.02, 'retention time window lower offset': 3.0, 'retention time window upper offset': 3.0, 'softwareRef': 'Skyline0.5'}], 'id': 
'ADTHFLLNIYDQLR-M1-T2', 'peptideRef': 'ADTHFLLNIYDQLR-M1'}], [ {'Precursor': {'charge state': 2.0, 'isolation window target m/z': 862.9467}, 'Prediction': {'contact': {'contact URL': 'http://www.systemsbiology.org/', 'contact address': '1441 NE 34th St, Seattle WA 98103, USA', 'contact email': '[email protected]', 'contact name': 'Eric Deutsch', 'contact organization': 'Institute for Systems Biology', 'id': 'CS'}, 'linear ion trap': '', 'peak intensity': 10000.0, 'peak intensity rank': 1.0, 'peak targeting suitability rank': 1.0, 'software': {'name': 'MaRiMba', 'id': 'MaRiMba', 'version': '1.0'}, 'transition purported from an MS/MS spectrum on a different, specified instrument': ''}, 'Product': {'ConfigurationList': {'Configuration': [{'ValidationStatus': [{'4000 QTRAP': '', 'peak intensity': 4072.0, 'peak intensity rank': 2.0, 'peak targeting suitability rank': 1.0, 'transition optimized on specified instrument': ''}], 'collision energy': 26.0, 'collision gas': 'argon', 'collision gas pressure': 12.0, 'cone voltage': 1200.0, 'contact': { 'contact URL': 'http://www.systemsbiology.org/', 'contact address': '1441 NE 34th St, Seattle WA 98103, USA', 'contact email': '[email protected]', 'contact name': 'Eric Deutsch', 'contact organization': 'Institute for Systems Biology', 'id': 'CS'}, 'declustering potential': 64.0, 'dwell time': 0.12, 'instrument': {'4000 QTRAP': '', 'id': 'QTRAP'}, 'interchannel delay': 0.01, 'tube lens voltage': 23.0}]}, 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion', 'product interpretation rank': 1.0, 'product ion m/z delta': 0.03, 'product ion series ordinal': 8.0}, {'name': 'frag: b ion - H2O', 'product interpretation rank': 2.0, 'product ion m/z delta': -0.43, 'product ion series ordinal': 9.0}]}, 'charge state': 1.0, 'isolation window target m/z': 1040.57}, 'RetentionTime': [{'local retention time': 40.02, 'retention time window lower offset': 3.0, 'retention time window upper offset': 3.0, 'software': {'name': 'Skyline', 'id': 'Skyline0.5', 'version': '0.5'}}], 'id': 'ADTHFLLNIYDQLR-M1-T1', 'peptide': {'Evidence': {'confident peptide': 6.0}, 'Modification': [{'location': 0, 'monoisotopicMassDelta': 127.063324, 'name': 'SMA'}, {'location': 1, 'monoisotopicMassDelta': 15.994919, 'name': 'Oxidation'}], 'Protein': [{'Sequence': 'MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKE', 'id': 'Q12149', 'protein accession': 'Q00613', 'protein name': 'Heat shock factor protein 1', 'protein short name': 'HSF 1', 'ref': 'Q12149'}, { 'Sequence': 'MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKEELISNSSDALDKI', 'id': 'ENSP00000332698', 'protein accession': 'ENSP00000332698', 'protein name': 'Heat shock factor protein 1', 'protein short name': 'HSF 1', 'ref': 'ENSP00000332698'}], 'RetentionTimeList': [{'RetentionTime': [{'predicted retention time': 44.07, 'software': {'name': 'SSRCalc', 'id': 'SSRCalc3.0', 'version': '3.0'}}, {'H-PINS retention time normalization standard': '', 'normalized retention time': 38.43}]}], 'heavy labeled peptide': '', 'id': 'ADTHFLLNIYDQLR-M1', 'isomerization potential': 0.583, 'peptide group label': 'G1', 'predicted isoelectric point': 5.22, 'sequence': 'ADTHFLLNIYDQLR', 'theoretical mass': 1189.22}}, {'IntermediateProduct': [{'ConfigurationList': {'Configuration': [{'collision energy': 26.0, 'contact': { 'contact URL': 'http://www.systemsbiology.org/', 'contact address': '1441 NE 34th St, Seattle WA 98103, USA', 'contact email': '[email protected]', 'contact name': 'Eric Deutsch',
'contact organization': 'Institute for Systems Biology', 'id': 'CS'}, 'instrument': {'4000 QTRAP': '', 'id': 'QTRAP'}}]}, 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion', 'product interpretation rank': 1.0, 'product ion m/z delta': 0.03, 'product ion series ordinal': 8.0}]}, 'charge state': 1.0, 'isolation window target m/z': 1040.57}], 'Precursor': {'charge state': 2.0, 'isolation window lower offset': 1.0, 'isolation window target m/z': 862.9467, 'isolation window upper offset': 1.0}, 'Product': {'ConfigurationList': {'Configuration': [{'collision energy': 20.4, 'contact': { 'contact URL': 'http://www.systemsbiology.org/', 'contact address': '1441 NE 34th St, Seattle WA 98103, USA', 'contact email': '[email protected]', 'contact name': 'Eric Deutsch', 'contact organization': 'Institute for Systems Biology', 'id': 'CS'}, 'dwell time': 0.12, 'instrument': {'4000 QTRAP': '', 'id': 'QTRAP'}}]}, 'InterpretationList': {'Interpretation': [{'name': 'frag: y ion', 'product interpretation rank': 1.0, 'product ion m/z delta': 0.03, 'product ion series ordinal': 4.0}]}, 'charge state': 1.0, 'isolation window target m/z': 543.2}, 'RetentionTime': [{'local retention time': 40.02, 'retention time window lower offset': 3.0, 'retention time window upper offset': 3.0, 'software': {'name': 'Skyline', 'id': 'Skyline0.5', 'version': '0.5'}}], 'id': 'ADTHFLLNIYDQLR-M1-T2', 'peptide': {'Evidence': {'confident peptide': 6.0}, 'Modification': [{'location': 0, 'monoisotopicMassDelta': 127.063324, 'name': 'SMA'}, {'location': 1, 'monoisotopicMassDelta': 15.994919, 'name': 'Oxidation'}], 'Protein': [{'Sequence': 'MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKE', 'id': 'Q12149', 'protein accession': 'Q00613', 'protein name': 'Heat shock factor protein 1', 'protein short name': 'HSF 1', 'ref': 'Q12149'}, { 'Sequence': 'MSTEMETKAEDVETFAFQAEIAQLMSLIINTFYSNKEIFLRELISNSSDALDKIRYESLTDPSKLDNGKEELISNSSDALDKI', 'id': 'ENSP00000332698', 'protein accession': 'ENSP00000332698', 'protein name': 'Heat shock factor protein 1', 'protein short name': 'HSF 1', 'ref': 'ENSP00000332698'}], 'RetentionTimeList': [{'RetentionTime': [{'predicted retention time': 44.07, 'software': {'name': 'SSRCalc', 'id': 'SSRCalc3.0', 'version': '3.0'}}, {'H-PINS retention time normalization standard': '', 'normalized retention time': 38.43}]}], 'heavy labeled peptide': '', 'id': 'ADTHFLLNIYDQLR-M1', 'isomerization potential': 0.583, 'peptide group label': 'G1', 'predicted isoelectric point': 5.22, 'sequence': 'ADTHFLLNIYDQLR', 'theoretical mass': 1189.22}}] ] idxml_data = {(0, 0): [{'score_type': 'q-value', 'higher_score_better': False, 'significance_threshold': 0.0, 'MZ': 368.832366943359, 'RT': 1517.88525390625}, {'score_type': 'q-value', 'higher_score_better': False, 'significance_threshold': 0.0, 'MZ': 552.744079589844, 'RT': 1520.14294433594}], (0, 1): [{'score_type': 'q-value', 'higher_score_better': False, 'significance_threshold': 0.0, 'MZ': 368.832366943359, 'RT': 1517.88525390625}, {'score_type': 'q-value', 'higher_score_better': False, 'significance_threshold': 0.0, 'MZ': 552.744079589844, 'RT': 1520.14294433594}], (1, 0): [{'score_type': 'q-value', 'higher_score_better': False, 'significance_threshold': 0.0, 'MZ': 368.832366943359, 'RT': 1517.88525390625, 'PeptideHit': [{'score': 0.0, 'sequence': 'DGDIEAEISR', 'charge': 3, 'aa_before': ['K'], 'aa_after': ['E'], 'protein_refs': 'PH_0', 'OMSSA_score': 0.000846175003530426, 'target_decoy': 'target'}]}, {'score_type': 'q-value', 'higher_score_better': False,
'significance_threshold': 0.0, 'MZ': 552.744079589844, 'RT': 1520.14294433594, 'PeptideHit': [{'score': 0.0, 'sequence': 'LAMTLAEAER', 'charge': 2, 'aa_before': ['R'], 'aa_after': ['A'], 'protein_refs': 'PH_6', 'OMSSA_score': 0.0384710999326793, 'target_decoy': 'target'}]}], (1, 1): [{'score_type': 'q-value', 'higher_score_better': False, 'significance_threshold': 0.0, 'MZ': 368.832366943359, 'RT': 1517.88525390625, 'PeptideHit': [{'score': 0.0, 'sequence': 'DGDIEAEISR', 'charge': 3, 'aa_before': ['K'], 'aa_after': ['E'], 'OMSSA_score': 0.000846175003530426, 'target_decoy': 'target', 'protein': [{'accession': 'tr|A9EY18|A9EY18_SORC5', 'score': 0.0, 'sequence': '', 'OMSSA_score': 0.0}]}]}, {'score_type': 'q-value', 'higher_score_better': False, 'significance_threshold': 0.0, 'MZ': 552.744079589844, 'RT': 1520.14294433594, 'PeptideHit': [{'score': 0.0, 'sequence': 'LAMTLAEAER', 'charge': 2, 'aa_before': ['R'], 'aa_after': ['A'], 'OMSSA_score': 0.0384710999326793, 'target_decoy': 'target', 'protein': [{'accession': 'tr|A9GID7|A9GID7_SORC5', 'score': 0.0, 'sequence': '', 'OMSSA_score': 0.0}]}]}]} usi_proxi_data = {'attributes': [{'accession': 'MS:1008025', 'name': 'scan number', 'value': '131256'}, {'accession': 'MS:1000827', 'name': 'isolation window target m/z', 'value': '1046.4921'}, {'accession': 'MS:1000041', 'name': 'charge state', 'value': '2'}, {'accession': 'MS:1003061', 'name': 'spectrum name', 'value': 'DLPQGFSALEPLVDLPIGIN[HexNac]ITR/2'}, {'accession': 'MS:1000888', 'name': 'unmodified peptide sequence', 'value': 'DLPQGFSALEPLVDLPIGINITR'}], 'm/z array': ([120.0807, 121.0838, 122.0601, 122.1212, 124.0394, 125.0707, 126.0549, 127.0389, 127.0582, 128.0422, 129.0658, 130.0976, 131.0815, 135.8235, 138.0549, 138.1278, 139.0524, 139.0584, 139.0868, 140.0595, 140.0704, 141.0658, 141.1022, 143.1179, 144.0655, 144.0759, 145.0495, 145.0687, 146.0529, 150.0551, 155.081, 155.118, 156.0768, 156.102, 157.061, 157.1083, 157.1336, 158.0924, 159.0765, 162.2362, 163.0602, 164.0636, 166.0863, 166.1227, 167.0816, 168.0657, 169.0611, 169.0691, 169.0967, 170.093, 171.113, 173.0446, 173.4353, 175.1191, 176.1224, 177.1024, 181.0972, 183.1131, 183.1493, 184.0968, 184.1529, 185.0927, 185.1032, 185.1285, 185.1566, 185.165, 186.0762, 186.0872, 187.0716, 187.0797, 193.1338, 196.0713, 197.1282, 198.1238, 199.1084, 200.1394, 201.1236, 202.1273, 203.1032, 203.1141, 204.0869, 205.0901, 205.0984, 206.0911, 207.1129, 208.1082, 209.0924, 211.1443, 212.1479, 213.16, 215.1033, 215.1395, 217.0974, 220.1089, 226.1189, 227.1027, 227.1223, 227.1397, 228.1346, 228.1707, 229.1185, 232.1404, 233.1294, 235.1077, 238.119, 239.1398, 239.1754, 240.1351, 241.13, 243.1131, 243.1341, 243.1461, 248.1039, 250.1183, 251.2113, 254.1502, 255.1459, 259.1405, 260.1425, 261.1241, 265.1298, 266.1138, 266.1861, 268.166, 269.1694, 272.1609, 273.1615, 274.1191, 275.1212, 276.1671, 277.1699, 277.6447, 278.1138, 280.1663, 282.1813, 282.2179, 283.1406, 284.1439, 284.199, 285.1564, 286.4622, 287.1509, 288.1349, 289.1381, 292.1297, 294.1458, 294.1817, 295.1401, 295.1841, 297.1819, 300.1359, 300.1671, 301.1522, 301.17, 303.1704, 305.161, 306.1455, 307.1406, 308.1611, 309.1446, 310.2132, 311.172, 311.2132, 312.1562, 313.2141, 314.1719, 315.1458, 316.1298, 317.1618, 318.108, 320.1246, 320.1602, 321.1924, 322.1778, 323.1606, 325.1882, 326.1718, 328.1875, 329.1842, 333.1562, 335.1355, 335.172, 337.1514, 337.1875, 338.1915, 339.2031, 340.1872, 341.1453, 341.183, 341.2189, 343.1986, 345.1566, 346.159, 349.1885, 351.165, 351.2035, 352.162, 353.219, 
353.2553, 354.2156, 354.2592, 355.1988, 358.1399, 358.1622, 360.1917, 361.1499, 363.167, 365.1835, 366.1403, 366.1783, 368.1945, 369.1777, 370.1731, 370.2449, 371.2398, 372.2247, 373.1509, 374.2083, 375.1666, 379.1997, 379.2347, 381.1396, 381.2504, 382.2097, 382.2528, 383.1935, 384.1667, 385.1517, 385.1878, 386.1553, 386.1907, 389.2517, 390.2537, 391.2, 393.2504, 394.173, 394.2514, 395.1725, 396.2228, 397.2444, 398.1682, 398.2409, 399.1696, 399.236, 401.2041, 402.1808, 402.2143, 403.162, 403.2175, 404.22, 406.1737, 407.2308, 407.2641, 408.2263, 411.2611, 413.1825, 413.2116, 420.1881, 421.2455, 423.1993, 424.2025, 424.2552, 425.2404, 426.2432, 428.1939, 430.175, 430.2094, 430.2425, 431.2124, 436.2216, 439.2569, 447.2365, 448.2212, 448.2558, 450.2716, 453.2814, 454.2064, 455.2062, 456.1888, 456.2213, 457.1917, 458.2393, 460.1848, 465.2458, 466.3383, 468.2816, 469.2417, 471.2361, 472.2187, 473.2144, 474.2006, 475.2007, 476.2134, 476.2513, 478.2671, 479.2711, 481.2779, 482.2049, 482.2645, 483.2065, 483.2565, 486.2681, 489.2454, 490.2477, 493.2432, 493.3098, 494.3347, 495.2936, 495.3336, 496.2788, 497.2792, 498.275, 499.2307, 499.2885, 500.2167, 503.2943, 510.3297, 511.2516, 511.3279, 512.2547, 513.2472, 514.2614, 514.6443, 515.299, 517.2418, 518.2451, 519.2954, 522.2933, 523.2952, 525.2454, 527.223, 534.2673, 536.2747, 537.2743, 538.326, 539.3273, 541.2422, 542.2728, 543.2557, 545.2374, 546.2383, 553.2421, 554.241, 554.2872, 560.2838, 561.2878, 569.3062, 570.2684, 570.3147, 571.2578, 578.3295, 580.3793, 585.3024, 586.3311, 587.284, 588.2797, 589.283, 591.352, 592.3513, 595.2974, 595.348, 605.2987, 608.3782, 609.3743, 612.3184, 613.2952, 630.3265, 631.3271, 632.27, 632.3791, 635.379, 639.3464, 640.3002, 651.3082, 655.3589, 656.3406, 656.3829, 658.3204, 659.325, 666.3256, 673.3701, 674.3734, 675.3763, 680.4368, 683.3511, 684.3432, 689.3477, 691.4151, 692.4018, 693.3339, 698.3795, 699.4156, 701.3635, 702.3649, 703.3084, 706.3743, 707.3776, 708.4289, 709.4282, 721.3949, 727.4354, 730.4598, 745.351, 756.4364, 757.4412, 769.4483, 770.3829, 785.3878, 786.4808, 788.3944, 795.3701, 798.3801, 802.4148, 805.4865, 806.4776, 812.3951, 813.3889, 814.3867, 816.3902, 817.3939, 819.4563, 820.4633, 830.4058, 831.4091, 848.5067, 849.4831, 850.4828, 858.4691, 866.512, 867.5145, 869.5193, 876.4803, 877.4835, 878.4819, 883.5378, 884.5388, 892.4265, 927.4429, 929.475, 945.4327, 948.5568, 949.5511, 966.5744, 967.5771, 968.5767, 971.5583, 979.5936, 980.5997, 989.5649, 990.554, 991.5705, 996.6207, 997.622, 998.6257, 1022.5349, 1023.5327, 1040.5256, 1041.5275, 1042.5845, 1050.595, 1051.589, 1058.5154, 1059.5378, 1068.6058, 1069.6024, 1070.6089, 1079.5543, 1079.66, 1080.6621, 1081.6632, 1086.6161, 1087.6189, 1088.6221, 1095.6342, 1111.644, 1112.6522, 1181.6898, 1182.6868, 1192.6384, 1193.6614, 1194.687, 1195.6874, 1199.7003, 1200.7036, 1201.7045, 1271.6893, 1289.6963, 1290.6954, 1291.6979, 1293.7466, 1296.7097, 1297.7087, 1298.6992, 1314.7263, 1315.7263, 1316.7278, 1402.7793, 1403.8024, 1404.8221, 1413.7937, 1414.7994, 1420.8528, 1421.8392, 1451.7468, 1503.89, 1504.8904, 1505.8995, 1528.8531, 1532.8982, 1605.9242, 1606.922, 1623.9305, 1624.9381, 1625.9446, 1626.9426, 1735.6211, 1752.9758, 1753.9739, 1898.9847]), 'intensity array': ([41966.6758, 2547.6956, 3291.5342, 2838.4585, 4198.6621, 2980.3152, 255955.7031, 259554.2812, 15788.3789, 15573.1006, 4178.9922, 5410.9072, 5616.8442, 2474.366, 954771.875, 10152.6621, 5554.1558, 63132.4688, 6978.6929, 3852.3772, 6102.2876, 6130.3369, 7675.2935, 14993.0332, 108239.8047, 
6811.1016, 199574.7812, 4911.7881, 13389.499, 6146.4014, 2646.5579, 3048.3428, 2869.1113, 5208.4102, 5745.9106, 4367.8789, 12342.4629, 23719.2148, 12862.9375, 2557.7485, 198537.0938, 13784.9414, 3543.4077, 4131.563, 31193.0723, 224910.25, 8057.98, 14856.0166, 2870.9648, 4401.5791, 9193.2881, 3348.6216, 14712.9502, 87049.7266, 7469.748, 15210.1143, 9361.8613, 13005.0381, 233007.3594, 6379.459, 17465.3633, 4546.292, 3519.7861, 12858.0059, 4718.2969, 18815.377, 145815.4375, 21896.3047, 3676.7759, 11890.6113, 10009.0488, 3699.269, 4043.9946, 65593.2344, 4878.5562, 12677.7168, 103776.2891, 6596.2896, 3318.2097, 6772.8564, 351681.125, 18734.9785, 10957.293, 3510.2415, 7858.1919, 6179.2671, 13985.8643, 173662.8438, 20287.5, 8688.9844, 8498.873, 8903.2383, 19180.8867, 3665.1787, 335366.8125, 7017.2178, 28342.6836, 4865.4375, 18790.5293, 4750.0708, 25336.3691, 3203.4902, 4257.25, 9891.249, 9430.8369, 5323.1807, 3810.5613, 4382.1997, 7045.4399, 4381.0942, 24189.3027, 8441.8184, 4532.8257, 4196.2856, 4110.918, 8598.3818, 6921.2065, 39098.4648, 4789.5303, 5560.521, 9069.1211, 18551.5332, 11671.959, 75855.1562, 6522.418, 16535.8887, 3701.9485, 35926.0859, 3863.2244, 32059.7148, 5819.3403, 3210.5969, 16217.5137, 17247.084, 3868.7102, 5855.6655, 272802.7812, 27620.3594, 4390.2866, 24058.0742, 3318.6807, 9631.8984, 28741.832, 6880.3589, 19617.8301, 6861.2788, 22676.3984, 9000.6592, 4677.1577, 3663.7769, 7423.7568, 64958.9453, 4355.772, 6121.9727, 4432.9341, 14568.1914, 8590.666, 9882.8047, 8349.0869, 8193.5986, 32859.0859, 14244.7568, 5366.3271, 8436.2861, 3541.928, 8114.6763, 11038.0684, 13238.2871, 9012.165, 4139.0894, 8639.3105, 3873.3665, 4799.3062, 3581.249, 6767.1538, 3221.2576, 34234.8242, 27701.3027, 12575.6621, 22205.0137, 12237.8467, 5908.9106, 9947.6084, 24797.748, 4669.2256, 4571.4717, 14177.3848, 6805.0381, 4183.0161, 3842.967, 6658.7861, 36391.8672, 5175.6484, 8281.4512, 6164.1709, 6762.8203, 11843.6836, 7930.707, 41806.7734, 4367.5952, 6773.2051, 4702.3066, 5567.2993, 4455.4995, 4444.3325, 5055.1304, 18162.2148, 4480.1519, 15342.1143, 11285.541, 8318.6074, 10304.4072, 5997.8765, 7593.6689, 4187.0688, 10602.7109, 3672.6799, 8320.6348, 5356.5142, 3662.1902, 11980.7168, 4636.2578, 41726.2422, 13200.499, 8885.6016, 8894.1211, 4967.2891, 29418.1074, 32746.0078, 6112.438, 7184.1636, 36919.9492, 5196.9824, 5471.1787, 12881.5703, 10838.377, 5238.5288, 5155.4321, 6150.2373, 4111.8496, 20762.8535, 19288.4609, 4497.6348, 11436.6729, 6415.1431, 9214.043, 155290.5, 14550.5098, 25952.8242, 4105.3394, 7406.4492, 8644.6816, 4586.876, 3843.9878, 7114.5103, 19891.123, 4242.667, 4844.9673, 12831.1318, 44220.1445, 7491.939, 4230.2671, 160216.5781, 35397.793, 10992.1924, 9463.6084, 87356.7891, 4254.9961, 25704.248, 7932.1284, 10517.7539, 5733.0195, 8632.5596, 10175.666, 36879.6055, 5204.2793, 7365.5513, 5045.0781, 24276.1172, 7509.6475, 4975.8628, 6691.5698, 3877.4844, 4361.6406, 6249.6157, 4908.083, 18014.8926, 8978.2373, 6179.6362, 8305.2979, 11382.0703, 4022.8655, 4265.6592, 10889.9678, 51238.4102, 12708.8779, 7461.2456, 21825.8438, 3999.5769, 4827.0664, 7533.9624, 23269.334, 11600.8018, 4762.519, 5106.3667, 4442.5024, 7032.0605, 25456.2227, 5871.6138, 17477.4062, 8218.1289, 4053.5696, 32143.2871, 7449.3823, 8398.5703, 9791.9453, 25406.2539, 11674.1387, 5712.502, 4139.7842, 4401.1045, 7204.8188, 3954.5417, 6161.9053, 32005.7363, 6428.564, 5489.2305, 10636.6445, 4749.8843, 8948.25, 4526.2495, 9052.9131, 14222.7773, 8232.1895, 20718.2891, 5464.8374, 8501.5361, 17142.1934, 8471.3633, 16037.1406, 4146.5811, 
15923.6621, 4934.189, 8793.4043, 34129.1211, 12574.1914, 12152.124, 47545.5664, 4292.9888, 15955.6084, 9993.1094, 6893.1782, 8311.6094, 21146.418, 9047.8076, 39483.2227, 13060.46, 12580.04, 4497.2866, 3737.1768, 5266.8677, 8785.2305, 28534.9453, 10757.5723, 7430.501, 7050.3403, 17575.3848, 4611.1118, 5129.0845, 4341.7598, 10760.4297, 10225.1807, 4679.0171, 8483.8486, 9013.8955, 11730.4531, 3790.2556, 12612.9414, 4082.8838, 7504.1924, 35896.1445, 16693.1152, 5017.0947, 5207.6147, 12085.1699, 14201.3936, 20826.8301, 7449.6035, 8584.9268, 4789.8286, 6915.6299, 5846.1694, 32315.543, 15269.6934, 8256.1914, 32809.0898, 11087.9678, 4199.9697, 6494.6421, 4413.3452, 4201.7617, 3606.0955, 5441.686, 23864.4434, 9459.0645, 4164.3262, 4590.3423, 4342.3149, 6736.8931, 8654.7998, 9649.4893, 10202.7041, 10856.6143, 9960.1367, 5911.1245, 15556.0107, 11216.333, 4908.4263, 15296.3115, 4665.8364, 11577.4492, 4889.543, 44902.1758, 23394.2539, 4861.1089, 11224.9121, 5479.5527, 9040.8555, 41152.7656, 13091.8457, 6072.9536, 70285.5312, 27767.627, 5591.4673, 40979.4375, 21223.6445, 5186.0054, 4497.604, 5784.5356, 6357.8408, 4274.5059, 9011.8428, 77925.8594, 37771.168, 6407.4629, 5945.9824, 20595.2656, 10457.5928, 18185.3223, 6212.7734, 5226.3213, 40318.8164, 21220.1074, 6534.4219, 5595.3613, 4403.0303, 8463.5703, 4463.9971, 6311.5747, 5473.1221, 5640.0103, 5931.4033, 4443.1938, 31420.2207, 19907.5234, 6193.0039, 9792.1543, 56711.4766, 29846.9121, 8117.728, 175281.8125, 99740.7891, 24018.1094, 3852.635, 11292.21, 6774.5361, 21905.0859, 16468.5293, 5227.1997, 5561.5205, 14747.0723, 6571.646, 139674.8438, 67535.0156, 20008.0254, 4196.876, 28087.0918, 17040.2598, 7583.5469, 4520.9663, 5070.3828, 5462.8179, 5022.8677, 36560.4102, 24047.0879, 8656.3838, 9155.3828, 10332.3398, 5795.52, 11947.2334, 9987.9014, 16823.0645, 6565.8887, 8523.4277, 25558.1504, 13748.7529, 6460.7681, 4543.1084, 5551.3354, 6699.9346, 5086.3892, 46855.082, 31373.1426, 12940.0234, 7297.4478, 4090.177, 7064.5483, 3922.6812, 5938.6528])}
python
from exemplo1 import Soma, Dividir

Soma(2, 2)
Dividir(2, 0)  # note: division by zero -- raises ZeroDivisionError unless Dividir guards against it
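# `exemplo1` itself is not part of this dump. A minimal sketch of what it
# might define, assuming Soma and Dividir are plain arithmetic helpers
# (hypothetical implementation):
def Soma(a, b):
    """Return the sum of a and b."""
    return a + b


def Dividir(a, b):
    """Return a / b; Dividir(2, 0) above would raise ZeroDivisionError."""
    return a / b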
python
# File: main.py
# Author: Lorè Francesco
# A simplified, proof-of-concept application for managing an electronic
# table-reservation book for a restaurant on the evening of a specific day.
# The user interface and business logic are written in Python, with a MySQL
# database management system to store the data.
# Python v. 3.9.1

import mysql.connector as ms  # mysql-connector-python v. 8.0.23
import pandas as pd  # pandas v. 1.2.0


# All functions live inside main(), which runs automatically when the file is executed
def main():
    # connection to the MySQL database (fill in your own credentials)
    dbconn = ms.connect(
        host="<hostname>",
        user="<username>",
        database="<database_name>"
    )
    # create the cursor
    dbcur = dbconn.cursor(buffered=True)

    # Execute a query: when it starts with SELECT or WITH, return a DataFrame;
    # otherwise commit the result (a DELETE, UPDATE, or INSERT operation).
    def queryEx(query, cur=dbcur, con=dbconn):
        cur.execute(query)
        if "SELECT" == query[:6] or "WITH" == query[:4]:
            df = pd.DataFrame(dbcur, columns=cur.column_names)
            return df
        else:
            con.commit()

    # Decide which command to run: split the input into a list, then look up
    # the first element in the dictionary mapping each command code to its
    # function (the dictionary "listIns" is built after all functions are defined).
    def Instruction(inp):
        inpList = inp.split()
        if inpList[0] in listIns.keys():
            # call the matching function; the actual parameters are the
            # elements of inpList except the first, which is the command code
            return listIns.get(inpList[0])(inpList[1:])
        else:
            return "Error"

    # register a table reservation
    def funR(inp):
        # exactly 3 arguments are required, otherwise return Error
        if len(inp) == 3:
            # validate the phone number (length 10, all characters digits)
            if len(inp[1]) == 10:
                if inp[1].isdigit() is False:
                    return "Error"
            else:
                return "Error"
            # the number of guests must be a digit
            if inp[0].isdigit() is False:
                return "Error"
            # the restaurant cannot book a table for more than 6 guests
            inp[0] = int(inp[0])
            if inp[0] > 6:
                return "Error"
            # a reservation with the same phone number or the same name is an error
            query = 'SELECT name ' \
                    'FROM reservation ' \
                    'WHERE name = "{0}" OR phone_number = "{1}"'.format(inp[2], inp[1])
            res = queryEx(query)
            if res.shape[0] > 0:
                return "Error"
            else:
                # Look for an available table with exactly as many seats as
                # guests; failing that, look for a table with more seats.
                # If no table is available return Error, otherwise run 3 queries.
                occ = False
                for i in range(0, 7 - inp[0]):
                    query = 'SELECT count(dt.ID_table) ' \
                            'FROM dining_table dt ' \
                            'WHERE dt.seats = "{0}" ' \
                            '  AND dt.ID_table not in (' \
                            '      SELECT r.ID_table ' \
                            '      FROM reservation r' \
                            ')'.format(inp[0] + i)
                    res = queryEx(query)
                    if res.values[0, 0] > 0:
                        # query 1: get the ID_table of the selected table
                        query = 'SELECT dt.ID_table ' \
                                'FROM dining_table dt ' \
                                'WHERE dt.seats = "{0}" ' \
                                '  AND dt.ID_table not in (' \
                                '      SELECT r.ID_table ' \
                                '      FROM reservation r' \
                                '  )'.format(inp[0] + i)
                        ID_table = queryEx(query).values[0, 0]
                        # query 2: get the max ID_res in the reservation table;
                        # with no bookings set the new ID_res to 1, otherwise
                        # to max(ID_res) + 1, so the ID is always unique
                        query = 'SELECT max(ID_res) ' \
                                'FROM reservation'
                        ID_res = queryEx(query).values[0, 0]
                        if ID_res is None:
                            ID_res = 1
                        else:
                            ID_res = ID_res + 1
                        # query 3: insert the data into the reservation table
                        query = 'INSERT INTO reservation values ' \
                                '({0}, {1}, {2}, "{3}", "{4}")'.format(ID_res, ID_table,
                                                                       inp[0], inp[1], inp[2])
                        queryEx(query)
                        occ = True
                        break
                # if occ is False no available table was found: return Error;
                # otherwise return an empty string
                if occ is False:
                    return "Error"
                else:
                    return ""
        else:
            return "Error"

    # select information about a reservation
    def funS(inp):
        # exactly 1 argument (beyond the operation code) is required
        if len(inp) == 1:
            # if the argument looks like a phone number, query by phone
            # number; otherwise query by name
            if inp[0].isdigit() is True and len(inp[0]) == 10:
                query = 'SELECT ID_table, guest_number, seats, phone_number, name ' \
                        'FROM reservation NATURAL JOIN dining_table ' \
                        'WHERE phone_number = "{0}"'.format(inp[0])
            else:
                query = 'SELECT ID_table, guest_number, seats, phone_number, name ' \
                        'FROM reservation NATURAL JOIN dining_table ' \
                        'WHERE name = "{0}"'.format(inp[0])
            res = queryEx(query)
            # with no result return 'No result(s)', otherwise return the result
            if res.shape[0] < 1:
                return "No result(s)"
            else:
                # Every element of the selected tuple is converted to a string,
                # then joined with spaces to build the output
                # (list() converts the pandas Series into a list)
                return " ".join(map(str, list(res.iloc[0])))

    # Same lookup as funS, but removing the reservation matched by phone
    # number or name; with no result, or more than 1 argument, return Error.
    def funC(inp):
        if len(inp) == 1:
            if inp[0].isdigit() is True and len(inp[0]) == 10:
                query = 'SELECT ID_res ' \
                        'FROM reservation ' \
                        'WHERE phone_number = "{0}"'.format(inp[0])
                res = queryEx(query)
                if res.shape[0] < 1:
                    return "Error"
                else:
                    query = 'DELETE FROM reservation ' \
                            'WHERE phone_number = "{0}"'.format(inp[0])
                    queryEx(query)
                    return ""
            else:
                query = 'SELECT ID_res ' \
                        'FROM reservation ' \
                        'WHERE name = "{0}"'.format(inp[0])
                res = queryEx(query)
                if res.shape[0] < 1:
                    return "Error"
                else:
                    query = 'DELETE FROM reservation ' \
                            'WHERE name = "{0}"'.format(inp[0])
                    queryEx(query)
                    return ""
        else:
            return "Error"

    # return all the required information about every reservation;
    # with no results return "No result(s)"
    def funL(inp):
        if len(inp) == 0:
            query = 'SELECT ID_table, guest_number, seats, phone_number, name ' \
                    'FROM reservation NATURAL JOIN dining_table'
            res = queryEx(query)
            if res.shape[0] < 1:
                return "No result(s)"
            else:
                # there may be several results, so print all but the last in a loop
                for i in range(res.shape[0] - 1):
                    print(" ".join(map(str, list(res.iloc[i]))))
                # the output format is the same as in funS()
                return " ".join(map(str, list(res.iloc[res.shape[0] - 1])))

    # List all the unreserved tables, one per line;
    # inp must be empty, otherwise return Error.
    def funU(inp):
        if len(inp) == 0:
            query = "SELECT dt.ID_table, dt.seats " \
                    "FROM dining_table dt " \
                    "WHERE dt.ID_table not in (" \
                    "    SELECT ID_table" \
                    "    FROM reservation r" \
                    ")"
            res = queryEx(query)
            if res.shape[0] < 1:
                # with no results return this string
                return "No result(s)"
            else:
                for i in range(res.shape[0] - 1):
                    print(" ".join(map(str, list(res.iloc[i]))))
                # this is the output of the function
                return " ".join(map(str, list(res.iloc[res.shape[0] - 1])))
        else:
            return "Error"

    # Output the number of reserved tables by counting the bookings in the
    # reservation table. With one argument, return the number of tables
    # reserved for that number of guests (checking the argument is a digit).
    def funNT(inp):
        if len(inp) == 0:  # 0 arguments
            query = "SELECT count(ID_table) " \
                    "FROM reservation"
            res = queryEx(query)
            return res.values[0, 0]
        elif len(inp) == 1:  # 1 argument
            if inp[0].isdecimal() is True:
                # the inner WHERE clause keeps only the rows whose number of
                # guests equals the argument
                query = "SELECT count(t.ID_table) " \
                        "FROM (" \
                        "    SELECT r.ID_table " \
                        "    FROM reservation r " \
                        "    WHERE r.guest_number = {0}" \
                        ") as t".format(inp[0])
                res = queryEx(query)
                return res.values[0, 0]
            else:
                return "Error"
        else:
            return "Error"

    # output the overall number of booked guests; any argument is an Error
    def funNG(inp):
        if len(inp) == 0:
            query = "SELECT sum(guest_number) " \
                    "FROM reservation"
            res = queryEx(query)
            if res.values[0, 0] is None:
                res.values[0, 0] = 0
            return res.values[0, 0]
        else:
            return "Error"

    # Output the overall number of unreserved seats:
    # run 2 queries and return the difference between their results.
    def funNU(inp):
        if len(inp) == 0:
            query = "SELECT sum(seats) " \
                    "FROM dining_table"     # total number of seats
            query1 = "SELECT sum(guest_number) " \
                     "FROM reservation"     # total number of guests
            res = queryEx(query)
            res1 = queryEx(query1)
            if res1.values[0, 0] is None:
                # with no reservations use 0, so the difference between the
                # number of seats and the number of guests still works
                res1.values[0, 0] = 0
            return res.values[0, 0] - res1.values[0, 0]
        else:
            return "Error"

    # show the information about the table(s) with the greatest number of
    # unreserved seats, one per line
    def funGU(inp):
        if len(inp) == 0:  # again, any argument is an Error
            query = "WITH t as (" \
                    "    SELECT ID_table, guest_number, seats, (seats-guest_number) unreserved_seats " \
                    "    FROM reservation NATURAL JOIN dining_table " \
                    ") " \
                    "SELECT t.ID_table, t.guest_number, t.seats " \
                    "FROM t " \
                    "WHERE t.unreserved_seats = ( " \
                    "    SELECT max(t1.unreserved_seats) " \
                    "    FROM t t1 " \
                    ")"  # this query needs a WITH clause
            res = queryEx(query)
            if res.shape[0] < 1:
                return "No result(s)"  # with no results return this string
            else:
                # print the results one per line
                for i in range(res.shape[0] - 1):
                    print(" ".join(map(str, list(res.iloc[i]))))
                return " ".join(map(str, list(res.iloc[res.shape[0] - 1])))
        else:
            return "Error"

    # Same output as funGU, but tables with 0 guests are excluded;
    # the structure is the same as funGU, only the query differs.
    def funGR(inp):
        if len(inp) == 0:
            query = "WITH t as (" \
                    "    SELECT ID_table, guest_number, seats, (seats-guest_number) unreserved_seats " \
                    "    FROM (" \
                    "        SELECT *" \
                    "        FROM reservation" \
                    "        WHERE guest_number <> 0" \
                    "    ) as r NATURAL JOIN dining_table" \
                    ") " \
                    "SELECT t.ID_table, t.guest_number, t.seats " \
                    "FROM t " \
                    "WHERE t.unreserved_seats = ( " \
                    "    SELECT max(t1.unreserved_seats) " \
                    "    FROM t t1 " \
                    ")"
            res = queryEx(query)
            if res.shape[0] < 1:
                return "No result(s)"
            else:
                for i in range(res.shape[0] - 1):
                    print(" ".join(map(str, list(res.iloc[i]))))
                return " ".join(map(str, list(res.iloc[res.shape[0] - 1])))
        else:
            return "Error"

    # Every command code is mapped to its function, so the right function can
    # be selected from the code the user enters.
    listIns = {"R": funR, "S": funS, "C": funC, "L": funL, "U": funU,
               "NT": funNT, "NG": funNG, "NU": funNU, "GU": funGU, "GR": funGR}

    inp = input("> ")
    while inp != "X":  # entering "X" closes the program
        printRes = Instruction(inp)
        if printRes != "":
            print(printRes)
        inp = input("> ")

    # on shutdown: COMMIT WORK, close the cursor, close the connection
    dbconn.commit()
    dbcur.close()
    dbconn.close()


if __name__ == '__main__':
    main()
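# Example session using the command codes registered in listIns -- purely
# illustrative; the table IDs and seat counts depend on the rows in the
# dining_table relation of your database:
#   > R 4 0123456789 Rossi     (reserve a table for 4 guests)
#   > S Rossi                  (prints e.g. "3 4 4 0123456789 Rossi")
#   > NT                       (number of reserved tables, e.g. 1)
#   > NU                       (number of unreserved seats overall)
#   > C 0123456789             (cancel the reservation by phone number)
#   > X                        (quit)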
python
import os
from setuptools import setup

root_dir_path = os.path.dirname(os.path.abspath(__file__))

try:
    import pypandoc
    long_description = pypandoc.convert("README.md", "rst")
except (IOError, ImportError):
    long_description = open(os.path.join(root_dir_path, "README.md")).read()

with open(os.path.join(root_dir_path, "requirements.txt")) as requirements_file:
    requirements = requirements_file.readlines()

setup(
    name="atecina",
    version="0.1",
    author="Diego J. Romero López",
    author_email="[email protected]",
    description="A simple image converter to art.",
    long_description=long_description,
    classifiers=[
        "Development Status :: 3 - Alpha",
        'License :: OSI Approved :: MIT License',
    ],
    install_requires=requirements,
    license="MIT",
    keywords="images pillow svg converter",
    url='https://github.com/diegojromerolopez/atecina',
    packages=["converters"],
    package_dir={"converters": "src/converters"},
    data_files=[],
    include_package_data=True,
    scripts=[
        "bin/random_circler.py",
        "bin/mount_mongofs.py"
    ]
)
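# Typical installation of the package defined above (illustrative):
#   pip install .        # regular install
#   pip install -e .     # editable/development install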
python
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Feb 25 18:11:31 2019 @author: franchesoni """ import numpy as np import os import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import from matplotlib import cm from matplotlib import rc, font_manager from functions import amp, stridency #%% res_dir = 'results' filenames = os.listdir(res_dir) acronyms = set([filename[0:2] for filename in filenames if filename[-3::]!='png']) orders = [] # places x 43 predictions = [] # places x 43 x 24 x 44120 RMSDs_loc = [] # places x 43 x 24 for acronym in acronyms: path = res_dir + '/' + acronym + '_' orders.append(np.load(path + 'orders')) if orders[-1] != orders[0]: raise ValueError('inconsistent orders') predictions.append(np.array(np.load(path + 'predictions')).squeeze()) RMSDs_loc.append(np.array(np.load(path + 'RMSDs')).squeeze()) print('-----------------------------') print(acronym) print('-----------------------------') print() RMSDs_loc = np.array(RMSDs_loc) orders = orders[-1] RMSDs = np.mean(RMSDs_loc, axis=0).T #%% RMSDs_centered = RMSDs - np.mean(RMSDs, axis=1).reshape(RMSDs.shape[0], 1) RMSDs_ranked = np.argsort(RMSDs, axis=1) RMSDs_centered_avg = np.mean(RMSDs_centered, axis=0) RMSDs_ranked_avg = np.mean(RMSDs_ranked, axis=0) RMSDs_min = np.mean(np.min(RMSDs_loc, axis=1), axis=0) #amplitudes = np.mean(np.array([amp(prediction) for prediction in predictions]),axis=0).T #stridencies = np.mean(np.array([stridency(prediction) for prediction in predictions]), axis=0).T #%% lts = [0, 8, 23] sizeOfFont = 10 fontProperties = {'weight' : 'normal', 'size' : sizeOfFont} colors = [(0.5, 0.5, 0, 0.5), (0, 0.5, 0, 0.5), (0.5, 0, 0, 0.5), (0, 0, 0, 0.5)] LT = 24 plt.close('all') plt.rcParams.update({'font.size': 15}) fig = plt.figure() ax = plt.gca() plt.plot(np.arange(1, LT+1)*10, RMSDs_min*100, '-o', linewidth=5) plt.plot(np.arange(1, LT+1)*10, RMSDs[:, 4]*100) ax.set_yticks([20, 25, 30, 35, 40]) ax.set_ylim([18, 40]) ax.set_ylabel('Relative RMS deviation (%)') ax.set_xlabel('Lead Time (min)') ax.set_xticks([0, 50, 100, 150, 200]) ax.set_yticks([20, 25, 30, 35, 40]) ax.set_xticklabels(ax.get_xticks(), fontProperties) ax.set_yticklabels(ax.get_yticks(), fontProperties) plt.legend(['Optimum (p, q)', 'order = (5, 0)']) # PLOTEAR PARCIAL A TIEMPOS lts lts = [0, 8, 23] sizeOfFont = 10 fontProperties = {'weight' : 'normal', 'size' : sizeOfFont} colors = [(0.5, 0.5, 0, 0.5), (0, 0.5, 0, 0.5), (0.5, 0, 0, 0.5), (0, 0, 0, 0.5)] #plt.close('all') plt.rcParams.update({'font.size': 15}) fig = plt.figure() ax = fig.add_subplot(1, 1, 1) #plt.title('Average %RMSD') a = plt.gca() for i, order in zip(range(len(orders)-1, -1, -1), orders[::-1]): for i, order in enumerate(orders): ax.plot(np.arange(1, LT+1)*10, RMSDs[:, i] * 100, color=colors[order[1]], label='MA order = {}'.format(order[1])) display = (1, 15, 29, 42) handles, labels = ax.get_legend_handles_labels() ax.legend([handle for i,handle in enumerate(handles) if i in display], [label for i,label in enumerate(labels) if i in display], loc = 'best') #ax.set_ylim(0.15, 0.45) ax.set_ylabel('Relative RMS deviation (%)') ax.set_xlabel('Lead Time (min)') ax.set_xticks([0, 50, 100, 150, 200]) ax.set_yticks([20, 25, 30, 35, 40]) a.set_xticklabels(a.get_xticks(), fontProperties) a.set_yticklabels(a.get_yticks(), fontProperties) plt.savefig('bf_orders.png') plt.rcParams.update({'font.size': 15}) columns = np.arange(11) # AR terms rows = np.arange(4) table1 = np.empty((4, 11)) table2 = 
np.empty((4, 11)) r, c = np.meshgrid(rows, columns) fig = plt.figure(figsize=plt.figaspect(0.3)) for index, lt in enumerate(lts): # Crear tabla for i in rows: for j in columns: table1[i, j] = np.nan ls = [a for a, x in enumerate(orders) if x[0] == j and x[1]==i] if len(ls) == 1: table1[i, j] = RMSDs[lt, ls[0]] z1 = table1.T * 100 ax = fig.add_subplot(1, 3, index+1, projection='3d') plt.tick_params(labelsize=8) norm = mpl.colors.Normalize(vmin=np.amin(z1[1::, :]), vmax=np.amax(z1[1::, :])) surf = ax.plot_surface(r[1::, :], c[1::, :], z1[1::, :], facecolors=plt.cm.jet(norm(z1[1::, :])), linewidth=1, antialiased=False) surf.set_facecolor((0,0,0,0)) ax.set_title('LT = {}m'.format((lt+1)*10)) ax.set_ylim(10, 0) ax.set_ylabel('AR order') ax.set_xlim(0, 3) ax.set_xlabel('MA order') ax.set_xticks([0, 1, 2, 3]) ax.set_yticks([1, 2, 4, 6, 8, 10]) plt.show() fig.tight_layout() plt.savefig('rmsds.png') fig = plt.figure() # Crear tabla for i in rows: for j in columns: table1[i, j] = np.nan table2[i, j] = np.nan ls = [a for a, x in enumerate(orders) if x[0] == j and x[1]==i] if len(ls) == 1: table1[i, j] = RMSDs_centered_avg[ls[0]] if i + j <= 6: table2[i, j] = RMSDs_centered_avg[ls[0]] z1 = table1.T * 100 z2 = table2.T * 100 #ax = fig.add_subplot(1, 3, index+1, projection='3d') ax = fig.gca(projection='3d') plt.tick_params(labelsize=8) norm = mpl.colors.Normalize(vmin=np.amin(z1[1::, :]), vmax=np.amax(z1[1::, :])) surf = ax.plot_surface(r[1::, :], c[1::, :], z1[1::, :], facecolors=plt.cm.jet(norm(z1[1::, :])), linewidth=1, antialiased=False) surf2 = ax.plot_surface(r[1::, :], c[1::, :], z2[1::, :], facecolors=plt.cm.jet(norm(z2[1::, :])), linewidth=1, antialiased=False) surf.set_facecolor((0,0,0,0)) ax.set_ylim(10, 0) ax.set_ylabel('AR order') ax.set_xlim(0, 3) ax.set_xlabel('MA order') ax.set_xticks([0, 1, 2, 3]) ax.set_yticks([1, 2, 4, 6, 8, 10]) plt.show() fig.tight_layout() plt.savefig('average_anomalies.png') #%% #fig = plt.figure() #fig.suptitle('RMSDs ranked') ## Crear tabla #for i in rows: # for j in columns: # table1[i, j] = np.nan # table2[i, j] = np.nan # ls = [a for a, x in enumerate(orders) if x[0] == j and x[1]==i] # if len(ls) == 1: # table1[i, j] = RMSDs_ranked_avg[ls[0]] # if i + j <= 6: # table2[i, j] = RMSDs_ranked_avg[ls[0]] #z1 = table1.T #z2 = table2.T ##ax = fig.add_subplot(1, 3, index+1, projection='3d') #ax = fig.gca(projection='3d') #plt.tick_params(labelsize=8) #norm = mpl.colors.Normalize(vmin=np.amin(z1[1::, :]), # vmax=np.amax(z1[1::, :])) #surf = ax.plot_surface(r[1::, :], c[1::, :], z1[1::, :], # facecolors=plt.cm.jet(norm(z1[1::, :])), # linewidth=1, antialiased=False) #surf2 = ax.plot_surface(r[1::, :], c[1::, :], z2[1::, :], # facecolors=plt.cm.jet(norm(z2[1::, :])), # linewidth=1, antialiased=False) #surf.set_facecolor((0,0,0,0)) #ax.set_ylim(10, 0) #ax.set_ylabel('AR order') #ax.set_xlim(0, 3) #ax.set_xlabel('MA order') #ax.set_xticks([0, 1, 2, 3]) #plt.show() # # point = (6, 0) # ax.scatter([point[1]], [point[0]], z[point[0], point[1]], # s=300, c='r', marker='.', zorder=10) #
python
import logging
import pytest

from config import NOMICS_API_KEY
from nomics import Nomics


@pytest.fixture
def nomics():
    return Nomics(NOMICS_API_KEY)


def test_get_markets(nomics):
    data = nomics.Markets.get_markets(exchange='binance')
    assert isinstance(data, list)
    assert len(data) > 0


def test_get_market_cap_history(nomics):
    data = nomics.Markets.get_market_cap_history(start="2018-04-14T00:00:00Z")
    assert isinstance(data, list)
    assert len(data) > 0


def test_get_exchange_markets_ticker(nomics):
    data = nomics.Markets.get_exchange_markets_ticker(exchange='binance')
    assert isinstance(data, list)
    assert len(data) > 0
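# The tests above import NOMICS_API_KEY from a local config module that is
# not included in this dump. A minimal config.py might look like this
# (hypothetical; reading the key from the environment is one common choice):
import os

NOMICS_API_KEY = os.environ.get("NOMICS_API_KEY", "")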
python
from abc import ABC, abstractmethod import time import yaml from koala.typing import * from koala import utils from koala.server import rpc_meta def _get_registered_services() -> Dict[str, str]: all_types = rpc_meta.get_all_impl_types() return {i[0]: i[1].__qualname__ for i in all_types} class KoalaConfig(ABC): @property @abstractmethod def port(self) -> int: pass @property @abstractmethod def services(self) -> Dict[str, str]: pass @abstractmethod def parse(self, file_name: str): pass @property @abstractmethod def ttl(self) -> int: pass @property @abstractmethod def address(self) -> str: pass @property @abstractmethod def log_level(self) -> str: pass @property @abstractmethod def log_name(self) -> str: pass @property @abstractmethod def pd_address(self) -> str: pass @property @abstractmethod def private_key(self) -> str: pass @property @abstractmethod def console_log(self) -> bool: pass @property @abstractmethod def start_time(self) -> int: pass @property @abstractmethod def desc(self) -> str: pass @property @abstractmethod def pd_cache_size(self) -> int: pass @property @abstractmethod def fastapi_port(self) -> int: pass class KoalaDefaultConfig(KoalaConfig): def __init__(self) -> None: super(KoalaDefaultConfig, self).__init__() self._ip = "" self._port = 0 self._services: Dict[str, str] = dict() self._desc = "" self._start_time = int(time.time() * 1000) self._ttl = 15 self._log_file_name = "host" self._log_level = "DEBUG" self._pd_address = "" self._private_key = "" self._console_log = True self._pd_cache_size = 10 * 10000 self._fastapi_port = 0 def set_port(self, port: int): self._port = port @property def port(self) -> int: return self._port def set_services(self, services: List[str]): if services is not None and len(services) > 0: self._services.clear() all_types = _get_registered_services() for key in services: if key in all_types: self._services[key] = all_types[key] @property def services(self) -> Dict[str, str]: if len(self._services) == 0: self._services = _get_registered_services() return self._services pass def set_desc(self, desc: str): self._desc = desc @property def desc(self) -> str: return self._desc @property def start_time(self) -> int: return self._start_time def set_ttl(self, ttl: int): self._ttl = ttl @property def ttl(self) -> int: if self._ttl == 0: self._ttl = 15 return self._ttl def set_address(self, ip: str): if ip is not None and len(ip) > 0: self._ip = ip @property def address(self) -> str: if len(self._ip) > 0: return "%s:%d" % (self._ip, self._port) return "%s:%d" % (utils.get_host_ip(), self._port) @property def log_level(self): return self._log_level def set_log_level(self, level: str): self._log_level = level @property def log_name(self) -> str: return self._log_file_name def set_log_name(self, name: str): self._log_file_name = name @property def pd_address(self) -> str: return self._pd_address def set_pd_address(self, address: str): self._pd_address = address def set_private_key(self, key: str): self._private_key = key @property def private_key(self) -> str: return self._private_key @property def console_log(self) -> bool: return self._console_log def disable_console_log(self): self._console_log = False @property def pd_cache_size(self) -> int: return self._pd_cache_size def set_pd_cache_size(self, size: int): self._pd_cache_size = size @property def fastapi_port(self) -> int: return self._fastapi_port def set_fastapi_port(self, port: int): self._fastapi_port = port @classmethod def _load_config(cls, file_name: str) -> dict: return 
cls._load_as_json(file_name)

    @classmethod
    def _load_as_json(cls, file_name: str) -> dict:
        with open(file_name) as file:
            data = file.read()
        if file_name.endswith(".yaml"):
            yaml_config = yaml.full_load(data)
            return yaml_config
        if file_name.endswith(".json"):
            json_config = utils.json_loads(data)
            return json_config
        raise Exception("KoalaDefaultConfig only supports yaml or json config")

    def parse(self, file_name: str):
        server_config = self._load_config(file_name)
        if "port" in server_config:
            self.set_port(int(server_config["port"]))
        else:
            print("port (the listening port) must be configured")
            return
        if "ip" in server_config:
            self.set_address(server_config["ip"])
        if "ttl" in server_config:
            self.set_ttl(int(server_config["ttl"]))
        if "services" in server_config:
            self.set_services(server_config["services"])
        if "log_name" in server_config:
            self.set_log_name(server_config["log_name"])
        else:
            print("log_name (the log file name) must be configured")
            return
        if "log_level" in server_config:
            self.set_log_level(server_config["log_level"])
        if "console_log" in server_config:
            enable = bool(server_config["console_log"])
            if not enable:
                self.disable_console_log()
        if "pd_address" in server_config:
            self.set_pd_address(server_config["pd_address"])
        if "private_key" in server_config:
            self.set_private_key(server_config["private_key"])
        if "pd_cache_size" in server_config:
            self.set_pd_cache_size(int(server_config["pd_cache_size"]))
        if "fastapi" in server_config:
            self.set_fastapi_port(int(server_config["fastapi"]))
        print(server_config)


ConfigType = TypeVar("ConfigType", bound=KoalaConfig)

_config: Optional[KoalaConfig] = None


def get_config() -> KoalaConfig:
    global _config
    if not _config:
        _config = KoalaDefaultConfig()
    return _config


def set_config_impl(config_type: Type[ConfigType]):
    global _config
    _config = config_type()
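# A minimal YAML config that parse() above would accept. The key names come
# from parse(); every value here is made up for illustration:
#
#   port: 8010
#   log_name: host
#   log_level: DEBUG
#   ttl: 15
#   console_log: true
#   pd_address: "127.0.0.1:2379"
#   pd_cache_size: 100000
#   fastapi: 8080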
python
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-06-14 10:02
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('dataset', '0013_motionfile_is_hidden'),
    ]

    operations = [
        migrations.CreateModel(
            name='Dataset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creation_date', models.DateTimeField(auto_now_add=True)),
                ('filename', models.CharField(max_length=255, unique=True)),
                ('nb_motions', models.PositiveIntegerField(default=0)),
                ('nb_annotations', models.PositiveIntegerField(default=0)),
                ('nb_downloads', models.PositiveIntegerField(default=0)),
            ],
        ),
    ]
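# This migration is applied with Django's management command (illustrative):
#   python manage.py migrate dataset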
python
# # PySNMP MIB module UCD-DLMOD-MIB (http://snmplabs.com/pysmi) # ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/UCD-DLMOD-MIB # Produced by pysmi-0.3.4 at Wed May 1 15:28:26 2019 # On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4 # Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15) # OctetString, Integer, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "OctetString", "Integer", "ObjectIdentifier") NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues") ConstraintsIntersection, ValueRangeConstraint, ValueSizeConstraint, SingleValueConstraint, ConstraintsUnion = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ValueSizeConstraint", "SingleValueConstraint", "ConstraintsUnion") NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance") ModuleIdentity, Integer32, NotificationType, Unsigned32, IpAddress, Gauge32, MibIdentifier, iso, TimeTicks, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Counter32, Counter64, Bits = mibBuilder.importSymbols("SNMPv2-SMI", "ModuleIdentity", "Integer32", "NotificationType", "Unsigned32", "IpAddress", "Gauge32", "MibIdentifier", "iso", "TimeTicks", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Counter32", "Counter64", "Bits") DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention") ucdExperimental, = mibBuilder.importSymbols("UCD-SNMP-MIB", "ucdExperimental") ucdDlmodMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 2021, 13, 14)) ucdDlmodMIB.setRevisions(('2000-01-26 00:00', '1999-12-10 00:00',)) if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0): if mibBuilder.loadTexts: ucdDlmodMIB.setRevisionsDescriptions(('Renamed MIB root object', 'SMIv2 version converted from older MIB definitions.',)) if mibBuilder.loadTexts: ucdDlmodMIB.setLastUpdated('200001260000Z') if mibBuilder.loadTexts: ucdDlmodMIB.setOrganization('University of California, Davis') if mibBuilder.loadTexts: ucdDlmodMIB.setContactInfo('This mib is no longer being maintained by the University of California and is now in life-support-mode and being maintained by the net-snmp project. The best place to write for public questions about the net-snmp-coders mailing list at [email protected]. postal: Wes Hardaker P.O. 
Box 382 Davis CA 95617 email: [email protected] ') if mibBuilder.loadTexts: ucdDlmodMIB.setDescription('This file defines the MIB objects for dynamic loadable MIB modules.') dlmodNextIndex = MibScalar((1, 3, 6, 1, 4, 1, 2021, 13, 14, 1), Integer32()).setMaxAccess("readonly") if mibBuilder.loadTexts: dlmodNextIndex.setStatus('current') if mibBuilder.loadTexts: dlmodNextIndex.setDescription('The index number of next appropiate unassigned entry in the dlmodTable.') dlmodTable = MibTable((1, 3, 6, 1, 4, 1, 2021, 13, 14, 2), ) if mibBuilder.loadTexts: dlmodTable.setStatus('current') if mibBuilder.loadTexts: dlmodTable.setDescription('A table of dlmodEntry.') dlmodEntry = MibTableRow((1, 3, 6, 1, 4, 1, 2021, 13, 14, 2, 1), ).setIndexNames((0, "UCD-DLMOD-MIB", "dlmodIndex")) if mibBuilder.loadTexts: dlmodEntry.setStatus('current') if mibBuilder.loadTexts: dlmodEntry.setDescription('The parameters of dynamically loaded MIB module.') dlmodIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 2021, 13, 14, 2, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))) if mibBuilder.loadTexts: dlmodIndex.setStatus('current') if mibBuilder.loadTexts: dlmodIndex.setDescription('An index that uniqely identifies an entry in the dlmodTable.') dlmodName = MibTableColumn((1, 3, 6, 1, 4, 1, 2021, 13, 14, 2, 1, 2), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: dlmodName.setStatus('current') if mibBuilder.loadTexts: dlmodName.setDescription('The module name.') dlmodPath = MibTableColumn((1, 3, 6, 1, 4, 1, 2021, 13, 14, 2, 1, 3), DisplayString()).setMaxAccess("readwrite") if mibBuilder.loadTexts: dlmodPath.setStatus('current') if mibBuilder.loadTexts: dlmodPath.setDescription('The path of the module executable file.') dlmodError = MibTableColumn((1, 3, 6, 1, 4, 1, 2021, 13, 14, 2, 1, 4), DisplayString()).setMaxAccess("readonly") if mibBuilder.loadTexts: dlmodError.setStatus('current') if mibBuilder.loadTexts: dlmodError.setDescription('The last error from dlmod_load_module.') dlmodStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 2021, 13, 14, 2, 1, 5), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6, 7))).clone(namedValues=NamedValues(("loaded", 1), ("unloaded", 2), ("error", 3), ("load", 4), ("unload", 5), ("create", 6), ("delete", 7)))).setMaxAccess("readwrite") if mibBuilder.loadTexts: dlmodStatus.setStatus('current') if mibBuilder.loadTexts: dlmodStatus.setDescription('The current status of the loaded module.') mibBuilder.exportSymbols("UCD-DLMOD-MIB", dlmodPath=dlmodPath, dlmodTable=dlmodTable, dlmodIndex=dlmodIndex, dlmodNextIndex=dlmodNextIndex, dlmodError=dlmodError, dlmodName=dlmodName, dlmodStatus=dlmodStatus, PYSNMP_MODULE_ID=ucdDlmodMIB, dlmodEntry=dlmodEntry, ucdDlmodMIB=ucdDlmodMIB)
python
""" # @Time : 2020/8/28 # @Author : Jimou Chen """ import scrapy from bs4 import BeautifulSoup from testScrapy.items import TestscrapyItem class CommentSpider(scrapy.Spider): name = 'comment_spider' start_urls = ['https://book.douban.com/subject/35092383/annotation'] custom_settings = { "USER_AGENT": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.125 Safari/537.36', } page_num = 1 def parse(self, response, **kwargs): soup = BeautifulSoup(response.body, 'html.parser') nodes = soup.find_all('div', {'class': 'short'}) print('======================{}======================'.format(self.page_num)) for node in nodes: comment = node.find('span').text # 保存 item = TestscrapyItem(page_num = self.page_num, comment=comment) yield item # print(comment, end='\n\n') self.page_num += 1 # 其他页链接 num = 10 * self.page_num if self.page_num <= 28: url = 'https://book.douban.com/subject/35092383/annotation?sort=rank&start=' + str(num) yield scrapy.Request(url, callback=self.parse) # # print('保存完毕')
python
x = 20
print(x)
python
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class AlipayPayApplepayTransactionauthtokenCreateModel(object): def __init__(self): self._device_identifier = None self._provisioning_bundle_identifier = None self._provisioning_bundle_identifiers = None self._push_token = None self._reference_identifier = None @property def device_identifier(self): return self._device_identifier @device_identifier.setter def device_identifier(self, value): self._device_identifier = value @property def provisioning_bundle_identifier(self): return self._provisioning_bundle_identifier @provisioning_bundle_identifier.setter def provisioning_bundle_identifier(self, value): self._provisioning_bundle_identifier = value @property def provisioning_bundle_identifiers(self): return self._provisioning_bundle_identifiers @provisioning_bundle_identifiers.setter def provisioning_bundle_identifiers(self, value): if isinstance(value, list): self._provisioning_bundle_identifiers = list() for i in value: self._provisioning_bundle_identifiers.append(i) @property def push_token(self): return self._push_token @push_token.setter def push_token(self, value): self._push_token = value @property def reference_identifier(self): return self._reference_identifier @reference_identifier.setter def reference_identifier(self, value): self._reference_identifier = value def to_alipay_dict(self): params = dict() if self.device_identifier: if hasattr(self.device_identifier, 'to_alipay_dict'): params['device_identifier'] = self.device_identifier.to_alipay_dict() else: params['device_identifier'] = self.device_identifier if self.provisioning_bundle_identifier: if hasattr(self.provisioning_bundle_identifier, 'to_alipay_dict'): params['provisioning_bundle_identifier'] = self.provisioning_bundle_identifier.to_alipay_dict() else: params['provisioning_bundle_identifier'] = self.provisioning_bundle_identifier if self.provisioning_bundle_identifiers: if isinstance(self.provisioning_bundle_identifiers, list): for i in range(0, len(self.provisioning_bundle_identifiers)): element = self.provisioning_bundle_identifiers[i] if hasattr(element, 'to_alipay_dict'): self.provisioning_bundle_identifiers[i] = element.to_alipay_dict() if hasattr(self.provisioning_bundle_identifiers, 'to_alipay_dict'): params['provisioning_bundle_identifiers'] = self.provisioning_bundle_identifiers.to_alipay_dict() else: params['provisioning_bundle_identifiers'] = self.provisioning_bundle_identifiers if self.push_token: if hasattr(self.push_token, 'to_alipay_dict'): params['push_token'] = self.push_token.to_alipay_dict() else: params['push_token'] = self.push_token if self.reference_identifier: if hasattr(self.reference_identifier, 'to_alipay_dict'): params['reference_identifier'] = self.reference_identifier.to_alipay_dict() else: params['reference_identifier'] = self.reference_identifier return params @staticmethod def from_alipay_dict(d): if not d: return None o = AlipayPayApplepayTransactionauthtokenCreateModel() if 'device_identifier' in d: o.device_identifier = d['device_identifier'] if 'provisioning_bundle_identifier' in d: o.provisioning_bundle_identifier = d['provisioning_bundle_identifier'] if 'provisioning_bundle_identifiers' in d: o.provisioning_bundle_identifiers = d['provisioning_bundle_identifiers'] if 'push_token' in d: o.push_token = d['push_token'] if 'reference_identifier' in d: o.reference_identifier = d['reference_identifier'] return o
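# Quick round-trip of the model above -- illustrative values only:
if __name__ == '__main__':
    d = {
        'device_identifier': 'ABC123',
        'provisioning_bundle_identifiers': ['com.example.wallet'],
        'push_token': 'token-xyz',
    }
    model = AlipayPayApplepayTransactionauthtokenCreateModel.from_alipay_dict(d)
    print(model.to_alipay_dict())  # mirrors the keys set in d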
python
import sys from calm.dsl.constants import CACHE from calm.dsl.decompile.render import render_template from calm.dsl.store import Cache from calm.dsl.log import get_logging_handle from calm.dsl.decompile.ref_dependency import get_package_name LOG = get_logging_handle(__name__) def render_ahv_vm_disk(cls, boot_config): data_source_ref = cls.data_source_reference or {} if data_source_ref: data_source_ref = data_source_ref.get_dict() device_properties = cls.device_properties.get_dict() disk_size_mib = cls.disk_size_mib # find device type device_type = device_properties["device_type"] adapter_type = device_properties["disk_address"]["adapter_type"] adapter_index = device_properties["disk_address"]["device_index"] schema_file = "" user_attrs = {} # Atleast one disk should be bootable if boot_config: if ( adapter_type == boot_config["boot_device"]["disk_address"]["adapter_type"] and adapter_index == boot_config["boot_device"]["disk_address"]["device_index"] ): user_attrs["bootable"] = True # find operation_type if data_source_ref: if data_source_ref["kind"] == "app_package": user_attrs["name"] = data_source_ref.get("name") user_attrs["name"] = ( get_package_name(user_attrs["name"]) or user_attrs["name"] ) operation_type = "cloneFromVMDiskPackage" elif data_source_ref["kind"] == "image": operation_type = "cloneFromImageService" img_uuid = data_source_ref.get("uuid") disk_cache_data = ( Cache.get_entity_data_using_uuid( entity_type=CACHE.ENTITY.AHV_DISK_IMAGE, uuid=img_uuid ) or {} ) if not disk_cache_data: # Windows images may not be present LOG.warning("Image with uuid '{}' not found".format(img_uuid)) user_attrs["name"] = disk_cache_data.get("name", "") else: LOG.error( "Unknown kind `{}` for data source reference in image".format( data_source_ref["kind"] ) ) else: if device_type == "DISK": user_attrs["size"] = disk_size_mib // 1024 operation_type = "allocateOnStorageContainer" elif device_type == "CDROM": operation_type = "emptyCdRom" else: LOG.error("Unknown device type") sys.exit(-1) # TODO add whitelisting from project via attached accounts if device_type == "DISK": if adapter_type == "SCSI": if operation_type == "cloneFromImageService": schema_file = "ahv_vm_disk_scsi_clone_from_image.py.jinja2" elif operation_type == "cloneFromVMDiskPackage": schema_file = "ahv_vm_disk_scsi_clone_from_pkg.py.jinja2" elif operation_type == "allocateOnStorageContainer": schema_file = "ahv_vm_disk_scsi_allocate_container.py.jinja2" else: LOG.error("Unknown operation type {}".format(operation_type)) sys.exit(-1) elif adapter_type == "PCI": if operation_type == "cloneFromImageService": schema_file = "ahv_vm_disk_pci_clone_from_image.py.jinja2" elif operation_type == "cloneFromVMDiskPackage": schema_file = "ahv_vm_disk_pci_clone_from_pkg.py.jinja2" elif operation_type == "allocateOnStorageContainer": schema_file = "ahv_vm_disk_pci_allocate_container.py.jinja2" else: LOG.error("Unknown operation type {}".format(operation_type)) sys.exit(-1) else: LOG.error("Unknown adapter type {}".format(adapter_type)) sys.exit(-1) else: # CD-ROM if adapter_type == "SATA": if operation_type == "cloneFromImageService": schema_file = "ahv_vm_cdrom_sata_clone_from_image.py.jinja2" elif operation_type == "cloneFromVMDiskPackage": schema_file = "ahv_vm_cdrom_sata_clone_from_pkg.py.jinja2" elif operation_type == "emptyCdRom": schema_file = "ahv_vm_cdrom_sata_empty_cdrom.py.jinja2" else: LOG.error("Unknown operation type {}".format(operation_type)) sys.exit(-1) elif adapter_type == "IDE": if operation_type == "cloneFromImageService": 
schema_file = "ahv_vm_cdrom_ide_clone_from_image.py.jinja2" elif operation_type == "cloneFromVMDiskPackage": schema_file = "ahv_vm_cdrom_ide_clone_from_pkg.py.jinja2" elif operation_type == "emptyCdRom": schema_file = "ahv_vm_cdrom_ide_empty_cdrom.py.jinja2" else: LOG.error("Unknown operation type {}".format(operation_type)) sys.exit(-1) else: LOG.error("Unknown adapter type {}".format(adapter_type)) sys.exit(-1) text = render_template(schema_file=schema_file, obj=user_attrs) return text.strip()
python
""" Class FuzzyData """ import numpy as np from kernelfuzzy.fuzzyset import FuzzySet from kernelfuzzy.memberships import gaussmf class FuzzyData: _data = None # I dont know if we want to keep this _fuzzydata = None _epistemic_values = None # only for epistemic fuzzy sets _target = None def __init__(self, data=None, target=None): if data is not None: self._data = data self._target = target self._data.columns = self._data.columns.str.strip().str.lower().str.replace(' ', '_').str.replace('(', '').str.replace(')', '') def quantile_fuzzification_classification(self): ''' Algorithm 1 from https://hal.archives-ouvertes.fr/hal-01438607/document ''' grouped = self._data.groupby([self._target]) self._epistemic_values = grouped.transform(lambda x: np.exp(-np.square(x - x.quantile(0.5)) / (np.abs(x.quantile(0.75) - x.quantile(0.25)) / ( 2 * np.sqrt(2 * np.log(2)))) ** 2 )) # join data and epistemistic values num_rows = self._epistemic_values.shape[0] num_cols = self._epistemic_values.shape[1] self._fuzzydata=np.asarray([[FuzzySet(elements=self._data.iloc[j, i], md=self._epistemic_values.iloc[j, i]) for i in range(num_cols)] for j in range(num_rows)]) def get_fuzzydata(self): return self._fuzzydata def get_data(self): return self._data def get_epistemic_values(self): return self._epistemic_values def get_target(self): return self._data[self._target] def show_class(self): """ Print in the stdout the all the contents of the class, for debugging """ print("(_data) \n", _data, "\n") print("(_fuzzydata) \n", _fuzzydata, "\n") print("(_epistemic_values) \n", _epistemic_values, "\n") print("(_target) \n", _target, "\n") # TOYS DATASETS @staticmethod def create_toy_fuzzy_dataset(num_rows=10, num_cols=2): ''' Creates a matrix of fuzzy datasets, each row represent a tuple of fuzzy sets each column is a variable. Each fuzzy set is a fuzzy set with gaussian membership function ''' return np.asarray([[FuzzySet(elements=np.random.uniform(0, 100, 2), mf=gaussmf, params=[np.mean(np.random.uniform(0, 100, 2)), np.std(np.random.uniform(0, 100, 2))]) for i in range(num_cols)] for j in range(num_rows)]) # TODO profile and compare with '''fuzzy_dataset_same = np.full((num_rows, num_cols), dtype=FuzzySet, fill_value=FuzzySet(elements=np.random.uniform(0, 100, 10), mf=gaussmf, params=[np.mean(np.random.uniform(0, 100, 10)), np.std(np.random.uniform(0, 100, 10))])) ''' # TODO better parsing
python
"""Convergence diagnostics and model validation""" import numpy as np from .stats import autocorr, autocov, statfunc from copy import copy __all__ = ['geweke', 'gelman_rubin', 'effective_n'] @statfunc def geweke(x, first=.1, last=.5, intervals=20): """Return z-scores for convergence diagnostics. Compare the mean of the first % of series with the mean of the last % of series. x is divided into a number of segments for which this difference is computed. If the series is converged, this score should oscillate between -1 and 1. Parameters ---------- x : array-like The trace of some stochastic parameter. first : float The fraction of series at the beginning of the trace. last : float The fraction of series at the end to be compared with the section at the beginning. intervals : int The number of segments. Returns ------- scores : list [[]] Return a list of [i, score], where i is the starting index for each interval and score the Geweke score on the interval. Notes ----- The Geweke score on some series x is computed by: .. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}} where :math:`E` stands for the mean, :math:`V` the variance, :math:`x_s` a section at the start of the series and :math:`x_e` a section at the end of the series. References ---------- Geweke (1992) """ if np.ndim(x) > 1: return [geweke(y, first, last, intervals) for y in np.transpose(x)] # Filter out invalid intervals if first + last >= 1: raise ValueError( "Invalid intervals for Geweke convergence analysis", (first, last)) # Initialize list of z-scores zscores = [] # Last index value end = len(x) - 1 # Calculate starting indices sindices = np.arange(0, end // 2, step=int((end / 2) / (intervals - 1))) # Loop over start indices for start in sindices: # Calculate slices first_slice = x[start: start + int(first * (end - start))] last_slice = x[int(end - last * (end - start)):] z = (first_slice.mean() - last_slice.mean()) z /= np.sqrt(first_slice.std() ** 2 + last_slice.std() ** 2) zscores.append([start, z]) if intervals is None: return np.array(zscores[0]) else: return np.array(zscores) def gelman_rubin(mtrace): """ Returns estimate of R for a set of traces. The Gelman-Rubin diagnostic tests for lack of convergence by comparing the variance between multiple chains to the variance within each chain. If convergence has been achieved, the between-chain and within-chain variances should be identical. To be most effective in detecting evidence for nonconvergence, each chain should have been initialized to starting values that are dispersed relative to the target distribution. Parameters ---------- mtrace : MultiTrace A MultiTrace object containing parallel traces (minimum 2) of one or more stochastic parameters. Returns ------- Rhat : dict Returns dictionary of the potential scale reduction factors, :math:`\hat{R}` Notes ----- The diagnostic is computed by: .. math:: \hat{R} = \frac{\hat{V}}{W} where :math:`W` is the within-chain variance and :math:`\hat{V}` is the posterior variance estimate for the pooled traces. This is the potential scale reduction factor, which converges to unity when each of the traces is a sample from the target posterior. Values greater than one indicate that one or more chains have not yet converged. 
References
    ----------
    Brooks and Gelman (1998)
    Gelman and Rubin (1992)
    """

    if mtrace.nchains < 2:
        raise ValueError(
            'Gelman-Rubin diagnostic requires multiple chains of the same length.')

    def calc_rhat(x):
        try:
            # When the variable is multidimensional, this assignment will fail,
            # triggering a ValueError that will handle the multidimensional case
            m, n = x.shape

            # Calculate between-chain variance
            B = n * np.var(np.mean(x, axis=1), ddof=1)

            # Calculate within-chain variance
            W = np.mean(np.var(x, axis=1, ddof=1))

            # Estimate of marginal posterior variance
            Vhat = W * (n - 1) / n + B / n

            return np.sqrt(Vhat / W)

        except ValueError:
            # Tricky transpose here, shifting the last dimension to the first
            rotated_indices = np.roll(np.arange(x.ndim), 1)
            # Now iterate over the dimension of the variable
            return np.squeeze([calc_rhat(xi) for xi in x.transpose(rotated_indices)])

    Rhat = {}
    for var in mtrace.varnames:
        # Get all traces for var
        x = np.array(mtrace.get_values(var, combine=False))

        try:
            Rhat[var] = calc_rhat(x)
        except ValueError:
            Rhat[var] = [calc_rhat(y.transpose()) for y in x.transpose()]

    return Rhat


def effective_n(mtrace):
    r"""
    Returns estimate of the effective sample size of a set of traces.

    Parameters
    ----------
    mtrace : MultiTrace
      A MultiTrace object containing parallel traces (minimum 2)
      of one or more stochastic parameters.

    Returns
    -------
    n_eff : float
      Return the effective sample size, :math:`\hat{n}_{eff}`

    Notes
    -----
    The diagnostic is computed by:

    .. math:: \hat{n}_{eff} = \frac{mn}{1 + 2 \sum_{t=1}^T \hat{\rho}_t}

    where :math:`\hat{\rho}_t` is the estimated autocorrelation at lag t, and
    T is the first odd positive integer for which the sum
    :math:`\hat{\rho}_{T+1} + \hat{\rho}_{T+2}` is negative.

    References
    ----------
    Gelman et al. (2014)"""

    if mtrace.nchains < 2:
        raise ValueError(
            'Calculation of effective sample size requires multiple chains of the same length.')

    def calc_vhat(x):
        try:
            # When the variable is multidimensional, this assignment will fail,
            # triggering a ValueError that will handle the multidimensional case
            m, n = x.shape

            # Calculate between-chain variance
            B = n * np.var(np.mean(x, axis=1), ddof=1)

            # Calculate within-chain variance
            W = np.mean(np.var(x, axis=1, ddof=1))

            # Estimate of marginal posterior variance
            Vhat = W * (n - 1) / n + B / n

            return Vhat

        except ValueError:
            # Tricky transpose here, shifting the last dimension to the first
            rotated_indices = np.roll(np.arange(x.ndim), 1)
            # Now iterate over the dimension of the variable
            return np.squeeze([calc_vhat(xi) for xi in x.transpose(rotated_indices)])

    def calc_n_eff(x):
        m, n = x.shape

        negative_autocorr = False
        t = 1

        Vhat = calc_vhat(x)

        variogram = lambda t: (sum(sum((x[j][i] - x[j][i - t]) ** 2
                                       for i in range(t, n)) for j in range(m))
                               / (m * (n - t)))

        rho = np.ones(n)

        # Iterate until the sum of consecutive estimates of autocorrelation is negative
        while not negative_autocorr and (t < n):
            rho[t] = 1. - variogram(t) / (2. * Vhat)

            if not t % 2:
                negative_autocorr = sum(rho[t - 1:t + 1]) < 0

            t += 1

        return int(m * n / (1. + 2 * rho[1:t].sum()))

    n_eff = {}
    for var in mtrace.varnames:
        # Get all traces for var
        x = np.array(mtrace.get_values(var, combine=False))

        try:
            n_eff[var] = calc_n_eff(x)
        except ValueError:
            n_eff[var] = [calc_n_eff(y.transpose()) for y in x.transpose()]

    return n_eff
python
from keras.models import load_model
from keras.engine.topology import Input
from keras.engine.training import Model
from keras.layers.convolutional import Conv2D
from keras.layers.core import Activation, Dense, Flatten
from keras.layers.merge import Add
from keras.layers.normalization import BatchNormalization
from keras.optimizers import SGD
from keras.regularizers import l2


def _build_residual_block(args, x):
    cnn_filter_num = args['cnn_filter_num']
    cnn_filter_size = args['cnn_filter_size']
    l2_reg = args['l2_reg']

    in_x = x
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
               data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
               data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Add()([in_x, x])
    x = Activation("relu")(x)
    return x


def build_model(args):
    cnn_filter_num = args['cnn_filter_num']
    cnn_filter_size = args['cnn_filter_size']
    l2_reg = args['l2_reg']

    in_x = x = Input(args['input_dim'])  # (batch, channels, height, width)

    x = Conv2D(filters=cnn_filter_num, kernel_size=cnn_filter_size, padding="same",
               data_format="channels_first", kernel_regularizer=l2(l2_reg))(x)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)

    for _ in range(args['res_layer_num']):
        x = _build_residual_block(args, x)

    res_out = x

    # for policy output
    x = Conv2D(filters=2, kernel_size=1, data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(res_out)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Flatten()(x)
    policy_out = Dense(args['policy_dim'], kernel_regularizer=l2(l2_reg),
                       activation="softmax", name="policy")(x)

    # for value output
    x = Conv2D(filters=1, kernel_size=1, data_format="channels_first",
               kernel_regularizer=l2(l2_reg))(res_out)
    x = BatchNormalization(axis=1)(x)
    x = Activation("relu")(x)
    x = Flatten()(x)
    x = Dense(256, kernel_regularizer=l2(l2_reg), activation="relu")(x)
    value_out = Dense(1, kernel_regularizer=l2(l2_reg), activation="tanh", name="value")(x)

    return Model(in_x, [policy_out, value_out], name="model")


def build(args):
    model = build_model(args)
    model.compile(loss=['categorical_crossentropy', 'mean_squared_error'],
                  optimizer=SGD(lr=args['learning_rate'], momentum=args['momentum']),
                  # optimizer='adam',
                  loss_weights=[0.5, 0.5])
    return model


def load(filename):
    return load_model(filename)
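
# Hedged usage sketch: the dictionary keys mirror exactly what build_model() and
# build() read above; the concrete values (board size, filter counts, move-space
# size, etc.) are illustrative assumptions, not taken from this module.
if __name__ == '__main__':
    args = {
        'input_dim': (2, 8, 8),     # (channels, height, width), channels_first
        'cnn_filter_num': 64,
        'cnn_filter_size': 3,
        'res_layer_num': 5,
        'l2_reg': 1e-4,
        'policy_dim': 64 * 64,      # assumed size of the move space
        'learning_rate': 0.01,
        'momentum': 0.9,
    }
    model = build(args)
    model.summary()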
python
#!/usr/bin/env python3

import scrape_common as sc

print('TG')
d = sc.download('https://www.tg.ch/news/fachdossier-coronavirus.html/10552')
sc.timestamp()
d = d.replace('&nbsp;', ' ')

# 2020-03-25
"""
<li>Anzahl bestätigter Fälle: 96</li>
<p><em>Stand 25.3.20</em></p>
"""

# 2020-04-03
"""
<div class="box box--blue">
<h2>Aktuelle Fallzahlen im Kanton Thurgau</h2>
<ul>
<li>Anzahl bestätigter Fälle: 198</li>
<li>davon&nbsp;5 verstorben</li>
</ul>
<p><em>Stand 3.4.20</em></p>
</div>
"""

print('Date and time:', sc.find(r'Stand\s*([^<]+)<', d))
print('Confirmed cases:', sc.find(r'(?:Anzahl)?\s*bestätigter\s*Fälle:?\s*([0-9]+)\b', d))
print('Deaths:', sc.find(r'\b([0-9]+)\s*verstorb', d) or sc.find(r'Verstorben:?\s*([0-9]+)', d))
print('Hospitalized:', sc.find(r'Hospitalisiert:\s*([0-9]+)', d))
print('ICU:', sc.find(r'davon auf der Intensivstation:\s+([0-9]+)', d))
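
# Hedged self-check: assuming sc.find() behaves like re.search().group(1), the
# patterns above can be verified against the sample markup quoted in the
# comments (the 2020-04-03 snapshot).
def _check_patterns():
    import re

    sample = ('<li>Anzahl bestätigter Fälle: 198</li>'
              '<li>davon 5 verstorben</li>'
              '<p><em>Stand 3.4.20</em></p>')

    def find(pattern, text):
        m = re.search(pattern, text)
        return m.group(1) if m else None

    assert find(r'Stand\s*([^<]+)<', sample) == '3.4.20'
    assert find(r'(?:Anzahl)?\s*bestätigter\s*Fälle:?\s*([0-9]+)\b', sample) == '198'
    assert find(r'\b([0-9]+)\s*verstorb', sample) == '5'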
python
from abc import ABCMeta, abstractmethod


# Declaring the metaclass in the class header (rather than via the Python 2
# ``__metaclass__`` attribute, which Python 3 silently ignores) makes the
# @abstractmethod decorators actually enforced at instantiation time.
class RedditWikiClass(metaclass=ABCMeta):

    @abstractmethod
    def create_from_wiki(self, row, **kwargs):
        pass

    @abstractmethod
    def get_id(self):
        pass
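
# Hedged example of a hypothetical concrete subclass; the row layout used here
# is assumed purely for illustration.
class WikiEntry(RedditWikiClass):
    def create_from_wiki(self, row, **kwargs):
        self.entry_id = row[0]
        self.title = row[1]
        return self

    def get_id(self):
        return self.entry_id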
python
""" Run training/inference in background process via CLI. """ import abc import attr import os import subprocess as sub import tempfile import time from datetime import datetime from typing import Any, Callable, Dict, List, Optional, Text, Tuple from PySide2 import QtWidgets from sleap import Labels, Video, LabeledFrame from sleap.gui.learning.configs import ConfigFileInfo from sleap.nn import training from sleap.nn.config import TrainingJobConfig SKIP_TRAINING = False @attr.s(auto_attribs=True) class ItemForInference(abc.ABC): """ Abstract base class for item on which we can run inference via CLI. Must have `path` and `cli_args` properties, used to build CLI call. """ @property @abc.abstractmethod def path(self) -> Text: pass @property @abc.abstractmethod def cli_args(self) -> List[Text]: pass @attr.s(auto_attribs=True) class VideoItemForInference(ItemForInference): """ Encapsulate data about video on which inference should run. This allows for inference on an arbitrary list of frames from video. Attributes: video: the :py:class:`Video` object (which already stores its own path) frames: list of frames for inference; if None, then all frames are used use_absolute_path: whether to use absolute path for inference cli call """ video: Video frames: Optional[List[int]] = None use_absolute_path: bool = False @property def path(self): if self.use_absolute_path: return os.path.abspath(self.video.filename) return self.video.filename @property def cli_args(self): arg_list = list() arg_list.append(self.path) # TODO: better support for video params if hasattr(self.video.backend, "dataset") and self.video.backend.dataset: arg_list.extend(("--video.dataset", self.video.backend.dataset)) if ( hasattr(self.video.backend, "input_format") and self.video.backend.input_format ): arg_list.extend(("--video.input_format", self.video.backend.input_format)) # -Y represents endpoint of [X, Y) range but inference cli expects # [X, Y-1] range (so add 1 since negative). frame_int_list = [i + 1 if i < 0 else i for i in self.frames] arg_list.extend(("--frames", ",".join(map(str, frame_int_list)))) return arg_list @attr.s(auto_attribs=True) class DatasetItemForInference(ItemForInference): """ Encapsulate data about frame selection based on dataset data. Attributes: labels_path: path to the saved :py:class:`Labels` dataset. frame_filter: which subset of frames to get from dataset, supports * "user" * "suggested" use_absolute_path: whether to use absolute path for inference cli call. 
""" labels_path: str frame_filter: str = "user" use_absolute_path: bool = False @property def path(self): if self.use_absolute_path: return os.path.abspath(self.labels_path) return self.labels_path @property def cli_args(self): args_list = ["--labels", self.path] if self.frame_filter == "user": args_list.append("--only-labeled-frames") elif self.frame_filter == "suggested": args_list.append("--only-suggested-frames") return args_list @attr.s(auto_attribs=True) class ItemsForInference: """Encapsulates list of items for inference.""" items: List[ItemForInference] total_frame_count: int def __len__(self): return len(self.items) @classmethod def from_video_frames_dict( cls, video_frames_dict: Dict[Video, List[int]], total_frame_count: int ): items = [] for video, frames in video_frames_dict.items(): if frames: items.append(VideoItemForInference(video=video, frames=frames)) return cls(items=items, total_frame_count=total_frame_count) @attr.s(auto_attribs=True) class InferenceTask: """Encapsulates all data needed for running inference via CLI.""" trained_job_paths: List[str] inference_params: Dict[str, Any] = attr.ib(default=attr.Factory(dict)) labels: Optional[Labels] = None labels_filename: Optional[str] = None results: List[LabeledFrame] = attr.ib(default=attr.Factory(list)) def make_predict_cli_call( self, item_for_inference: ItemForInference, output_path: Optional[str] = None ) -> List[Text]: """Makes list of CLI arguments needed for running inference.""" cli_args = ["sleap-track"] cli_args.extend(item_for_inference.cli_args) # TODO: encapsulate in inference item class if ( not self.trained_job_paths and "tracking.tracker" in self.inference_params and self.labels_filename ): # No models so we must want to re-track previous predictions cli_args.extend(("--labels", self.labels_filename)) # Make path where we'll save predictions (if not specified) if output_path is None: if self.labels_filename: # Make a predictions directory next to the labels dataset file predictions_dir = os.path.join( os.path.dirname(self.labels_filename), "predictions" ) os.makedirs(predictions_dir, exist_ok=True) else: # Dataset filename wasn't given, so save predictions in same dir # as the video predictions_dir = os.path.dirname(item_for_inference.video.filename) # Build filename with video name and timestamp timestamp = datetime.now().strftime("%y%m%d_%H%M%S") output_path = os.path.join( predictions_dir, f"{os.path.basename(item_for_inference.path)}.{timestamp}." 
"predictions.slp", ) for job_path in self.trained_job_paths: cli_args.extend(("-m", job_path)) optional_items_as_nones = ( "tracking.target_instance_count", "tracking.kf_init_frame_count", ) for key in optional_items_as_nones: if key in self.inference_params and self.inference_params[key] is None: del self.inference_params[key] # --tracking.kf_init_frame_count enables the kalman filter tracking # so if not set, then remove other (unused) args if "tracking.kf_init_frame_count" not in self.inference_params: if "tracking.kf_node_indices" in self.inference_params: del self.inference_params["tracking.kf_node_indices"] bool_items_as_ints = ( "tracking.pre_cull_to_target", "tracking.post_connect_single_breaks", ) for key in bool_items_as_ints: if key in self.inference_params: self.inference_params[key] = int(self.inference_params[key]) for key, val in self.inference_params.items(): if not key.startswith(("_", "outputs.", "model.", "data.")): cli_args.extend((f"--{key}", str(val))) cli_args.extend(("-o", output_path)) return cli_args, output_path def predict_subprocess( self, item_for_inference: ItemForInference, append_results: bool = False, waiting_callback: Optional[Callable] = None, ) -> Tuple[Text, bool]: """Runs inference in a subprocess.""" cli_args, output_path = self.make_predict_cli_call(item_for_inference) print("Command line call:") print(" \\\n".join(cli_args)) print() with sub.Popen(cli_args) as proc: while proc.poll() is None: if waiting_callback is not None: if waiting_callback() == -1: # -1 signals user cancellation return "", False time.sleep(0.1) print(f"Process return code: {proc.returncode}") success = proc.returncode == 0 if success and append_results: # Load frames from inference into results list new_inference_labels = Labels.load_file(output_path, match_to=self.labels) self.results.extend(new_inference_labels.labeled_frames) return output_path, success def merge_results(self): """Merges result frames into labels dataset.""" # Remove any frames without instances. new_lfs = list(filter(lambda lf: len(lf.instances), self.results)) new_labels = Labels(new_lfs) # Remove potentially conflicting predictions from the base dataset. self.labels.remove_predictions(new_labels=new_labels) # Merge predictions into current labels dataset. _, _, new_conflicts = Labels.complex_merge_between( self.labels, new_labels=new_labels, unify=False, # since we used match_to when loading predictions file ) # new predictions should replace old ones Labels.finish_complex_merge(self.labels, new_conflicts) def write_pipeline_files( output_dir: str, labels_filename: str, config_info_list: List[ConfigFileInfo], inference_params: Dict[str, Any], items_for_inference: ItemsForInference, ): """Writes the config files and scripts for manually running pipeline.""" # Use absolute path for all files that aren't contained in the output dir. labels_filename = os.path.abspath(labels_filename) # Preserve current working directory and change working directory to the # output directory, so we can set local paths relative to that. old_cwd = os.getcwd() os.chdir(output_dir) new_cfg_filenames = [] train_script = "#!/bin/bash\n" # Add head type to save path suffix to prevent overwriting. for cfg_info in config_info_list: if not cfg_info.dont_retrain: if ( cfg_info.config.outputs.run_name_suffix is not None and len(cfg_info.config.outputs.run_name_suffix) > 0 ): # Keep existing suffix if defined. suffix = "." + cfg_info.config.outputs.run_name_suffix else: suffix = "" # Add head name. suffix = "." 
+ cfg_info.head_name + suffix # Update config. cfg_info.config.outputs.run_name_suffix = suffix for cfg_info in config_info_list: if cfg_info.dont_retrain: # Use full absolute path to already training model trained_path = os.path.normpath(os.path.join(old_cwd, cfg_info.path)) new_cfg_filenames.append(trained_path) else: # We're training this model, so save config file... # First we want to set the run folder so that we know where to find # the model after it's trained. # We'll use local path to the output directory (cwd). # Note that setup_new_run_folder does things relative to cwd which # is the main reason we're setting it to the output directory rather # than just using normpath. cfg_info.config.outputs.runs_folder = "" training.setup_new_run_folder(cfg_info.config.outputs) # Now we set the filename for the training config file new_cfg_filename = f"{cfg_info.head_name}.json" # Save the config file cfg_info.config.save_json(new_cfg_filename) # Keep track of the path where we'll find the trained model new_cfg_filenames.append(cfg_info.config.outputs.run_path) # Add a line to the script for training this model train_script += f"sleap-train {new_cfg_filename} {labels_filename}\n" # Write the script to train the models which need to be trained with open(os.path.join(output_dir, "train-script.sh"), "w") as f: f.write(train_script) # Build the script for running inference inference_script = "#!/bin/bash\n" # Object with settings for inference inference_task = InferenceTask( labels_filename=labels_filename, trained_job_paths=new_cfg_filenames, inference_params=inference_params, ) for item_for_inference in items_for_inference.items: # We want to save predictions in output dir so use local path prediction_output_path = ( f"{os.path.basename(item_for_inference.path)}.predictions.slp" ) # Use absolute path to video item_for_inference.use_absolute_path = True # Get list of cli args cli_args, _ = inference_task.make_predict_cli_call( item_for_inference=item_for_inference, output_path=prediction_output_path, ) # And join them into a single call to inference inference_script += " ".join(cli_args) + "\n" # And write it with open(os.path.join(output_dir, "inference-script.sh"), "w") as f: f.write(inference_script) # Restore the working directory os.chdir(old_cwd) def run_learning_pipeline( labels_filename: str, labels: Labels, config_info_list: List[ConfigFileInfo], inference_params: Dict[str, Any], items_for_inference: ItemsForInference, ) -> int: """Runs training (as needed) and inference. Args: labels_filename: Path to already saved current labels object. labels: The current labels object; results will be added to this. config_info_list: List of ConfigFileInfo with configs for training and inference. inference_params: Parameters to pass to inference. frames_to_predict: Dict that gives list of frame indices for each video. Returns: Number of new frames added to labels. 
""" save_viz = inference_params.get("_save_viz", False) # Train the TrainingJobs trained_job_paths = run_gui_training( labels_filename=labels_filename, labels=labels, config_info_list=config_info_list, gui=True, save_viz=save_viz, ) # Check that all the models were trained if None in trained_job_paths.values(): return -1 inference_task = InferenceTask( labels=labels, labels_filename=labels_filename, trained_job_paths=list(trained_job_paths.values()), inference_params=inference_params, ) # Run the Predictor for suggested frames new_labeled_frame_count = run_gui_inference(inference_task, items_for_inference) return new_labeled_frame_count def run_gui_training( labels_filename: str, labels: Labels, config_info_list: List[ConfigFileInfo], gui: bool = True, save_viz: bool = False, ) -> Dict[Text, Text]: """ Runs training for each training job. Args: labels: Labels object from which we'll get training data. config_info_list: List of ConfigFileInfo with configs for training. gui: Whether to show gui windows and process gui events. save_viz: Whether to save visualizations from training. Returns: Dictionary, keys are head name, values are path to trained config. """ trained_job_paths = dict() if gui: from sleap.nn.monitor import LossViewer from sleap.gui.widgets.imagedir import QtImageDirectoryWidget # open training monitor window win = LossViewer() win.resize(600, 400) win.show() for config_info in config_info_list: if config_info.dont_retrain: if not config_info.has_trained_model: raise ValueError( "Config is set to not retrain but no trained model found: " f"{config_info.path}" ) print( f"Using already trained model for {config_info.head_name}: " f"{config_info.path}" ) trained_job_paths[config_info.head_name] = config_info.path else: job = config_info.config model_type = config_info.head_name # We'll pass along the list of paths we actually used for loading # the videos so that we don't have to rely on the paths currently # saved in the labels file for finding videos. video_path_list = [video.filename for video in labels.videos] # Update save dir and run name for job we're about to train # so we have access to them here (rather than letting # train_subprocess update them). # training.Trainer.set_run_name(job, labels_filename) job.outputs.runs_folder = os.path.join( os.path.dirname(labels_filename), "models" ) training.setup_new_run_folder( job.outputs, base_run_name=f"{model_type}.{len(labels)}" ) if gui: print("Resetting monitor window.") win.reset(what=str(model_type)) win.setWindowTitle(f"Training Model - {str(model_type)}") win.set_message(f"Preparing to run training...") if save_viz: viz_window = QtImageDirectoryWidget.make_training_vizualizer( job.outputs.run_path ) viz_window.move(win.x() + win.width() + 20, win.y()) win.on_epoch.connect(viz_window.poll) print(f"Start training {str(model_type)}...") def waiting(): if gui: QtWidgets.QApplication.instance().processEvents() # Run training trained_job_path, success = train_subprocess( job_config=job, labels_filename=labels_filename, video_paths=video_path_list, waiting_callback=waiting, save_viz=save_viz, ) if success: # get the path to the resulting TrainingJob file trained_job_paths[model_type] = trained_job_path print(f"Finished training {str(model_type)}.") else: if gui: win.close() QtWidgets.QMessageBox( text=f"An error occurred while training {str(model_type)}. " "Your command line terminal may have more information about " "the error." 
).exec_() trained_job_paths[model_type] = None if gui: # close training monitor window win.close() return trained_job_paths def run_gui_inference( inference_task: InferenceTask, items_for_inference: ItemsForInference, gui: bool = True, ) -> int: """Run inference on specified frames using models from training_jobs. Args: inference_task: Encapsulates information needed for running inference, such as labels dataset and models. items_for_inference: Encapsulates information about the videos (etc.) on which we're running inference. gui: Whether to show gui windows and process gui events. Returns: Number of new frames added to labels. """ if gui: # show message while running inference progress = QtWidgets.QProgressDialog( f"Running inference on {len(items_for_inference)} videos...", "Cancel", 0, len(items_for_inference), ) progress.show() QtWidgets.QApplication.instance().processEvents() # Make callback to process events while running inference def waiting(done_count): if gui: QtWidgets.QApplication.instance().processEvents() progress.setValue(done_count) if progress.wasCanceled(): return -1 for i, item_for_inference in enumerate(items_for_inference.items): # Run inference for desired frames in this video predictions_path, success = inference_task.predict_subprocess( item_for_inference, append_results=True, waiting_callback=lambda: waiting(i) ) if not success: if gui: progress.close() QtWidgets.QMessageBox( text="An error occurred during inference. Your command line " "terminal may have more information about the error." ).exec_() return -1 inference_task.merge_results() # close message window if gui: progress.close() # return total_new_lf_count return len(inference_task.results) def train_subprocess( job_config: TrainingJobConfig, labels_filename: str, video_paths: Optional[List[Text]] = None, waiting_callback: Optional[Callable] = None, save_viz: bool = False, ): """Runs training inside subprocess.""" # run_name = job_config.outputs.run_name run_path = job_config.outputs.run_path success = False with tempfile.TemporaryDirectory() as temp_dir: # Write a temporary file of the TrainingJob so that we can respect # any changes made to the job attributes after it was loaded. temp_filename = datetime.now().strftime("%y%m%d_%H%M%S") + "_training_job.json" training_job_path = os.path.join(temp_dir, temp_filename) job_config.save_json(training_job_path) # Build CLI arguments for training cli_args = [ "sleap-train", training_job_path, labels_filename, "--zmq", ] if save_viz: cli_args.append("--save_viz") # Use cli arg since cli ignores setting in config if job_config.outputs.tensorboard.write_logs: cli_args.append("--tensorboard") # Add list of video paths so we can find video even if paths in saved # labels dataset file are incorrect. if video_paths: cli_args.extend(("--video-paths", ",".join(video_paths))) print(cli_args) if not SKIP_TRAINING: # Run training in a subprocess with sub.Popen(cli_args) as proc: # Wait till training is done, calling a callback if given. while proc.poll() is None: if waiting_callback is not None: if waiting_callback() == -1: # -1 signals user cancellation return "", False time.sleep(0.1) success = proc.returncode == 0 print("Run Path:", run_path) return run_path, success
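
# Hedged usage sketch (the paths are placeholders, not from this project): build,
# but do not run, the `sleap-track` command line for the user-labeled frames of a
# saved dataset.
if __name__ == "__main__":
    task = InferenceTask(
        trained_job_paths=["models/baseline.centroid"],
        inference_params={"tracking.tracker": "simple"},
    )
    item = DatasetItemForInference(labels_path="labels.v001.slp", frame_filter="user")
    cli_args, _ = task.make_predict_cli_call(
        item, output_path="labels.v001.predictions.slp"
    )
    print(" ".join(cli_args))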
python
import requests
from xml.etree import ElementTree
import collections
from dateutil.parser import parse

Episode = collections.namedtuple('Episode', 'title link pubdate')


def main():
    dom = get_xml_dom('https://talkpython.fm/rss')
    episodes = get_episodes(dom)
    for idx, e in enumerate(episodes[:5]):
        print('{}. {}'.format(idx, e.title))


def get_xml_dom(url):
    resp = requests.get(url)
    if resp.status_code != 200:
        return None

    dom = ElementTree.fromstring(resp.text)
    return dom


def get_episodes(dom):
    item_nodes = dom.findall('channel/item')
    episodes = [
        Episode(
            n.find('title').text,
            n.find('link').text,
            parse(n.find('pubDate').text)
        )
        for n in item_nodes
    ]
    return sorted(episodes, key=lambda e: e.pubdate)


if __name__ == '__main__':
    main()
python
from typing import Any, Dict, Generic, Optional, Type, Union from flair.data import Corpus from numpy import typing as nptyping from typing_extensions import Literal from embeddings.data.data_loader import ( ConllFlairCorpusDataLoader, DataLoader, PickleFlairCorpusDataLoader, ) from embeddings.data.dataset import Data, Dataset, LocalDataset from embeddings.embedding.auto_flair import ( AutoFlairDocumentPoolEmbedding, AutoFlairWordEmbedding, DocumentEmbedding, ) from embeddings.embedding.flair_embedding import FlairDocumentPoolEmbedding from embeddings.evaluator.evaluator import Evaluator from embeddings.evaluator.sequence_labeling_evaluator import SequenceLabelingEvaluator from embeddings.evaluator.text_classification_evaluator import TextClassificationEvaluator from embeddings.model.flair_model import FlairModel from embeddings.model.model import Model from embeddings.pipeline.pipeline import Pipeline from embeddings.pipeline.standard_pipeline import EvaluationResult, LoaderResult, ModelResult from embeddings.task.flair_task.sequence_labeling import SequenceLabeling from embeddings.task.flair_task.text_classification import TextClassification from embeddings.task.flair_task.text_pair_classification import TextPairClassification from embeddings.utils.json_dict_persister import JsonPersister class ModelEvaluationPipeline( Pipeline[EvaluationResult], Generic[Data, LoaderResult, ModelResult, EvaluationResult], ): def __init__( self, dataset: Dataset[Data], data_loader: DataLoader[Data, LoaderResult], model: Model[LoaderResult, ModelResult], evaluator: Evaluator[ModelResult, EvaluationResult], ) -> None: self.dataset = dataset self.data_loader = data_loader self.model = model self.evaluator = evaluator def run(self) -> EvaluationResult: loaded_data = self.data_loader.load(self.dataset) model_result = self.model.execute(loaded_data) return self.evaluator.evaluate(model_result) class FlairTextClassificationEvaluationPipeline( ModelEvaluationPipeline[str, Corpus, Dict[str, nptyping.NDArray[Any]], Dict[str, Any]] ): def __init__( self, dataset_path: str, embedding_name: str, output_path: str, document_embedding_cls: Union[str, Type[DocumentEmbedding]] = FlairDocumentPoolEmbedding, persist_path: Optional[str] = None, predict_subset: Literal["dev", "test"] = "test", task_model_kwargs: Optional[Dict[str, Any]] = None, task_train_kwargs: Optional[Dict[str, Any]] = None, load_model_kwargs: Optional[Dict[str, Any]] = None, ): load_model_kwargs = {} if load_model_kwargs is None else load_model_kwargs dataset = LocalDataset(dataset=dataset_path) data_loader = PickleFlairCorpusDataLoader() embedding = AutoFlairDocumentPoolEmbedding.from_hub( repo_id=embedding_name, document_embedding_cls=document_embedding_cls, **load_model_kwargs ) task = TextClassification( output_path=output_path, task_train_kwargs=task_train_kwargs, task_model_kwargs=task_model_kwargs, ) model = FlairModel(embedding=embedding, task=task, predict_subset=predict_subset) evaluator: Evaluator[Dict[str, Any], Dict[str, Any]] = TextClassificationEvaluator() if persist_path is not None: evaluator = evaluator.persisting(JsonPersister(path=persist_path)) super().__init__(dataset, data_loader, model, evaluator) class FlairTextPairClassificationEvaluationPipeline( ModelEvaluationPipeline[str, Corpus, Dict[str, nptyping.NDArray[Any]], Dict[str, Any]] ): def __init__( self, dataset_path: str, embedding_name: str, output_path: str, document_embedding_cls: Union[str, Type[DocumentEmbedding]] = FlairDocumentPoolEmbedding, persist_path: Optional[str] = 
None, predict_subset: Literal["dev", "test"] = "test", task_model_kwargs: Optional[Dict[str, Any]] = None, task_train_kwargs: Optional[Dict[str, Any]] = None, load_model_kwargs: Optional[Dict[str, Any]] = None, ): load_model_kwargs = {} if load_model_kwargs is None else load_model_kwargs dataset = LocalDataset(dataset=dataset_path) data_loader = PickleFlairCorpusDataLoader() embedding = AutoFlairDocumentPoolEmbedding.from_hub( repo_id=embedding_name, document_embedding_cls=document_embedding_cls, **load_model_kwargs ) task = TextPairClassification( output_path=output_path, task_train_kwargs=task_train_kwargs, task_model_kwargs=task_model_kwargs, ) model = FlairModel(embedding=embedding, task=task, predict_subset=predict_subset) evaluator: Evaluator[Dict[str, Any], Dict[str, Any]] = TextClassificationEvaluator() if persist_path: evaluator = evaluator.persisting(JsonPersister(path=persist_path)) super().__init__(dataset, data_loader, model, evaluator) class FlairSequenceLabelingEvaluationPipeline( ModelEvaluationPipeline[str, Corpus, Dict[str, nptyping.NDArray[Any]], Dict[str, Any]] ): DEFAULT_EVAL_MODE = SequenceLabelingEvaluator.EvaluationMode.CONLL def __init__( self, dataset_path: str, embedding_name: str, output_path: str, hidden_size: int, evaluation_mode: SequenceLabelingEvaluator.EvaluationMode = DEFAULT_EVAL_MODE, tagging_scheme: Optional[SequenceLabelingEvaluator.TaggingScheme] = None, persist_path: Optional[str] = None, predict_subset: Literal["dev", "test"] = "test", task_model_kwargs: Optional[Dict[str, Any]] = None, task_train_kwargs: Optional[Dict[str, Any]] = None, word_embedding_kwargs: Optional[Dict[str, Any]] = None, ): dataset = LocalDataset(dataset=dataset_path) data_loader = ConllFlairCorpusDataLoader() embedding = AutoFlairWordEmbedding.from_hub(embedding_name, kwargs=word_embedding_kwargs) task = SequenceLabeling( output_path=output_path, hidden_size=hidden_size, task_train_kwargs=task_train_kwargs, task_model_kwargs=task_model_kwargs, ) model = FlairModel(embedding=embedding, task=task, predict_subset=predict_subset) evaluator: Evaluator[Dict[str, Any], Dict[str, Any]] = SequenceLabelingEvaluator( evaluation_mode=evaluation_mode, tagging_scheme=tagging_scheme ) if persist_path: evaluator = evaluator.persisting(JsonPersister(path=persist_path)) super().__init__(dataset, data_loader, model, evaluator)
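
# Hedged usage sketch: the paths are placeholders and the embedding repo id is an
# assumed example, not taken from this module. The pipeline loads a pickled flair
# Corpus, trains a classification head on pooled document embeddings, evaluates
# on the test split, and persists the metrics as JSON.
pipeline = FlairTextClassificationEvaluationPipeline(
    dataset_path="data/corpus.pkl",
    embedding_name="clarin-pl/word2vec-kgr10",   # assumed hub repo id
    output_path="output/",
    persist_path="output/metrics.json",
)
result = pipeline.run()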
python
# This has been shanked off of the Electrum codebase in order to get # pubkey_to_address(), which supports bech32 addresses. It is MIT licensed, but # only pieces of it are copied and assembled here. import hashlib from enum import IntEnum from typing import Union from electrum import constants from electrum import segwit_addr class opcodes(IntEnum): # push value OP_0 = 0x00 OP_FALSE = OP_0 OP_PUSHDATA1 = 0x4c OP_PUSHDATA2 = 0x4d OP_PUSHDATA4 = 0x4e OP_1NEGATE = 0x4f OP_RESERVED = 0x50 OP_1 = 0x51 OP_TRUE = OP_1 OP_2 = 0x52 OP_3 = 0x53 OP_4 = 0x54 OP_5 = 0x55 OP_6 = 0x56 OP_7 = 0x57 OP_8 = 0x58 OP_9 = 0x59 OP_10 = 0x5a OP_11 = 0x5b OP_12 = 0x5c OP_13 = 0x5d OP_14 = 0x5e OP_15 = 0x5f OP_16 = 0x60 # control OP_NOP = 0x61 OP_VER = 0x62 OP_IF = 0x63 OP_NOTIF = 0x64 OP_VERIF = 0x65 OP_VERNOTIF = 0x66 OP_ELSE = 0x67 OP_ENDIF = 0x68 OP_VERIFY = 0x69 OP_RETURN = 0x6a # stack ops OP_TOALTSTACK = 0x6b OP_FROMALTSTACK = 0x6c OP_2DROP = 0x6d OP_2DUP = 0x6e OP_3DUP = 0x6f OP_2OVER = 0x70 OP_2ROT = 0x71 OP_2SWAP = 0x72 OP_IFDUP = 0x73 OP_DEPTH = 0x74 OP_DROP = 0x75 OP_DUP = 0x76 OP_NIP = 0x77 OP_OVER = 0x78 OP_PICK = 0x79 OP_ROLL = 0x7a OP_ROT = 0x7b OP_SWAP = 0x7c OP_TUCK = 0x7d # splice ops OP_CAT = 0x7e OP_SUBSTR = 0x7f OP_LEFT = 0x80 OP_RIGHT = 0x81 OP_SIZE = 0x82 # bit logic OP_INVERT = 0x83 OP_AND = 0x84 OP_OR = 0x85 OP_XOR = 0x86 OP_EQUAL = 0x87 OP_EQUALVERIFY = 0x88 OP_RESERVED1 = 0x89 OP_RESERVED2 = 0x8a # numeric OP_1ADD = 0x8b OP_1SUB = 0x8c OP_2MUL = 0x8d OP_2DIV = 0x8e OP_NEGATE = 0x8f OP_ABS = 0x90 OP_NOT = 0x91 OP_0NOTEQUAL = 0x92 OP_ADD = 0x93 OP_SUB = 0x94 OP_MUL = 0x95 OP_DIV = 0x96 OP_MOD = 0x97 OP_LSHIFT = 0x98 OP_RSHIFT = 0x99 OP_BOOLAND = 0x9a OP_BOOLOR = 0x9b OP_NUMEQUAL = 0x9c OP_NUMEQUALVERIFY = 0x9d OP_NUMNOTEQUAL = 0x9e OP_LESSTHAN = 0x9f OP_GREATERTHAN = 0xa0 OP_LESSTHANOREQUAL = 0xa1 OP_GREATERTHANOREQUAL = 0xa2 OP_MIN = 0xa3 OP_MAX = 0xa4 OP_WITHIN = 0xa5 # crypto OP_RIPEMD160 = 0xa6 OP_SHA1 = 0xa7 OP_SHA256 = 0xa8 OP_HASH160 = 0xa9 OP_HASH256 = 0xaa OP_CODESEPARATOR = 0xab OP_CHECKSIG = 0xac OP_CHECKSIGVERIFY = 0xad OP_CHECKMULTISIG = 0xae OP_CHECKMULTISIGVERIFY = 0xaf # expansion OP_NOP1 = 0xb0 OP_CHECKLOCKTIMEVERIFY = 0xb1 OP_NOP2 = OP_CHECKLOCKTIMEVERIFY OP_CHECKSEQUENCEVERIFY = 0xb2 OP_NOP3 = OP_CHECKSEQUENCEVERIFY OP_NOP4 = 0xb3 OP_NOP5 = 0xb4 OP_NOP6 = 0xb5 OP_NOP7 = 0xb6 OP_NOP8 = 0xb7 OP_NOP9 = 0xb8 OP_NOP10 = 0xb9 OP_INVALIDOPCODE = 0xff def hex(self) -> str: return bytes([self]).hex() ############################################################################## bfh = bytes.fromhex def bh2u(x: bytes) -> str: """ str with hex representation of a bytes-like object >>> x = bytes((1, 2, 10)) >>> bh2u(x) '01020A' """ return x.hex() ############################################################################## def to_bytes(something, encoding='utf8') -> bytes: """ cast string to bytes() like object, but for python2 support it's bytearray copy """ if isinstance(something, bytes): return something if isinstance(something, str): return something.encode(encoding) elif isinstance(something, bytearray): return bytes(something) else: raise TypeError("Not a string or bytes like object") def sha256(x: Union[bytes, str]) -> bytes: x = to_bytes(x, 'utf8') return bytes(hashlib.sha256(x).digest()) def sha256d(x: Union[bytes, str]) -> bytes: x = to_bytes(x, 'utf8') out = bytes(sha256(sha256(x))) return out def hash_160(x: bytes) -> bytes: md = hashlib.new('ripemd160') md.update(sha256(x)) return md.digest() 
############################################################################## def rev_hex(s: str) -> str: return bh2u(bfh(s)[::-1]) def int_to_hex(i: int, length: int=1) -> str: """Converts int to little-endian hex string. `length` is the number of bytes available """ if not isinstance(i, int): raise TypeError('{} instead of int'.format(i)) range_size = pow(256, length) if i < -(range_size//2) or i >= range_size: raise OverflowError('cannot convert int {} to hex ({} bytes)'.format(i, length)) if i < 0: # two's complement i = range_size + i s = hex(i)[2:].rstrip('L') s = "0"*(2*length - len(s)) + s return rev_hex(s) ############################################################################## def assert_bytes(*args): """ porting helper, assert args type """ try: for x in args: assert isinstance(x, (bytes, bytearray)) except: print('assert bytes failed', list(map(type, args))) raise ############################################################################## def _op_push(i: int) -> str: if i < opcodes.OP_PUSHDATA1: return int_to_hex(i) elif i <= 0xff: return opcodes.OP_PUSHDATA1.hex() + int_to_hex(i, 1) elif i <= 0xffff: return opcodes.OP_PUSHDATA2.hex() + int_to_hex(i, 2) else: return opcodes.OP_PUSHDATA4.hex() + int_to_hex(i, 4) def push_script(data: str) -> str: """Returns pushed data to the script, automatically choosing canonical opcodes depending on the length of the data. hex -> hex ported from https://github.com/btcsuite/btcd/blob/fdc2bc867bda6b351191b5872d2da8270df00d13/txscript/scriptbuilder.go#L128 """ data = bfh(data) data_len = len(data) # "small integer" opcodes if data_len == 0 or data_len == 1 and data[0] == 0: return opcodes.OP_0.hex() elif data_len == 1 and data[0] <= 16: return bh2u(bytes([opcodes.OP_1 - 1 + data[0]])) elif data_len == 1 and data[0] == 0x81: return opcodes.OP_1NEGATE.hex() return _op_push(data_len) + bh2u(data) __b58chars = b'123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz' assert len(__b58chars) == 58 __b43chars = b'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:' assert len(__b43chars) == 43 def base_encode(v: bytes, base: int) -> str: """ encode v, which is a string of bytes, to base58.""" assert_bytes(v) if base not in (58, 43): raise ValueError('not supported base: {}'.format(base)) chars = __b58chars if base == 43: chars = __b43chars long_value = 0 for (i, c) in enumerate(v[::-1]): long_value += (256**i) * c result = bytearray() while long_value >= base: div, mod = divmod(long_value, base) result.append(chars[mod]) long_value = div result.append(chars[long_value]) # Bitcoin does a little leading-zero-compression: # leading 0-bytes in the input become leading-1s nPad = 0 for c in v: if c == 0x00: nPad += 1 else: break result.extend([chars[0]] * nPad) result.reverse() return result.decode('ascii') ############################################################################### def hash160_to_b58_address(h160: bytes, addrtype: int) -> str: s = bytes([addrtype]) + h160 s = s + sha256d(s)[0:4] return base_encode(s, base=58) def hash160_to_p2pkh(h160: bytes, *, net=None) -> str: if net is None: net = constants.net return hash160_to_b58_address(h160, net.ADDRTYPE_P2PKH) def hash160_to_p2sh(h160: bytes, *, net=None) -> str: if net is None: net = constants.net return hash160_to_b58_address(h160, net.ADDRTYPE_P2SH) def public_key_to_p2pkh(public_key: bytes, *, net=None) -> str: if net is None: net = constants.net return hash160_to_p2pkh(hash_160(public_key), net=net) def hash_to_segwit_addr(h: bytes, witver: int, *, net=None) -> str: if net is None: 
net = constants.net return segwit_addr.encode(net.SEGWIT_HRP, witver, h) def public_key_to_p2wpkh(public_key: bytes, *, net=None) -> str: if net is None: net = constants.net return hash_to_segwit_addr(hash_160(public_key), witver=0, net=net) def p2wpkh_nested_script(pubkey: str) -> str: pkh = bh2u(hash_160(bfh(pubkey))) return '00' + push_script(pkh) ############################################################################### def pubkey_to_address(txin_type: str, pubkey: str, *, net=None) -> str: if net is None: net = constants.net if txin_type == 'p2pkh': return public_key_to_p2pkh(bfh(pubkey), net=net) elif txin_type == 'p2wpkh': return public_key_to_p2wpkh(bfh(pubkey), net=net) elif txin_type == 'p2wpkh-p2sh': scriptSig = p2wpkh_nested_script(pubkey) return hash160_to_p2sh(hash_160(bfh(scriptSig)), net=net) else: raise NotImplementedError(txin_type)
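
# Hedged self-checks of the deterministic helpers above (no wallet or network
# needed); the expected values follow from the little-endian rule documented in
# int_to_hex() and the minimal-push rules in push_script().
assert int_to_hex(1, 2) == '0100'               # little-endian over 2 bytes
assert int_to_hex(-1, 1) == 'ff'                # two's complement
assert push_script('00') == opcodes.OP_0.hex()  # a zero byte is a minimal OP_0 push
assert push_script('05') == opcodes.OP_5.hex()  # small ints 1..16 map to OP_1..OP_16
assert push_script('0101') == '020101'          # otherwise: length prefix + data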
python
import csv
import urllib
import subprocess
import sys
import os
from datetime import datetime, timedelta

# Get args
if str(sys.argv[1]).isalnum():
    source = sys.argv[1]

sources = {'comb': 'comb_ats', 'jpl': 'jpl_ats', 'sopac': 'sopac_ats'}
src = str(sources[source])

# Serve the cached copy if it is less than a day old.
if os.path.exists(src + '.json'):
    mtime = datetime.fromtimestamp(os.path.getmtime(src + '.json'))
    diff = datetime.now() - mtime
    if diff.days == 0:
        with open(src + '.json', 'r') as out:
            print(out.read())
        sys.exit()

url = 'http://geoapp02.ucsd.edu:8080/gpseDB/vel?op=getSingleVelNEUFile&coord=' + src + '&site_list=all&out=GMT&fil=unf'

try:
    reader = csv.reader(urllib.urlopen(url), delimiter=' ')
except Exception as e:
    # Fall back to the cached copy if the download fails.
    if os.path.exists(src + '.json'):
        with open(src + '.json', 'r') as out:
            print(out.read())
    sys.exit()

with open(src + '.csv', 'wb') as csvfile:
    wtr = csv.writer(csvfile)
    wtr.writerow(('site', 'x', 'y', 'e_vel', 'n_vel', 'u_vel'))
    try:
        for row in reader:
            if "'Error'" in str(row):
                # Use the old version if there is an error, and stop before
                # trying to parse the error row as data.
                with open(src + '.json', 'r') as out:
                    print(out.read())
                sys.exit()
            site = row[7]
            if float(row[0]) > 180:
                x = float(row[0]) - 360
            else:
                x = float(row[0])
            y = float(row[1])
            # convert from m to mm
            e_vel = float(row[2]) * 1000
            n_vel = float(row[3]) * 1000
            u_vel = float(row[8]) * 1000
            wtr.writerow((site, str(x), str(y), str(e_vel), str(n_vel), str(u_vel)))
    except csv.Error as e:
        sys.exit('url %s, line %d: %s' % (url, reader.line_num, e))

ogr2ogr_command_list = ["ogr2ogr", "-f", "geojson",
                        "-oo", "X_POSSIBLE_NAMES=x",
                        "-oo", "Y_POSSIBLE_NAMES=y",
                        src + ".json", src + ".csv"]
process = subprocess.Popen(ogr2ogr_command_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
process.wait()
for output in process.stdout:
    print(output)
for error in process.stderr:
    print(error)

with open(src + '.json', 'r') as out:
    print(out.read())
sys.exit()
python
# manually build and launch your instances # remember that the ip field deals with a private ip def _get_parameter(node_id, private_ip, min_key, max_key): p = {"id": node_id, "ip": private_ip, "min_key": min_key, "max_key": max_key} return p def create_instances_parameters(): """ first = _get_parameter(node_id="1", private_ip="172.31.20.1", min_key="0", max_key="19") # parameter["master_of_master"] = first second = _get_parameter(node_id="2", private_ip="172.31.20.2", min_key="20", max_key="39") # parameter["master"] = second third = _get_parameter(node_id="3", private_ip="172.31.20.3", min_key="40", max_key="59") # parameter["myself"] = third fourth = _get_parameter(node_id="4", private_ip="172.31.20.4", min_key="60", max_key="79") # parameter["slave"] = fourth fifth = _get_parameter(node_id="5", private_ip="172.31.20.5", min_key="80", max_key="99") # parameter["slave_of_slave"] = fifth """ n = 5 key_int = (2**32-1)/n first = _get_parameter(node_id="1", private_ip="172.31.20.1", min_key="0", max_key=str(key_int-1)) # parameter["master_of_master"] = first second = _get_parameter(node_id="2", private_ip="172.31.20.2", min_key=str(key_int), max_key=str(2*key_int-1)) # parameter["master"] = second third = _get_parameter(node_id="3", private_ip="172.31.20.3", min_key=str(2*key_int), max_key=str(3*key_int-1)) # parameter["myself"] = third fourth = _get_parameter(node_id="4", private_ip="172.31.20.4", min_key=str(3*key_int), max_key=str(4*key_int-1)) # parameter["slave"] = fourth fifth = _get_parameter(node_id="5", private_ip="172.31.20.5", min_key=str(4*key_int), max_key=str(5*key_int-1)) list_parameters = [first, second, third, fourth, fifth] list_len = len(list_parameters) result = [] for l in xrange(list_len): parameter = {"master_of_master": list_parameters[l % list_len], "master": list_parameters[(l + 1) % list_len], "myself": list_parameters[(l + 2) % list_len], "slave": list_parameters[(l + 3) % list_len], "slave_of_slave": list_parameters[(l + 4) % list_len]} # print '-------------------' # print list_parameters[l % list_len]['id'] # print list_parameters[(l+1) % list_len]['id'] # print list_parameters[(l+2) % list_len]['id'] # print list_parameters[(l+3) % list_len]['id'] # print list_parameters[(l+4) % list_len]['id'] # print '-------------------' # print '-------------------' # for k, v in parameter.iteritems(): # print "{}, {}".format(k, v) # print '-------------------' result.append(parameter) return result def create_specific_instance_parameters(specific_nodes): list_parameters = [] for k in specific_nodes: list_parameters.append(_get_parameter(node_id=k.id, private_ip=k.ip, min_key=k.min_key, max_key=k.max_key)) parameter = {"master_of_master": list_parameters[0], "master": list_parameters[1], "myself": list_parameters[2], "slave": list_parameters[3], "slave_of_slave": list_parameters[4]} # print '-------------------' # print list_parameters[l % list_len]['id'] # print list_parameters[(l+1) % list_len]['id'] # print list_parameters[(l+2) % list_len]['id'] # print list_parameters[(l+3) % list_len]['id'] # print list_parameters[(l+4) % list_len]['id'] # print '-------------------' # print '-------------------' # for k, v in parameter.iteritems(): # print "{}, {}".format(k, v) # print '-------------------' return parameter def launchApplicationAWS(settings): from CellCycle.AWS.AWSlib import startInstanceAWS from start import loadLogger # necessary to launch aws instances logger = loadLogger(settings) # every instance has an element params_list = create_instances_parameters() # 
default vpc (virtual private network) has a class of 172.31.0.0\16 # so we can create private ip from 172.31.0.1 to 172.31.255.254 # 172.31.1.0\8 is reserved # I suggest to use (just for initial nodes) 172.31.20.0\8 # for example, create 3 nodes: # 172.31.20.1 # 172.31.20.2 # 172.31.20.3 # only debug # from CellCycle.ChainModule.Generator import Generator # from json import dumps,loads # generator = Generator(logger=logger, settings=settings, json_arg=loads(dumps(params_list))) # generator.create_process_environment() # for ins in params_list: # print "######## NEW NODE #######" # for k, v in ins.iteritems(): # print "{}, {}".format(k, v) # print "#########################" # launch for ins in params_list: startInstanceAWS(settings, logger, ins, ins["myself"]["ip"]) if __name__ == "__main__": import sys from start import loadSettings if len(sys.argv) == 1: settings = loadSettings(currentProfile='default') else: currentProfile = {} currentProfile["profile_name"] = sys.argv[1] currentProfile["key_pair"] = sys.argv[2] currentProfile["branch"] = sys.argv[3] settings = loadSettings(currentProfile) launchApplicationAWS(settings)
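
# Hedged sanity check of the key partitioning built above: the five ranges tile
# the key space [0, 2**32 - 2] exactly, and every node shows up as "myself"
# exactly once across the rotated replication roles.
def _check_partitioning():
    n = 5
    key_int = (2 ** 32 - 1) // n                 # 858993459, since 2**32 - 1 = 5 * 858993459
    params = create_instances_parameters()
    assert len(params) == n
    ids = sorted(int(p["myself"]["id"]) for p in params)
    assert ids == [1, 2, 3, 4, 5]
    assert 5 * key_int - 1 == 2 ** 32 - 2        # last assigned key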
python
from typing import List, Union, Callable, Tuple
from thinc.types import Ints2d
from thinc.api import Model, registry

from ..tokens import Doc


@registry.layers("spacy.FeatureExtractor.v1")
def FeatureExtractor(columns: List[Union[int, str]]) -> Model[List[Doc], List[Ints2d]]:
    return Model("extract_features", forward, attrs={"columns": columns})


def forward(
    model: Model[List[Doc], List[Ints2d]], docs, is_train: bool
) -> Tuple[List[Ints2d], Callable]:
    columns = model.attrs["columns"]
    features: List[Ints2d] = []
    for doc in docs:
        if hasattr(doc, "to_array"):
            attrs = doc.to_array(columns)
        else:
            attrs = doc.doc.to_array(columns)[doc.start : doc.end]
        if attrs.ndim == 1:
            attrs = attrs.reshape((attrs.shape[0], 1))
        features.append(model.ops.asarray2i(attrs, dtype="uint64"))

    backprop: Callable[[List[Ints2d]], List] = lambda d_features: []
    return features, backprop
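
# Hedged usage sketch: assumes an installed spaCy pipeline ("en_core_web_sm" is a
# placeholder choice) and exercises the layer through thinc's Model.predict().
def _demo():
    import spacy
    from spacy.attrs import ORTH, SHAPE

    nlp = spacy.load("en_core_web_sm")
    extractor = FeatureExtractor([ORTH, SHAPE])
    features = extractor.predict([nlp("hello world"), nlp("spaCy")])
    return [f.shape for f in features]  # one (n_tokens, n_columns) array per doc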
python
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm.exc import NoResultFound

from modules.db import BaseModel, Model, session_factory


class Session(BaseModel, Model):
    __tablename__ = 'bookmark_sessions'

    id = Column(Integer, primary_key=True)
    account_id = Column(Integer)
    session_key = Column(String(255))
    ip_address = Column(String(100))

    @staticmethod
    def is_valid(key):
        with session_factory() as sess:
            try:
                # Query.filter() expects SQL expressions; keyword arguments
                # belong to filter_by().
                sess.query(Session).filter_by(session_key=key).one()
                return True
            except NoResultFound:
                return False
python
############### Our Blackjack House Rules #####################

## The deck is unlimited in size.
## There are no jokers.
## The Jack/Queen/King all count as 10.
## The Ace can count as 11 or 1.
## Use the following list as the deck of cards:
## cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
## The cards in the list have equal probability of being drawn.
## Cards are not removed from the deck as they are drawn.
## The computer is the dealer.

import random


# Returns a random card from the deck
def deal_card():
    cards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]
    random_card = random.choice(cards)
    return random_card


# Returns the score of the hand
def calculate_score(cards):
    cards = list(cards)  # work on a copy so the caller's hand is unchanged
    total = sum(cards)
    # If total exceeds 21 and there is an Ace, count it as 1 instead of 11
    while total > 21 and 11 in cards:
        cards[cards.index(11)] = 1
        total -= 10
    return total


# Main
play_again = input("Do you want to play a game of Blackjack? y/n: ").lower()

if play_again.startswith('y'):
    user_cards = []
    computer_cards = []

    for card in range(2):
        user_cards.append(deal_card())
        computer_cards.append(deal_card())

    print(user_cards)
    print(computer_cards)

    user_score = calculate_score(user_cards)
    computer_score = calculate_score(computer_cards)

    print(f"user score is {user_score}")
    print(f"computer score is {computer_score}")
else:
    print("Thanks for playing!")
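
# A quick check of the ace adjustment above (hedged addition, not part of the
# original exercise): two aces and a nine should score 21, not 31, because one
# ace is demoted from 11 to 1.
assert calculate_score([11, 11, 9]) == 21
assert calculate_score([11, 10]) == 21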
python
''' ClearSky Version 2 Created by Marissa Klein, Wellesley College 2022 Intended use is getting evening forecast for the next week ''' import requests import json from geopy.geocoders import Nominatim class ClearSky: def __init__(self): pass def locationGet(self,loc): ''' Gets latitude and longitude of a specific location. args: loc(str) must be a valid city and state/country raises: none returns: latitude and longitude as a tuple ''' self.loc = loc app = Nominatim(user_agent="ClearSky") location = app.geocode(loc).raw latitude = location['lat'] longitude = location['lon'] location = (latitude, longitude) return location def URLRet(self,loc): ''' Retrieves proper NWS API URL. args: loc(str) must be a valid city and state/country raises: none returns: NWS weather JSON data for a specific location ''' self.loc = loc coords = self.locationGet(loc) lat = coords[0] long = coords[1] #First API Call response = requests.get('https://api.weather.gov/points/'+lat+','+long) json_data = json.loads(response.text) #Second API Call url = json_data['properties']['forecast'] forecast = requests.get(url) forecast_data = json.loads(forecast.text) return forecast_data def getForecast(self,loc): ''' Gets forecast for the next week's evenings. args: loc(str) must be a valid city and state/country raises: none returns: Detailed forecast of the next seven nights. ''' self.loc = loc forecast = self.URLRet(loc) nights = [] nightFor = [] data_len=len(forecast['properties']['periods']) #Finds the data for nights only for x in range(data_len): keyWord = forecast['properties']['periods'][x]['name'] checkOne = keyWord.find('night') checkTwo = keyWord.find('Night') if checkOne == -1 and checkTwo == -1: pass else: nights.append(x) #Pulls the detailed forecast for the identified entries for x in nights: name = forecast['properties']['periods'][x]['name'] nightSky = name+": "+forecast['properties']['periods'][x]['detailedForecast'] nightFor.append(nightSky) #Prints forecast return nightFor
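
# Hedged usage sketch: requires network access to Nominatim and api.weather.gov,
# and a location inside the United States (the NWS API only serves US points).
if __name__ == '__main__':
    sky = ClearSky()
    for night in sky.getForecast('Boston, Massachusetts'):
        print(night)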
python
# -*- coding: utf-8 -*- from apiclient import discovery from httplib2 import Http from maya import parse, when, get_localzone from pytz import all_timezones from util import set_http class GoogleAPI: """Interface to the Google API. See the documentation for subclasses for more detailed information. """ _service_name = NotImplemented _version = NotImplemented def __init__(self, http=None, impersonated_user_email=None, start=None, end=None, timezone=None): """ :param httplib2.Http http: An Http object for sending the requests. In general, this should be left as None, which will allow for auto-adjustment of the kind of Http object to create based on whether a user's email address is to be impersonated. :param str impersonated_user_email: The email address of a user to impersonate. This requires domain-wide delegation to be activated. See https://developers.google.com/admin-sdk/reports/v1/guides/delegation for instructions. :param str start: The earliest data to collect. Can be any kind of date string, as long as it is unambiguous (e.g. "2017"). It can even be slang, such as "a year ago". Be aware, however, that only the *day* of the date will be used, meaning *time* information will be discarded. :param str end: The latest data to collect. Same format rules apply for this as for the ``start`` parameter. :param str timezone: The timezone to convert all timestamps to before compiling. This should be a standard timezone name. For reference, the list that the timezone will be compared against is available at https://github.com/newvem/pytz/blob/master/pytz/__init__.py. If omitted, the local timezone of the computer will be used. """ if NotImplemented in (self._service_name, self._version): raise ValueError('Implementing classes of GoogleAPI must set a value for _service_name and _version.') self.email = impersonated_user_email # By default, set the timezone to whatever the local timezone is. Otherwise set it to what the user specified. if timezone is None or timezone not in all_timezones: self.tz = str(get_localzone()) else: self.tz = timezone # Interpret the start and end times if start is None: self.start = start else: try: self.start = parse(start).datetime().date() # First, assume they gave a well-formatted time except ValueError: self.start = when(start).datetime().date() # Next, attempt to interpret the time as slang if end is None: self.end = end else: try: self.end = parse(end).datetime().date() except ValueError: self.end = when(end).datetime().date() self.customer_id = 'my_customer' # Only used by directory API # The following are accessed by their respective class properties self._http = http self._service = None self._team_drives = None @property def http(self): if self._http is None or not isinstance(self._http, Http): self._http = set_http(impersonated_user_email=self.email) return self._http @property def service(self): # Create the service object, which provides a connection to Google if self._service is None: self._service = discovery.build(serviceName=self._service_name, version=self._version, http=self.http) return self._service def get_all(self): raise NotImplementedError # TODO: Finish intelligent get_all r = {} for m in [x for x in dir(self) if x.startswith('get_')]: r[m[4:]] = getattr(self, m)() return r
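
# Hedged sketch of a concrete subclass: `_service_name`/`_version` use the Google
# API discovery identifiers for the Admin SDK Reports API; the get_all() body is
# a placeholder, not an implementation from this project.
class ReportsAPI(GoogleAPI):
    _service_name = 'admin'
    _version = 'reports_v1'

    def get_all(self):
        # e.g. self.service.activities().list(userKey='all', applicationName='login')
        raise NotImplementedError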
python
import torch
import torch.nn as nn
import torch.nn.functional as F
from itertools import cycle
from time import perf_counter as tick  # time.clock was removed in Python 3.8
import numpy as np

from experiments.launcher.config import DatasetConfig
from src.eval.utils_eval import evaluate_data_classifier, evaluate_domain_classifier
from src.plotting.utils_plotting import plot_data_frontier_digits
from src.utils.network import weight_init_glorot_uniform
from src.utils.utils_network import set_lr, build_label_domain, get_models, get_optimizer, entropy_loss


class DANN(object):
    def __init__(self, data_loader_train_s, data_loader_train_t, model_config, cuda=False, logger_file=None,
                 data_loader_test_s=None, data_loader_test_t=None, dataset=DatasetConfig(),
                 data_loader_train_s_init=None, n_class=10):
        self.dataset = dataset
        self.cuda = cuda
        self.data_loader_train_s = data_loader_train_s
        self.data_loader_train_t = data_loader_train_t
        self.data_loader_test_t = data_loader_test_t
        self.data_loader_test_s = data_loader_test_s
        self.data_loader_train_s_init = data_loader_train_s_init
        self.domain_label_s = 1
        self.domain_label_t = 0
        self.refinement = model_config.refinement
        self.n_epochs_refinement = model_config.n_epochs_refinement
        self.lambda_regul = model_config.lambda_regul
        self.lambda_regul_s = model_config.lambda_regul_s
        self.threshold_value = model_config.threshold_value
        self.logger = logger_file
        self.adapt_only_first = model_config.adapt_only_first
        self.crop_dim = 0 if model_config.upper_bound and not self.adapt_only_first else \
            int(dataset.im_size * model_config.crop_ratio)
        self.epoch_to_start_align = model_config.epoch_to_start_align
        # `self.nb_epochs` was used in fit() but never assigned; this assumes the
        # config exposes it like the other hyperparameters above.
        self.nb_epochs = model_config.nb_epochs
        self.output_fig = model_config.output_fig
        self.stop_grad = model_config.stop_grad
        self.adaptive_lr = model_config.adaptive_lr
        self.lr_decay_epoch = model_config.epoch_to_start_align
        self.lr_decay_factor = 0.5
        self.grad_scale = 1.0
        self.model_config = model_config
        self.initialize_model = model_config.initialize_model

        feat_extractor, data_classifier, domain_classifier = get_models(model_config, n_class, dataset)
        feat_extractor.apply(weight_init_glorot_uniform)
        data_classifier.apply(weight_init_glorot_uniform)
        domain_classifier.apply(weight_init_glorot_uniform)

        _parent_class = self

        class GradReverse(torch.autograd.Function):
            @staticmethod
            def forward(self, x):
                return x.clone()

            @staticmethod
            def backward(self, grad_output):
                return grad_output.neg() * _parent_class.grad_scale

        class GRLDomainClassifier(nn.Module):
            def __init__(self, domain_classifier, stop_grad):
                super(GRLDomainClassifier, self).__init__()
                self.domain_classifier = domain_classifier
                self.stop_grad = stop_grad

            def forward(self, input):
                if self.stop_grad:
                    x = GradReverse.apply(input.detach())
                else:
                    x = GradReverse.apply(input)
                x = self.domain_classifier.forward(x)
                return x

        self.feat_extractor = feat_extractor
        self.data_classifier = data_classifier
        self.grl_domain_classifier = GRLDomainClassifier(domain_classifier, self.stop_grad)

        if self.cuda:
            self.feat_extractor.cuda()
            self.data_classifier.cuda()
            self.grl_domain_classifier.cuda()

        self.optimizer_feat_extractor, self.optimizer_data_classifier, self.optimizer_domain_classifier = \
            get_optimizer(model_config, self)
        self.init_lr = model_config.init_lr

    def fit(self):
        self.loss_history = []
        self.error_history = []

        if self.crop_dim != 0:
            self.mask_t = torch.ones(size=(self.dataset.channel, self.dataset.im_size, self.dataset.im_size))
            if self.cuda:
                self.mask_t = self.mask_t.cuda()
            self.mask_t[:, :self.crop_dim, :] = 0.0

        if self.initialize_model:
            self.logger.info("Initialize DANN")
            for epoch in range(self.epoch_to_start_align):
                self.feat_extractor.train()
                self.data_classifier.train()
                tic = tick()
                for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s_init):
                    y_batch_s = y_batch_s.view(-1)
                    self.feat_extractor.zero_grad()
                    self.data_classifier.zero_grad()
                    if self.cuda:
                        X_batch_s = X_batch_s.cuda()
                        y_batch_s = y_batch_s.cuda()
                    size = X_batch_s.size()
                    if self.adapt_only_first:
                        X_batch_s = torch.mul(X_batch_s, self.mask_t)
                    output_feat_s = self.feat_extractor(X_batch_s)
                    output_class_s = self.data_classifier(output_feat_s)
                    loss = F.cross_entropy(output_class_s, y_batch_s)
                    loss.backward()
                    self.optimizer_feat_extractor.step()
                    self.optimizer_data_classifier.step()
                toc = tick() - tic
                self.logger.info(
                    "\nTrain epoch: {}/{} {:2.2f}s \tLoss: {:.6f} Dist_loss:{:.6f}".format(
                        epoch, self.nb_epochs, toc, loss.item(), 0))
                if epoch % 5 == 0 and epoch != 0:
                    evaluate_data_classifier(self, is_test=True, is_target=False)
                    evaluate_data_classifier(self, is_test=True, is_target=True)
                    evaluate_domain_classifier(self, self.data_loader_test_s, self.data_loader_test_t,
                                               comments="Domain test")
                self.loss_history.append(loss.item())
                self.error_history.append(loss.item())
            start_epoch = self.epoch_to_start_align
            self.logger.info(f"Finished initializing with batch size: {size}")
        else:
            start_epoch = 0

        if self.output_fig:
            if start_epoch != 0:
                plot_data_frontier_digits(self, self.data_loader_test_s, self.data_loader_test_t, "dann_10")

        self.logger.info("Start aligning")
        for epoch in range(start_epoch, self.nb_epochs):
            self.feat_extractor.train()
            self.data_classifier.train()
            self.grl_domain_classifier.train()
            tic = tick()
            self.T_batches = cycle(iter(self.data_loader_train_t))
            for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s):
                size_s = X_batch_s.size(0)
                y_batch_s = y_batch_s.view(-1)

                p = (batch_idx + (epoch - start_epoch) * len(self.data_loader_train_s)) / (
                    len(self.data_loader_train_s) * (self.nb_epochs - start_epoch))

                if self.adaptive_lr:
                    lr = self.init_lr / (1. + 10 * p) ** 0.75
                    set_lr(self.optimizer_feat_extractor, lr)
                    set_lr(self.optimizer_data_classifier, lr)
                    set_lr(self.optimizer_domain_classifier, lr)

                self.feat_extractor.zero_grad()
                self.data_classifier.zero_grad()
                self.grl_domain_classifier.zero_grad()

                X_batch_t, _ = next(self.T_batches)
                size_t = X_batch_t.size(0)
                if self.cuda:
                    X_batch_t = X_batch_t.cuda()
                    X_batch_s = X_batch_s.cuda()
                    y_batch_s = y_batch_s.cuda()
                if self.crop_dim != 0:
                    X_batch_t = torch.mul(X_batch_t, self.mask_t)
                    if self.adapt_only_first:
                        X_batch_s = torch.mul(X_batch_s, self.mask_t)

                output_feat_s = self.feat_extractor(X_batch_s)
                output_class_s = self.data_classifier(output_feat_s)
                loss = F.cross_entropy(output_class_s, y_batch_s)

                # -----------------------------------------------------------------
                #  domain classification
                # -----------------------------------------------------------------
                self.grad_scale = 2. / (1. + np.exp(-10 * p)) - 1

                align_s = output_feat_s
                output_domain_s = self.grl_domain_classifier(align_s)
                label_domain_s = build_label_domain(self, size_s, self.domain_label_s)
                error_s = F.cross_entropy(output_domain_s, label_domain_s)

                output_feat_t = self.feat_extractor(X_batch_t)
                align_t = output_feat_t
                output_domain_t = self.grl_domain_classifier(align_t)
                label_domain_t = build_label_domain(self, size_t, self.domain_label_t)
                error_t = F.cross_entropy(output_domain_t, label_domain_t)

                dist_loss = (error_s + error_t)
                error = loss + dist_loss
                error.backward()

                self.optimizer_feat_extractor.step()
                self.optimizer_data_classifier.step()
                self.optimizer_domain_classifier.step()

            toc = tick() - tic
            self.logger.info(
                "\nTrain epoch: {}/{} {:.1f}% {:2.2f}s \tTotalLoss: {:.6f} LossS: {:.6f} Dist_loss:{:.6f}".format(
                    epoch, self.nb_epochs, p * 100, toc, error.item(), loss.item(), dist_loss.item()))
            self.loss_history.append(loss.item())
            self.error_history.append(error.item())
            if epoch % 5 == 0 and epoch != 0:
                evaluate_data_classifier(self, is_test=True, is_target=False)
                evaluate_data_classifier(self, is_test=True, is_target=True)
                evaluate_domain_classifier(self, self.data_loader_test_s, self.data_loader_test_t,
                                           comments="Domain test")

        if self.refinement:
            self.logger.info("Refinement")
            n_epochs_refinement = self.n_epochs_refinement
            lambda_regul = self.lambda_regul
            lambda_regul_s = self.lambda_regul_s
            threshold_value = self.threshold_value
            set_lr(self.optimizer_data_classifier, self.init_lr / 10)
            set_lr(self.optimizer_feat_extractor, self.init_lr / 10)

            for epoch in range(self.nb_epochs, self.nb_epochs + n_epochs_refinement):
                evaluate_data_classifier(self, is_test=True, is_target=False)
                evaluate_data_classifier(self, is_test=True, is_target=True)
                self.data_classifier.train()
                self.feat_extractor.train()
                self.T_batches = cycle(iter(self.data_loader_train_t))
                for batch_idx, (X_batch_s, y_batch_s) in enumerate(self.data_loader_train_s):
                    y_batch_s = y_batch_s.view(-1)
                    self.data_classifier.zero_grad()
                    self.feat_extractor.zero_grad()
                    X_batch_t, y_batch_t = next(self.T_batches)
                    if self.cuda:
                        X_batch_t = X_batch_t.cuda()
                        X_batch_s = X_batch_s.cuda()
                        y_batch_s = y_batch_s.cuda()
                        y_batch_t = y_batch_t.cuda()
                    if self.crop_dim != 0:
                        X_batch_t = torch.mul(X_batch_t, self.mask_t)
                        if self.adapt_only_first:
                            X_batch_s = torch.mul(X_batch_s, self.mask_t)

                    # Source domain data: feature extraction + data classifier forward pass
                    output_feat_s = self.feat_extractor(X_batch_s)
                    output_class_s = self.data_classifier(output_feat_s)
                    loss = F.cross_entropy(output_class_s, y_batch_s)

                    # Target domain data
                    output_feat_t = self.feat_extractor(X_batch_t)
                    output_class_t = self.data_classifier(output_feat_t)

                    threshold_index = F.log_softmax(output_class_t, dim=1).data.max(1)[0] > np.log(threshold_value)
                    loss_t_ent = entropy_loss(output_class_t[~threshold_index])

                    y_batch_pseudo_t = output_class_t.data.max(1)[1][threshold_index]
                    if torch.sum(threshold_index) > 0:
                        loss_t = F.cross_entropy(output_class_t[threshold_index], y_batch_pseudo_t)
                    else:
                        loss_t = torch.zeros(1).cuda() if self.cuda else torch.zeros(1)
                    n_pseudo_labelled = torch.sum(threshold_index).item()

                    error = lambda_regul_s * loss + loss_t + lambda_regul * loss_t_ent
                    error.backward()

                    self.optimizer_data_classifier.step()
                    self.optimizer_feat_extractor.step()

                self.logger.info(
                    "\nTrain epoch: {}/{} \tTotalLoss: {:.6f} LossS: {:.6f} LossT: {:.6f} EntropyT: {:.6f}".format(
                        epoch, self.nb_epochs + n_epochs_refinement, error.item(), lambda_regul_s * loss.item(),
                        loss_t.item(), lambda_regul * loss_t_ent.item()))
                self.logger.info("N_Pseudo: {:.1f}".format(n_pseudo_labelled))

        self.loss_test_s, self.acc_test_s, _, _ = evaluate_data_classifier(self, is_test=True, is_target=False)
        self.loss_test_t, self.acc_test_t, _, _ = evaluate_data_classifier(self, is_test=True, is_target=True)
        self.loss_d_test, self.acc_d_test = evaluate_domain_classifier(self, self.data_loader_test_s,
                                                                       self.data_loader_test_t,
                                                                       comments="Domain test")
        if self.output_fig:
            plot_data_frontier_digits(self, self.data_loader_test_s, self.data_loader_test_t, "dann_100")
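The GradReverse/GRLDomainClassifier pair above is the core DANN trick: features pass through unchanged on the forward pass, while the gradient coming back from the domain classifier is negated and scaled, so the feature extractor learns to fool it. A minimal, self-contained sketch of the same mechanism in plain PyTorch (names are mine, not the project's):

import torch

class GradReverseSketch(torch.autograd.Function):
    """Identity on the forward pass; negated, scaled gradient on the backward pass."""
    @staticmethod
    def forward(ctx, x, scale=1.0):
        ctx.scale = scale
        return x.clone()

    @staticmethod
    def backward(ctx, grad_output):
        # Flip the sign so the upstream network is trained *against* the
        # domain classifier; the trailing None matches the `scale` input.
        return grad_output.neg() * ctx.scale, None

x = torch.ones(3, requires_grad=True)
y = (GradReverseSketch.apply(x, 2.0) * 5).sum()
y.backward()
print(x.grad)  # tensor([-10., -10., -10.]) instead of the usual +5 per element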
python
from unittest import TestCase, skip
from unittest.mock import Mock, patch

from tests import _run
from tests import *
_jobs = jobs  # keep the test-helper `jobs` strategies before the porerefiner import below shadows the name

from porerefiner import models, jobs, fsevents
from porerefiner.fsevents import PoreRefinerFSEventHandler as Handler

from hypothesis import given, strategies as strat, example, seed, settings, HealthCheck
#from hypothesis_fspaths import fspaths, _PathLike

from datetime import datetime

import pathlib
import sys

# safe_paths = lambda: fspaths().filter(lambda x: isinstance(x, str) or isinstance(x, _PathLike))


class TestJobDefinition(jobs.AbstractJob):
    pass


class TestTaggableModels(TestCase):

    @given(tag=names(),
           run=Model.Runs(),
           qa=Model.Qas(),
           duty=Model.Duties(),
           ss=Model.Samplesheets(),
           sam=Model.Samples(),
           fi=Model.Files())
    @with_database
    def test_taggable_models_are_taggable(self, tag, run, qa, duty, ss, sam, fi):
        for obj in (run, qa, duty, ss, sam, fi):
            cls = type(obj)
            try:
                for attr in ("tags", "tag", "untag", "ttag", "unttag", "get_by_tags"):
                    try:
                        self.assertTrue(hasattr(cls, attr))
                    except Exception as e:
                        raise Exception(attr) from e
            except Exception as e:
                raise Exception(cls.__name__) from e


class TestModels(TestCase):

    @given(paths())
    @example(b'/path/pa')
    def test_path_field(self, path):
        try:
            pa = pathlib.Path(path)
        except TypeError:
            pa = pathlib.Path(str(path, encoding=sys.getfilesystemencoding()))
        fld = models.PathField()
        self.assertEqual(fld.python_value(fld.db_value(path)), pa)

    # @given(job=_jobs())
    # def test_job_field(self, job):
    #     fld = models.JobField()
    #     self.assertEqual(type(fld.python_value(fld.db_value(job))), type(job))

    def test_models_registered(self):
        self.assertEqual(len(models.REGISTRY), 11)

    # @skip('broken')
    @given(tag=strat.text().filter(lambda x: x))
    @with_database
    def test_tags(self, tag):
        import peewee
        import logging
        #peewee.logger.debug = lambda msg, *a, **k: peewee.logger.log(logging.ERROR, msg, *a, **k)
        # flow = models.SampleSheet.create()
        # tag, _ = models.Tag.get_or_create(name=tag)
        # tag_j = models.TagJunction.create(samplesheet=flow, tag=tag)
        # self.assertIn(tag, flow.tags)
        ut = models.Run.create(name="TEST", path="TEST")
        tag = ut.tag("TEST")
        self.assertIn(tag, ut.tags)
        ut.untag(tag.name)
        ttag = ut.ttag("TEST", "TEST", "TEST")
        self.assertIn(ttag, ut.tags)
        ut.unttag(ttag.namespace, ttag.name)
        self.assertNotIn(tag, ut.tags)
        self.assertNotIn(ttag, ut.tags)
        #peewee.logger.debug = lambda msg, *a, **k: peewee.logger.log(logging.DEBUG - 5, msg, *a, **k)

    @with_database
    def test_tag_failure(self):
        with self.assertRaises(Exception):
            tag = models.Tag.create(name='')


# class TestFlowcell(TestCase):

#     @given(pk=sql_ints(),
#            consumable_id=strat.text(),
#            consumable_type=strat.text(),
#            path=paths())
#     @with_database
#     def test_flowcell(self, **kwargs):
#         assert models.Flowcell.create(**kwargs)


class TestRun(TestCase):

    @skip('broken')
    @given(pk=sql_ints(),
           name=strat.text(),
           library_id=strat.text(),
           alt_name=strat.text(),
           run_id=strat.text(),
           started=strat.datetimes().filter(lambda d: d < datetime.now()),
           ended=strat.datetimes().filter(lambda d: d > datetime.now()),
           path=paths(),
           basecalling_model=strat.one_of(*[strat.just(val) for val, _ in models.Run.basecallers]))
    @with_database
    def test_run(self, **kwargs):
        self.flow = models.Flowcell.create(consumable_id='TEST', consumable_type='TEST', path='TEST/TEST')
        assert models.Run.create(flowcell=self.flow, **kwargs).run_duration

    @settings(deadline=None, suppress_health_check=(HealthCheck.all()))
    @given(run=Model.Runs(), job=_jobs())
    @with_database
    def test_job_spawn(self, run, job):
        # run.flowcell.save()
        run.save()
        self.assertIsNotNone(run.pk)
        jobb = run.spawn(job)
        self.assertIs(job, jobb.job_state)


class TestQa(TestCase):

    @given(pk=sql_ints(),
           coverage=strat.floats().filter(lambda f: f > 0),
           quality=strat.floats().filter(lambda f: f > 0))
    @with_database
    def test_qa(self, **kwargs):
        assert models.Qa.create(**kwargs)


class TestJob(TestCase):

    @given(job=Model.Duties())
    @with_database
    def test_job(self, job):
        assert job.save()

    # @skip('no test yet')
    @given(job=Model.Duties(), path=paths(pathlib_only=True))
    @with_database
    def test_job_files(self, job, path):
        job.save()
        # file = models.File(path=path)
        # file.save()
        file = models.File.create(path=path)
        job.files.add(file)
        job.save()
        self.assertIn(file, job.files)


class TestSampleSheet(TestCase):

    @given(pk=sql_ints(),
           path=paths(),
           date=strat.datetimes(),
           sequencing_kit=strat.text())
    @with_database
    def test_samplesheet(self, **kwargs):
        assert models.SampleSheet.create(**kwargs)

    # @skip('broken')
    @with_database
    def test_get_unused_sheets(self):
        # self.flow = flow = models.Flowcell.create(consumable_id="TEST|TEST|TEST", consumable_type="TEST|TEST|TEST", path="TEST/TEST/TEST")
        self.run = models.Run.create(pk=100, library_id='x', name="TEST", path="TEST/TEST/TEST")
        self.assertFalse(models.SampleSheet.get_unused_sheets().count())
        models.SampleSheet.create(path="TEST")
        self.assertEqual(models.SampleSheet.get_unused_sheets().count(), 1)

    # @skip('broken')
    @given(ss=Message.Samplesheets())
    @with_database
    def test_new_sheet_from_message(self, ss):
        # flow = models.Flowcell.create(consumable_id="TEST|TEST|TEST", consumable_type="TEST|TEST|TEST", path="TEST/TEST/TEST")
        run = models.Run.create(pk=100, library_id='x', name="TEST", path="TEST/TEST/TEST")
        s = models.SampleSheet.new_sheet_from_message(ss, run)
        self.assertEqual(run.sample_sheet, s)


class TestSample(TestCase):

    @given(pk=sql_ints(),
           sample_id=strat.text(),
           accession=strat.text(),
           barcode_id=strat.text(),
           organism=strat.text(),
           extraction_kit=strat.text(),
           comment=strat.text(),
           user=strat.emails())
    @with_database
    def test_sample(self, **k):
        ss = models.SampleSheet.create(path=k['sample_id'])
        assert models.Sample.create(samplesheet=ss, **k)


class TestFile(TestCase):

    @given(pk=sql_ints(),
           path=paths(),
           checksum=strat.text(),
           last_modified=strat.datetimes(),
           exported=strat.booleans())
    @with_database
    def test_file(self, **k):
        assert models.File.create(**k)

    @given(pk=sql_ints(),
           path=paths(),
           checksum=strat.text(),
           last_modified=strat.datetimes(),
           exported=strat.booleans(),
           job=Model.Duties())
    @with_database
    def test_job_spawn(self, job, **k):
        fi = models.File.create(**k)
        assert fi.spawn(job)


class TestTags(TestBase):
    "Tests for a bunch of tag-related bugs"

    @skip("broken")
    def test_complex_query(self):
        from porerefiner.models import Run, Tag, TagJunction, TripleTag, TTagJunction
        tags = ("TEST", "another tag")
        self.assertFalse(Run.select().join(TagJunction).join(Tag).where(Tag.name << tags).switch(Run).join(TTagJunction).join(TripleTag).where(TripleTag.value << tags))

    @skip("old approach")
    def test_tagging_assumptions(self):
        from porerefiner.models import Run, Tag, TagJunction, TripleTag, TTagJunction
        tags = ("TEST", "another tag")
        run = Run.create(name="TEST", path="/dev/null")
        self.assertEqual(len(Run.select().join(TagJunction).join(Tag).where(Tag.name << tags)), 0)  # test simple query no tags
        run.tag(tags[0])
        self.assertEqual(len(Run.select().join(TagJunction).join(Tag).where(Tag.name << tags)), 1)  # test simple query, one tag
        self.assertEqual(len(Run.select().join(TagJunction).join(Tag).where(Tag.name << tags).switch(Run).join(TTagJunction).join(TripleTag).where(TripleTag.value << tags)), 1)  # test complicated query with simple tag
        run.ttag(namespace="TEST", name="TEST", value=tags[0])
        self.assertEqual(len(Run.select().join(TagJunction).join(Tag).where(Tag.name << tags).switch(Run).join(TTagJunction).join(TripleTag).where(TripleTag.value << tags)), 1)  # complicated query with two tags but one result

    def test_lookup_by_tags(self):
        from porerefiner.models import Run, Tag, TagJunction, TripleTag, TTagJunction
        tags = ("TEST", "another tag")
        run = Run.create(name="TEST", path="/dev/null")
        run.tag(tags[0])
        self.assertEqual(len(Run.get_by_tags(*tags)), 1)
        run.ttag(namespace="TEST", name="TEST", value=tags[0])
        self.assertEqual(len(Run.get_by_tags(*tags)), 1)

    @given(tags=strat.lists(names(), min_size=1, unique=True),
           run=Model.Runs())
    def test_tags_dont_bump_each_other(self, tags, run):
        run.save()
        for tag in tags:
            run.tag(tag)
        self.assertEqual(len(list(run.tags)), len(tags))

    # @skip("")
    @settings(deadline=None)
    @given(tag=names(), run=Model.Runs())
    def test_tags_arent_deleted_on_run_end(self, tag, run):
        run.save()
        ta = run.tag(tag)
        tta = run.ttag(tag, tag, tag)
        _run(fsevents.end_run(run))
        fin = models.Tag.get(name="finished")
        self.assertIn(ta, run.tags)
        self.assertIn(fin, run.tags)
        self.assertIn(tta, run.tags)

    # @skip("")
    @given(tag=names(),
           file_event=file_events(),
           run=Model.Runs())
    def test_tags_arent_deleted_on_file_deletion(self, tag, file_event, run):
        file, event = file_event
        assert file.path == event.src_path
        file.save()
        models.File.get(file.id)
        file.tag(tag)
        run.save()
        tag = run.tag(tag)
        self.assertEqual(len(list(run.tags)), 1)
        self.assertEqual(len(list(file.tags)), 1)
        _run(Handler(event.src_path.parts[0]).on_deleted(event))
        self.assertFalse(models.File.get_or_none(models.File.path==event.src_path))  # check file record is gone
        self.assertEqual(len(list(run.tags)), 1)
        self.assertIn(tag, run.tags)
python
""" Compare the results provided by the different solvers """ from tqdm import tqdm import pickle from sys import path path.append("..") path.append("solvers/") import settings from solvers.solver import SimulatedAnnealingSolver, RandomSolver from solvers.uncertainty_solver import UncertaintySimulatedAnnealingSolver, UncertaintyRandomSolver from solvers.uncertainty_battery_solver import UncertaintyBatteryRandomSolver, UncertaintyBatterySimulatedAnnealingSolver fs = open("../webserver/data/serialization/mapper.pickle", "rb") mapper = pickle.load(fs) fs.close() state = [(1059, 842), (505, 1214), (400, 1122), (502, 339), (866, 512), (1073, 82), (669, 1202), (32, 1122), (45, 52), (209, 993), (118, 653), (487, 896), (748, 638), (271, 1067), (1576, 567), (683, 316), (1483, 1156), (1448, 634), (303, 1220), (759, 823), (1614, 991), (1387, 174), (1618, 227), (367, 39), (35, 902), (967, 690), (944, 327), (912, 1029), (184, 1205), (779, 1026), (694, 123), (1502, 395)] nb_drone = 1 nb_test = 10 # print("Testing battery consumption solver") # battery_mean_battery = [] # battery_mean_uncertainty = [] # battery_mean_patrol = [] # for t in tqdm(range(nb_test)): # battery_rplan = RandomSolver(state, mapper, nb_drone) # battery_rplan.solve() # battery_saplan = SimulatedAnnealingSolver(battery_rplan.state, mapper, nb_drone) # battery_saplan.copy_strategy = "slice" # battery_saplan.steps = 1000000 # battery_saplan.Tmax = 250 # battery_saplan.Tmin = 1 # battery_saplan.updates = 0 # itinerary, energy = battery_saplan.solve() # battery_mean_battery.append(energy) # b = battery_mean_battery[len(battery_mean_battery) - 1] # battery_mean_uncertainty.append(UncertaintySimulatedAnnealingSolver(itinerary, mapper, nb_drone).compute_performance()) # u = battery_mean_uncertainty[len(battery_mean_uncertainty) - 1] # battery_saplan.detail_plan() # battery_mean_patrol.append(battery_saplan.get_number_patrols()[0]) # p = battery_mean_patrol[len(battery_mean_patrol) - 1] # f = open("memo_tester_battery", "a") # f.write(str(b) + " " + str(u) + " " + str(p) + "\n") # f.close() # battery_mean_battery = sum(battery_mean_battery) / len(battery_mean_battery) # battery_mean_uncertainty = sum(battery_mean_uncertainty) / len(battery_mean_uncertainty) # battery_mean_patrol = sum(battery_mean_patrol) / len(battery_mean_patrol) # # print("Testing uncertainty rate solver") # uncertainty_mean_battery = [] # uncertainty_mean_uncertainty = [] # uncertainty_mean_patrol = [] # for t in tqdm(range(nb_test)): # uncertainty_rplan = UncertaintyRandomSolver(state, mapper, nb_drone) # uncertainty_rplan.solve() # uncertainty_saplan = UncertaintySimulatedAnnealingSolver(uncertainty_rplan.state, mapper, nb_drone) # uncertainty_saplan.copy_strategy = "slice" # uncertainty_saplan.steps = 2000000 # uncertainty_saplan.Tmax = 50 # uncertainty_saplan.Tmin = 12 # uncertainty_saplan.updates = 0 # itinerary, energy = uncertainty_saplan.solve() # uncertainty_mean_battery.append(uncertainty_saplan.get_battery_consumption()) # b = uncertainty_mean_battery[len(uncertainty_mean_battery) - 1] # uncertainty_mean_uncertainty.append(uncertainty_saplan.compute_performance()) # u = uncertainty_mean_uncertainty[len(uncertainty_mean_uncertainty) - 1] # uncertainty_saplan.detail_plan() # uncertainty_mean_patrol.append(uncertainty_saplan.get_number_patrols()[0]) # p = uncertainty_mean_patrol[len(uncertainty_mean_patrol) - 1] # f = open("memo_tester_uncertainty", "a") # f.write(str(b) + " " + str(u) + " " + str(p) + "\n") # f.close() # uncertainty_mean_battery = 
sum(uncertainty_mean_battery) / len(uncertainty_mean_battery) # uncertainty_mean_uncertainty = sum(uncertainty_mean_uncertainty) / len(uncertainty_mean_uncertainty) # uncertainty_mean_patrol = sum(uncertainty_mean_patrol) / len(uncertainty_mean_patrol) print("Testing uncertainty rate + battery solver") uncertainty_battery_mean_battery = [] uncertainty_battery_mean_uncertainty = [] uncertainty_battery_mean_patrol = [] for t in tqdm(range(nb_test)): uncertainty_battery_rplan = UncertaintyBatteryRandomSolver(state, mapper, nb_drone) uncertainty_battery_rplan.solve() uncertainty_battery_saplan = UncertaintyBatterySimulatedAnnealingSolver(uncertainty_battery_rplan.state, mapper, nb_drone) uncertainty_battery_saplan.copy_strategy = "slice" uncertainty_battery_saplan.steps = 2000000 uncertainty_battery_saplan.Tmax = 50 uncertainty_battery_saplan.Tmin = 12 uncertainty_battery_saplan.updates = 0 itinerary, energy = uncertainty_battery_saplan.solve() uncertainty_battery_mean_battery.append(uncertainty_battery_saplan.battery_consumption) b = uncertainty_battery_mean_battery[len(uncertainty_battery_mean_battery) - 1] uncertainty_battery_mean_uncertainty.append(uncertainty_battery_saplan.uncertainty_rate) u = uncertainty_battery_mean_uncertainty[len(uncertainty_battery_mean_uncertainty) - 1] uncertainty_battery_saplan.detail_plan() uncertainty_battery_mean_patrol.append(uncertainty_battery_saplan.get_number_patrols()[0]) p = uncertainty_battery_mean_patrol[len(uncertainty_battery_mean_patrol) -1] f = open("memo_tester_uncertainty_battery", "a") f.write(str(b) + " " + str(u) + " " + str(p) + "\n") f.close() uncertainty_battery_mean_battery = sum(uncertainty_battery_mean_battery) / len(uncertainty_battery_mean_battery) uncertainty_battery_mean_uncertainty = sum(uncertainty_battery_mean_uncertainty) / len(uncertainty_battery_mean_uncertainty) uncertainty_battery_mean_patrol = sum(uncertainty_battery_mean_patrol) / len(uncertainty_battery_mean_patrol) print("TESTER BATTERY UNCERTAINTY RATE #PATROLS") #print("BATTERY", "\t", battery_mean_battery, "\t\t", battery_mean_uncertainty, "\t", battery_mean_patrol) #print("UNCERTAINTY", "\t", uncertainty_mean_battery, "\t\t", uncertainty_mean_uncertainty, "\t", uncertainty_mean_patrol) print("UNCERTAINTY + BATTERY", "\t", uncertainty_battery_mean_battery, "\t\t", uncertainty_battery_mean_uncertainty, "\t", uncertainty_battery_mean_patrol)
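The memo files above are written with bare open()/write()/close() triples inside the test loop; a small sketch of the same record format behind a context manager, so the handle is released even if a solver raises mid-run (the helper name is mine, not the project's):

def log_result(path, battery, uncertainty, patrols):
    # One whitespace-separated record per test run, matching the memo files above.
    with open(path, "a") as f:
        f.write(f"{battery} {uncertainty} {patrols}\n")

# e.g. inside the loop above:
# log_result("memo_tester_uncertainty_battery", b, u, p)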
python
# File handling

def abrir(path):
    """
    Tries to open the file at the given path. If the file cannot be found,
    creates a file with that name at the given path.
    :param path: Location where the file is, or will be created.
    """
    try:
        a = open(path, 'tr')
        return False
    except:
        a = open(path, 'w+')
        c = 0
        while c < 57:
            clear()
            if c < 56:
                cabecalho('Criando Arquivo...')
            else:
                cabecalho('Arquivo Criado!')
            cheio = "■" * c
            vazio = "□" * (56 - c)
            print(f'║ {cheio}{vazio} ║', flush=True)
            linhas('╚', '╝', '═', 60, flush=True)
            c += 1
            sleep(0.01)
        input('Enter para Continuar')
    finally:
        a.close()


def ler(path):
    """
    Opens the file at the given path and returns its contents as a list,
    one item per line of the file.
    :param path: Location of the file to be read.
    """
    try:
        f = open(path, 'tr')
        arquivo = f.readlines()
        f.close()
        abriu = True
    except:
        abriu = False
    if abriu:
        return arquivo
    else:
        print('Não foi possivel ler o arquivo')
        sleep(1)


def gravar(path, wra, gravacao):
    """
    Opens the file at the given path in the requested mode and writes the
    given content to it.
    :param path: Location of the file the data will be written to.
    :param wra: Mode the file is opened in: 'r' - read, 'w' - write, 'a' - append.
    :param gravacao: Content to be saved to the file.
    """
    abriu = False  # was left unbound when open() failed, raising NameError below
    try:
        f = open(path, wra)
        abriu = True
    except Exception as erro:
        print(f'Não foi possivel devido erro: "{erro.__class__}"')
    if abriu:
        f.write(gravacao)
        f.close()


def adicionar(path):
    """
    Adds a new participant to the table.
    :param path: Location of the file the participant will be added to.
    """
    try:
        nome = str(input('Nome: ')).title().strip()
        pont = 0
        nome = nome[0:38]
        gravar(path, 'a', f'{nome};{pont}\n')
    except:
        print('Não foi possivel Adicionar')
    else:
        print(f'{nome} adicionado com sucesso')
    sleep(1)


def modificar(path, arquivo):
    """
    Modifies a single entry inside the file.
    :param path: Location of the file to be modified.
    :param arquivo: List of entries to be updated and written back to the file.
    """
    if len(arquivo) == 0:
        print('Lista Vazia')
        sleep(1)
        return
    pos = leiaInt('Posição: ') - 1
    if pos >= len(arquivo) or pos < 0:
        print(f'"{pos+1}" É uma posição inválida')
        print('Por favor tente novamente')
        return
    pnt = leiaInt('Pontuação: ')
    try:
        for p, i in enumerate(arquivo):
            i = i.split(';')
            i[1] = i[1].replace('\n', '')
            if p == pos:
                i[1] = int(i[1])
                i[1] += pnt
            if p == 0:
                f = open(path, 'w')
                f.write(f'{i[0]};{i[1]}\n')
            else:
                f = open(path, 'a')
                f.write(f'{i[0]};{i[1]}\n')
            f.close()
    except Exception as erro:
        print(f'Falha ao Gravar lista em arquivo: {erro.__class__}')
    else:
        print('Pontuação Adicionada com Sucesso!')


def removerpessoa(path, arquivo):
    """
    Removes a participant from the table.
    :param path: Location of the file to be modified.
    :param arquivo: List of entries to be updated and written back to the file.
    """
    if len(arquivo) == 0:
        print('Lista Vazia! Não é possivel remover!')
        input('Enter para continuar')
        return
    pos = leiaInt('Posição: ') - 1
    if -1 < pos < len(arquivo):  # pos == len(arquivo) was accepted before and raised IndexError
        arquivo[pos] = arquivo[pos].split(';')
        deletado = arquivo[pos][0]
        while True:
            certeza = str(input(f'Tem Certeza que deseja Remover {deletado}? [S/N]: ')).strip().upper()[0]
            if certeza not in 'SN':
                print('Escolha Inválida')
                sleep(2)
            else:
                break
        if certeza == 'N':
            return
        del arquivo[pos]
        if len(arquivo) == 0:
            f = open(path, 'w')
            f.write('')
            f.close()
        else:
            try:
                for p, i in enumerate(arquivo):
                    if len(arquivo) > 0:
                        i = i.split(';')
                        i[1] = i[1].replace('\n', '')
                        if p == 0:
                            f = open(path, 'w')
                            f.write(f'{i[0]};{i[1]}\n')
                        else:
                            f = open(path, 'a')
                            f.write(f'{i[0]};{i[1]}\n')
            except Exception as erro:
                print(f'Falha ao Remover da lista em arquivo: {erro.__class__}')
                input('Enter para continuar')
            f.close()
        print(f'{deletado} foi excluido da lista com sucesso!')
        sleep(2)
    else:
        print(f'"{pos+1}" Não faz parte da lista\nRetornando ao Menu Principal...')
        sleep(2)


def delarquivo(path):
    """
    Deletes the file at the given location.
    :param path: Location of the file to be deleted.
    """
    import os
    os.system(f'del {path}')
    c = 0
    while c < 57:
        clear()
        if c < 56:
            cabecalho('Deletando Arquivo...')
        else:
            cabecalho('Arquivo Deletado!')
        cheio = "■" * (56 - c)
        vazio = "□" * c
        print(f'║ {cheio}{vazio} ║', flush=True)
        linhas('╚', '╝', '═', 60, flush=True)
        c += 1
        sleep(0.01)
    input('Enter para Continuar!')


# Interface handling

def linhas(inicio, fim, simb, tam, end='\n', flush=False):
    """
    Builds a line out of a sequence of symbols.
    :param inicio: Character used at the first position of the line.
    :param fim: Character used at the last position of the line.
    :param simb: Symbol repeated along the rest of the line.
    :param tam: Total length of the line.
    :param end: How the line ends; defaults to '\\n' so the next print starts one line below.
    :param flush: Whether the print is flushed immediately.
    """
    lin = simb * (tam - 2)
    print(inicio, end='')
    print(f'{lin}', end='')
    print(fim, end=end, flush=flush)  # `end` and `flush` were accepted but silently ignored before


def cabecalho(titulo):
    """
    Draws the standard header with a customizable title.
    :param titulo: Title shown in the header.
    """
    linhas('╔', '╗', '═', 60)
    print(f'║{titulo:^58}║')
    linhas('╠', '╣', '═', 60)


def menu(lista, ver=''):
    """
    Draws a menu with every option added to the list.
    :param lista: List with all the options shown in the menu.
    :param ver: Current program version, shown in the lower-right corner of the menu.
    """
    cabecalho('Menu Principal')
    for p, i in enumerate(lista):
        if i == lista[-1]:
            print(f'║ {p+1} - {i:<42}{ver:>10} ║')
        else:
            print(f'║ {p+1} - {i:<53}║')
    linhas('╚', '╝', '═', 60)
    opc = leiaInt('Escolha uma Opção: ')
    return opc


def organizar(arquivo):
    """
    Organizes the items of a file into a list.
    :param arquivo: File whose lines will be organized into a list.
    """
    lista = list()
    for linha in arquivo:
        dado = linha.split(';')
        dado[1] = dado[1].replace('\n', '')
        lista.append(dado[:])
    return lista


def mostrar(lista):
    """
    Shows an organized table of participants, with each participant's
    position, name, and current score.
    :param lista: List to be shown.
    """
    cabecalho('Placar')
    print(f'║ POS ║{"Nome":^40}║{"Pontuação":^11}║')
    linhas('╠', '╣', "═", 60)
    if len(lista) == 0:
        print(f'║{"":58}║')
        print(f'║{"Lista Vazia":^58}║')
        print(f'║{"":58}║')
    for p, c in enumerate(lista):
        print(f'║ {p+1:^3} ║ {c[0]:_<38} ║ {c[1]:>5} pts ║')
    linhas('╚', '╝', '═', 60)


# Extra functions

def leiaInt(txt):
    """
    Accepts only integer input from the user; if the value entered is not
    a valid integer, the user is asked to type it again.
    :param txt: Prompt text shown when asking the user for input.
    """
    while True:
        try:
            num = int(input(txt))
        except:
            print('Por favor insira um número inteiro válido')
            continue
        else:
            return num


def clear():
    """
    Clears the command prompt.
    """
    import os
    os.system('cls')


# Main program

from time import sleep

clear()

# */ Asks the user whether the file to open is the default 'placar.txt'
# or a file with a custom name. /*
while True:
    cabecalho('Qual Tipo de Arquivo?')
    print(f'║{"Padrão [1]":^28}║{"Personalizado [2]":^29}║')
    linhas('╚', '╝', '═', 60)
    padrao = leiaInt('Escolha: ')
    if padrao == 1:
        nome = 'placar.txt'
        break
    elif padrao == 2:
        print('Não se esqueça do .txt no final')
        nome = str(input('Nome do arquivo: '))
        break
    else:
        print('Opção Inválida, Tente Novamente!')
        sleep(3)
        clear()
        continue

abrir(nome)

while True:
    clear()
    # */ Main menu /*
    opc = menu(['Ler Placar', 'Adicionar Pontuação', 'Adicionar Pessoa',
                'Remover Pessoa', 'Deletar Arquivo', 'Sair'], 'Ver. 1.1.3')
    # */ Shows the items already saved in the table /*
    if opc == 1:
        try:
            clear()
            mostrar(organizar(ler(nome)))
            input('Enter pra Continuar')
        except:
            print('Não foi possivel Ler o Placar!')
    # */ Adds or removes points from a participant in the table /*
    elif opc == 2:
        try:
            clear()
            mostrar(organizar(ler(nome)))
            modificar(nome, ler(nome))
        except:
            print('Não Foi possivel Adicionar Pontuação!')
            sleep(3)
        else:
            input('Enter para continuar')
    # */ Adds a participant to the table /*
    elif opc == 3:
        try:
            clear()
            mostrar(organizar(ler(nome)))
            adicionar(nome)
            input('Enter para Continuar')
        except:
            print('Não foi possivel Adicionar Pessoa')
    # */ Removes a participant from the table /*
    elif opc == 4:
        try:
            clear()
            mostrar(organizar(ler(nome)))
            removerpessoa(nome, ler(nome))
        except Exception as erro:
            print(f'Não Foi possivel remover: {erro.__class__} ')
    # */ Deletes the file that was opened and is being read by the program /*
    elif opc == 5:
        print('Deletando Arquivo, O programa irá fechar!')
        while True:
            try:
                certeza = str(input('Você tem certeza? [S/N]: ')).strip().upper()[0]
                if certeza not in 'SN':
                    print('Escolha Inválida, Por favor escolha entre Sim[S] e Não[N]!')
                    sleep(2)
                    clear()
                    continue
                else:
                    break
            except:
                print('Escolha Inválida!')
        if certeza == 'S':
            delarquivo(nome)
            break
        else:
            continue
    # */ Quit the program /*
    elif opc == 6:
        print('Saindo do Programa')
        sleep(1)
        print('Até Logo...')
        sleep(1)
        break
    # */ If the user types a number that is not in the menu, the menu is reloaded /*
    else:
        print('Opção Inválida')
        sleep(2)
        continue
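An alternative sketch of `gravar` above using a context manager, which makes the open/close bookkeeping (and the `abriu` flag) unnecessary because the file is closed even if the write raises:

def gravar(path, wra, gravacao):
    """Writes `gravacao` to `path`, opened in mode `wra` ('r'/'w'/'a')."""
    try:
        with open(path, wra) as f:
            f.write(gravacao)
    except OSError as erro:
        print(f'Não foi possivel devido erro: "{erro.__class__}"')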
python
__________________________________________________________________________________________________
Runtime: 388 ms
Memory Usage: 18.5 MB

class Solution:
    def maxLevelSum(self, root: TreeNode) -> int:
        mapping = {}
        self.helper(mapping, root, 1)
        max_val, max_level = float("-inf"), 0  # the old -9999999 sentinel missed levels with very negative sums
        for level, val in mapping.items():
            if val > max_val:
                max_val = val
                max_level = level
        return max_level

    def helper(self, mapping, root, level):
        if not root:
            return
        mapping[level] = mapping.get(level, 0) + root.val
        self.helper(mapping, root.left, level + 1)
        self.helper(mapping, root.right, level + 1)
__________________________________________________________________________________________________
__________________________________________________________________________________________________
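The recursive level-sum above can also be computed iteratively, which avoids recursion-depth limits on deep trees. A sketch using a plain level-by-level BFS (assumes the usual LeetCode TreeNode with val/left/right; the function name is mine):

def max_level_sum_bfs(root) -> int:
    best_sum, best_level = float("-inf"), 0
    level, nodes = 1, ([root] if root else [])
    while nodes:
        s = sum(n.val for n in nodes)
        if s > best_sum:
            best_sum, best_level = s, level
        # advance to the next level, keeping only non-null children
        nodes = [c for n in nodes for c in (n.left, n.right) if c]
        level += 1
    return best_level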
python
import socket
import dns
import dns.resolver

from .logbase import LogBase
from threading import Lock
from typing import Dict, List, Any
from datetime import timedelta

TTL_HOURS = 12


class Resolver(LogBase):
    def __init__(self, time):
        self.cache: Dict[str, Any] = {}
        self.overrides: Dict[str, List[str]] = {}
        self.resolver: dns.resolver.Resolver = None
        self.lock: Lock = Lock()
        self.old_getaddrinfo = None
        self.ignoreIpv6 = False
        self.time = time
        self.enabled = False

    def addResolveAddress(self, address):
        with self.lock:
            if address not in self.cache:
                self.cache[address] = None

    def addOverride(self, host, addresses):
        with self.lock:
            self.overrides[host] = addresses

    def toggle(self):
        self.enabled = not self.enabled

    def clearOverrides(self):
        with self.lock:
            self.overrides = {}

    def setDnsServers(self, servers):
        with self.lock:
            self.resolver = dns.resolver.Resolver()
            self.resolver.nameservers = servers

    def setIgnoreIpv6(self, ignore):
        self.ignoreIpv6 = ignore

    def __enter__(self):
        with self.lock:
            self.old_getaddrinfo = socket.getaddrinfo
            socket.getaddrinfo = self._override_getaddrinfo
        return self

    def __exit__(self, a, b, c):
        with self.lock:
            socket.getaddrinfo = self.old_getaddrinfo
            self.old_getaddrinfo = None

    def _override_getaddrinfo(self, *args, **kwargs):
        with self.lock:
            if len(args) > 1 and args[0] in self.cache:
                override = self.cachedLookup(args[0])
                if override is not None and len(override) > 0:
                    resp = []
                    for ip in override:
                        resp.append((socket.AF_INET, socket.SOCK_STREAM, 6, '', (ip, args[1])))
                    return resp
            responses = self.old_getaddrinfo(*args, **kwargs)
            if self.ignoreIpv6:
                responses = [response for response in responses if response[0] != socket.AF_INET6]
            return responses

    def cachedLookup(self, host):
        if host in self.overrides:
            return self.overrides[host]
        if self.resolver is None:
            return None
        if not self.enabled:
            return None
        entry = self.cache.get(host)
        if entry is not None and entry[1] > self.time.now():
            return entry[0]
        addresses = []
        for data in self.resolver.query(host, "A", tcp=True):
            addresses.append(data.address)
        data = (addresses, self.time.now() + timedelta(hours=TTL_HOURS))
        self.cache[host] = data
        return addresses
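Typical use of the Resolver above, as a sketch: a host is only intercepted by the patched getaddrinfo if it was first registered with addResolveAddress, and entering the `with` block is what swaps socket.getaddrinfo out. The host name, address, and `clock` object (anything with a `now()` method) below are hypothetical stand-ins:

import socket

resolver = Resolver(time=clock)                       # `clock` is a hypothetical time source
resolver.addResolveAddress("backup.example.com")      # only registered hosts are intercepted
resolver.addOverride("backup.example.com", ["10.0.0.5"])

with resolver:  # socket.getaddrinfo is patched for the duration of the block
    sock = socket.create_connection(("backup.example.com", 443))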
python
# -*- coding: utf-8 -*-

import json
import sys
import argparse

import numpy

import bpy
import bmesh

# These are the RGB values that JMol uses to color atoms
JMOL_COLORING = {
    "H": [255, 255, 255], "He": [217, 255, 255], "Li": [204, 128, 255], "Be": [194, 255, 0],
    "B": [255, 181, 181], "C": [144, 144, 144], "N": [48, 80, 248], "O": [255, 13, 13],
    "F": [144, 224, 80], "Ne": [179, 227, 245], "Na": [171, 92, 242], "Mg": [138, 255, 0],
    "Al": [191, 166, 166], "Si": [240, 200, 160], "P": [255, 128, 0], "S": [255, 255, 48],
    "Cl": [31, 240, 31], "Ar": [128, 209, 227], "K": [143, 64, 212], "Ca": [61, 255, 0],
    "Sc": [230, 230, 230], "Ti": [191, 194, 199], "V": [166, 166, 171], "Cr": [138, 153, 199],
    "Mn": [156, 122, 199], "Fe": [224, 102, 51], "Co": [240, 144, 160], "Ni": [80, 208, 80],
    "Cu": [200, 128, 51], "Zn": [125, 128, 176], "Ga": [194, 143, 143], "Ge": [102, 143, 143],
    "As": [189, 128, 227], "Se": [255, 161, 0], "Br": [166, 41, 41], "Kr": [92, 184, 209],
    "Rb": [112, 46, 176], "Sr": [0, 255, 0], "Y": [148, 255, 255], "Zr": [148, 224, 224],
    "Nb": [115, 194, 201], "Mo": [84, 181, 181], "Tc": [59, 158, 158], "Ru": [36, 143, 143],
    "Rh": [10, 125, 140], "Pd": [0, 105, 133], "Ag": [192, 192, 192], "Cd": [255, 217, 143],
    "In": [166, 117, 115], "Sn": [102, 128, 128], "Sb": [158, 99, 181], "Te": [212, 122, 0],
    "I": [148, 0, 148], "Xe": [66, 158, 176], "Cs": [87, 23, 143], "Ba": [0, 201, 0],
    "La": [112, 212, 255], "Ce": [255, 255, 199], "Pr": [217, 255, 199], "Nd": [199, 255, 199],
    "Pm": [163, 255, 199], "Sm": [143, 255, 199], "Eu": [97, 255, 199], "Gd": [69, 255, 199],
    "Tb": [48, 255, 199], "Dy": [31, 255, 199], "Ho": [0, 255, 156], "Er": [0, 230, 117],
    "Tm": [0, 212, 82], "Yb": [0, 191, 56], "Lu": [0, 171, 36], "Hf": [77, 194, 255],
    "Ta": [77, 166, 255], "W": [33, 148, 214], "Re": [38, 125, 171], "Os": [38, 102, 150],
    "Ir": [23, 84, 135], "Pt": [208, 208, 224], "Au": [255, 209, 35], "Hg": [184, 184, 208],
    "Tl": [166, 84, 77], "Pb": [87, 89, 97], "Bi": [158, 79, 181], "Po": [171, 92, 0],
    "At": [117, 79, 69], "Rn": [66, 130, 150], "Fr": [66, 0, 102], "Ra": [0, 125, 0],
    "Ac": [112, 171, 250], "Th": [0, 186, 255], "Pa": [0, 161, 255], "U": [0, 143, 255],
    "Np": [0, 128, 255], "Pu": [0, 107, 255], "Am": [84, 92, 242], "Cm": [120, 92, 227],
    "Bk": [138, 79, 227], "Cf": [161, 54, 212], "Es": [179, 31, 212], "Fm": [179, 31, 186],
    "Md": [179, 13, 166], "No": [189, 13, 135], "Lr": [199, 0, 102], "Rf": [204, 0, 89],
    "Db": [209, 0, 79], "Sg": [217, 0, 69], "Bh": [224, 0, 56], "Hs": [230, 0, 46],
    "Mt": [235, 0, 38],
}

# Blender needs a 4th value for the opacity in addition to the RGB values given
# above. For all materials, we use 255 and append this to all of them here. Blender
# needs these values on a 0-1 scale instead of the 0-255. We address
# this below by dividing all values by 255
for key in JMOL_COLORING:
    color = JMOL_COLORING[key]
    color.append(255)
    JMOL_COLORING[key] = numpy.array(color) / 255


def make_structure_blend(lattice, sites_to_draw, filename):

    # convert variable from json str to original format
    lattice = json.loads(lattice)
    sites_to_draw = json.loads(sites_to_draw.replace("'", '"'))

    # import Verge3D settings
    # import addon_utils
    # addon_utils.enable(module_name="verge3d")

    # Clear existing objects.
    bpy.ops.wm.read_factory_settings(use_empty=True)

    # we grab the entire blender scene for reference as it lets us access
    # all objects later
    scene = bpy.context.scene

    # -------------------------------------------------------------------------

    # ADDING THE SITES

    # We start by drawing each of the sites -- which is just a colored sphere
    # at the proper coordinates
    for site in sites_to_draw:

        # first pull the base information out of the serialized tuple
        element_symbol, radius, cartesian_coords = site

        # we change the coordinates into a numpy array for functionality
        cartesian_coords = numpy.array(cartesian_coords)

        # Add a sphere for the site. Note we draw the sphere at only 75% of its
        # true radius in order to help with visualization.
        bpy.ops.mesh.primitive_ico_sphere_add(
            subdivisions=3,
            radius=radius * 0.75,
            location=cartesian_coords,
        )

        # Now we need to color and style the sphere.

        # grab the site color from our mappings above
        site_color = JMOL_COLORING[element_symbol]

        # first check if we have made this material already (i.e. an element of
        # this type has been made before). If so, we use that one.
        materials = bpy.data.materials
        if element_symbol in materials.keys():
            mat = materials[element_symbol]
        # otherwise we make a new material and name it after the element for
        # future reference.
        else:
            mat = bpy.data.materials.new(name=element_symbol)
            mat.diffuse_color = site_color
            mat.metallic = 1
            mat.specular_intensity = 0
            mat.roughness = 0.6

        # Now that we have the proper material created/selected, we can
        # apply it to our sphere
        bpy.context.active_object.data.materials.append(mat)

    # We apply smooth shading to all the spheres and then deselect them before
    # moving on to the next step
    bpy.ops.object.select_all(action="SELECT")
    bpy.ops.object.shade_smooth()
    bpy.ops.object.select_all(action="DESELECT")

    # -------------------------------------------------------------------------

    # ADDING THE LATTICE

    # We make a lattice by creating a cube, deleting all of the faces, and then
    # manually placing each of its vertices to match the lattice size.
    bpy.ops.mesh.primitive_cube_add(size=1, enter_editmode=True)
    bpy.ops.mesh.delete(type="ONLY_FACE")
    bpy.ops.object.editmode_toggle()
    verts = bpy.context.object.data.vertices
    verts[0].co = (0, 0, 0)
    verts[1].co = lattice[2]
    verts[2].co = lattice[0]
    verts[3].co = numpy.add(lattice[0], lattice[2])
    verts[4].co = lattice[1]
    verts[5].co = numpy.add(lattice[1], lattice[2])
    verts[6].co = numpy.add(lattice[0], lattice[1])
    verts[7].co = numpy.sum(lattice, axis=0)

    # There's an issue where each lattice edge isn't a perfect line. To fix
    # this, we split the cube into separate lines and make sure each of those
    # are "full curves" which is really just a cylinder.
    # This would be the easy way to do it through the UI, but we get an error here...
    # bpy.ops.mesh.edge_split()  # doesn't work because of context/poll check
    lattice = bpy.data.objects[0].data  # regular bpy object
    bm = bmesh.new()  # create new bmesh
    bm.from_mesh(lattice)  # fill bmesh with data from bpy object
    bmesh.ops.split_edges(bm, edges=bm.edges)  # split the edges on the mesh
    bm.to_mesh(lattice)  # write the result data back to the initial bpy object

    # now fill each vector to a given size
    bpy.ops.object.convert(target="CURVE")
    bpy.context.object.data.fill_mode = "FULL"
    bpy.context.object.data.bevel_depth = 0.1
    bpy.context.object.data.bevel_resolution = 3
    bpy.ops.object.shade_smooth()

    # Now we create a black material to color the lattice with
    mat = bpy.data.materials.new(name="Lattice")
    mat.diffuse_color = (0, 0, 0, 1)
    mat.specular_intensity = 0
    bpy.context.active_object.data.materials.append(mat)

    # -------------------------------------------------------------------------

    # CENTERING ALL OBJECTS

    # When we created all the objects above, the center of the scene is (0,0,0)
    # for the cartesian coordinates, but it's better to have the viewpoint and
    # object rotation about the center of the lattice. Therefore, we grab the
    # center of the lattice, and use this location to translate all objects in
    # the scene such that this is the new center.
    bpy.ops.object.origin_set(type="ORIGIN_GEOMETRY", center="MEDIAN")
    lattice_center = bpy.data.objects["Cube"].location.copy()
    for obj in bpy.data.objects:
        obj.location = numpy.subtract(obj.location, lattice_center)

    # -------------------------------------------------------------------------

    # CONFIGURING THE REST OF THE SCENE

    # Camera
    cam_data = bpy.data.cameras.new(name="MyCam")
    cam_ob = bpy.data.objects.new(name="MyCam", object_data=cam_data)
    scene.collection.objects.link(cam_ob)  # instance the camera object in the scene
    scene.camera = cam_ob  # set the active camera
    cam_ob.rotation_euler = numpy.radians((70, 0, 93))
    cam_ob.location = (30, 2, 11)
    # cam_ob.data.type = 'ORTHO'  # 'PERSP'

    # Sun
    light_data = bpy.data.lights.new("MyLight", "SUN")
    light_ob = bpy.data.objects.new(name="MyLight", object_data=light_data)
    scene.collection.objects.link(light_ob)
    # Set sun to move along with the camera. This is because we don't want
    # shadows changing in the viewport for crystal structures.
    light_ob.parent = cam_ob
    light_ob.location = (4, 50, 4)
    light_ob.rotation_euler = numpy.radians((60, 10, 150))

    # Background (aka the blender "World")
    world = bpy.data.worlds.new(name="MyWorld")
    world.color = (1, 1, 1)
    scene.world = world

    # -------------------------------------------------------------------------

    ## Center all objects at the origin  # fails as-is; consider centering camera to lattice
    # bpy.ops.object.select_all(action='SELECT')
    # bpy.ops.view3d.snap_selected_to_cursor(use_offset=True)

    ## scale the whole crystal structure
    # bpy.ops.object.select_all(action='SELECT')
    # bpy.ops.transform.resize(value=(1.29349, 1.29349, 1.29349))

    # update view to include all the changes we made above
    bpy.context.view_layer.update()

    # set verge3D settings
    # bpy.context.scene.v3d_export.use_shadows = False
    # bpy.context.scene.v3d_export.lzma_enabled = (
    #     True  # add compressed files (fails for some reason)
    # )
    # bpy.context.scene.v3d_export.aa_method = "MSAA8"
    # bpy.data.objects["MyCam"].data.v3d.orbit_min_distance = 15
    # bpy.data.objects["MyCam"].data.v3d.orbit_max_distance = 100

    # now save this to a blender file
    bpy.ops.wm.save_as_mainfile(filepath=filename)

    # export in the gltf 2.0 format (.glb file)
    bpy.ops.export_scene.gltf(filepath="example_filename.glb")

    # export for Verge3D
    # bpy.ops.export_scene.v3d_gltf(filepath=save_path)


def main():
    # get the arguments passed to blender after "--", all of which are ignored by
    # blender so scripts may receive their own arguments.
    arguments = sys.argv[sys.argv.index("--") + 1 :]

    # To pull out the arguments passed to the script, we need to tell the parser
    # what they will be in advance.
    parser = argparse.ArgumentParser()
    parser.add_argument("--lattice", dest="lattice")
    parser.add_argument("--sites", dest="sites")
    parser.add_argument("--save", dest="filename")

    # we can now pull out the arguments passed into the command
    parsed_arguments = parser.parse_args(arguments)

    # Run the function we defined above
    make_structure_blend(
        parsed_arguments.lattice,
        parsed_arguments.sites,
        parsed_arguments.filename,
    )


# This is boilerplate code that calls the main function when this script is
# run with python directly.
if __name__ == "__main__":
    main()
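Since the script above reads its own flags only after Blender's `--` separator, it has to be launched through Blender's bundled Python. A hedged invocation sketch via subprocess (the script filename, lattice, and sites payloads are illustrative; note `--sites` may use single quotes because the script swaps them for double quotes before json.loads):

import subprocess

subprocess.run([
    "blender", "--background", "--python", "make_structure.py", "--",
    "--lattice", "[[4.0, 0, 0], [0, 4.0, 0], [0, 0, 4.0]]",
    "--sites", "[['Na', 1.16, [0, 0, 0]], ['Cl', 1.67, [2.0, 2.0, 2.0]]]",
    "--save", "nacl.blend",
], check=True)  # raises CalledProcessError if Blender exits non-zero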
python
import pytest

from argus.db.db_types import NodeDescription, NemesisStatus, NemesisRunInfo
from pydantic import ValidationError
from dataclasses import asdict
from collections import namedtuple
from time import time


def test_node_description():
    node = NodeDescription(name="test", ip="1.1.1.1", shards=10)
    assert asdict(node) == {
        "name": "test",
        "ip": "1.1.1.1",
        "shards": 10,
    }


def test_node_description_invalid_ip_address():
    with pytest.raises(ValidationError):
        NodeDescription(name="test", ip="666.666.666.666", shards=10)


def test_node_description_recreate_from_udt_set():
    nodedescription = namedtuple("NodeDescription", ["name", "ip", "shards"])
    udt = nodedescription(name="test", ip="1.1.1.1", shards=10)
    node = NodeDescription.from_db_udt(udt)
    assert asdict(node) == udt._asdict()


def test_nemesis_run_info():
    start_time = int(time())
    nem_dict = {
        "class_name": "SisyphusMonkey",
        "name": "disrupt_me",
        "duration": 400,
        "target_node": {
            "name": "test",
            "ip": "1.1.1.1",
            "shards": 10,
        },
        "status": "started",
        "start_time": start_time,
        "end_time": 0,
        "stack_trace": ""
    }
    node = NodeDescription(name="test", ip="1.1.1.1", shards=10)
    nem = NemesisRunInfo("SisyphusMonkey", "disrupt_me", 400, target_node=node,
                         status=NemesisStatus.STARTED, start_time=start_time)

    assert asdict(nem) == nem_dict


def test_nemesis_run_complete_success():
    start_time = int(time())
    node = NodeDescription(name="test", ip="1.1.1.1", shards=10)
    nem = NemesisRunInfo("SisyphusMonkey", "disrupt_me", 400, target_node=node,
                         status=NemesisStatus.STARTED, start_time=start_time)

    nem.complete()

    assert nem.nemesis_status == NemesisStatus.SUCCEEDED


def test_nemesis_run_complete_failure():
    start_time = int(time())
    node = NodeDescription(name="test", ip="1.1.1.1", shards=10)
    nem = NemesisRunInfo("SisyphusMonkey", "disrupt_me", 400, target_node=node,
                         status=NemesisStatus.STARTED, start_time=start_time)

    traceback = "Traceback: something happened"
    nem.complete(traceback)

    assert nem.nemesis_status == NemesisStatus.FAILED and nem.stack_trace == traceback


def test_nemesis_run_state_enumerated_only():
    start_time = int(time())
    node = NodeDescription(name="test", ip="1.1.1.1", shards=10)
    nem = NemesisRunInfo("SisyphusMonkey", "disrupt_me", 400, target_node=node,
                         status=NemesisStatus.STARTED, start_time=start_time)

    with pytest.raises(ValueError):
        nem.nemesis_status = "AGJKSDHGKJSG"


def test_nemesis_run_state_valid_enum_coercible():
    start_time = int(time())
    node = NodeDescription(name="test", ip="1.1.1.1", shards=10)
    nem = NemesisRunInfo("SisyphusMonkey", "disrupt_me", 400, target_node=node,
                         status=NemesisStatus.STARTED, start_time=start_time)

    nem.nemesis_status = "running"

    assert nem.nemesis_status == NemesisStatus.RUNNING
python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from genera_tablas import Club
from genera_tablas import Jugador

# configuration values are imported from the configuracion module
from configuracion import cadena_base_datos

# create the engine that connects to the database backend;
# this example uses sqlite
engine = create_engine(cadena_base_datos)

Session = sessionmaker(bind=engine)
session = Session()

# read the clubs file
archivo_clubs = open("data/datos_clubs.txt", "r", encoding="utf-8")
clubs = archivo_clubs.readlines()
archivo_clubs.close()  # the file handles were left open before

# read the players file
archivo_jugadores = open("data/datos_jugadores.txt", "r", encoding="utf-8")
jugadores = archivo_jugadores.readlines()
archivo_jugadores.close()

# create the Club objects
for club in clubs:
    club_array = club.split('\n')
    club_array = club_array[0].split(';')
    c = Club(nombre=club_array[0], deporte=club_array[1], fundacion=club_array[2])
    session.add(c)

# fetch every Club record
consulta_clubs = session.query(Club).all()

# create the Jugador objects
for jugador in jugadores:
    jugador_array = jugador.split('\n')
    jugador_array = jugador_array[0].split(';')
    # assign the club id matching this player's club name
    for club in consulta_clubs:
        if jugador_array[0] == club.nombre:
            id_club = club.id
    j = Jugador(nombre=jugador_array[3], dorsal=jugador_array[2],
                posicion=jugador_array[1], club_id=id_club)
    session.add(j)

# commit the transactions
session.commit()
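The club-id assignment above rescans every club for every player (and silently reuses the previous id when no name matches). A sketch of the same step with a one-time name-to-id dictionary, using the same models and session; the variable names are mine:

# Build the lookup once instead of re-scanning consulta_clubs per player.
id_por_nombre = {club.nombre: club.id for club in session.query(Club).all()}

for jugador in jugadores:
    # same field order as the file: club name; position; number; player name
    nombre_club, posicion, dorsal, nombre = jugador.strip().split(';')
    session.add(Jugador(nombre=nombre, dorsal=dorsal, posicion=posicion,
                        club_id=id_por_nombre[nombre_club]))  # KeyError flags an unknown club
session.commit()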
python
from dagster import job, lambda_solid, pipeline, repository


@lambda_solid
def do_something():
    return 1


@pipeline(name="extra")
def extra_pipeline():
    do_something()


@job
def extra_job():
    do_something()


@repository
def extra():
    return {"pipelines": {"extra": extra_pipeline}, "jobs": {"extra_job": extra_job}}
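A quick way to exercise the job above without a full Dagster deployment, sketched under the assumption of a Dagster version where the legacy @lambda_solid interoperates with @job and execute_in_process is available:

result = extra_job.execute_in_process()
assert result.success
print(result.output_for_node("do_something"))  # 1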
python
import requests
from elasticsearch import Elasticsearch, helpers  # `helpers` is required by the bulk call below

es = Elasticsearch(host='0.0.0.0', port=9201)

# NOTE: `url` was undefined in the original script; the value below is a
# hypothetical placeholder and must point at the real source API.
url = "http://example.invalid/api"

r = requests.get(url).json()['res']['res']

actions = []
for i, e in enumerate(r):
    actions.append(
        {
            "_index": "dummy",
            "_type": "dum",
            "_id": i,
            "content": e,
        }
    )

helpers.bulk(es, actions)
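For payloads larger than this toy list, elasticsearch.helpers can also consume a generator so the full action list never sits in memory at once; a sketch reusing the same (hypothetical) index and doc-type names:

def gen_actions(records):
    # Yield one action per record instead of materialising the whole list.
    for i, e in enumerate(records):
        yield {"_index": "dummy", "_type": "dum", "_id": i, "content": e}

for ok, info in helpers.streaming_bulk(es, gen_actions(r)):
    if not ok:
        print("failed:", info)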
python
from .replacer import replace_text
python