max_stars_repo_path (string, len 3-269) | max_stars_repo_name (string, len 4-119) | max_stars_count (int64, 0-191k) | id (string, len 1-7) | content (string, len 6-1.05M) | score (float64, 0.23-5.13) | int_score (int64, 0-5)
---|---|---|---|---|---|---|
src/haploqa/customcsvimport.py | TheJacksonLaboratory/haploqa | 3 | 12799751 | import argparse
import csv
import haploqa.mongods as mds
SAMPLE_BATCH_SIZE = 20
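# Sketch of the batching layout assumed here (illustrative values): the header
# row holds sample names only, so batch k covers header columns
# [k*SAMPLE_BATCH_SIZE : (k+1)*SAMPLE_BATCH_SIZE], while data rows carry the
# SNP ID in column 0, so the same batch of values sits one column to the
# right -- hence the '+ 1' offset in get_data() below.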
def import_samples(platform, geno_matrix_csv, x_matrix_csv, y_matrix_csv, sample_tags, db):
platform_chrs, snp_count_per_chr, snp_chr_indexes = mds.within_chr_snp_indices(platform, db)
curr_sample_start_index = 0
while True:
def get_sample_names(header_row):
slice = header_row[curr_sample_start_index:curr_sample_start_index + SAMPLE_BATCH_SIZE]
return [x.strip() for x in slice]
def get_data(data_row):
# the '+ 1' is because we need to shift right to accommodate the SNP ID column
slice = data_row[curr_sample_start_index + 1:curr_sample_start_index + SAMPLE_BATCH_SIZE + 1]
return [x.strip() for x in slice]
with open(geno_matrix_csv, 'r', newline='') as geno_matrix_handle, \
open(x_matrix_csv, 'r', newline='') as x_matrix_handle, \
open(y_matrix_csv, 'r', newline='') as y_matrix_handle:
# grab the current sample names
geno_matrix_table = csv.reader(geno_matrix_handle)
x_matrix_table = csv.reader(x_matrix_handle)
y_matrix_table = csv.reader(y_matrix_handle)
sample_names = get_sample_names(next(geno_matrix_table))
if not sample_names:
# we've already imported all of the samples
return
x_sample_names = get_sample_names(next(x_matrix_table))
y_sample_names = get_sample_names(next(y_matrix_table))
if sample_names != x_sample_names or sample_names != y_sample_names:
raise Exception('sample IDs do not match in files')
def make_snp_stream():
while True:
next_geno_row = next(geno_matrix_table)
next_x_row = next(x_matrix_table)
next_y_row = next(y_matrix_table)
snp_id = next_geno_row[0].strip()
if snp_id != next_x_row[0].strip() or snp_id != next_y_row[0].strip():
raise Exception('snp IDs do not match in files')
genos = get_data(next_geno_row)
xs = [float(x) for x in get_data(next_x_row)]
ys = [float(y) for y in get_data(next_y_row)]
yield snp_id, genos, xs, ys
samples = []
for sample_name in sample_names:
chr_dict = dict()
for chr in platform_chrs:
curr_snp_count = snp_count_per_chr[chr]
chr_dict[chr] = {
'xs': [float('nan')] * curr_snp_count,
'ys': [float('nan')] * curr_snp_count,
'snps': ['-'] * curr_snp_count,
}
curr_sample = {
'sample_id': mds.gen_unique_id(db),
'other_ids': [sample_name],
'platform_id': platform,
'chromosome_data': chr_dict,
'tags': sample_tags,
'unannotated_snps': [],
}
samples.append(curr_sample)
for snp_id, genos, xs, ys in make_snp_stream():
snp_chr_index = snp_chr_indexes.get(snp_id)
if snp_chr_index is not None:
snp_chr = snp_chr_index['chromosome']
snp_index = snp_chr_index['index']
for i, curr_sample in enumerate(samples):
curr_geno = genos[i].upper()
if curr_geno == 'N':
curr_geno = '-'
curr_x = xs[i]
curr_y = ys[i]
curr_sample_chr = curr_sample['chromosome_data'][snp_chr]
curr_sample_chr['xs'][snp_index] = curr_x
curr_sample_chr['ys'][snp_index] = curr_y
curr_sample_chr['snps'][snp_index] = curr_geno
for curr_sample in samples:
mds.post_proc_sample(curr_sample)
db.samples.insert_one(curr_sample)
print('inserted samples:', ', '.join(sample_names))
curr_sample_start_index += SAMPLE_BATCH_SIZE
def main():
# parse command line arguments
parser = argparse.ArgumentParser(description='import the final report with probe intensities')
parser.add_argument(
'platform',
help='the platform for the data we are importing. eg: MegaMUGA')
parser.add_argument(
'tag',
help='a tag name that should be associated with all imported samples')
parser.add_argument(
'geno_matrix_csv',
help='comma-separated genotype values matrix')
parser.add_argument(
'x_matrix_csv',
help='comma-separated X intensity values matrix')
parser.add_argument(
'y_matrix_csv',
help='comma-separated Y intensity values matrix')
args = parser.parse_args()
import_samples(
args.platform,
args.geno_matrix_csv, args.x_matrix_csv, args.y_matrix_csv,
[args.tag, args.platform],
mds.init_db())
if __name__ == '__main__':
main()
| 2.546875 | 3 |
hangoutswordcount/hangoutswordcount.py | pinaky/utilities | 0 | 12799752 | # This program reads in a Google Hangouts JSON file and produces a wordcount
# Author: <NAME>
import json # JSON to handle Google's format
import re # regular expressions
# CHANGE THIS. For linux/mac, use '/home/user/restofpath/'
basepath = 'C:\\Users\\Pinaky\\Desktop\\cesmd\\gmail_hangout\\'
# INPUT: This is the input file path
jsonPath = basepath + 'Hangouts.json'
# OUTPUT: These are the output file paths. dict = sorted alphabetical; freq = sorted by frequency
mainDictPath = basepath + 'hangoutdict.txt'
mainFreqPath = basepath + 'hangoutfreq.txt'
# This is the path to a temporary intermediate file
tempPath = basepath + 'hangouttemp.txt'
# Read in the JSON file
jsonFile = open(jsonPath, 'r', encoding='utf8')
outFile = open(tempPath,'w', encoding='utf8')
# 'p' is the variable that contains all the data
p = json.load(jsonFile)
c = 0 # Count the number of chat messages
# This loops through Google's weird JSON format and picks out the chat text
for n in p['conversation_state']:
for e in n['conversation_state']['event']:
if 'chat_message' in e:
x = e['chat_message']['message_content']
if 'segment' in x:
xtype = x['segment'][0]['type']
xtext = x['segment'][0]['text'] + u" "
if xtype == u'TEXT':
# Write out the chat text to an intermediate file
outFile.write(xtext)
c += 1
print(u'Total number of chats: {0:d}'.format(c))
jsonFile.close()
outFile.close()
# The intermediate file has been written
# Now, run a wordcount
# Read in the intermediate file
inFile = open(tempPath,'r', encoding='utf8')
s = inFile.readlines()
inFile.close()
wordcount={} # The dictionary for wordcount
for raw_line in s:
    line = raw_line.lower().strip() # normalize case and strip surrounding whitespace
    line = re.sub(u'[^A-Za-z]+', u' ', line) # keep only alphabetic characters; everything else becomes a space
for word in line.split():
if word not in wordcount:
wordcount[word] = 1
else:
wordcount[word] += 1
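# An equivalent one-pass sketch using the standard library (same tokenization,
# assuming `s` holds the lines read above):
# from collections import Counter
# wordcount = Counter(w for raw in s for w in re.sub(u'[^A-Za-z]+', u' ', raw.lower()).split())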
# Sort the wordcount like a dictionary and write to file
outFile = open(mainDictPath, 'w', encoding='utf8')
for k,v in sorted(wordcount.items()):
outFile.write(str(k))
outFile.write(u' ')
outFile.write(str(v))
outFile.write(u'\n')
outFile.close()
# Sort the wordcount in descending order of frequency and write to file
outFile = open(mainFreqPath, 'w', encoding='utf8')
for k, v in sorted(wordcount.items(), key=lambda w: w[1], reverse=True):
outFile.write(str(k))
outFile.write(u' ')
outFile.write(str(v))
outFile.write(u'\n')
outFile.close()
| 3.6875 | 4 |
tools/get_user_info.py | yanetut/live-get | 4 | 12799753 |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import re
import os
import argparse
from collections import Counter
from tqdm import tqdm
def get_danmu_user_name(line):
res = re.findall(r',\d{1,2},([^,]+),[^,]*">', line)
if res:
return res[0]
return ''
def get_danmu_time(line):
res = re.findall(r'p="(\d+)\.', line)
if res:
return int(res[0])
return -1
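# Example of the line shape these regexes assume (hypothetical danmaku line):
# given '<d p="3605.123,1,25,...">', get_danmu_time() returns 3605, the integer
# seconds before the first '.' in the p attribute.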
def get_users_info(danmu_file, danmu_delta_minute):
users_info = {}
danmu_list = []
with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file:
for line in dm_file:
if re.findall(r',\d{1,2},([^,]+?),[^,]*">', line):
danmu_list.append(line)
delta_idx = 0
danmu_delta_max = (delta_idx + 1) * danmu_delta_minute * 60
# init all user danmu dict
danmu_total_time = get_danmu_time(danmu_list[-1])
delta_idx_total = int(danmu_total_time / (danmu_delta_minute * 60)) + 1
for danmu in danmu_list:
user_name = get_danmu_user_name(danmu)
if not (user_name in users_info):
users_info[user_name] = [0] * delta_idx_total
for danmu in danmu_list:
danmu_time = get_danmu_time(danmu)
if danmu_time >= danmu_delta_max:
delta_idx += 1
danmu_delta_max = (delta_idx + 1) * danmu_delta_minute * 60
user_name = get_danmu_user_name(danmu)
users_info[user_name][delta_idx] += 1
return users_info
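# Shape sketch: with danmu_delta_minute = 30 and a ~90-minute recording,
# users_info looks like {'alice': [12, 0, 3], 'bob': [0, 5, 1]} (names
# illustrative) -- one danmaku count per 30-minute bucket. sort_by_stay()
# below counts the non-zero buckets; sort_by_count() sums them.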
def sort_by_stay(users_info):
users_stay_info = {}
for user in users_info:
stay_count = 0
for dm_count in users_info[user]:
if not dm_count == 0:
stay_count += 1
users_stay_info[user] = stay_count
sorted_list = sorted(users_stay_info.items(), key = lambda x: x[1],
reverse=True)
return sorted_list
def sort_by_count(users_info):
users_count_info = {}
for user in users_info:
stay_count = 0
for dm_count in users_info[user]:
if not dm_count == 0:
stay_count += dm_count
users_count_info[user] = stay_count
sorted_list = sorted(users_count_info.items(), key = lambda x: x[1],
reverse=True)
return sorted_list
def print_sorted_info(users_info):
pass
def users_info(args):
if not args.delta:
delta = 30
else:
delta = args.delta
print(args.file)
users_info = get_users_info(args.file, delta)
sorted_stay_info = sort_by_stay(users_info)
sorted_count_info = sort_by_count(users_info)
# user count
print('Total user count: {count}'.format(count = len(users_info)))
# user danmu count top 50
print('user danmu count top 50:')
for i in range(50):
key = sorted_count_info[i][0]
v = sorted_count_info[i][1]
print('{idx}\t{key}: {value}'.format(idx = i + 1, key = key,
value = v))
# user stay top 50
delta_max = len(list(users_info.items())[0][1])
print('user stay top 50(max {max}):'.format(max = delta_max))
for i in range(50):
key = sorted_stay_info[i][0]
v = sorted_stay_info[i][1]
        print('{idx}\t{key}: {value}'.format(idx = i + 1, key = key, value = v))
def get_users_count_info(danmu_file):
users_count_info = {}
danmu_list = []
with open(danmu_file, 'r', encoding='utf-8', errors='ignore') as dm_file:
for line in dm_file:
if re.findall(r',\d{1,2},([^,]+?),[^,]*">', line):
danmu_list.append(line)
for danmu in danmu_list:
user_name = get_danmu_user_name(danmu)
        if user_name not in users_count_info:
            users_count_info[user_name] = 1  # first sighting counts as one message
        else:
            users_count_info[user_name] += 1
return users_count_info
def count_info(args):
users_count_info = Counter({})
for f in args.count:
if os.path.exists(f):
add_count_info = Counter(get_users_count_info(f))
users_count_info += add_count_info
# user count
print('Total user count: {count}'.format(count = len(users_count_info)))
# sort count
sorted_count_info = sorted(users_count_info.items(), key = lambda x: x[1],
reverse=True)
for i in range(50):
key = sorted_count_info[i][0]
v = sorted_count_info[i][1]
print('{idx}\t{key}: {value}'.format(idx = i + 1, key = key,
value = v))
with open('counts.txt', 'w', encoding='utf-8') as count_file:
idx = 0
for info in sorted_count_info:
key = info[0]
v = info[1]
count_file.write('{idx}\t{key}: {value}\n'.format(
idx = idx + 1, key = key, value = v))
idx += 1
def count_all_info(args):
users_count_info = Counter({})
danmu_dir = os.path.join(os.getcwd(), args.countall[0])
danmu_dirs = os.listdir(danmu_dir)
danmu_file_list = []
for dm_dir in danmu_dirs:
danmu_file_dir = os.path.join(danmu_dir, dm_dir)
danmu_files = os.listdir(danmu_file_dir)
for danmu_file in danmu_files:
dm_file = os.path.join(danmu_file_dir, danmu_file)
danmu_file_list.append(dm_file)
for dm_file in tqdm(danmu_file_list):
add_count_info = Counter(get_users_count_info(dm_file))
users_count_info += add_count_info
# user count
print('Total user count: {count}'.format(count = len(users_count_info)))
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Get User info from danmaku file.')
parser.add_argument('-c', '--count', type = str, nargs = '+',
help='count help')
parser.add_argument('-ca', '--countall', type = str, nargs = '+',
help='count help')
parser.add_argument('-f', '--file', type = str, help='danmaku help')
parser.add_argument('-d', '--delta', type = int, help='delta minute help')
args = parser.parse_args()
if args.count:
count_info(args)
elif args.file:
users_info(args)
elif args.countall:
count_all_info(args)
| 2.5 | 2 |
Scripts/Obsolete/Markov.py | DLasher95/TransEmote | 0 | 12799754 |
import random
import mido
from mido import MidiFile
# what things to store?
# random choice
# https://stackoverflow.com/questions/4859292/how-to-get-a-random-value-from-dictionary-in-python
notes = []
#random.choice(notes)
# dictionaries
# https://www.w3schools.com/python/python_dictionaries.asp
# { note : recorded_data[] }
channel_notes = {}
note_velocities = {}
note_neighbors = {}
note_times = {}
note_successors = {}
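# Hedged sketch of how these tables could drive generation once train() has
# filled them (the function and names below are illustrative, not part of the
# original script):
# def generate(start_note, length=16):
#     note = start_note
#     melody = [note]
#     for _ in range(length - 1):
#         successors = note_successors.get(note) or notes  # fall back to any seen note
#         note = random.choice(successors)
#         melody.append(note)
#     return melody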
def train(midi):
    for i, msg in enumerate(midi):
        try:
            if msg.note not in notes:
                # initialize lists for a note we haven't seen before
                notes.append(msg.note)
                note_velocities[msg.note] = []
                note_successors[msg.note] = []
                note_times[msg.note] = []
            # add data
            note_velocities[msg.note].append(msg.velocity)
            note_times[msg.note].append(msg.time)
            note_successors[msg.note].append(midi[i + 1].note)
        except Exception:
            # skip messages without a .note (e.g. meta messages) and the final
            # message, which has no successor
            continue
print(f'Notes: {len(notes)}')
print(f'Velocity keys: {len(note_velocities.keys())}')
print(f'Time keys: {len(note_times.keys())}')
    print(f'Successors keys: {len(note_successors.keys())}')
| 3 | 3 |
kvs/client/python/setup.py | saurav-c/fluent | 1,164 | 12799755 |
from distutils.core import setup
import os
from setuptools.command.install import install
class InstallWrapper(install):
def run(self):
# compile the relevant protobufs
self.compile_proto()
# Run the standard PyPi copy
install.run(self)
# remove the compiled protobufs
self.cleanup()
def compile_proto(self):
# compile the protobufs
os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' +
'kvs.proto')
os.system('cd anna && protoc -I=../../../../include/proto --python_out=. ' +
'functions.proto')
    def cleanup(self):
        os.system('rm anna/kvs_pb2.py')
        os.system('rm anna/functions_pb2.py')
setup(
name='Anna',
version='0.1',
packages=['anna', ],
license='Apache v2',
long_description='Client for the Anna KVS',
install_requires=['zmq', 'protobuf'],
cmdclass={'install': InstallWrapper}
)
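# Usage sketch: `python setup.py install` runs InstallWrapper.run(), i.e.
# compile_proto() -> the standard install -> cleanup() (assumes `protoc` is on
# PATH and the relative proto paths above resolve).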
| 2 | 2 |
botc/commands/abilities/tb/learn.py | BlueLenz/Blood-on-the-Clocktower-Storyteller-Discord-Bot | 1 | 12799756 | """Learn command"""
import botutils
import discord
import traceback
import json
from discord.ext import commands
from botc import check_if_is_player, check_if_is_night, check_if_dm, RoleCannotUseCommand, \
check_if_player_really_dead, check_if_can_learn, PlayerParser, AbilityForbidden, \
NotAPlayer, BOTCUtils, DeadOnlyCommand, NotDawn, NotDMChannel, check_if_is_dawn
with open('botutils/bot_text.json') as json_file:
language = json.load(json_file)
error_str = language["system"]["error"]
with open('botc/game_text.json') as json_file:
documentation = json.load(json_file)
class Learn(commands.Cog, name = documentation["misc"]["abilities_cog"]):
"""BoTC in-game commands cog
Learn command - used by ravenkeeper
"""
def __init__(self, client):
self.client = client
def cog_check(self, ctx):
"""Check performed on all commands of this cog.
Must be a non-fleaved player to use these commands.
"""
return check_if_is_player(ctx) # Registered non-quit player -> NotAPlayer
# ---------- LEARN COMMAND (Ravenkeeper) ----------------------------------------
@commands.command(
pass_context = True,
name = "learn",
hidden = False,
brief = documentation["doc"]["learn"]["brief"],
help = documentation["doc"]["learn"]["help"],
description = documentation["doc"]["learn"]["description"]
)
    @commands.check(check_if_is_dawn) # Correct phase -> NotDawn
@commands.check(check_if_dm) # Correct channel -> NotDMChannel
@commands.check(check_if_player_really_dead) # Player dead -> DeadOnlyCommand
@commands.check(check_if_can_learn) # Correct character -> RoleCannotUseCommand
async def learn(self, ctx, *, learned: PlayerParser()):
"""Learn command
usage: learn <player> and <player> and...
characters: ravenkeeper
"""
player = BOTCUtils.get_player_from_id(ctx.author.id)
await player.role.ego_self.register_learn(player, learned)
@learn.error
async def learn_error(self, ctx, error):
emoji = documentation["cmd_warnings"]["x_emoji"]
# Incorrect character -> RoleCannotUseCommand
if isinstance(error, RoleCannotUseCommand):
return
# If it passed all the checks but raised an error in the character class
elif isinstance(error, AbilityForbidden):
error = getattr(error, 'original', error)
await ctx.send(error)
# Non-registered or quit player -> NotAPlayer
elif isinstance(error, NotAPlayer):
return
# Incorrect channel -> NotDMChannel
elif isinstance(error, NotDMChannel):
return
# Incorrect argument -> commands.BadArgument
elif isinstance(error, commands.BadArgument):
return
        # Incorrect phase -> NotDawn
elif isinstance(error, NotDawn):
try:
await ctx.author.send(documentation["cmd_warnings"]["dawn_only"].format(ctx.author.mention, emoji))
except discord.Forbidden:
pass
# Player not dead -> DeadOnlyCommand
elif isinstance(error, DeadOnlyCommand):
try:
await ctx.author.send(documentation["cmd_warnings"]["dead_only"].format(ctx.author.mention, emoji))
except discord.Forbidden:
pass
# Missing argument -> commands.MissingRequiredArgument
elif isinstance(error, commands.MissingRequiredArgument):
player = BOTCUtils.get_player_from_id(ctx.author.id)
msg = player.role.ego_self.emoji + " " + player.role.ego_self.instruction + " " + player.role.ego_self.action
try:
await ctx.author.send(msg)
except discord.Forbidden:
pass
else:
try:
raise error
except Exception:
await ctx.send(error_str)
await botutils.log(botutils.Level.error, traceback.format_exc())
def setup(client):
client.add_cog(Learn(client))
| 2.625 | 3 |
piecrust/sources/interfaces.py | ludovicchabant/PieCrust2 | 43 | 12799757 |
class IPreparingSource(object):
""" Defines the interface for a source whose pages can be created by the
`chef prepare` command.
"""
def setupPrepareParser(self, parser, app):
raise NotImplementedError()
def createContent(self, args):
raise NotImplementedError()
class InteractiveField(object):
""" A field to display in the administration web UI.
"""
TYPE_STRING = 0
TYPE_INT = 1
def __init__(self, name, field_type, default_value):
self.name = name
self.field_type = field_type
self.default_value = default_value
class IInteractiveSource(object):
""" A content source that a user can interact with in the administration
web UI.
"""
def getInteractiveFields(self):
raise NotImplementedError()
| 2.5 | 2 |
roulette.py | velzerat/lb-bot | 10 | 12799758 | from random import randrange
from film import film_embed
from api import api_call
import os
async def random_embed():
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
f = open(os.path.join(__location__, "films.txt"), "r", encoding="utf8", errors="ignore")
film = random_line(f)
f.close()
return await film_embed(film)
def random_line(afile, default=None):
line = default
for i, aline in enumerate(afile, start=1):
if randrange(i) == 0: # random int [0..i)
line = aline
return line
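# Why this gives a uniform pick (reservoir sampling with k = 1): line i replaces
# the current choice with probability 1/i, so after n lines each line survives
# with probability (1/i) * i/(i+1) * ... * (n-1)/n = 1/n -- one pass, O(1) memory,
# no need to load the file or count its lines first.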
| 2.875 | 3 |
kubetools/kubernetes/api.py | EDITD/kubetools | 5 | 12799759 | from time import sleep
from kubernetes import client, config
from kubernetes.client.rest import ApiException
from kubetools.constants import MANAGED_BY_ANNOTATION_KEY
from kubetools.exceptions import KubeBuildError
from kubetools.settings import get_settings
def get_object_labels_dict(obj):
return obj.metadata.labels or {}
def get_object_annotations_dict(obj):
return obj.metadata.annotations or {}
def get_object_name(obj):
if isinstance(obj, dict):
return obj['metadata']['name']
return obj.metadata.name
def is_kubetools_object(obj):
if get_object_annotations_dict(obj).get(MANAGED_BY_ANNOTATION_KEY) == 'kubetools':
return True
def _get_api_client(env):
return config.new_client_from_config(context=env)
def _get_k8s_core_api(env):
api_client = _get_api_client(env)
return client.CoreV1Api(api_client=api_client)
def _get_k8s_apps_api(env):
api_client = _get_api_client(env)
return client.AppsV1Api(api_client=api_client)
def _get_k8s_batch_api(env):
api_client = _get_api_client(env)
return client.BatchV1Api(api_client=api_client)
def _object_exists(api, method, namespace, obj):
try:
if namespace:
getattr(api, method)(
namespace=namespace,
name=get_object_name(obj),
)
else:
getattr(api, method)(
name=get_object_name(obj),
)
except ApiException as e:
if e.status == 404:
return False
raise
return True
def _wait_for(function, name='object'):
settings = get_settings()
sleeps = 0
while True:
if function():
return
sleep(settings.WAIT_SLEEP_TIME)
sleeps += 1
if sleeps > settings.WAIT_MAX_SLEEPS:
raise KubeBuildError(f'Timeout waiting for {name} to be ready')
def _wait_for_object(*args):
return _wait_for(lambda: _object_exists(*args) is True)
def _wait_for_no_object(*args):
return _wait_for(lambda: _object_exists(*args) is False)
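# Pattern note: the create_*/delete_* helpers below pair each mutating call with
# _wait_for_object/_wait_for_no_object so they only return once the change is
# visible to reads -- e.g. create_namespace() blocks until read_namespace()
# succeeds, bounded by WAIT_MAX_SLEEPS * WAIT_SLEEP_TIME from settings.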
def namespace_exists(env, namespace_obj):
k8s_core_api = _get_k8s_core_api(env)
return _object_exists(k8s_core_api, 'read_namespace', None, namespace_obj)
def list_namespaces(env):
k8s_core_api = _get_k8s_core_api(env)
return k8s_core_api.list_namespace().items
def create_namespace(env, namespace_obj):
k8s_core_api = _get_k8s_core_api(env)
k8s_namespace = k8s_core_api.create_namespace(
body=namespace_obj,
)
_wait_for_object(k8s_core_api, 'read_namespace', None, namespace_obj)
return k8s_namespace
def update_namespace(env, namespace_obj):
k8s_core_api = _get_k8s_core_api(env)
k8s_namespace = k8s_core_api.patch_namespace(
name=get_object_name(namespace_obj),
body=namespace_obj,
)
return k8s_namespace
def delete_namespace(env, namespace, namespace_obj):
k8s_core_api = _get_k8s_core_api(env)
k8s_core_api.delete_namespace(
name=get_object_name(namespace_obj),
)
_wait_for_no_object(k8s_core_api, 'read_namespace', None, namespace_obj)
def list_pods(env, namespace):
k8s_core_api = _get_k8s_core_api(env)
return k8s_core_api.list_namespaced_pod(namespace=namespace).items
def delete_pod(env, namespace, pod):
k8s_core_api = _get_k8s_core_api(env)
k8s_core_api.delete_namespaced_pod(
name=get_object_name(pod),
namespace=namespace,
)
_wait_for_no_object(k8s_core_api, 'read_namespaced_pod', namespace, pod)
def list_replica_sets(env, namespace):
k8s_apps_api = _get_k8s_apps_api(env)
return k8s_apps_api.list_namespaced_replica_set(namespace=namespace).items
def delete_replica_set(env, namespace, replica_set):
k8s_apps_api = _get_k8s_apps_api(env)
k8s_apps_api.delete_namespaced_replica_set(
name=get_object_name(replica_set),
namespace=namespace,
)
_wait_for_no_object(k8s_apps_api, 'read_namespaced_replica_set', namespace, replica_set)
def list_services(env, namespace):
k8s_core_api = _get_k8s_core_api(env)
return k8s_core_api.list_namespaced_service(namespace=namespace).items
def delete_service(env, namespace, service):
k8s_core_api = _get_k8s_core_api(env)
k8s_core_api.delete_namespaced_service(
name=get_object_name(service),
namespace=namespace,
)
_wait_for_no_object(k8s_core_api, 'read_namespaced_service', namespace, service)
def service_exists(env, namespace, service):
k8s_core_api = _get_k8s_core_api(env)
return _object_exists(k8s_core_api, 'read_namespaced_service', namespace, service)
def create_service(env, namespace, service):
k8s_core_api = _get_k8s_core_api(env)
k8s_service = k8s_core_api.create_namespaced_service(
body=service,
namespace=namespace,
)
_wait_for_object(k8s_core_api, 'read_namespaced_service', namespace, service)
return k8s_service
def update_service(env, namespace, service):
k8s_core_api = _get_k8s_core_api(env)
k8s_service = k8s_core_api.patch_namespaced_service(
name=get_object_name(service),
body=service,
namespace=namespace,
)
return k8s_service
def list_deployments(env, namespace):
k8s_apps_api = _get_k8s_apps_api(env)
return k8s_apps_api.list_namespaced_deployment(namespace=namespace).items
def delete_deployment(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
k8s_apps_api.delete_namespaced_deployment(
name=get_object_name(deployment),
namespace=namespace,
)
_wait_for_no_object(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment)
def deployment_exists(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
return _object_exists(k8s_apps_api, 'read_namespaced_deployment', namespace, deployment)
def create_deployment(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
k8s_deployment = k8s_apps_api.create_namespaced_deployment(
body=deployment,
namespace=namespace,
)
wait_for_deployment(env, namespace, k8s_deployment)
return k8s_deployment
def update_deployment(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
k8s_deployment = k8s_apps_api.patch_namespaced_deployment(
name=get_object_name(deployment),
body=deployment,
namespace=namespace,
)
wait_for_deployment(env, namespace, k8s_deployment)
return k8s_deployment
def wait_for_deployment(env, namespace, deployment):
k8s_apps_api = _get_k8s_apps_api(env)
def check_deployment():
d = k8s_apps_api.read_namespaced_deployment(
name=get_object_name(deployment),
namespace=namespace,
)
if d.status.ready_replicas == d.status.replicas:
return True
_wait_for(check_deployment, get_object_name(deployment))
def list_jobs(env, namespace):
k8s_batch_api = _get_k8s_batch_api(env)
return k8s_batch_api.list_namespaced_job(namespace=namespace).items
def is_running(job):
conditions = job.status.conditions
if conditions is None:
return True
    complete = any(condition.type == 'Complete' for condition in conditions)
return not complete
def list_running_jobs(env, namespace):
jobs = list_jobs(env, namespace)
return [job for job in jobs if is_running(job)]
def list_complete_jobs(env, namespace):
jobs = list_jobs(env, namespace)
return [job for job in jobs if not is_running(job)]
valid_propagation_policies = ["Orphan", "Background", "Foreground"]
def delete_job(env, namespace, job, propagation_policy=None):
if propagation_policy and propagation_policy not in valid_propagation_policies:
raise KubeBuildError(f"Propagation policy must be one of {valid_propagation_policies}")
args = {}
if propagation_policy:
args['propagation_policy'] = propagation_policy
k8s_batch_api = _get_k8s_batch_api(env)
k8s_batch_api.delete_namespaced_job(
name=get_object_name(job),
namespace=namespace,
**args,
)
_wait_for_no_object(k8s_batch_api, 'read_namespaced_job', namespace, job)
def create_job(env, namespace, job, wait_for_completion=True):
k8s_batch_api = _get_k8s_batch_api(env)
k8s_job = k8s_batch_api.create_namespaced_job(
body=job,
namespace=namespace,
)
if wait_for_completion:
wait_for_job(env, namespace, k8s_job)
return k8s_job
def wait_for_job(env, namespace, job):
k8s_batch_api = _get_k8s_batch_api(env)
def check_job():
j = k8s_batch_api.read_namespaced_job(
name=get_object_name(job),
namespace=namespace,
)
if j.status.succeeded == j.spec.completions:
return True
_wait_for(check_job, get_object_name(job))
| 1.9375 | 2 |
jenkins/modules/jjb_afs/jjb_afs/afs.py | cwolferh/project-config | 0 | 12799760 |
# Copyright 2016 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from jenkins_jobs.errors import MissingAttributeError
def afs_publisher(parser, xml_parent, data):
for attr in ['site', 'source', 'target']:
if attr not in data:
raise MissingAttributeError(attr)
| 1.429688 | 1 |
kqueen/gunicorn.py | LaudateCorpus1/kqueen | 140 | 12799761 |
from kqueen.config import current_config
from prometheus_client import multiprocess
import multiprocessing
import os
app_config = current_config()
bind = "{host}:{port}".format(
host=app_config.get('KQUEEN_HOST'),
port=app_config.get('KQUEEN_PORT'),
)
timeout = 180
workers = multiprocessing.cpu_count() * 2 + 1
worker_class = 'gthread'
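# e.g. a 4-core host gets 4 * 2 + 1 = 9 threaded ('gthread') workers, the usual
# gunicorn sizing heuristic.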
# check for prometheus settings
if 'prometheus_multiproc_dir' not in os.environ:
raise Exception('Variable prometheus_multiproc_dir is required')
def child_exit(server, worker):
multiprocess.mark_process_dead(worker.pid)
| 2.265625 | 2 |
applemusicAPI.py | SeanCBlue652/swap-stream-backend | 0 | 12799762 | import requests
import json
import jwt
import cryptography
#ploads = {'Authorization': 'Bearer '}
#r = requests.get('https://api.music.apple.com/v1/me/library/playlists')
#print(r.headers)
#print(r.text)
#print(r.json())
import applemusicpy
secret_key = ''
key_id = '74G4697BU4'
team_id = 'QTM38LJQ3P'
am = applemusicpy.AppleMusic(secret_key, key_id, team_id)
results = am.search('<NAME>', types=['albums'], limit=5)
for item in results['results']['albums']['data']:
    print(item['attributes']['name'])
| 2.890625 | 3 |
tests/test_utils.py | MerleLiuKun/python-twitter | 53 | 12799763 |
"""
Utils tests
"""
import pytest
from pytwitter.error import PyTwitterError
from pytwitter.utils.validators import enf_comma_separated
from pytwitter.utils.convertors import conv_type
def test_comma_separated():
value_is_none = enf_comma_separated(name="none", value="")
assert value_is_none is None
value_is_str = enf_comma_separated(name="str", value="id1,id2")
assert value_is_str == "id1,id2"
value_is_list = enf_comma_separated(name="array", value=["id1", "id2"])
assert value_is_list == "id1,id2"
value_is_tuple = enf_comma_separated(name="tuple", value=("id1", "id2"))
assert value_is_tuple == "id1,id2"
with pytest.raises(PyTwitterError) as ex:
enf_comma_separated(name="other", value={1, 2, 3}) # noqa
assert "comma-separated" in ex.value.message # noqa
def test_conv_type():
with pytest.raises(PyTwitterError) as e:
conv_type("limit", int, None)
assert "limit" in e.value.message
| 2.671875 | 3 |
stdlib2-src/dist-packages/quodlibet/util/copool.py | ch1huizong/Scode | 0 | 12799764 |
# Copyright 2006 <NAME>, <NAME>
# 2014 <NAME>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation
"""Manage a pool of routines using Python iterators."""
from gi.repository import GLib
class _Routine(object):
def __init__(self, pool, func, funcid, priority, timeout, args, kwargs):
self.priority = priority
self.timeout = timeout
self._source_id = None
def wrap(func, funcid, args, kwargs):
for value in func(*args, **kwargs):
yield True
pool.remove(funcid)
yield False
self.source_func = wrap(func, funcid, args, kwargs).next
@property
def paused(self):
"""If the routine is currently running"""
return self._source_id is None
def step(self):
"""Raises StopIteration if the routine has nothing more to do"""
return self.source_func()
def resume(self):
"""Resume, if already running do nothing"""
if not self.paused:
return
if self.timeout:
self._source_id = GLib.timeout_add(
self.timeout, self.source_func, priority=self.priority)
else:
self._source_id = GLib.idle_add(
self.source_func, priority=self.priority)
def pause(self):
"""Pause, if already paused, do nothing"""
if self.paused:
return
GLib.source_remove(self._source_id)
self._source_id = None
class CoPool(object):
def __init__(self):
self.__routines = {}
def add(self, func, *args, **kwargs):
"""Register a routine to run in GLib main loop.
func should be a function that returns a Python iterator (e.g.
generator) that provides values until it should stop being called.
Optional Keyword Arguments:
priority -- priority to run at (default GLib.PRIORITY_LOW)
funcid -- mutex/removal identifier for this function
timeout -- use timeout_add (with given timeout) instead of idle_add
(in milliseconds)
Only one function with the same funcid can be running at once.
Starting a new function with the same ID will stop the old one. If
no funcid is given, the function itself is used. The funcid must
be usable as a hash key.
"""
funcid = kwargs.pop("funcid", func)
if funcid in self.__routines:
            self.remove(funcid)
priority = kwargs.pop("priority", GLib.PRIORITY_LOW)
timeout = kwargs.pop("timeout", None)
print_d("Added copool function %r with id %r" % (func, funcid))
routine = _Routine(self, func, funcid, priority, timeout, args, kwargs)
self.__routines[funcid] = routine
routine.resume()
def _get(self, funcid):
if funcid in self.__routines:
return self.__routines[funcid]
raise ValueError("no pooled routine %r" % funcid)
def remove(self, funcid):
"""Stop a registered routine."""
routine = self._get(funcid)
routine.pause()
del self.__routines[funcid]
print_d("Removed copool function id %r" % funcid)
def remove_all(self):
"""Stop all running routines."""
for funcid in self.__routines.keys():
self.remove(funcid)
def pause(self, funcid):
"""Temporarily pause a registered routine."""
routine = self._get(funcid)
routine.pause()
print_d("Paused copool function id %r" % funcid)
def pause_all(self):
"""Temporarily pause all registered routines."""
for funcid in self.__routines.keys():
self.pause(funcid)
def resume(self, funcid):
"""Resume a paused routine."""
routine = self._get(funcid)
routine.resume()
print_d("Resumed copool function id %r" % funcid)
def step(self, funcid):
"""Force this function to iterate once."""
routine = self._get(funcid)
return routine.step()
# global instance
_copool = CoPool()
add = _copool.add
pause = _copool.pause
pause_all = _copool.pause_all
remove = _copool.remove
remove_all = _copool.remove_all
resume = _copool.resume
step = _copool.step
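# Usage sketch (hedged; `do_work` and its internals are illustrative):
# def do_work():
#     for item in items:
#         process(item)
#         yield True   # hand control back to the GLib main loop between items
# add(do_work, funcid='my-task', timeout=100)  # step roughly every 100 ms
# pause('my-task'); resume('my-task')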
| 2.53125 | 3 |
study-hall/__main__.py | matt-ketk/study-hall | 0 | 12799765 | import sys
from .classmodule import MyCla
| 1.132813 | 1 |
fyp/api/tests/serializers/test_login_serializer.py | Fanner487/fyp-django | 0 | 12799766 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth.models import User
from api import serializers
class LoginSerializerTest(TestCase):
"""
Tests all parameters of LoginSerializer
"""
def setUp(self):
self.user = User.objects.create_user(username="testuser", email="<EMAIL>", password="<PASSWORD>")
def test_login_success(self):
data = {
'username': 'testuser',
'password': '<PASSWORD>'
}
serializer = serializers.LoginSerializer(data=data)
self.assertTrue(serializer.is_valid())
def test_login_wrong_username(self):
data = {
'username': 'NOTtestuser',
'password': '<PASSWORD>'
}
serializer = serializers.LoginSerializer(data=data)
self.assertFalse(serializer.is_valid())
def test_login_wrong_password(self):
data = {
'username': 'testuser',
'password': '<PASSWORD>'
}
serializer = serializers.LoginSerializer(data=data)
self.assertFalse(serializer.is_valid())
def test_login_null_username(self):
data = {
'password': '<PASSWORD>'
}
serializer = serializers.LoginSerializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(len(serializer.errors['username']), 1)
def test_login_null_password(self):
data = {
'username': 'testuser'
}
serializer = serializers.LoginSerializer(data=data)
self.assertFalse(serializer.is_valid())
self.assertEqual(len(serializer.errors['password']), 1)
| 2.765625 | 3 |
py/path/svn/testing/svntestbase.py | woodrow/pyoac | 1 | 12799767 |
import sys
import py
from py import path, test, process
from py.__.path.testing.fscommon import CommonFSTests, setuptestfs
from py.__.path.svn import cache, svncommon
mypath = py.magic.autopath()
repodump = mypath.dirpath('repotest.dump')
def getsvnbin():
svnbin = py.path.local.sysfind('svn')
if svnbin is None:
py.test.skip("svn binary not found")
return svnbin
# make a wc directory out of a given root url
# cache previously obtained wcs!
#
def getrepowc(reponame='basetestrepo', wcname='wc'):
repo = py.test.ensuretemp(reponame)
wcdir = py.test.ensuretemp(wcname)
if not repo.listdir():
#assert not wcdir.check()
repo.ensure(dir=1)
py.process.cmdexec('svnadmin create "%s"' %
svncommon._escape_helper(repo))
py.process.cmdexec('svnadmin load -q "%s" <"%s"' %
(svncommon._escape_helper(repo), repodump))
print "created svn repository", repo
wcdir.ensure(dir=1)
wc = py.path.svnwc(wcdir)
if py.std.sys.platform == 'win32':
repo = '/' + str(repo).replace('\\', '/')
wc.checkout(url='file://%s' % repo)
print "checked out new repo into", wc
else:
print "using repository at", repo
wc = py.path.svnwc(wcdir)
return ("file://%s" % repo, wc)
def save_repowc():
repo, wc = getrepowc()
repo = py.path.local(repo[len("file://"):])
assert repo.check()
savedrepo = repo.dirpath('repo_save')
savedwc = wc.dirpath('wc_save')
repo.copy(savedrepo)
wc.localpath.copy(savedwc.localpath)
return savedrepo, savedwc
def restore_repowc((savedrepo, savedwc)):
repo, wc = getrepowc()
print repo
print repo[len("file://"):]
repo = py.path.local(repo[len("file://"):])
print repo
assert repo.check()
# repositories have read only files on windows
#repo.chmod(0777, rec=True)
repo.remove()
wc.localpath.remove()
savedrepo.move(repo)
savedwc.localpath.move(wc.localpath)
# create an empty repository for testing purposes and return the url to it
def make_test_repo(name="test-repository"):
repo = py.test.ensuretemp(name)
try:
py.process.cmdexec('svnadmin create %s' % repo)
except:
repo.remove()
raise
if sys.platform == 'win32':
repo = '/' + str(repo).replace('\\', '/')
return py.path.svnurl("file://%s" % repo)
class CommonSvnTests(CommonFSTests):
def setup_method(self, meth):
bn = meth.func_name
for x in 'test_remove', 'test_move', 'test_status_deleted':
if bn.startswith(x):
self._savedrepowc = save_repowc()
def teardown_method(self, meth):
x = getattr(self, '_savedrepowc', None)
if x is not None:
restore_repowc(x)
del self._savedrepowc
def test_propget(self):
url = self.root.join("samplefile")
value = url.propget('svn:eol-style')
assert value == 'native'
def test_proplist(self):
url = self.root.join("samplefile")
res = url.proplist()
assert res['svn:eol-style'] == 'native'
def test_info(self):
url = self.root.join("samplefile")
res = url.info()
assert res.size > len("samplefile") and res.created_rev >= 0
def test_log_simple(self):
url = self.root.join("samplefile")
logentries = url.log()
for logentry in logentries:
assert logentry.rev == 1
assert hasattr(logentry, 'author')
assert hasattr(logentry, 'date')
class CommonCommandAndBindingTests(CommonSvnTests):
def test_trailing_slash_is_stripped(self):
# XXX we need to test more normalizing properties
url = self.root.join("/")
assert self.root == url
#def test_different_revs_compare_unequal(self):
# newpath = self.root.new(rev=1199)
# assert newpath != self.root
def test_exists_svn_root(self):
assert self.root.check()
#def test_not_exists_rev(self):
# url = self.root.__class__(self.rooturl, rev=500)
# assert url.check(exists=0)
#def test_nonexisting_listdir_rev(self):
# url = self.root.__class__(self.rooturl, rev=500)
# raises(py.error.ENOENT, url.listdir)
#def test_newrev(self):
# url = self.root.new(rev=None)
# assert url.rev == None
# assert url.strpath == self.root.strpath
# url = self.root.new(rev=10)
# assert url.rev == 10
#def test_info_rev(self):
# url = self.root.__class__(self.rooturl, rev=1155)
# url = url.join("samplefile")
# res = url.info()
# assert res.size > len("samplefile") and res.created_rev == 1155
# the following tests are easier if we have a path class
def test_repocache_simple(self):
repocache = cache.RepoCache()
repocache.put(self.root.strpath, 42)
url, rev = repocache.get(self.root.join('test').strpath)
assert rev == 42
assert url == self.root.strpath
def test_repocache_notimeout(self):
repocache = cache.RepoCache()
repocache.timeout = 0
repocache.put(self.root.strpath, self.root.rev)
url, rev = repocache.get(self.root.strpath)
assert rev == -1
assert url == self.root.strpath
def test_repocache_outdated(self):
repocache = cache.RepoCache()
repocache.put(self.root.strpath, 42, timestamp=0)
url, rev = repocache.get(self.root.join('test').strpath)
assert rev == -1
assert url == self.root.strpath
def _test_getreporev(self):
""" this test runs so slow it's usually disabled """
old = cache.repositories.repos
try:
_repocache.clear()
root = self.root.new(rev=-1)
url, rev = cache.repocache.get(root.strpath)
assert rev>=0
assert url == svnrepourl
finally:
repositories.repos = old
#cache.repositories.put(svnrepourl, 1200, 0)
| 2.28125 | 2 |
MagniPy/MassModels/Sersic.py | dangilman/MagniPy | 2 | 12799768 | import numpy as np
class Sersic:
def b(self,n):
return 1.9992*n - 0.3271 + 4*(405*n)**-1
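    # Sanity note: this mixes the linear fit (1.9992 n - 0.3271) with the
    # 4/(405 n) term of the asymptotic expansion b ~ 2n - 1/3 + 4/(405 n);
    # e.g. n = 4 gives b ~ 7.672, near the classic de Vaucouleurs value ~7.67.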
def kappa(self,x, y, n_sersic, r_eff, k_eff, q, center_x=0, center_y=0):
bn = self.b(n_sersic)
r = (x**2+y**2*q**-2)**0.5
return k_eff*np.exp(-bn*((r*r_eff**-1)**(n_sersic**-1)-1))
| 2.734375 | 3 |
client_handler.py | Walabot-Projects/Walabot-WebSocketServer | 2 | 12799769 | <reponame>Walabot-Projects/Walabot-WebSocketServer
'''
This is a asynchronous function to handle with client requirements.
This client handler is with a strong bond to NewWalabotAppTemplate.py requirements.
Basic functions that must be:
start()
get_data()
stop()
Feel free to add your uwn function just note that you also support client-side commands.
The information is transmitted in json.
The protocol: {"Command": "", "Message": "", Params":[data]}
Two examples are attached : Breathing and Tracker
Follow TODO to add a new project
'''
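# Example exchange (illustrative): the client sends {"Command": "TRACKER"},
# then polls with {"Command": "DATA"}; the server replies
# {"Command": "DATA", "Params": [data], "Message": ""} until the client sends
# {"Command": "STOP"}, which is answered with {"Command": "EXIT"}.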
import asyncio
import websockets
import json
import socket
from imp import load_source
import WalabotBreathing as breathing
import WalabotTracker as tracker
#####################################
# TODO import WalabotMyApp as my_app
#####################################
class WalabotHandler:
def __init__(self):
self.initialize = False
self.stop = False
async def commandsHandler(self ,client,path):
app = None
try:
print("Client connected..")
print(str(self.stop))
while not self.stop:
data = json.loads(await client.recv())
command = data['Command']
if command == 'BREATHING':
print(command)
app = breathing
elif command == "TRACKER":
print(command)
app = tracker
############################ ADD YOUR APP HERE ##################################
# TODO elif command == "MY_APP":
# print(command)
# app = my_app
##################################################################################
if not self.initialize:
self.initialize = True
app.start()
elif command == 'STOP':
print(command)
app.stop()
self.initialize=False
await client.send(json.dumps({"Command": "EXIT"}))
elif command == 'DATA':
try:
if self.initialize:
data = app.get_data()
res = {"Command": "DATA", "Params":[data] , "Message": ""}
await client.send(json.dumps(res))
else:
await client.send(json.dumps({"Command": "EXIT","Message": "App not initialized"}))
except:
res = {"Command": "ERROR", "Message": "App is NOT defined"}
await client.send(json.dumps(res))
else:
res = {"Command": "ERROR", "Message": "Unknown Command"}
await client.send(json.dumps(res))
except Exception as e:
print("Connection problem" + str(e))
res = {"Command": "ERROR", "Message": str(e)}
await client.send(json.dumps(res))
| 2.546875 | 3 |
app_settings/config.py | arcticle/app-settings | 0 | 12799770 | import os, re, collections
from attrdict import AttrDict
from app_settings import file_search
from app_settings import FileFactory
__all__ = ["Config"]
class Config(object):
def __init__(self, files=None, dir=None, default=None, filter=None, **kwargs):
self._validate(files, dir, default)
self._create_files(files, dir, filter, default, **kwargs)
self._load_files()
def save(self, config_name):
if config_name in self._files:
self._save_config(config_name)
def save_all(self):
for _name in self._files:
self.save(_name)
@property
def files(self):
return list(self._files.keys())
def __getitem__(self, key):
return self._get_config(key)
def _create_files(self, files, dir, filter, default, **kwargs):
self._files = {}
files = self._get_files(files, dir, filter)
for f in files:
_file = FileFactory.create(f, default, **kwargs)
_name = self._transform_invalid_name(_file.name)
self._files[_name] = _file
def _get_files(self, files, dir, filter):
if isinstance(files, str):
return [files]
if isinstance(files, collections.Iterable):
return files
if dir:
return file_search(dir, filter, recursive=True)
return []
def _load_files(self):
for _name, _file in self._files.items():
self._add_config(_name, _file.load())
def _get_config(self, config_name):
return getattr(self, config_name)
def _add_config(self, config_name, config):
setattr(self, config_name, AttrDict(config))
def _save_config(self, name):
config_dict = dict(self._get_config(name))
self._files[name].flush(config_dict)
def _transform_invalid_name(self, filename):
return re.sub(r"[^A-Za-z]", "_", filename)
    def _validate(self, files, dir, default):
if not files and not dir:
raise ValueError("No files or search directory provided.")
if files:
if isinstance(files, collections.Iterable):
for f in files:
assert isinstance(f, str)
else:
assert isinstance(files, str)
if dir:
assert isinstance(dir, str)
assert os.path.isdir(dir)
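# Usage sketch (paths and keys illustrative):
# cfg = Config(dir='./settings', filter='*.yaml')
# print(cfg.files)                   # e.g. ['app_settings', 'db_settings']
# cfg.app_settings['debug'] = True   # attribute-style access via AttrDict
# cfg.save('app_settings')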
| 2.59375 | 3 |
tvrenamer/processors/__init__.py | shad7/tvrenamer | 1 | 12799771 |
"""Result processors plugins"""
from tvrenamer.processors import base
def load():
"""Load all processor plugins that are enabled.
:returns: priority sorted processor plugins (high to low)
:rtype: list
"""
return base.EnabledExtensionManager()
| 1.945313 | 2 |
airbyte-integrations/connectors/source-freshdesk/unit_tests/conftest.py | heap/airbyte | 22 | 12799772 | #
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import pytest
from requests.auth import HTTPBasicAuth
@pytest.fixture(name="config")
def config_fixture():
return {"domain": "test.freshdesk.com", "api_key": "secret_api_key", "requests_per_minute": 50, "start_date": "2002-02-10T22:21:44Z"}
@pytest.fixture(name="authenticator")
def authenticator_fixture(config):
return HTTPBasicAuth(username=config["api_key"], password="<PASSWORD>")
| 1.921875 | 2 |
gridding/JG_pchip_interpolation/pchipOceanSlices.py | BillMills/argo-database | 2 | 12799773 | import pandas as pd
import pdb
import requests
import numpy as np
import os, sys
import xarray as xr
from datetime import datetime, timedelta
import logging
from scipy.interpolate import PchipInterpolator
import argparse
from collections import OrderedDict, defaultdict
class PchipOceanSlices(object):
def __init__(self, pLevelRange, basin=None, exceptBasin={None}, starttdx=None, appLocal=False):
self.appLocal = appLocal
self.datesSet = self.get_dates_set()
self.exceptBasin = exceptBasin
self.starttdx = starttdx
self.reduceMeas = False #removes excess points from db query
self.qcKeep = set([1,2]) # used to filter bad positions and dates
self.basin = basin # indian ocean only Set to None otherwise
self.presLevels = [ 2.5, 10. , 20. , 30. , 40. , 50. , 60. , 70. , 80. ,
90. , 100. , 110. , 120. , 130. , 140. , 150. , 160. , 170. ,
182.5, 200. , 220. , 240. , 260. , 280. , 300. , 320. , 340. ,
360. , 380. , 400. , 420. , 440. , 462.5, 500. , 550. , 600. ,
650. , 700. , 750. , 800. , 850. , 900. , 950. , 1000. , 1050. ,
1100. , 1150. , 1200. , 1250. , 1300. , 1350. , 1412.5, 1500. , 1600. ,
1700. , 1800. , 1900. , 1975., 2000.]
self.pLevelRange = pLevelRange
self.presRanges = self.make_rg_pres_ranges()
self.reduce_presLevels_and_presRanges()
@staticmethod
def get_dates_set(period=30):
"""
create a set of dates split into n periods.
period is in days.
"""
n_rows = int(np.floor(365/period))
datesSet = []
for year in range(2007, 2019):
yearSet = np.array_split(pd.date_range(str(year)+'-01-01', str(year)+'-12-31'), n_rows)
datesSet = datesSet + yearSet
keepEnds = lambda x: [x[0].strftime(format='%Y-%m-%d'), x[-1].strftime(format='%Y-%m-%d')]
datesSet = list(map(keepEnds, datesSet))
return datesSet
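    # e.g. get_dates_set(30) yields 12 windows per year; the first 2007 entry is
    # ['2007-01-01', '2007-01-31'], with endpoints kept as 'YYYY-MM-DD' strings.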
@staticmethod
def get_ocean_slice(startDate, endDate, presRange, intPres, basin=None, appLocal=None, reduceMeas=False):
'''
        query a horizontal slice of the ocean for a specified time range
        startDate and endDate should be strings formatted like so: 'YYYY-MM-DD'
        presRange should be a string formatted as: '[lowPres,highPres]'
        Try to keep the query small enough so as to not exceed the 15 MB limit set by the database.
'''
if appLocal:
baseURL = 'http://localhost:3000'
else:
baseURL = 'https://argovis.colorado.edu'
baseURL += '/gridding/presSliceForInterpolation/'
startDateQuery = '?startDate=' + startDate
endDateQuery = '&endDate=' + endDate
presRangeQuery = '&presRange=' + presRange
intPresQuery = '&intPres=' + str(intPres)
url = baseURL + startDateQuery + endDateQuery + presRangeQuery + intPresQuery
if basin:
basinQuery = '&basin=' + basin
url += basinQuery
url += '&reduceMeas=' + str(reduceMeas).lower()
resp = requests.get(url)
# Consider any status other than 2xx an error
if not resp.status_code // 100 == 2:
raise ValueError("Error: Unexpected response {}".format(resp))
profiles = resp.json()
return profiles
def reject_profile(self, profile):
if not profile['position_qc'] in self.qcKeep:
reject = True
elif not profile['date_qc'] in self.qcKeep:
reject = True
elif len(profile['measurements']) < 2: # cannot be interpolated
reject = True
elif profile['BASIN'] in self.exceptBasin: # ignores basins
reject=True
else:
reject = False
return reject
@staticmethod
def make_profile_interpolation_function(x,y):
'''
        creates a PCHIP interpolation function from the arrays x and y
        '''
        try:
            f = PchipInterpolator(x, y, axis=1, extrapolate=False)
        except Exception as err:
            logging.warning(err)
            raise
        return f
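    # Note: PCHIP is shape-preserving (no overshoot between data points) and,
    # with extrapolate=False, returns NaN outside [min(x), max(x)]; those NaN
    # rows are dropped later by dropna() in make_interpolated_df().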
@staticmethod
def make_pres_ranges(presLevels):
"""
Pressure ranges are based off of depths catagory
surface: at 2.5 dbar +- 2.5
shallow: 10 to 182.5 dbar +- 5
medium: 200 to 462.5 dbar +- 15
deep: 500 to 1050 dbar +- 30
abbysal: 1100 to 1975 dbar +- 60
"""
stringifyArray = lambda x: str(x).replace(' ', '')
surfaceRange = [[presLevels[0] - 2.5, presLevels[0]+ 2.5]]
shallowRanges = [ [x - 5, x + 5] for x in presLevels[1:19] ]
mediumRanges = [ [x - 15, x + 15] for x in presLevels[19:33] ]
deepRanges = [ [x - 30, x + 30] for x in presLevels[33:45] ]
abbysalRanges = [ [x - 60, x + 60] for x in presLevels[45:] ]
presRanges = surfaceRange + shallowRanges + mediumRanges + deepRanges + abbysalRanges
presRanges = [stringifyArray(x) for x in presRanges]
return presRanges
@staticmethod
def make_rg_pres_ranges():
'''
uses pressure ranges defined in RG climatology
'''
rgFilename = '/home/tyler/Desktop/RG_ArgoClim_Temp.nc'
rg = xr.open_dataset(rgFilename, decode_times=False)
bnds = rg['PRESSURE_bnds']
presRanges = bnds.values.tolist()
stringifyArray = lambda x: str(x).replace(' ', '')
presRanges = [stringifyArray(x) for x in presRanges]
return presRanges
@staticmethod
def save_iDF(iDf, filename, tdx):
iDf.date = pd.to_datetime(iDf.date)
iDf.date = iDf.date.apply(lambda d: d.strftime("%d-%b-%Y %H:%M:%S"))
if not iDf.empty:
with open(filename, 'a') as f:
if tdx==0:
iDf.to_csv(f, header=True)
else:
iDf.to_csv(f, header=False)
@staticmethod
def record_to_array(measurements, xLab, yLab):
x = []
y = []
for meas in measurements:
x.append(meas[xLab])
y.append(meas[yLab])
return x, y
@staticmethod
def sort_list(x, y):
        '''sort both lists by ascending x'''
xy = zip(x, y)
ys = [y for _, y in sorted(xy)]
xs = sorted(x)
return xs, ys
@staticmethod
    def unique_idxs(seq):
        '''indexes of the first occurrence of each unique value, excluding NaN and -999'''
        tally = defaultdict(list)
        for idx, item in enumerate(seq):
            tally[item].append(idx)
        uniques = [ (key, locs) for key, locs in tally.items() ]
        uniques = [ (key, locs) for key, locs in uniques if key not in {-999, None} and not np.isnan(key) ]
        idxs = []
        for unique in sorted(uniques):
            idxs.append(unique[1][0])
        return idxs
    def format_xy(self, x, y):
        '''sort by x, drop duplicate x values, then drop missing y values'''
        x2, y2 = self.sort_list(x, y)
        x_dup_idx = self.unique_idxs(x2)
        xu = [x2[idx] for idx in x_dup_idx]
        yu = [y2[idx] for idx in x_dup_idx]
        # remove missing y values (-999, None, NaN)
        y_keep_idx = [idx for idx, key in enumerate(yu) if key not in {-999, None} and not np.isnan(key)]
        xu = [xu[idx] for idx in y_keep_idx]
        yu = [yu[idx] for idx in y_keep_idx]
        return xu, yu
def make_interpolated_profile(self, profile, xintp, xLab, yLab):
meas = profile['measurements']
if len(meas) == 0:
return None
if not yLab in meas[0].keys():
return None
x, y = self.record_to_array(meas, xLab, yLab)
x, y = self.format_xy(x, y)
if len(x) < 2: # pchip needs at least two points
return None
f = self.make_profile_interpolation_function(x, y)
rowDict = profile.copy()
del rowDict['measurements']
rowDict[xLab] = xintp
        if len(meas) == 1 and meas[0][xLab] == xintp:
            yintp = meas[0][yLab]
else:
yintp = f(xintp)
rowDict[yLab] = yintp
return rowDict
def make_interpolated_df(self, profiles, xintp, xLab='pres', yLab='temp'):
'''
make a dataframe of interpolated values set at xintp for each profile
xLab: the column name for the interpolation input x
yLab: the column to be interpolated
xintp: the values to be interpolated
'''
outArray = []
for profile in profiles:
rowDict = self.make_interpolated_profile(profile, xintp, xLab, yLab)
if rowDict:
outArray.append(rowDict)
outDf = pd.DataFrame(outArray)
outDf = outDf.rename({'_id': 'profile_id'}, axis=1)
outDf = outDf.dropna(subset=[xLab, yLab], how='any', axis=0)
logging.debug('number of rows in df: {}'.format(outDf.shape[0]))
logging.debug('number of profiles interpolated: {}'.format(len(outDf['profile_id'].unique())))
return outDf
def intp_pres(self, xintp, presRange):
if self.basin:
iTempFileName = 'iTempData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin)
iPsalFileName = 'iPsalData_pres_{0}_basin_{1}.csv'.format(xintp, self.basin)
else:
iTempFileName = 'iTempData_pres_{}.csv'.format(xintp)
iPsalFileName = 'iPsalData_pres_{}.csv'.format(xintp)
start = datetime.now()
logging.debug('number of dates:{}'.format(len(self.datesSet)))
for tdx, dates in enumerate(self.datesSet):
if tdx < self.starttdx:
continue
logging.debug('starting interpolation at time index: {}'.format(tdx))
startDate, endDate = dates
try:
sliceProfiles = self.get_ocean_slice(startDate, endDate, presRange, xintp, self.basin, self.appLocal, self.reduceMeas)
except Exception as err:
logging.warning('profiles not recieved: {}'.format(err))
continue
logging.debug('xintp: {0} on tdx: {1}'.format(xintp, tdx))
logging.debug('number of profiles found in interval: {}'.format(len(sliceProfiles)))
try:
iTempDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'temp')
except Exception as err:
logging.warning('error when interpolating temp')
logging.warning(err)
continue
try:
iPsalDf = self.make_interpolated_df(sliceProfiles, xintp, 'pres', 'psal')
except Exception as err:
logging.warning('error when interpolating psal')
logging.warning(err)
continue
self.save_iDF(iTempDf, iTempFileName, tdx)
self.save_iDF(iPsalDf, iPsalFileName, tdx)
logging.debug('interpolation complete at time index: {}'.format(tdx))
timeTick = datetime.now()
logging.debug(timeTick.strftime(format='%Y-%m-%d %H:%M'))
dt = timeTick-start
logging.debug('completed run for psal {0} running for: {1}'.format(xintp, dt))
def reduce_presLevels_and_presRanges(self):
'''
reduces presLevels and pres ranges to those specified in pLevelRange
'''
self.startIdx = self.presLevels.index(self.pLevelRange[0])
self.endIdx = self.presLevels.index(self.pLevelRange[1])
self.presLevels = self.presLevels[ self.startIdx:self.endIdx ]
self.presRanges = self.presRanges[ self.startIdx:self.endIdx ]
def main(self):
logging.debug('inside main loop')
logging.debug('running pressure level ranges: {}'.format(self.pLevelRange))
for idx, presLevel in enumerate(self.presLevels):
xintp = presLevel
presRange = self.presRanges[idx]
self.intp_pres(xintp, presRange)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--maxl", help="start on pressure level", type=float, nargs='?', default=2000)
parser.add_argument("--minl", help="end on pressure level", type=float, nargs='?', default=1975)
parser.add_argument("--basin", help="filter this basin", type=str, nargs='?', default=None)
parser.add_argument("--starttdx", help="start time index", type=int, nargs='?', default=0)
parser.add_argument("--logFileName", help="name of log file", type=str, nargs='?', default='pchipOceanSlices.log')
myArgs = parser.parse_args()
pLevelRange = [myArgs.minl, myArgs.maxl]
basin = myArgs.basin
starttdx = myArgs.starttdx
#idxStr = str(myArgs.minl) + ':' + str(myArgs.maxl)
#logFileName = 'pchipOceanSlices{}.log'.format(idxStr)
FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
logging.basicConfig(format=FORMAT,
filename=myArgs.logFileName,
level=logging.DEBUG)
logging.debug('Start of log file')
startTime = datetime.now()
pos = PchipOceanSlices(pLevelRange, basin=basin, exceptBasin={}, starttdx=starttdx, appLocal=True)
pos.main()
endTime = datetime.now()
dt = endTime - startTime
logging.debug('end of log file for pressure level ranges: {}'.format(pLevelRange))
dtStr = 'time to complete: {} seconds'.format(dt.seconds)
print(dtStr)
    logging.debug(dtStr)
| 2.359375 | 2 |
keystone/managers/grant.py | admiyo/keystone | 0 | 12799774 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2011 OpenStack LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Role-Grant manager module """
import logging
import keystone.backends.api as api
logger = logging.getLogger(__name__) # pylint: disable=C0103
class Manager(object):
def __init__(self):
self.driver = api.ROLE
#
# Role-Grant Methods
#
def rolegrant_get_page(self, user_id, tenant_id, marker, limit):
""" Get one page of role grant list """
return self.driver.rolegrant_get_page(user_id, tenant_id, marker,
limit)
def rolegrant_get_page_markers(self, user_id, tenant_id, marker, limit):
""" Calculate pagination markers for role grants list """
return self.driver.rolegrant_get_page_markers(user_id, tenant_id,
marker, limit)
def list_global_roles_for_user(self, user_id):
return self.driver.list_global_roles_for_user(user_id)
def list_tenant_roles_for_user(self, user_id, tenant_id):
return self.driver.list_tenant_roles_for_user(user_id, tenant_id)
def rolegrant_list_by_role(self, role_id):
return self.driver.rolegrant_list_by_role(role_id)
def rolegrant_get_by_ids(self, user_id, role_id, tenant_id):
return self.driver.rolegrant_get_by_ids(user_id, role_id, tenant_id)
def rolegrant_delete(self, grant_id):
return self.driver.rolegrant_delete(grant_id)
def list_role_grants(self, role_id, user_id, tenant_id):
return self.driver.list_role_grants(role_id, user_id, tenant_id)
| 1.929688 | 2 |
server/website/website/db/oracle/target_objective.py | yangdsh/ottertune | 3 | 12799775 | #
# OtterTune - target_objective.py
#
# Copyright (c) 2017-18, Carnegie Mellon University Database Group
#
import logging
from website.models import DBMSCatalog, MetricCatalog
from website.types import DBMSType
from ..base.target_objective import (BaseTargetObjective, BaseThroughput, LESS_IS_BETTER,
MORE_IS_BETTER)
LOG = logging.getLogger(__name__)
class CustomDBTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='custom_db_time', pprint='Custom DB Time', unit='seconds',
short_unit='s', improvement=LESS_IS_BETTER)
def compute(self, metrics, observation_time):
total_wait_time = 0.
# dba_hist db_time will be 0 after cleaning if & only if it does not exist before cleaning
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0
for name, value in metrics.items():
if has_dba_hist and 'dba_hist_' not in name:
continue
if 'db cpu' in name:
total_wait_time += float(value)
elif 'time_waited_micro_fg' in name:
wait_time = float(value)
elif name.endswith('wait_class'):
# wait_class#:
# 0: Other; 1: Application; 2: Configuration; 3: Administrative; 4: Concurrency;
# 5: Commit; 6: Idle; 7: Network; 8: User I/O; 9: System I/O
if value == 'Idle':
wait_time = 0
total_wait_time += wait_time
return total_wait_time / 1000000.
class NormalizedDBTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='db_time', pprint='Normalized DB Time', unit='seconds',
short_unit='s', improvement=LESS_IS_BETTER)
# This target objective is designed for Oracle v12.2.0.1.0
        dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='12.2.0.1.0')
self.default_values = {}
for metric in MetricCatalog.objects.filter(dbms=dbms):
self.default_values[metric.name] = metric.default
def reload_default_metrics(self):
        dbms = DBMSCatalog.objects.get(type=DBMSType.ORACLE, version='12.2.0.1.0')
self.default_values = {}
for metric in MetricCatalog.objects.filter(dbms=dbms):
self.default_values[metric.name] = metric.default
def compute(self, metrics, observation_time):
extra_io_metrics = ["log file sync"]
not_io_metrics = ["read by other session"]
total_wait_time = 0.
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0
for name, value in metrics.items():
if has_dba_hist and 'dba_hist_' not in name:
continue
if 'db cpu' in name:
total_wait_time += float(value)
elif 'time_waited_micro_fg' in name:
default_wait_time = float(self.default_values[name])
wait_time = float(value)
elif 'total_waits_fg' in name:
default_total_waits = float(self.default_values[name])
total_waits = float(value)
elif name.endswith('wait_class'):
if value == 'Idle':
wait_time = 0
elif value in ('User I/O', 'System I/O') or \
any(n in name for n in extra_io_metrics):
if not any(n in name for n in not_io_metrics):
if default_total_waits == 0:
average_wait = 0
else:
average_wait = default_wait_time / default_total_waits
wait_time = total_waits * average_wait
total_wait_time += wait_time
return total_wait_time / 1000000.
class RawDBTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='raw_db_time', pprint='Raw DB Time',
unit='seconds', short_unit='s', improvement=LESS_IS_BETTER)
def compute(self, metrics, observation_time):
has_dba_hist = metrics['global.dba_hist_sys_time_model.db time'] > 0
if has_dba_hist:
return metrics['global.dba_hist_sys_time_model.db time'] / 1000000.
return metrics['global.sys_time_model.db time'] / 1000000.
class TransactionCounter(BaseTargetObjective):
def __init__(self):
super().__init__(name='transaction_counter', pprint='Number of commits and rollbacks',
unit='transactions', short_unit='txn', improvement=MORE_IS_BETTER)
def compute(self, metrics, observation_time):
num_txns = sum(metrics[ctr] for ctr in ('global.sysstat.user commits',
'global.sysstat.user rollbacks'))
return num_txns
class ElapsedTime(BaseTargetObjective):
def __init__(self):
super().__init__(name='elapsed_time', pprint='Elapsed Time', unit='seconds',
short_unit='s', improvement=LESS_IS_BETTER)
def compute(self, metrics, observation_time):
return observation_time
target_objective_list = tuple((DBMSType.ORACLE, target_obj) for target_obj in [ # pylint: disable=invalid-name
BaseThroughput(transactions_counter=('global.sysstat.user commits',
'global.sysstat.user rollbacks')),
CustomDBTime(),
NormalizedDBTime(),
RawDBTime(),
TransactionCounter(),
ElapsedTime(),
])
| 2.015625 | 2 |
python3/leetcodepy/regular_expression_matching.py | qianbinbin/leetcode | 4 | 12799776 | """
Given an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*' where:
'.' Matches any single character.
'*' Matches zero or more of the preceding element.
The matching should cover the entire input string (not partial).
Example 1:
Input: s = "aa", p = "a"
Output: false
Explanation: "a" does not match the entire string "aa".
Example 2:
Input: s = "aa", p = "a*"
Output: true
Explanation: '*' means zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes "aa".
Example 3:
Input: s = "ab", p = ".*"
Output: true
Explanation: ".*" means "zero or more (*) of any character (.)".
Example 4:
Input: s = "aab", p = "c*a*b"
Output: true
Explanation: c can be repeated 0 times, a can be repeated 1 time. Therefore, it matches "aab".
Example 5:
Input: s = "mississippi", p = "mis*is*p*."
Output: false
Constraints:
0 <= s.length <= 20
0 <= p.length <= 30
s contains only lowercase English letters.
p contains only lowercase English letters, '.', and '*'.
It is guaranteed for each appearance of the character '*', there will be a previous valid character to match.
"""
class Solution1:
def __match(self, s: str, p: str) -> bool:
if p[0] == '\0':
return s[0] == '\0'
if s[0] == '\0':
            return p[1] == '*' and self.__match(s, p[2:])
if p[1] == '*':
if p[0] == '.' or p[0] == s[0]:
return self.__match(s, p[2:]) or self.__match(s[1:], p)
return self.__match(s, p[2:])
return (p[0] == '.' or p[0] == s[0]) and self.__match(s[1:], p[1:])
def isMatch(self, s: str, p: str) -> bool:
return self.__match(s + '\0', p + '\0')
class Solution2:
def isMatch(self, s: str, p: str) -> bool:
m, n = len(s), len(p)
if n == 0:
return m == 0
dp = [[False] * (n + 1) for _ in range(m + 1)]
dp[0][0] = True
for j in range(2, n + 1, 2):
if p[j - 1] == '*':
dp[0][j] = True
else:
break
if m > 0:
dp[1][1] = p[0] == '.' or p[0] == s[0]
for i in range(1, m + 1):
for j in range(2, n + 1):
if p[j - 1] != '*':
dp[i][j] = (p[j - 1] == '.' or p[j - 1] == s[i - 1]) and dp[i - 1][j - 1]
else:
dp[i][j] = dp[i][j - 2] or ((p[j - 2] == '.' or p[j - 2] == s[i - 1]) and dp[i - 1][j])
return dp[m][n]
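

# A quick sanity check over the docstring examples above; this harness is an
# illustrative addition, not part of the original submission.
if __name__ == "__main__":
    cases = [("aa", "a", False), ("aa", "a*", True), ("ab", ".*", True),
             ("aab", "c*a*b", True), ("mississippi", "mis*is*p*.", False)]
    for s, p, expected in cases:
        assert Solution1().isMatch(s, p) == expected
        assert Solution2().isMatch(s, p) == expected
    print("all docstring examples pass")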
| 4.34375 | 4 |
generate_prediction/bio.py | yangyxt/QBiC-Pred | 7 | 12799777 | import itertools
import pandas as pd
import numpy as np
# all permutations are already reverse-deleted
# all sequences are represented in binary
nucleotides = {'A':0,'C':1,'G':2,'T':3}
numtonuc = {0:'A',1:'C',2:'G',3:'T'}
complement = {0:3,3:0,1:2,2:1}
def window(fseq, window_size):
for i in range(len(fseq) - window_size + 1):
yield fseq[i:i+window_size]
# return the first or the last number representation
def seqpos(kmer,last):
    return 1 << (1 + 2 * kmer) if last else 1 << 2 * kmer
def seq_permutation(seqlen):
return (range(seqpos(seqlen,False),seqpos(seqlen,True)))
def gen_nonreversed_kmer(k):
nonrevk = list()
for i in range(seqpos(k,False),seqpos(k,True)):
if i <= revcomp(i):
nonrevk.append(i)
return nonrevk
def itoseq(seqint):
if type(seqint) is not int:
return seqint
seq = ""
mask = 3
copy = int(seqint) # prevent changing the original value
    while copy != 1:
seq = numtonuc[copy&mask] + seq
copy >>= 2
if copy == 0:
print("Could not find the append-left on the input sequence")
return 0
return seq
def seqtoi(seq,gappos=0,gapsize=0):
# due to various seqlengths, this project always needs append 1 to the left
binrep = 1
gaps = range(gappos,gappos+gapsize)
for i in range(0,len(seq)):
if i in gaps:
continue
binrep <<= 2
binrep |= nucleotides[seq[i]]
return binrep
def revcomp(seqbin):
rev = 1
mask = 3
copy = int(seqbin)
while copy != 1:
rev <<= 2
rev |= complement[copy&mask]
copy >>= 2
if copy == 0:
print("Could not find the append-left on the input sequence")
return 0
return rev
def revcompstr(seq):
rev = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
return "".join([rev[base] for base in reversed(seq)])
def insert_pos(seqint,base,pos): # pos is position from the right
return ((seqint << 2) & ~(2**(2*pos+2)-1)) | ((seqint & 2**(2*pos)-1) | (nucleotides[base] << pos*2))
#return (seqint << 2) | (seqint & 2**pos-1) & ~(3 << (pos*2)) | (nucleotides[base] << pos*2)
# this function already counts without its reverse complement,
# i.e. oligfreq + reverse merge in the original R code
# Input: panda list and kmer length
# Output: oligonucleotide count with reverse removed
def nonr_olig_freq(seqtbl,kmer,nonrev_list,gappos=0,gapsize=0):
# with the gapmodel, our model become gapsize + kmer
gapmer = kmer+gapsize
# separator, since this is binary, the number is counted from the right
rightseparator = kmer-gappos
leftseparator = rightseparator+gapsize
olig_df = {k: [0] * len(seqtbl) for k in nonrev_list} # use dictionary first to avoid slow indexing from panda data frame
for i in range(0,len(seqtbl)): #22s for 3000
mask = (4**gapmer)-1
cpy = int(seqtbl[i])
while cpy > (4**gapmer)-1:
# gap calculation here
cur = cpy & mask
right = cur & ((4**rightseparator)-1)
left = (cur >> 2*leftseparator) << 2*rightseparator
gappedseqint = left | right
r = (1<<(2*kmer))|gappedseqint # append 1
rc = revcomp(r)
if r > rc:
r = rc
# 392secs with loc,434 secs with the regression. R time, 10secs for allocation, 3.97mins for linreg
# with 'at', only 23secs! -- 254secs total for 6mer
olig_df[r][i] += 1
cpy >>= 2
return pd.DataFrame(olig_df)
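

if __name__ == "__main__":
    # Illustrative round-trip demo of the 2-bit sequence encoding, assuming
    # only the functions defined above; not part of the original module.
    seq = "ACGT"
    enc = seqtoi(seq)  # a leading 1 bit, then 2 bits per base
    assert itoseq(enc) == seq
    assert itoseq(revcomp(enc)) == revcompstr(seq)  # "ACGT" is its own reverse complement
    print(len(gen_nonreversed_kmer(3)), "non-reverse-complement 3-mers (expect 32)")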
| 2.59375 | 3 |
je_auto_control/osx/listener/__init__.py | JE-Chen/Python_JEAutoControl | 9 | 12799778 | from je_auto_control.osx.listener import *
| 1.046875 | 1 |
utility/mac_mapper.py | jcal329/cs158b-Storm | 3 | 12799779 | <gh_stars>1-10
from pysnmp.hlapi import *
from parse_config import config
import sys
#Reference SNMP walk from: https://www.google.com/search?q=snmp+walk+solarwinds&oq=snmp+walk&aqs=chrome.5.69i57j0l5.2209j0j4&sourceid=chrome&ie=UTF-8
#.env file needs to have the VLAN number & switch address
switches = config['switches']
vlan = config['private_number']
def decToHexAddress(arg):
arr = arg.split(".")
output = ''
for i in range(len(arr)):
if i == len(arr) - 1:
output = output + hex(int(arr[i])).replace('0x', '').upper()
else:
output = output + hex(int(arr[i])).replace('0x', '').upper() + ":"
return output
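# Illustrative example with a hypothetical input:
#   decToHexAddress("0.28.202.171.3.79") -> "0:1C:CA:AB:3:4F"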
def mac_mapper(file):
output = []
mac_addresses = []
for s in switches:
host = s['switch_address']
for (errorIndication, errorStatus, errorIndex, varBinds) in nextCmd(SnmpEngine(),
CommunityData('private@'+str(vlan)), UdpTransportTarget((host, 161),timeout = 2, retries = 5), ContextData(),
ObjectType(ObjectIdentity('1.3.6.1.2.1.17.4.3.1.2')), lexicographicMode=False):
if errorIndication:
print(errorIndication, file=sys.stderr)
break
elif errorStatus:
print('%s at %s' % (errorStatus.prettyPrint(),
errorIndex and varBinds[int(errorIndex) - 1][0] or '?'),
file=sys.stderr)
break
else:
data = []
for varBind in varBinds:
element = str(varBind)
element = element.replace("SNMPv2-SMI::mib-2.17.4.3.1.2.", "").replace(" = ", ";")
splitArr = element.split(";")
mac_address = element.replace(splitArr[0],decToHexAddress(splitArr[0]))
mac_addresses.append(mac_address)
data.append(host + ',' + mac_address)
print("['SWITCH ADDRESS,MAC ADDRESS;PORT']")
print(data)
output.extend(data)
text = ""
for j in output:
text += j + '\n'
with open('mac_mapper.txt', "w") as f:
f.write(text)
if file != None:
with open(file, "w") as f:
for address in mac_addresses:
f.write(address+"\n")
if __name__ == "__main__":
    mac_mapper(None)
| 2.328125 | 2 |
define_jogadores.py | LuizArthur135/Joga-da-velha | 0 | 12799780 |
jogadores = {
"jogador_1":{
"name": "",
"escolha": ""
},
"jogador_2":{
"name": "",
"escolha": ""
}
}
def inicia_jogadores():
jogadores["jogador_1"]["name"] = str(input('Qual e seu nome? '))
jogadores["jogador_2"]["name"] = str(input('Nome da pessoa que vai jogar com vc: '))
jogadores["jogador_1"]["escolha"] = str(input(f'Qual você quer {jogadores["jogador_1"]["name"]}? [O/X] ')).upper()
valida_escolha(jogadores["jogador_1"]["escolha"])
if jogadores["jogador_1"]["escolha"] == "X":
jogadores["jogador_2"]["escolha"] = "O"
else:
jogadores["jogador_2"]["escolha"] = "X"
return jogadores
def valida_escolha(escolha):
if escolha != "O" and escolha != "X":
while jogadores["jogador_1"]["escolha"] != "O" and jogadores["jogador_1"]["escolha"] != "X":
jogadores["jogador_1"]["escolha"] = str(input("ERRO: tente novamente, Qual você quer? [O/X] ")).upper() | 3.921875 | 4 |
core/apps/kubeops_api/migrations/0063_auto_20200223_0557.py | r4b3rt/KubeOperator | 3 | 12799781 | <filename>core/apps/kubeops_api/migrations/0063_auto_20200223_0557.py
# Generated by Django 2.2.10 on 2020-02-23 05:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('users', '0002_userprofile'),
('kubeops_api', '0062_auto_20200221_0510'),
]
operations = [
migrations.AddField(
model_name='item',
name='users',
field=models.ManyToManyField(to='users.UserProfile'),
),
migrations.AlterField(
model_name='clusterhealthhistory',
name='date_type',
field=models.CharField(choices=[('HOUR', 'HOUR'), ('DAY', 'DAY')], default='HOUR', max_length=255),
),
]
| 1.484375 | 1 |
tests/detail/jupyter_app/test_format_deployments_info.py | intdata-bsc/idact | 5 | 12799782 | from idact.detail.jupyter_app.format_deployments_info import \
format_deployments_info
def test_format_deployments_info():
formatted = format_deployments_info(cluster_name='cluster1')
assert formatted == (
"\nTo access the allocation and notebook deployments from cluster,"
" you can use the following snippet.\n"
"You may need to change the cluster name if it's different in"
" the target environment.\n"
"----------------\n"
"from idact import show_cluster\n"
"cluster = show_cluster('cluster1')\n"
"deployments = cluster.pull_deployments()\n"
"nodes = deployments.nodes[-1]\n"
"nb = deployments.jupyter_deployments[-1]\n"
"----------------")
| 2.390625 | 2 |
Red_Color_E_Paper_HAT/e_paper_2in7_color_air.py | sbcshop/2.7-E-Paper-HAT | 1 | 12799783 | # Red color e-paper
import sys
import os
import lib_2inch7_ec_paper
import time
from PIL import Image,ImageDraw,ImageFont
from pms_a003 import Sensor
air_mon = Sensor()
air_mon.connect_hat(port="/dev/ttyS0", baudrate=9600)
while True:
try:
e_paper = lib_2inch7_ec_paper.Ec_Paper()
e_paper.init()
# Drawing on the image
black_image = Image.new('1', (e_paper.width, e_paper.height), 255) # 255: clear the frame
red_image = Image.new('1', (e_paper.width, e_paper.height), 255) #
font28 = ImageFont.truetype(('images/Font.ttc'), 28)
font18 = ImageFont.truetype(('images/Font.ttc'), 18)
# Drawing on the Horizontal image
        horizontal_black_image = Image.new('1', (e_paper.height, e_paper.width), 255)  # landscape buffer (height x width)
        horizontal_red_image = Image.new('1', (e_paper.height, e_paper.width), 255)  # landscape buffer (height x width)
values = air_mon.read()
print("PMS 1 value is {}".format(values.pm10_cf1))
print("PMS 2.5 value is {}".format(values.pm25_cf1))
print("PMS 10 value is {}".format(values.pm100_cf1))
drawblack = ImageDraw.Draw(horizontal_black_image)
drawred = ImageDraw.Draw(horizontal_red_image)
drawred.text((10, 0), 'AIR MONITORING', font = font28, fill = 0)
        drawblack.text((10, 40), 'PM1.0 value = ', font = font28, fill = 0)
        drawblack.text((10, 80), 'PM2.5 value = ', font = font28, fill = 0)
        drawblack.text((10, 120), 'PM10 value =', font = font28, fill = 0)
drawred.text((210, 40), str(values.pm10_cf1), font = font28, fill = 0)
drawred.text((210, 80), str(values.pm25_cf1), font = font28, fill = 0)
drawred.text((210, 120),str(values.pm100_cf1), font = font28, fill = 0)
e_paper.display(e_paper.buffer(horizontal_black_image),e_paper.buffer(horizontal_red_image))
time.sleep(4)
e_paper.Clear_screen()
#e_paper.exit()
    except KeyboardInterrupt:
        # exit cleanly on Ctrl-C
        exit()
| 2.828125 | 3 |
firmware/bait/git_rev_macro.py | dkadish/BioAcousticIndexTool | 1 | 12799784 | <gh_stars>1-10
#!/usr/bin/env python3
import subprocess
revision = subprocess.check_output(["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
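# Prints a PlatformIO build flag of the form: -DPIO_SRC_REV="<40-char commit hash>"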
print('-DPIO_SRC_REV="%s"' % revision) | 1.828125 | 2 |
Project_cmNotice/expressChecker.py | corkine/pyBook | 10 | 12799785 | <gh_stars>1-10
#/usr/bin/env python3
# -*- coding:utf8 -*-
import requests
import lxml.etree
import json
import pickle,traceback,shelve,time,sys
__title__ = "Express delivery update checker"
__version__ = '0.0.2'
__log__ = """
0.0.1 2018-03-04
0.0.2 2018-03-07 fixed the 404 error when data had not yet been updated; now returns an empty list
"""
class ExpressChecker:
"""检查快递更新状态的类
"""
def __init__(self,metadata):
self.metadata = metadata
def getInfo(self, rss=""):
        '''Fetch info from a web RSS API'''
response = requests.get(rss)
content = response.content
xml = lxml.etree.XML(content)
clist = xml.xpath('//channel/item/title')
rlist = []
for x in clist:
if "中英字幕" in x.text:
rlist.append(x.text)
return rlist
def checkExpress(self,number='12045301',type_='auto',appcode='4ec8774252c'):
try:
import urllib.request
host = 'http://jisukdcx.market.alicloudapi.com'
path = '/express/query'
method = 'GET'
appcode = appcode
querys = 'number=' + number + '&type=' + type_
bodys = {}
url = host + path + '?' + querys
request = urllib.request.Request(url)
request.add_header('Authorization', 'APPCODE ' + appcode)
response = urllib.request.urlopen(request)
content = response.read()
            if content:
                result = content.decode('utf-8', 'ignore')
                return 1, 'query successful', result
            else:
                return 0, 'error: no data returned', ''
        except:
            return 0, 'error: no data returned', ''
def checkData(self,meta):
import json
for x in range(3):
            code, _, result = self.checkExpress(number=str(meta["info"]).strip())
if code == 1: break
if code == 0: return [],[],1
result = json.loads(result)
ilist = result["result"]["list"]
company = result["result"]["type"]
issign = result["result"]["issign"]
wlist = []
plist = []
p2list = []
for item in ilist:
suminfo = str(item["time"] + "::::::" + item["status"]).strip()
wlist.append(suminfo)
for item in wlist:
            if item not in meta["data"]:
plist.append(item)
for item in plist:
p2list.append("[快递状态更新]"+" %s:\n"%meta.name + item.split("::::::")[0]+ " " +item.split("::::::")[1] + " | %s:%s"%(company,meta.info))
if len(p2list) > 0:
p2list = p2list[0]
if isinstance(p2list,str):
p2list = [p2list]
        # the express tracker only needs the most recent status update; must return a list because callers iterate over it
return wlist,p2list,0 if issign == "1" else 1
if __name__ == "__main__":
from pprint import pprint
meta = {
"name":"孔夫子书籍",
"id":24322234,
"status":1,
"rate":30,
"type":"express",
"info":"5083078",
"data":[""]
}
checker = ExpressChecker(metadata=meta)
a,b,c = checker.checkData(meta=meta)
print(a,b,c) | 2.265625 | 2 |
test.py | NiklasMWeber/CreditCycleForecasting | 0 | 12799786 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 17 15:42:09 2022
@author: nikth
"""
from train import select_hatm_cv
from tools import add_time
from train import SignatureRegressionNik
from dataGeneration import GeneratorFermanianDependentMax
from sklearn.model_selection import train_test_split
#Xtrain, Ytrain, Xval, Yval = get_train_test_data(X_type, ntrain=ntrain, nval=nval, Y_type=Y_type,
# npoints=npoints, d=d, scale_X=scale_X)
dimPath = 5
nPaths = 1000
num = 101
G = GeneratorFermanianDependentMax(dimPath = dimPath,nPaths = nPaths,num = num)
G.generatePath()
X = G.X
G.generateResponse()
Y = G.Y
X_train, X_test, Y_train, Y_test = \
train_test_split(X,Y,test_size = 0.5)
Xtimetrain = add_time(X_train)
Xtimeval = add_time(X_test)
hatm = select_hatm_cv(Xtimetrain, Y_train, scaling = True)
sig_reg = SignatureRegressionNik(hatm, normalizeFeatures = True)
sig_reg.fit(Xtimetrain, Y_train)
print("val.error", sig_reg.get_loss(Xtimeval, Y_test))
print("training.error", sig_reg.get_loss(Xtimetrain, Y_train))
print("val.R", sig_reg.score(Xtimeval, Y_test))
print("training.R", sig_reg.score(Xtimetrain, Y_train))
| 2.390625 | 2 |
neuralizer/tests/test_data_process.py | BeckResearchLab/Neuralizer | 1 | 12799787 | <filename>neuralizer/tests/test_data_process.py
from __future__ import absolute_import,division,print_function
import os
import pandas as pd
import numpy as np
import numpy.testing as npt
import data_process as dp
def test_read_file():
X,Y = dp.read_file('test.tsv',["A","B"],"y")
    npt.assert_equal(X, np.array([[0, 1], [3, 2], [4, 3]]))
    npt.assert_equal(Y, np.array([0, 1, 5]))
def test_data_info():
data = {"filename":"test.tsv","X_var":["A","B"],"Y_var":"y"}
X,Y,input_dim,output_dim = dp.data_info(data)
assert input_dim == 2,"Dimension of input layer is not correct"
assert output_dim == 1 , "Dimension of output layer is not correct"
| 2.9375 | 3 |
quizzes/00.organize.me/Cracking the Coding Interview/tree_search_by_stack.py | JiniousChoi/encyclopedia-in-code | 2 | 12799788 | <filename>quizzes/00.organize.me/Cracking the Coding Interview/tree_search_by_stack.py<gh_stars>1-10
#!/usr/bin/env python3
import unittest
class Tree:
def __init__(self, v):
self.v = v
self.left = None
self.right = None
def is_terminal(self):
return self.left==None and self.right==None
def preorder(root):
''' return [node.v] '''
stack = [root]
visited = set()
res = []
while stack:
node = stack[-1]
if node not in visited:
visited.add(node)
res.append(node.v)
if node.left and node.left not in visited:
stack.append(node.left)
continue
if node.right and node.right not in visited:
stack.append(node.right)
continue
stack.pop(-1)
return res
def inorder(root):
''' return [node.v] '''
stack = [root]
visited = set()
res = []
while stack:
node = stack[-1]
if node.left and node.left not in visited:
stack.append(node.left)
continue
if node not in visited:
visited.add(node)
res.append(node.v)
if node.right and node.right not in visited:
stack.append(node.right)
continue
stack.pop(-1)
return res
def postorder(root):
''' return [node.v] '''
stack = [root]
visited = set()
res = []
while stack:
node = stack[-1]
if node.left and node.left not in visited:
stack.append(node.left)
continue
if node.right and node.right not in visited:
stack.append(node.right)
continue
if node not in visited:
visited.add(node)
res.append(node.v)
stack.pop(-1)
return res
class XTest(unittest.TestCase):
def test_sample1(self):
root, left, right = Tree(0), Tree(1), Tree(2)
root.left, root.right = left, right
self.assertEqual(preorder(root), [0,1,2])
self.assertEqual(inorder(root), [1,0,2])
self.assertEqual(postorder(root), [1,2,0])
def test_dfs_and_bfs2(self):
n1, n2, n3, n4, n5 = Tree(1), Tree(2), Tree(3), Tree(4), Tree(5)
n1.left = n2
n2.left = n3
n2.right = n4
n4.right = n5
self.assertEqual(preorder(n1), [1,2,3,4,5])
self.assertEqual(inorder(n1), [3,2,4,5,1])
self.assertEqual(postorder(n1), [3,5,4,2,1])
if __name__=="__main__":
unittest.main()
| 4.09375 | 4 |
Models/Subject.py | erikhalperin/Scheduler | 1 | 12799789 | import itertools
class Subject:
__id_generator = itertools.count(0, 1)
@staticmethod
def get_all_subjects():
return ['Eng1', 'Eng2', 'Eng3', 'Eng4', 'Alg1', 'Alg2', 'Geo', 'PreC',
'Phys', 'Chem', 'SciE', 'Bio', 'Civ1', 'Civ2', 'Civ3', 'Civ4']
def __init__(self, name: str):
self.name = name
self.id = next(self.__id_generator)
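

if __name__ == "__main__":
    # Illustrative usage (not in the original module): ids come from a
    # class-level counter shared across all Subject instances.
    s1, s2 = Subject("Eng1"), Subject("Alg1")
    print(s1.id, s2.id)  # 0 1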
| 3.234375 | 3 |
itemdb/itemdb_2_csv.py | Katorone/Astrox-Imperium | 0 | 12799790 | #!/usr/bin/python3
# More information, as well as the (non)licence can be found at: https://github.com/Katorone/Astrox-Imperium
# This script exports 2 files to a csv:
# - MOD/items/items_database.txt -> itemdb.csv
# - MOD/items/specs_database.txt -> docdb.csv
# It will also do some sanity checking, which should be useful for mod & modpack creators:
# - Each file can only contain unique IDs (the exported csv will only contain the first match)
# - Every ID between items and documents needs to be unique (the script will warn)
# - Warns when an item doesn't have a doc for crafting
# - Check if the .png for an item/doc exists
# - Orphaned documents
# Example for windows: c:\path\to\Astrox\MOD\items\
source = '/home/user/.steam/steam/steamapps/common/Astrox Imperium/Astrox Imperium_Data/MOD/items/'
itemfile = 'items_database.txt'
docfile = 'specs_database.txt'
# Delimiter to use in the exported csv
delimiter = ';'
# List of item IDs that don't have a crafting document
ignoreUncraftable = [
"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", # Resources - Raw
"11", "20", "21", "22", "23", "24", "25", "26", "27", "28", # Resources - Loot
"29", "100", "101", "103", "114", "102", "104", "109", "118", "113", # Materials
"105", "106", "107", "108", "110" , "2000", "111", "115", "112", # Materials
"121", "117", "116", "124", "119", "123", "120", "122", # Materials
"150", "151", "152", "153", "164", "155", "156", "157", "158", "168", # Components - Class A
"160", "161", "162", "163", "154", "159", "165", "166", "167", "169", # Components - Class B
"170", "200", "201", "202", "203", "204", "205", "206", "207", "208", # Components - Class C
"209", "210", "211", "212", "213", "214", "215", "216", "217", "218", # Components - Class D
"219", "2001", "2002", "2003", "2004", "2005", "2006", "2007", "2008", "2009", # Components - Class E
"2010", "2011", "2012", "2013", "2014", "2015", "2016", "2017", "2018", "2019", # Components - Class F
"2020", "2021", "2022", "2023", "2024", "2025", "2026", "2027", "2028", "2029", # Components - Class G
"2030", "2031", "2032", "2033", "2034", "2035", "2036", "2037", "2038", "2039", # Components - Class H
"2040", "2041", "2042", "2043", "2044", "2045", "2046", "2047", "2048", "2049", # Components - Class I
"2050", "2051", "2052", "2053", "2054", "2055", "2056", "2057", "2058", "2059", # Components - Class J
"2080", "2081", "2082", "400", "401", "402", # Components - Class M
"302", "300", "301", "351", "353", "350", "352", "330", "332", "331", # Trade Goods
"333", "341", "342", "340", "343", "303", "304", "305", "322", "324", # Trade Goods
"320", "321", "323", "325", "311", "310", "312", "313", "403", "404", # Trade Goods
"405", "406", "407", "408", # Trade Goods
"600", "601", "602", "603", "604", "605", "606", "607", "608", "609", # Life Support - Food
"620", "621", "622", "623", "624", "625", "626", "627", "628", "629", # Life Support - Water
"640", "641", "642", "643", "644", "645", "646", "647", "648", "649", # Life Support - Thermal
"660", "661", "662", "663", "664", "665", "666", "667", "668", "669", # Life Support - Waste
"690", "670", "671", "691", "672", "673", "692", "674", "675", "693", # Consumables
"676", "677", "700", "678", "679", "701", "680", "681", "710", "711", # Consumables
"712", "702", "703", "735", "736", "737", "738", # Consumables
]
## These settings tell the script which title it needs to look for when examining data.
header = {}
# You probably won't need to change this, unless Momo changes this in an update.
# Unique sorting key of items (items_database.txt)
header['itemId'] = '1 ITEM ID'
# Unique sorting key of documents (specs_database.txt)
header['docId'] = '1 DOC ID'
# Name of the item's image
header['itemImage'] = '6 icon image'
# Name of the document's image
header['docImage'] = '6 doc image'
# The item ID that a doc would craft:
header['docItemId'] = '9 CRAFTS ID'
### End of configuration ###
### Code starts here ###
import os
# reads data from path
def readFile(path):
fh = open(path, 'r', encoding='utf8', newline='\n')
data = fh.readlines()
fh.close()
return data
# Writes a list of data to path
def writeFile(path, dataList):
fh = open(path, 'w', encoding='utf8', newline='')
for line in dataList:
fh.write(line+'\r\n')
fh.close()
print("✔️ Finished writing: "+path)
# Takes a string and returns a list
def cleanLine(line, strip, delim):
line = line.strip()
if line == "": return line
if line[-1] == delim: line = line[0:-1]
return [x.strip(strip) for x in line.split(delim)]
# Finds the header, which is the last commented line at the start of a file
def getHeader(data):
for idx, line in enumerate(data):
if line[:2] != '//':
return data[idx-1][2:]
# Gets the index of the identifier from the header[list]
def getIdentifierIndex(header, identifier):
if identifier not in header: return -1
return header.index(identifier)
def parseFile(file, identifier):
lines = readFile(os.path.join(source, file))
header = cleanLine(getHeader(lines), '\t ', ';')
identifierIndex = getIdentifierIndex(header, identifier)
if identifierIndex == -1:
print("🛑 couldn't locate '"+identifier+"' in '"+source+"'")
quit()
# Parse the items, stored as item[id]
data = {}
data[delimiter+'header'+delimiter] = header # store the header for future use
doubles = {} # stores the ID that are duplicates
for line in lines:
if line[:2] == '//': continue # Ignore comments
line = cleanLine(line, '\t ', ';')
if line == "": continue # Ignore empty lines
id = line[identifierIndex]
if id in data: # Duplicate checking
doubles[id] = 2 if id not in doubles else doubles[id] + 1
else: # No duplicate, add the line.
data[id] = line
if len(doubles) > 0:
for id in doubles:
print("❌ The unique identifier '"+id+"' matched "+str(doubles[id])+" different lines.")
print("❌ Duplicates were found. The script will only use the first match per duplicate.")
print("------------------------------")
else:
print("✔️ There were no duplicate keys in: "+file)
return data
def composeCsv(data, target):
lines = []
for item in data: # data is a dictionary-type, which is guarantueed to be ordered by insertion.
joiner = '"'+delimiter+'"'
lines.append('"'+joiner.join(data[item])+'"')
writeFile(target, lines)
# Check itemData and docData for duplicate IDs
def findDuplicateEntries(fn1, data1, fn2, data2):
duplicates = {}
for id in data1.keys() & data2.keys():
if id == delimiter+'header'+delimiter: continue
duplicates[id] = 2 if id not in duplicates else duplicates[id] + 1
if len(duplicates) > 0:
for id in duplicates:
print("❌ The unique identifier '"+id+"' matched "+str(duplicates[id])+" times in "+fn1+" and "+fn2+".")
print("❌ Duplicate IDs were found across "+fn1+" and "+fn2+".")
print("------------------------------")
else:
print("✔️ There were no duplicate keys across: "+fn1+" and "+fn2+".")
# Checks that the column header[itemId] has en entry in the column header[docItemId]
def sanityCheck(items, itemHeader, docs, docsHeader):
itemHeaderIdentifier = getIdentifierIndex(items[delimiter+'header'+delimiter], itemHeader)
if itemHeaderIdentifier == -1:
print("🛑 couldn't locate '"+itemHeader+"' in findMissing(), unable to continue sanity check.")
return
docsHeaderIdentifier = getIdentifierIndex(docs[delimiter+'header'+delimiter], docsHeader)
if docsHeaderIdentifier == -1:
print("🛑 couldn't locate '"+docsHeader+"' in findMissing(), unable to continue sanity check.")
return
itemIDs = []
for i in items:
if i == delimiter+'header'+delimiter: continue
itemIDs.append(items[i][itemHeaderIdentifier])
docIDs = []
for i in docs:
if i == delimiter+'header'+delimiter: continue
docIDs.append(docs[i][docsHeaderIdentifier])
# Let's go over all items in docIDs and make sure they're unique
seen = set()
duplicates = [x for x in docIDs if x in seen or seen.add(x)]
if len(duplicates) > 0:
print("❌ The following item ID(s) have more than one crafting document: "+', '.join(duplicates))
print("------------------------------")
else:
print("✔️ All documents point to a unique item.")
# We have 2 lists of IDs, find the IDs from itemIDS that are missing in docIDs
docSet = set(docIDs)
ignoreSet = set(ignoreUncraftable)
missingDocs = [x for x in itemIDs if x not in docSet and x not in ignoreSet]
if len(missingDocs) > 0:
print("❌ The following item ID(s) do not have a crafting document: "+', '.join(missingDocs))
print(" Items that are uncraftable by design can be added to the 'ignoreUncraftable'-list in itemdb_2_csv.py")
print("------------------------------")
else:
print("✔️ All items have a crafting document attached (with "+str(len(ignoreUncraftable))+" ignored uncraftables).")
# For the orphaned check, we find docIDs that are missing in itemIDs
itemSet = set(itemIDs)
missingItems = [x for x in docIDs if x not in itemSet]
if len(missingItems) > 0:
print("❌ The following item ID(s) have a crafting document, but the item does not exist: "+', '.join(missingItems))
print("------------------------------")
else:
print("✔️ All documents have an existing item attached.")
def checkFileLinks(data, header):
headerIdentifier = getIdentifierIndex(data[delimiter+'header'+delimiter], header)
if headerIdentifier == -1:
print("🛑 couldn't locate '"+header+"' in checkFileLinks(), unable to continue sanity check.")
return
haserror = False
for i in data:
if i == delimiter+'header'+delimiter: continue
file = data[i][headerIdentifier]
if not os.path.isfile(os.path.join(source, file)):
haserror = True
print("❌ Item id '"+i+"' links to '"+file+"', which doesn't exists.")
if not haserror:
print("✔️ All files in column '"+header+"' exist.")
if __name__ == "__main__":
itemData = parseFile(itemfile, header["itemId"])
composeCsv(itemData, 'items_database.csv')
docData = parseFile(docfile, header["docId"])
composeCsv(docData, 'specs_database.csv')
# Check itemData and docData for duplicate IDs
findDuplicateEntries(itemfile, itemData, docfile, docData)
# Sanity checks:
# - Check if all items have a document
# - Check if all documents point to an existing item
# - Check if all documents point to a unique item
sanityCheck(itemData, header["itemId"], docData, header["docItemId"])
# Check if the .png for an item/doc exists
checkFileLinks(itemData, header["itemImage"])
checkFileLinks(docData, header["docImage"])
print("")
input("All done. Press enter to exit.")
| 2.640625 | 3 |
contact_forms/contact/tests/test_config.py | uktrade/dit-contact-forms | 2 | 12799791 | <reponame>uktrade/dit-contact-forms
from django.test import SimpleTestCase
from django.apps import apps
from contact.apps import ContactConfig
class ContactConfigTestCase(SimpleTestCase):
"""
Test app config
"""
def test_apps(self):
self.assertEqual(ContactConfig.name, "contact")
self.assertEqual(apps.get_app_config("contact").name, "contact")
| 2.390625 | 2 |
.k8s/scripts/delete-k8s-objects.py | fossabot/code-du-travail-numerique | 0 | 12799792 | <gh_stars>0
from subprocess import check_output
import hashlib
import os
import json
from urllib import request
# This script compares the active remote branches with the active k8s tags.
# If a k8s tag doesn't match any active remote branch's hashed name, we delete all the k8s objects carrying that tag.
github_token = os.environ["GITHUB_TOKEN"]
hash_size = int(os.environ["HASH_SIZE"])
def get_active_branches():
url = "https://api.github.com/repos/SocialGouv/code-du-travail-numerique/pulls".format(github_token)
req = request.Request(url, None, {"token": github_token})
response = request.urlopen(req)
active_branches = [branch.get("head").get("ref").encode() for branch in json.loads(response.read())]
return [
hashlib.sha1(branche).hexdigest()[:hash_size]
for branche in active_branches
]
def get_active_k8s_tags():
raw_k8s_tag_list = check_output("kubectl get pods -o go-template --template '{{range .items}}{{.metadata.labels.branch}}{{end}}'", shell=True).decode("utf-8")
k8s_tag_list = raw_k8s_tag_list.replace('<no value>','').split('cdtn-')
return [
k8s_tag
for k8s_tag in k8s_tag_list if k8s_tag
]
def delete_k8s_object(label):
k8s_object_list = ["service", "ingress", "configmap", "deployments", "statefulset", "pod"]
for k8s_object in k8s_object_list:
command_to_delete_k8s_object = ('kubectl delete '+ k8s_object +' --selector branch=cdtn-'+label)
check_output(command_to_delete_k8s_object, shell=True)
def get_k8s_tag_to_delete(active_k8s_tag_list=[], active_branch_list=[]):
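    # Illustrative example with hypothetical tags: active k8s tags
    # ['a1b2c3', 'd4e5f6'] and active branch hashes ['a1b2c3'] yield
    # ['d4e5f6'], i.e. only tags without a matching branch are returned.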
k8s_tag_list_to_delete = []
active_tags = [
tag for tag in active_k8s_tag_list if tag != ""
]
deletable_tags = [
tag
for tag in active_tags
if tag not in active_branch_list
]
for tag in deletable_tags:
k8s_tag_list_to_delete.append(tag)
return k8s_tag_list_to_delete
if __name__ == '__main__':
for k8s_tag_to_delete in get_k8s_tag_to_delete(get_active_k8s_tags(), get_active_branches()):
delete_k8s_object(k8s_tag_to_delete)
print('k8s objects with label branch=cdtn-'+k8s_tag_to_delete+' have been deleted')
| 2.59375 | 3 |
hossein/contest/52544/52544.py | mhdehghan/quera-answers | 0 | 12799793 | # https://quera.ir/problemset/contest/52544
CORNER_ONE = {(0, 0), (0, 1), (0, 2), (0, 6), (0, 7), (0, 8),
(1, 0), (1, 2), (1, 6), (1, 8),
(2, 0), (2, 1), (2, 2), (2, 6), (2, 7), (2, 8),
(6, 0), (6, 1), (6, 2), (6, 6), (6, 7), (6, 8),
(7, 0), (7, 2), (7, 6), (7, 8),
(8, 0), (8, 1), (8, 2), (8, 6), (8, 7), (8, 8)}
CORNER_ZERO = {(1, 1), (1, 7),
(7, 1), (7, 7)}
result = 1
mat = [input() for i in range(9)]
result_mat = []
for line in mat:
result_mat.append([int(val) for val in line])
mat = result_mat
for i, row in enumerate(mat):
    for j, val in enumerate(row):
        if (i, j) in CORNER_ONE:
            if val == 0:
                print(0)
                exit(0)
            continue
        if (i, j) in CORNER_ZERO:
            if val == 1:
                print(0)
                exit(0)
            continue
        if val == 2:
            result *= 2
print(result)
| 3.515625 | 4 |
setup.py | jim22k/metagraph-stellargraph | 0 | 12799794 | from setuptools import setup, find_packages
import versioneer
setup(
name="metagraph-stellargraph",
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description="Stellargraph plugins for Metagraph",
author="<NAME>.",
packages=find_packages(
include=["metagraph_stellargraph", "metagraph_stellargraph.*"]
),
include_package_data=True,
install_requires=["metagraph", "stellargraph"],
entry_points={
"metagraph.plugins": "plugins=metagraph_stellargraph.plugins:find_plugins"
},
)
| 1.140625 | 1 |
sdk/python/pulumi_aws/secretsmanager/__init__.py | pulumi-bot/pulumi-aws | 0 | 12799795 | <filename>sdk/python/pulumi_aws/secretsmanager/__init__.py
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .secret import *
from .secret_version import *
from .get_secret import *
from .get_secret_version import *
| 1.164063 | 1 |
run_test.py | NREL/scout | 0 | 12799796 | <reponame>NREL/scout<gh_stars>0
#!/usr/bin/env python3
""" Tests for running the engine """
# Import code to be tested
import run
# Import needed packages
import unittest
import numpy
import copy
import itertools
import os
class CommonTestMeasures(object):
"""Class of common sample measures for tests.
Attributes:
sample_measure (dict): Sample residential measure #1.
sample_measure2 (dict): Sample residential measure #2.
sample_measure3 (dict): Sample commercial measure #1.
"""
def __init__(self):
self.sample_measure = {
"name": "sample measure 1",
"active": 1,
"market_entry_year": None,
"market_exit_year": None,
"market_scaling_fractions": None,
"market_scaling_fractions_source": None,
"measure_type": "full service",
"structure_type": ["new", "existing"],
"climate_zone": ["AIA_CZ1", "AIA_CZ2"],
"bldg_type": ["single family home"],
"fuel_type": {"primary": ["electricity (grid)"],
"secondary": None},
"fuel_switch_to": None,
"end_use": {"primary": ["heating", "cooling"],
"secondary": None},
"technology_type": {"primary": "supply",
"secondary": None},
"technology": {"primary": ["resistance heat",
"ASHP", "GSHP", "room AC"],
"secondary": None},
"markets": {
"Technical potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}
}}},
"mseg_out_break": {}}}}
self.sample_measure2 = {
"name": "sample measure 2",
"active": 1,
"market_entry_year": None,
"market_exit_year": None,
"market_scaling_fractions": None,
"market_scaling_fractions_source": None,
"measure_type": "full service",
"structure_type": ["new", "existing"],
"climate_zone": ["AIA_CZ1", "AIA_CZ2"],
"bldg_type": ["single family home"],
"fuel_type": {"primary": ["electricity (grid)"],
"secondary": ["electricity (grid)"]},
"fuel_switch_to": None,
"end_use": {"primary": ["heating", "cooling"],
"secondary": ["lighting"]},
"technology_type": {"primary": "supply",
"secondary": "supply"},
"technology": {"primary": ["resistance heat",
"ASHP", "GSHP", "room AC"],
"secondary": ["general service (LED)"]},
"markets": {
"Technical potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}
}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}
}}},
"mseg_out_break": {}}}}
self.sample_measure3 = {
"name": "sample measure 3 (commercial)",
"active": 1,
"market_entry_year": None,
"market_exit_year": None,
"market_scaling_fractions": None,
"market_scaling_fractions_source": None,
"measure_type": "full service",
"structure_type": ["new", "existing"],
"climate_zone": ["AIA_CZ1", "AIA_CZ2"],
"bldg_type": ["assembly"],
"fuel_type": {"primary": ["electricity"],
"secondary": None},
"fuel_switch_to": None,
"end_use": {"primary": ["heating", "cooling"],
"secondary": None},
"technology_type": {"primary": "supply",
"secondary": None},
"technology": {"primary": ["resistance heat",
"ASHP", "GSHP", "room AC"],
"secondary": None},
"markets": {
"Technical potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}
}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}
}}},
"mseg_out_break": {}}}}
self.sample_measure4 = {
"name": "sample measure 4",
"active": 1,
"market_entry_year": None,
"market_exit_year": None,
"market_scaling_fractions": None,
"market_scaling_fractions_source": None,
"measure_type": "full service",
"structure_type": ["new", "existing"],
"climate_zone": ["AIA_CZ1", "AIA_CZ2"],
"bldg_type": ["single family home"],
"fuel_type": {"primary": ["electricity (grid)"],
"secondary": None},
"fuel_switch_to": None,
"end_use": {"primary": ["lighting"],
"secondary": None},
"technology_type": {"primary": "supply",
"secondary": None},
"technology": {"primary": ["general service (CFL)"],
"secondary": None},
"markets": {
"Technical potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}
}}},
"mseg_out_break": {}}}}
self.sample_measure5 = {
"name": "sample measure 5 (commercial)",
"active": 1,
"market_entry_year": None,
"market_exit_year": None,
"market_scaling_fractions": None,
"market_scaling_fractions_source": None,
"measure_type": "full service",
"structure_type": ["new", "existing"],
"climate_zone": ["AIA_CZ1", "AIA_CZ2"],
"bldg_type": ["assembly"],
"fuel_type": {"primary": ["electricity"],
"secondary": None},
"fuel_switch_to": None,
"end_use": {"primary": ["lighting"],
"secondary": None},
"technology_type": {"primary": "supply",
"secondary": None},
"technology": {"primary": ["F32T8"],
"secondary": None},
"markets": {
"Technical potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}
}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {},
"mseg_adjust": {
"contributing mseg keys and values": {},
"competed choice parameters": {},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}
}}},
"mseg_out_break": {}}}}
class CommonMethods(object):
"""Define common methods for use in all tests below."""
def dict_check(self, dict1, dict2):
"""Check the equality of two dicts.
Args:
dict1 (dict): First dictionary to be compared
dict2 (dict): Second dictionary to be compared
Raises:
AssertionError: If dictionaries are not equal.
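
        Example:
            dict_check({"a": {"b": 1.001}}, {"a": {"b": 1.002}}) passes,
            since leaf values are compared to 2 decimal places.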
"""
# zip() and zip_longest() produce tuples for the items
# identified, where in the case of a dict, the first item
# in the tuple is the key and the second item is the value;
# in the case where the dicts are not of identical size,
# zip_longest() will use the fill value created below as a
# substitute in the dict that has missing content; this
# value is given as a tuple to be of comparable structure
# to the normal output from zip_longest()
fill_val = ('substituted entry', 5.2)
# In this structure, k and k2 are the keys that correspond to
# the dicts or unitary values that are found in i and i2,
# respectively, at the current level of the recursive
# exploration of dict1 and dict2, respectively
for (k, i), (k2, i2) in itertools.zip_longest(sorted(dict1.items()),
sorted(dict2.items()),
fillvalue=fill_val):
# Confirm that at the current location in the dict structure,
# the keys are equal; this should fail if one of the dicts
# is empty, is missing section(s), or has different key names
self.assertEqual(k, k2)
# If the recursion has not yet reached the terminal/leaf node
if isinstance(i, dict):
# Test that the dicts from the current keys are equal
self.assertCountEqual(i, i2)
# Continue to recursively traverse the dict
self.dict_check(i, i2)
# At the terminal/leaf node, formatted as a numpy array
# (for input uncertainty test cases)
elif isinstance(i, numpy.ndarray):
self.assertTrue(type(i) == type(i2))
for x in range(0, len(i)):
self.assertAlmostEqual(i[x], i2[x], places=2)
# At the terminal/leaf node, formatted as a point value
else:
self.assertAlmostEqual(i, i2, places=2)
class TestMeasureInit(unittest.TestCase):
"""Ensure that measure attributes are correctly initiated.
Attributes:
sample_measure (object): Residential sample measure object.
attribute_dict (dict): Dict of sample measure attributes.
"""
@classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
cls.sample_measure = CommonTestMeasures().sample_measure
measure_instance = run.Measure(handyvars, **cls.sample_measure)
cls.attribute_dict = measure_instance.__dict__
def test_attributes(self):
"""Compare object attributes to keys from input dict."""
for key in self.sample_measure.keys():
self.assertEqual(
self.attribute_dict[key], self.sample_measure[key])
class OutputBreakoutDictWalkTest(unittest.TestCase, CommonMethods):
"""Test operation of 'out_break_walk' function.
Verify that function properly applies a climate zone/building
type/end use partition to a total energy or carbon
market/savings value.
Attributes:
a_run (object): Sample analysis engine object.
ok_total (dict): Sample unpartitioned measure results data.
ok_partitions (dict): Sample results partitioning fraction.
ok_out (dict): Sample partitioned measure results data.
"""
@classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
sample_measure = CommonTestMeasures().sample_measure
measure_list = [run.Measure(handyvars, **sample_measure)]
cls.a_run = run.Engine(handyvars, measure_list)
cls.ok_total = {"2009": 100, "2010": 100}
cls.ok_partitions = {
"AIA CZ1": {
"Residential": {
"Heating": {"2009": .10, "2010": .10},
"Cooling": {"2009": .15, "2010": .15}},
"Commercial": {
"Heating": {"2009": .20, "2010": .20},
"Cooling": {"2009": .25, "2010": .25}}},
"AIA CZ2": {
"Residential": {
"Heating": {"2009": .30, "2010": .30},
"Cooling": {"2009": .35, "2010": .35}},
"Commercial": {
"Heating": {"2009": .40, "2010": .40},
"Cooling": {"2009": .45, "2010": .45}}}}
cls.ok_out = {
"AIA CZ1": {
"Residential": {
"Heating": {"2009": 10, "2010": 10},
"Cooling": {"2009": 15, "2010": 15}},
"Commercial": {
"Heating": {"2009": 20, "2010": 20},
"Cooling": {"2009": 25, "2010": 25}}},
"AIA CZ2": {
"Residential": {
"Heating": {"2009": 30, "2010": 30},
"Cooling": {"2009": 35, "2010": 35}},
"Commercial": {
"Heating": {"2009": 40, "2010": 40},
"Cooling": {"2009": 45, "2010": 45}}}}
def test_ok(self):
"""Test for correct function output given valid inputs."""
dict1 = self.a_run.out_break_walk(
self.ok_partitions, self.ok_total)
dict2 = self.ok_out
self.dict_check(dict1, dict2)
class PrioritizationMetricsTest(unittest.TestCase, CommonMethods):
"""Test the operation of the 'calc_savings_metrics' function.
Verify that measure master microsegment inputs yield expected savings
and financial metrics outputs.
Attributes:
handyvars (object): Useful variables across the class.
sample_measure_res (object): Sample residential measure data.
sample_measure_com (object): Sample commercial measure data.
test_adopt_scheme (string): Sample consumer adoption scheme.
ok_rate (float): Sample discount rate.
ok_master_mseg_point (dict): Sample measure master microsegment
including all point values at terminal leaf nodes.
ok_master_mseg_dist1 (dict): Sample measure master microsegment
including energy, carbon, and energy/carbon cost arrays.
ok_master_mseg_dist2 (dict): Sample measure master microsegment
including stock cost array.
ok_master_mseg_dist3 (dict): Sample measure master microsegment
including measure lifetime array.
ok_master_mseg_dist4 (dict): Sample measure master microsegment
including stock cost and measure lifetime array.
ok_out_point_res (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_point' with a residential sample
measure.
ok_out_point_com (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_point' with a residential sample
measure.
ok_out_dist1 (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_dist1' with a residential sample
measure.
ok_out_dist2 (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_dist2' with a residential sample
measure.
ok_out_dist3 (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_dist3' with a residential sample
measure.
ok_out_dist4 (dict): Measure attribute update status, savings,
and portfolio/consumer-level financial metrics that should be
generated given 'ok_master_mseg_dist4' with a residential sample
measure.
"""
@classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
# Reset aeo_years
cls.handyvars.aeo_years = ["2009", "2010"]
cls.sample_measure_res = CommonTestMeasures().sample_measure4
cls.sample_measure_com = CommonTestMeasures().sample_measure5
cls.test_adopt_scheme = 'Max adoption potential'
cls.ok_rate = 0.07
cls.ok_master_mseg_point = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 0, "2010": 50}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 50, "2010": 100}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 2}}
cls.ok_master_mseg_dist1 = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {
"2009": numpy.array([16, 27, 31, 6, 51]),
"2010": numpy.array([106, 95, 81, 11, 124])}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {
"2009": numpy.array([6, 7, 1, 16, 1]),
"2010": numpy.array([36, 45, 61, 5, 54])}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {
"2009": numpy.array([50.6, 57.7, 58.1, 50, 51.1]),
"2010": numpy.array(
[100.6, 108.7, 105.1, 105, 106.1])}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {
"2009": numpy.array([50.6, 57.7, 58.1, 50, 51.1]),
"2010": numpy.array(
[100.6, 108.7, 105.1, 105, 106.1])}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {
"2009": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]),
"2010": numpy.array(
[20.1, 18.7, 21.7, 21.2, 22.5])}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {
"2009": numpy.array([9.1, 8.7, 7.7, 11.2, 12.5]),
"2010": numpy.array(
[20.1, 18.7, 21.7, 21.2, 22.5])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {
"2009": numpy.array(
[25.1, 24.7, 23.7, 31.2, 18.5]),
"2010": numpy.array(
[20.1, 18.7, 21.7, 21.2, 22.5])}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {
"2009": numpy.array(
[25.1, 24.7, 23.7, 31.2, 18.5]),
"2010": numpy.array(
[20.1, 18.7, 21.7, 21.2, 22.5])}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 2}}
cls.ok_master_mseg_dist2 = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 0, "2010": 50}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 50, "2010": 100}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {
"2009": numpy.array(
[15.1, 12.7, 14.1, 14.2, 15.5]),
"2010": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])
}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {
"2009": numpy.array(
[15.1, 12.7, 14.1, 14.2, 15.5]),
"2010": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])
}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 2}}
cls.ok_master_mseg_dist3 = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 0, "2010": 50}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 50, "2010": 100}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {"2009": 15, "2010": 25}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}}
cls.ok_master_mseg_dist4 = {
"stock": {
"total": {
"all": {"2009": 10, "2010": 20},
"measure": {"2009": 15, "2010": 25}},
"competed": {
"all": {"2009": 5, "2010": 10},
"measure": {"2009": 5, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 0, "2010": 50}}},
"carbon": {
"total": {
"baseline": {"2009": 200, "2010": 300},
"efficient": {"2009": 50, "2010": 100}},
"competed": {
"baseline": {"2009": 100, "2010": 150},
"efficient": {"2009": 50, "2010": 100}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {
"2009": numpy.array(
[15.1, 12.7, 14.1, 14.2, 15.5]),
"2010": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])
}},
"competed": {
"baseline": {"2009": 10, "2010": 15},
"efficient": {
"2009": numpy.array(
[15.1, 12.7, 14.1, 14.2, 15.5]),
"2010": numpy.array([20.1, 18.7, 21.7, 19.2, 20.5])
}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}},
"competed": {
"baseline": {"2009": 20, "2010": 35},
"efficient": {"2009": 10, "2010": 20}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}},
"competed": {
"baseline": {"2009": 30, "2010": 40},
"efficient": {"2009": 25, "2010": 25}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": numpy.array([0.5, 1.2, 2.1, 2.2, 4.6])}}
cls.ok_out_point_res = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {"2009": -5, "2010": -10},
"cost savings (annual)": {"2009": -5, "2010": -10}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {"2009": -0.01602415, "2010": -0.01111353},
"cce (w/ carbon cost benefits)": {
"2009": -0.04935749, "2010": -0.08611353},
"ccc": {"2009": -1.602415e-08, "2010": -1.111353e-08},
"ccc (w/ energy cost benefits)": {
"2009": -8.269082e-08, "2010": -8.611353e-08}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.pmt(0.07, 2, 0.4345794),
"2010": numpy.pmt(0.07, 2, 0.2009346)},
"commercial": {"2009": None, "2010": None}},
"energy cost": {
"residential": {
"2009": numpy.pmt(0.07, 2, 1.808018),
"2010": numpy.pmt(0.07, 2, 1.356014)},
"commercial": {"2009": None, "2010": None}},
"carbon cost": {
"residential": {
"2009": numpy.pmt(0.07, 2, 0.9040091),
"2010": numpy.pmt(0.07, 2, 1.356014)},
"commercial": {"2009": None, "2010": None}}},
"irr (w/ energy costs)": {
"2009": 3.45, "2010": 2.44},
"irr (w/ energy and carbon costs)": {
"2009": 4.54, "2010": 4.09},
"payback (w/ energy costs)": {
"2009": 0.25, "2010": 0.33},
"payback (w/ energy and carbon costs)": {
"2009": 0.2, "2010": 0.22}}]
cls.ok_out_point_com = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {"2009": -5, "2010": -10},
"cost savings (annual)": {"2009": -5, "2010": -10}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {"2009": -0.01602415, "2010": -0.01111353},
"cce (w/ carbon cost benefits)": {
"2009": -0.04935749, "2010": -0.08611353},
"ccc": {"2009": -1.602415e-08, "2010": -1.111353e-08},
"ccc (w/ energy cost benefits)": {
"2009": -8.269082e-08, "2010": -8.611353e-08}},
{
"anpv": {
"stock cost": {
"residential": {"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": numpy.pmt(10.0, 2, -0.4090909),
"rate 2": numpy.pmt(1.0, 2, 0),
"rate 3": numpy.pmt(0.45, 2, 0.1896552),
"rate 4": numpy.pmt(0.25, 2, 0.3),
"rate 5": numpy.pmt(0.15, 2, 0.3695652),
"rate 6": numpy.pmt(0.065, 2, 0.4389671),
"rate 7": -0.25},
"2010": {
"rate 1": numpy.pmt(10.0, 2, -0.4318182),
"rate 2": numpy.pmt(1.0, 2, -0.125),
"rate 3": numpy.pmt(0.45, 2, 0.01724138),
"rate 4": numpy.pmt(0.25, 2, 0.1),
"rate 5": numpy.pmt(0.15, 2, 0.1521739),
"rate 6": numpy.pmt(0.065, 2, 0.2042254),
"rate 7": -0.125}}},
"energy cost": {
"residential": {"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": numpy.pmt(10.0, 2, 0.09917355),
"rate 2": numpy.pmt(1.0, 2, 0.75),
"rate 3": numpy.pmt(0.45, 2, 1.165279),
"rate 4": numpy.pmt(0.25, 2, 1.44),
"rate 5": numpy.pmt(0.15, 2, 1.625709),
"rate 6": numpy.pmt(0.065, 2, 1.820626),
"rate 7": -1},
"2010": {
"rate 1": numpy.pmt(10.0, 2, 0.07438017),
"rate 2": numpy.pmt(1.0, 2, 0.5625),
"rate 3": numpy.pmt(0.45, 2, 0.8739596),
"rate 4": numpy.pmt(0.25, 2, 1.08),
"rate 5": numpy.pmt(0.15, 2, 1.219282),
"rate 6": numpy.pmt(0.065, 2, 1.36547),
"rate 7": -0.75}}},
"carbon cost": {
"residential": {"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": numpy.pmt(10.0, 2, 0.04958678),
"rate 2": numpy.pmt(1.0, 2, 0.375),
"rate 3": numpy.pmt(0.45, 2, 0.5826397),
"rate 4": numpy.pmt(0.25, 2, 0.72),
"rate 5": numpy.pmt(0.15, 2, 0.8128544),
"rate 6": numpy.pmt(0.065, 2, 0.9103132),
"rate 7": -0.5},
"2010": {
"rate 1": numpy.pmt(10.0, 2, 0.07438017),
"rate 2": numpy.pmt(1.0, 2, 0.5625),
"rate 3": numpy.pmt(0.45, 2, 0.8739596),
"rate 4": numpy.pmt(0.25, 2, 1.08),
"rate 5": numpy.pmt(0.15, 2, 1.219282),
"rate 6": numpy.pmt(0.065, 2, 1.36547),
"rate 7": -0.75}}}},
"irr (w/ energy costs)": {
"2009": 3.45, "2010": 2.44},
"irr (w/ energy and carbon costs)": {
"2009": 4.54, "2010": 4.09},
"payback (w/ energy costs)": {
"2009": 0.25, "2010": 0.33},
"payback (w/ energy and carbon costs)": {
"2009": 0.2, "2010": 0.22}}]
cls.ok_out_dist1 = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {"2009": -5, "2010": -10},
"cost savings (annual)": {"2009": -5, "2010": -10}},
"energy": {
"savings (total)": {
"2009": numpy.array([184, 173, 169, 194, 149]),
"2010": numpy.array([194, 205, 219, 289, 176])},
"savings (annual)": {
"2009": numpy.array([94, 93, 99, 84, 99]),
"2010": numpy.array([114, 105, 89, 145, 96])},
"cost savings (total)": {
"2009": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]),
"2010": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])},
"cost savings (annual)": {
"2009": numpy.array([10.9, 11.3, 12.3, 8.8, 7.5]),
"2010": numpy.array([14.9, 16.3, 13.3, 13.8, 12.5])}},
"carbon": {
"savings (total)": {
"2009": numpy.array([149.4, 142.3, 141.9, 150.0, 148.9]),
"2010": numpy.array([199.4, 191.3, 194.9, 195.0, 193.9])},
"savings (annual)": {
"2009": numpy.array([49.4, 42.3, 41.9, 50.0, 48.9]),
"2010": numpy.array([49.4, 41.3, 44.9, 45.0, 43.9])},
"cost savings (total)": {
"2009": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]),
"2010": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])},
"cost savings (annual)": {
"2009": numpy.array([4.9, 5.3, 6.3, -1.2, 11.5]),
"2010": numpy.array([19.9, 21.3, 18.3, 18.8, 17.5])}}},
{
"cce": {
"2009": numpy.array([
-0.01306317, -0.01389378, -0.01422262,
-0.01238981, -0.01613170]),
"2010": numpy.array([
-0.01145724, -0.01084246, -0.01014934,
-0.007691022, -0.01262901])},
"cce (w/ carbon cost benefits)": {
"2009": numpy.array([
-0.0396936, -0.04452961, -0.05150073,
-0.006204243, -0.09331291]),
"2010": numpy.array([
-0.1140346, -0.11474490, -0.09371098,
-0.072742925, -0.11206083])},
"ccc": {
"2009": numpy.array([
-1.608851e-08, -1.689124e-08, -1.693885e-08,
-1.602415e-08, -1.614253e-08]),
"2010": numpy.array([
-1.114697e-08, -1.161895e-08, -1.140434e-08,
-1.139849e-08, -1.146315e-08])},
"ccc (w/ energy cost benefits)": {
"2009": numpy.array([
-8.904701e-08, -9.630094e-08, -1.036196e-07,
-7.469082e-08, -6.651191e-08]),
"2010": numpy.array([
-8.587114e-08, -9.682543e-08, -7.964446e-08,
-8.216772e-08, -7.592937e-08])}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346)])
},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)
}},
"energy cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 1.97074),
numpy.pmt(0.07, 2, 2.043061),
numpy.pmt(0.07, 2, 2.223862),
numpy.pmt(0.07, 2, 1.591056),
numpy.pmt(0.07, 2, 1.356014)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 1.346974),
numpy.pmt(0.07, 2, 1.473535),
numpy.pmt(0.07, 2, 1.202332),
numpy.pmt(0.07, 2, 1.247533),
numpy.pmt(0.07, 2, 1.130011)])
},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)
}},
"carbon cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 0.8859289),
numpy.pmt(0.07, 2, 0.9582496),
numpy.pmt(0.07, 2, 1.139051),
numpy.pmt(0.07, 2, -0.2169622),
numpy.pmt(0.07, 2, 2.079221)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 1.798978),
numpy.pmt(0.07, 2, 1.925539),
numpy.pmt(0.07, 2, 1.654337),
numpy.pmt(0.07, 2, 1.699537),
numpy.pmt(0.07, 2, 1.582016)])
},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)
}}},
"irr (w/ energy costs)": {
"2009": numpy.array([
3.648926, 3.737086, 3.956335, 3.180956, 2.886001]),
"2010": numpy.array([
2.425032, 2.584709, 2.240438, 2.298386, 2.147181])},
"irr (w/ energy and carbon costs)": {
"2009": numpy.array([
4.713113, 4.884221, 5.309580, 2.908860, 5.394281]),
"2010": numpy.array([
4.601286, 4.897553, 4.260683, 4.367373, 4.089454])},
"payback (w/ energy costs)": {
"2009": numpy.array([
0.2392344, 0.2347418, 0.2242152, 0.2659574,
0.2857143]),
"2010": numpy.array([
0.3344482, 0.3194888, 0.3533569, 0.3472222,
0.3636364])},
"payback (w/ energy and carbon costs)": {
"2009": numpy.array([
0.1937984, 0.1879699, 0.1748252, 0.2840909,
0.1724138]),
"2010": numpy.array([
0.2008032, 0.1901141, 0.2145923, 0.2100840,
0.2222222])}}]
cls.ok_out_dist2 = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {
"2009": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),
"2010": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])},
"cost savings (annual)": {
"2009": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),
"2010": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {
"2009": numpy.array([
-0.01565543, -0.02450490, -0.01934271, -0.01897398,
-0.01418052]),
"2010": numpy.array([
-0.02466428, -0.02853592, -0.02023954, -0.02715319,
-0.02355809])},
"cce (w/ carbon cost benefits)": {
"2009": numpy.array([
-0.04898876, -0.05783823, -0.05267604,
-0.05230731, -0.04751385]),
"2010": numpy.array([
-0.09966428, -0.10353592, -0.09523954, -0.10215319,
-0.09855809])},
"ccc": {
"2009": numpy.array([
-1.565543e-08, -2.450490e-08, -1.934271e-08,
-1.897398e-08, -1.418052e-08]),
"2010": numpy.array([
-2.466428e-08, -2.853592e-08, -2.023954e-08,
-2.715319e-08, -2.355809e-08])},
"ccc (w/ energy cost benefits)": {
"2009": numpy.array([
-8.232209e-08, -9.117156e-08, -8.600937e-08,
-8.564064e-08, -8.084718e-08]),
"2010": numpy.array([
-9.966428e-08, -1.035359e-07, -9.523954e-08, -1.021532e-07,
-9.855809e-08])}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 0.4245794),
numpy.pmt(0.07, 2, 0.6645794),
numpy.pmt(0.07, 2, 0.5245794),
numpy.pmt(0.07, 2, 0.5145794),
numpy.pmt(0.07, 2, 0.3845794)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 0.4459346),
numpy.pmt(0.07, 2, 0.5159346),
numpy.pmt(0.07, 2, 0.3659346),
numpy.pmt(0.07, 2, 0.4909346),
numpy.pmt(0.07, 2, 0.4259346)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"energy cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"carbon cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091)]),
"2010": numpy.array([
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}}},
"irr (w/ energy costs)":
{"2009": numpy.array([
3.370236, 6.877566, 4.335205, 4.218185, 3.081800]),
"2010": numpy.array([
5.345834, 7.580577, 3.931585, 6.612039, 4.915578])},
"irr (w/ energy and carbon costs)":
{"2009": numpy.array([
4.442382, 8.824726, 5.647891, 5.501689, 4.082098]),
"2010": numpy.array([
8.446248, 11.795815, 6.327488, 10.343948, 7.801544])},
"payback (w/ energy costs)":
{"2009": numpy.array([
0.255, 0.1350000, 0.2050000, 0.21, 0.2750000]),
"2010": numpy.array([
0.1700000, 0.1233333, 0.2233333, 0.1400000, 0.1833333])},
"payback (w/ energy and carbon costs)":
{"2009": numpy.array([
0.2040000, 0.10800000, 0.1640000, 0.16800000, 0.2200000]),
"2010": numpy.array([
0.1133333, 0.08222222, 0.1488889, 0.09333333,
0.1222222])}}]
cls.ok_out_dist3 = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {"2009": -5, "2010": -10},
"cost savings (annual)": {"2009": -5, "2010": -10}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {
"2009": numpy.array([
0.03566667, 0.03566667, -0.01602415,
-0.01602415, -0.04694426]),
"2010": numpy.array([
0.05350000, 0.05350000, -0.01111353,
-0.01111353, -0.04976366])},
"cce (w/ carbon cost benefits)": {
"2009": numpy.array([
0.002333333, 0.002333333, -0.04935749,
-0.04935749, -0.0802776]),
"2010": numpy.array([
-0.021500000, -0.021500000, -0.08611353,
-0.08611353, -0.1247637])},
"ccc": {
"2009": numpy.array([
3.566667e-08, 3.566667e-08, -1.602415e-08,
-1.602415e-08, -4.694426e-08]),
"2010": numpy.array([
5.350000e-08, 5.350000e-08, -1.111353e-08,
-1.111353e-08, -4.976366e-08])},
"ccc (w/ energy cost benefits)": {
"2009": numpy.array([
-3.10e-08, -3.10e-08, -8.269082e-08,
-8.269082e-08, -1.136109e-07]),
"2010": numpy.array([
-2.15e-08, -2.15e-08, -8.611353e-08,
-8.611353e-08, -1.247637e-07])}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, -0.5),
numpy.pmt(0.07, 1, -0.5),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 2, 0.4345794),
numpy.pmt(0.07, 5, 2.887211)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, -0.5),
numpy.pmt(0.07, 1, -0.5),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 2, 0.2009346),
numpy.pmt(0.07, 5, 2.040408)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"energy cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, 0.9345794),
numpy.pmt(0.07, 1, 0.9345794),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 5, 4.100197)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 5, 3.075148)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"carbon cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, 0.4672897),
numpy.pmt(0.07, 1, 0.4672897),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 5, 2.050099)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 5, 3.075148)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}}},
"irr (w/ energy costs)":
{"2009": numpy.array([1.00, 1.00, 3.45, 3.45, 4.00]),
"2010": numpy.array([0.50, 0.50, 2.44, 2.44, 2.99])},
"irr (w/ energy and carbon costs)":
{"2009": numpy.array([2.00, 2.00, 4.54, 4.54, 5.00]),
"2010": numpy.array([2.00, 2.00, 4.09, 4.09, 4.50])},
"payback (w/ energy costs)":
{"2009": numpy.array([0.50, 0.50, 0.25, 0.25, 0.25]),
"2010": numpy.array([0.67, 0.67, 0.33, 0.33, 0.33])},
"payback (w/ energy and carbon costs)":
{"2009": numpy.array([0.33, 0.33, 0.20, 0.20, 0.20]),
"2010": numpy.array([0.33, 0.33, 0.22, 0.22, 0.22])}}]
cls.ok_out_dist4 = [{
"savings and portfolio metrics": {
"Technical potential": {
"uncompeted": True, "competed": True},
"Max adoption potential": {
"uncompeted": False, "competed": True}},
"consumer metrics": False},
{
"stock": {
"cost savings (total)": {
"2009": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),
"2010": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])},
"cost savings (annual)": {
"2009": numpy.array([-5.1, -2.7, -4.1, -4.2, -5.5]),
"2010": numpy.array([-5.1, -3.7, -6.7, -4.2, -5.5])}},
"energy": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 100, "2010": 100},
"cost savings (total)": {"2009": 10, "2010": 15},
"cost savings (annual)": {"2009": 10, "2010": 15}},
"carbon": {
"savings (total)": {"2009": 150, "2010": 200},
"savings (annual)": {"2009": 50, "2010": 50},
"cost savings (total)": {"2009": 5, "2010": 15},
"cost savings (annual)": {"2009": 5, "2010": 15}}},
{
"cce": {
"2009": numpy.array([
0.036380, 0.019260, -0.01934271,
-0.01897398, -0.04613129]),
"2010": numpy.array([
0.027285, 0.019795, -0.02023954,
-0.02715319, -0.05525120])},
"cce (w/ carbon cost benefits)": {
"2009": numpy.array([
0.003046667, -0.01407333, -0.05267604,
-0.05230731, -0.07946463]),
"2010": numpy.array([
-0.047715000, -0.05520500, -0.09523954,
-0.10215319, -0.13025120])},
"ccc": {
"2009": numpy.array([
3.6380e-08, 1.9260e-08, -1.934271e-08,
-1.897398e-08, -4.613129e-08]),
"2010": numpy.array([
2.7285e-08, 1.9795e-08, -2.023954e-08,
-2.715319e-08, -5.525120e-08])},
"ccc (w/ energy cost benefits)": {
"2009": numpy.array([
-3.028667e-08, -4.740667e-08, -8.600937e-08,
-8.564064e-08, -1.127980e-07]),
"2010": numpy.array([
-4.771500e-08, -5.520500e-08, -9.523954e-08,
-1.021532e-07, -1.302512e-07])}},
{
"anpv": {
"stock cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, -0.51),
numpy.pmt(0.07, 1, -0.27),
numpy.pmt(0.07, 2, 0.5245794),
numpy.pmt(0.07, 2, 0.5145794),
numpy.pmt(0.07, 5, 2.837211)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, -0.255),
numpy.pmt(0.07, 1, -0.185),
numpy.pmt(0.07, 2, 0.3659346),
numpy.pmt(0.07, 2, 0.4909346),
numpy.pmt(0.07, 5, 2.265408)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"energy cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, 0.9345794),
numpy.pmt(0.07, 1, 0.9345794),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 2, 1.808018),
numpy.pmt(0.07, 5, 4.100197)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 5, 3.075148)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}},
"carbon cost": {
"residential": {
"2009": numpy.array([
numpy.pmt(0.07, 1, 0.4672897),
numpy.pmt(0.07, 1, 0.4672897),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 2, 0.9040091),
numpy.pmt(0.07, 5, 2.050099)]),
"2010": numpy.array([
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 1, 0.7009346),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 2, 1.356014),
numpy.pmt(0.07, 5, 3.075148)])},
"commercial": {
"2009": numpy.repeat(None, 5),
"2010": numpy.repeat(None, 5)}}},
"irr (w/ energy costs)":
{"2009": numpy.array([
0.9607843, 2.703704, 4.335205, 4.218185, 3.631559]),
"2010": numpy.array([
1.9411765, 3.054054, 3.931585, 6.612039, 5.452729])},
"irr (w/ energy and carbon costs)":
{"2009": numpy.array([
1.941176, 4.555556, 5.647891, 5.501689, 4.543007]),
"2010": numpy.array([
4.882353, 7.108108, 6.327488, 10.343948, 8.181351])},
"payback (w/ energy costs)":
{"2009": numpy.array([
0.51, 0.2700000, 0.2050000, 0.21, 0.2750000]),
"2010": numpy.array([
0.34, 0.2466667, 0.2233333, 0.14, 0.1833333])},
"payback (w/ energy and carbon costs)":
{"2009": numpy.array([
0.34, 0.1800000, 0.1640000, 0.16800000, 0.2200000]),
"2010": numpy.array([
0.17, 0.1233333, 0.1488889, 0.09333333, 0.1222222])}}]
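        # Expected competition scheme keys on each measure's markets,
        # savings, and portfolio metrics dicts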
cls.ok_savings_mkts_comp_schemes = ["competed", "uncompeted"]

    def test_metrics_ok_point_res(self):
"""Test output given residential measure with point value inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_point'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_point
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# For first test case, verify correct adoption/competition scenario
# keys for measure markets/savings/portfolio metrics
for adopt_scheme in self.handyvars.adopt_schemes:
# Markets
self.assertEqual(list(sorted(
engine_instance.measures[0].markets[adopt_scheme].keys())),
self.ok_savings_mkts_comp_schemes)
# Savings
self.assertEqual(list(sorted(
engine_instance.measures[0].savings[adopt_scheme].keys())),
self.ok_savings_mkts_comp_schemes)
# Portfolio metrics
self.assertEqual(list(sorted(engine_instance.measures[
0].portfolio_metrics[adopt_scheme].keys())),
self.ok_savings_mkts_comp_schemes)
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_point_res[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_point_res[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_point_res[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_point_res[3])

    def test_metrics_ok_point_com(self):
"""Test output given commercial measure with point value inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_point'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_com)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_point
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_point_com[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_point_com[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_point_com[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_point_com[3])

    def test_metrics_ok_distrib1(self):
"""Test output given residential measure with array inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_dist1'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_dist1
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_dist1[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist1[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist1[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_dist1[3])

    def test_metrics_ok_distrib2(self):
"""Test output given residential measure with array inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_dist2'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_dist2
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_dist2[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist2[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist2[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_dist2[3])

    def test_metrics_ok_distrib3(self):
"""Test output given residential measure with array inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_dist3'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_dist3
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_dist3[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist3[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist3[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_dist3[3])

    def test_metrics_ok_distrib4(self):
"""Test output given residential measure with array inputs."""
# Initialize test measure and assign it a sample 'uncompeted'
# market ('ok_master_mseg_dist4'), the focus of this test suite
test_meas = run.Measure(self.handyvars, **self.sample_measure_res)
test_meas.markets[self.test_adopt_scheme]["uncompeted"][
"master_mseg"] = self.ok_master_mseg_dist4
# Create Engine instance using test measure, run function on it
engine_instance = run.Engine(self.handyvars, [test_meas])
engine_instance.calc_savings_metrics(
self.test_adopt_scheme, "uncompeted")
# Verify test measure results update status
self.dict_check(engine_instance.measures[
0].update_results, self.ok_out_dist4[0])
# Verify test measure savings
self.dict_check(engine_instance.measures[0].savings[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist4[1])
# Verify test measure portfolio-level financial metrics
self.dict_check(engine_instance.measures[0].portfolio_metrics[
self.test_adopt_scheme]["uncompeted"], self.ok_out_dist4[2])
# Verify test measure consumer-level metrics
self.dict_check(engine_instance.measures[
0].consumer_metrics, self.ok_out_dist4[3])


class MetricUpdateTest(unittest.TestCase, CommonMethods):
"""Test the operation of the 'metrics_update' function.
Verify that cashflow inputs generate expected prioritization metric
outputs.
Attributes:
handyvars (object): Useful variables across the class.
measure_list (list): List for Engine including one sample
residential measure.
ok_num_units (int): Sample number of competed units.
ok_base_life (int): Sample baseline technology lifetime.
ok_product_lifetime (float): Sample measure lifetime.
ok_life_ratio (int): Sample measure->baseline lifetime ratio.
ok_base_scost (int): Sample baseline stock cost.
ok_scostsave (int): Sample baseline->measure stock cost delta.
ok_esave (int): Sample measure energy savings.
ok_ecostsave (int): Sample measure energy cost savings.
ok_csave (int): Sample measure avoided carbon emissions.
ok_ccostsave (int): Sample measure avoided carbon costs.
ok_out_dicts (list): Output annuity equivalent Net Present Value
dicts that should be generated given valid sample inputs.
ok_out_array (list): Other financial metric values that should
be generated given valid sample inputs.
"""

    @classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
sample_measure = CommonTestMeasures().sample_measure4
cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)]
cls.ok_base_life = 3
cls.ok_product_lifetime = 6.2
cls.ok_life_ratio = 2
cls.ok_base_scost = 1
cls.ok_meas_sdelt = -1
cls.ok_esave = 7.5
cls.ok_ecostsave = 0.5
cls.ok_csave = 50
cls.ok_ccostsave = 1
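        # Expected 'metric_update' outputs, in order: residential ANPV
        # values for stock, energy, and carbon cost (commercial ANPV
        # values are None for this residential measure), followed by IRR,
        # payback, and cost of conserved energy/carbon values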
cls.ok_out_array = [
numpy.pmt(0.07, 6, -0.1837021),
numpy.pmt(0.07, 6, 2.38327), numpy.pmt(0.07, 6, 4.76654),
None, None, None, 0.62, 1.59, 2, 0.67, 0.005,
-0.13, 7.7e-10, -9.2e-9]

    def test_metric_updates(self):
"""Test for correct outputs given valid inputs."""
# Create an Engine instance using sample_measure list
engine_instance = run.Engine(self.handyvars, self.measure_list)
# Record the output for the test run of the 'metric_update'
# function
function_output = engine_instance.metric_update(
self.measure_list[0], self.ok_base_life,
int(self.ok_product_lifetime), self.ok_base_scost,
self.ok_meas_sdelt, self.ok_esave, self.ok_ecostsave,
self.ok_csave, self.ok_ccostsave)
# Test that valid inputs yield correct anpv, irr, payback, and
# cost of conserved energy/carbon outputs
for ind, x in enumerate(self.ok_out_array):
if x is not None:
self.assertAlmostEqual(function_output[ind], x, places=2)
else:
self.assertEqual(function_output[ind], x)


class PaybackTest(unittest.TestCase):
"""Test the operation of the 'payback' function.
Verify cashflow input generates expected payback output.
Attributes:
handyvars (object): Useful variables across the class.
measure_list (list): List for Engine including one sample
residential measure.
ok_cashflows (list): Set of sample input cash flows.
ok_out (list): Outputs that should be generated for each
set of sample cash flows.
"""

    @classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
sample_measure = CommonTestMeasures().sample_measure
cls.measure_list = [run.Measure(cls.handyvars, **sample_measure)]
cls.ok_cashflows = [[-10, 1, 1, 1, 1, 5, 7, 8], [-10, 14, 2, 3, 4],
[-10, 0, 1, 2], [10, 4, 7, 8, 10], [-100, 0, 1]]
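        # Expected paybacks (in years) for each sample cash flow; 999
        # flags a cash flow that never pays back the initial investment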
cls.ok_out = [5.14, 0.71, 6.5, 0, 999]

    def test_cashflow_paybacks(self):
"""Test for correct outputs given valid inputs."""
# Create an Engine instance using sample_measure list
engine_instance = run.Engine(self.handyvars, self.measure_list)
# Test that valid input cashflows yield correct output payback values
for idx, cf in enumerate(self.ok_cashflows):
self.assertAlmostEqual(engine_instance.payback(cf),
self.ok_out[idx], places=2)


class ResCompeteTest(unittest.TestCase, CommonMethods):
"""Test 'compete_res_primary,' and 'htcl_adj'.
Verify that 'compete_res_primary' correctly calculates primary market
shares and updates master microsegments for a series of competing
residential measures; and that 'htcl_adj' properly accounts for
    heating and cooling supply-demand overlaps.

    Attributes:
handyvars (object): Useful variables across the class.
test_adopt_scheme (string): Sample consumer adoption scheme.
test_htcl_adj (dict): Sample dict with supply-demand overlap data.
        adjust_key1 (string): Sample key chain for the competed demand-side
            cooling market microsegment being tested.
        adjust_key2 (string): Sample key chain for the competed supply-side
            cooling market microsegment being tested.
compete_meas1 (dict): Sample residential demand-side cooling measure 1.
compete_meas1_dist (dict): Alternative version of sample residential
demand-side cooling measure 1 including lists of energy/carbon and
associated cost input values instead of point values.
compete_meas2 (dict): Sample residential demand-side cooling measure 2.
compete_meas3 (dict): Sample residential supply-side cooling measure 1.
compete_meas3_dist (dict): Alternative version of sample residential
supply-side cooling measure 1 including lists of stock cost input
values instead of point values.
compete_meas4 (dict): Sample residential supply-side cooling measure 2.
compete_meas5 (dict): Sample residential supply-side cooling measure 3.
measures_all (list): List of all competing/interacting sample Measure
objects with point value inputs.
measures_demand (list): Demand-side subset of 'measures_all'.
measures_supply (list): Supply-side subset of 'measures_all'.
measures_overlap1 (dict): List of supply-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_demand' Measure objects.
measures_overlap2 (dict): List of demand-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_supply' Measure objects.
a_run (object): Analysis engine object incorporating all
'measures_all' objects.
measures_all_dist (list): List including competing/interacting sample
Measure objects with array inputs.
measures_demand_dist (list): Demand-side subset of 'measures_all_dist'.
measures_supply_dist (list): Supply-side subset of 'measures_all_dist'.
measures_overlap1_dist (dict): List of supply-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_demand_dist' Measure objects.
measures_overlap2_dist (dict): List of demand-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_supply_dist' Measure objects.
a_run_dist (object): Engine object incorporating all
'measures_all_dist' objects.
measure_master_msegs_out (dict): Master market microsegments
that should be generated for each Measure object in 'measures_all'
following competition and supply-demand overlap adjustments.
measure_master_msegs_out_dist (dict): Master market microsegments
that should be generated for each Measure object in
'measures_all_dist' following competition and supply-demand overlap
adjustments.
"""

    @classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
cls.handyvars.aeo_years = ["2009", "2010"]
cls.handyvars.retro_rate = 0
cls.test_adopt_scheme = "Max adoption potential"
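        # Key chains for the competed demand-side (windows) and supply-side
        # (ASHP) residential cooling market microsegments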
cls.adjust_key1 = str(
('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',
'cooling', 'demand', 'windows', 'existing'))
cls.adjust_key2 = str(
('primary', 'AIA_CZ1', 'single family home', 'electricity (grid)',
'cooling', 'supply', 'ASHP', 'existing'))
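        # Sample supply-demand overlap data: total energy, total affected
        # energy, and affected savings by year for the overlapping supply-
        # and demand-side markets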
cls.test_htcl_adj = {
"supply": {
"['AIA_CZ1', 'single family home', 'existing']": {
"total": {
yr: 10 for yr in cls.handyvars.aeo_years},
"total affected": {
yr: 5 for yr in cls.handyvars.aeo_years},
"affected savings": {
yr: 5 for yr in cls.handyvars.aeo_years}},
},
"demand": {
"['AIA_CZ1', 'single family home', 'existing']": {
"total": {
yr: 10 for yr in cls.handyvars.aeo_years},
"total affected": {
yr: 5 for yr in cls.handyvars.aeo_years},
"affected savings": {
yr: 5 for yr in cls.handyvars.aeo_years}},
}}
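        # Sample residential demand-side cooling measure (windows) with
        # point value market inputs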
cls.compete_meas1 = {
"name": "sample compete measure r1",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["windows"],
"technology_type": {"primary": "demand", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}}}}
cls.compete_meas1_dist = {
"name": "sample compete measure r1 dist",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["windows"],
"technology_type": {"primary": "demand", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array([15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array([20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array([15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array([20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array(
[15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array(
[20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array(
[15, 16, 17]),
"2010": numpy.array(
[15, 16, 17])}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array(
[20, 21, 22]),
"2010": numpy.array(
[20, 21, 22])}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}},
"supply-demand adjustment": {
"savings": {
cls.adjust_key1: {
"2009": 0, "2010": 0}},
"total": {
cls.adjust_key1: {
"2009": 100, "2010": 100}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array([15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array([20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array([15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array([20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array([15, 16, 17]),
"2010": numpy.array(
[15, 16, 17])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array([20, 21, 22]),
"2010": numpy.array(
[20, 21, 22])}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": numpy.array(
[15, 16, 17]),
"2010": numpy.array(
[15, 16, 17])}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": numpy.array(
[20, 21, 22]),
"2010": numpy.array(
[20, 21, 22])}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}},
"supply-demand adjustment": {
"savings": {
cls.adjust_key1: {
"2009": 0, "2010": 0}},
"total": {
cls.adjust_key1: {
"2009": 100, "2010": 100}}}},
"mseg_out_break": {}}}}
cls.compete_meas2 = {
"name": "sample compete measure r2",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["windows"],
"technology_type": {"primary": "demand", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key1: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key1: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}}}}
cls.compete_meas3 = {
"name": "sample compete measure r3",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["ASHP"],
"technology_type": {"primary": "supply", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}
},
"mseg_out_break": {}}}}
cls.compete_meas3_dist = {
"name": "sample compete measure r3 dist",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["ASHP"],
"technology_type": {"primary": "demand", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {
"2009": numpy.array([0, 1, 2]),
"2010": numpy.array([0, 1, 2])}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array(
[5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": numpy.array(
[0, 1, 2]),
"2010": numpy.array(
[0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}},
"supply-demand adjustment": {
"savings": {
cls.adjust_key2: {
"2009": 0, "2010": 0}},
"total": {
cls.adjust_key2: {
"2009": 100, "2010": 100}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array([5, 6, 7]),
"2010": numpy.array([5, 6, 7])}},
"competed": {
"baseline": {"2009": 5, "2010": 5},
"efficient": {
"2009": numpy.array([0, 1, 2]),
"2010": numpy.array([0, 1, 2])}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": numpy.array(
[5, 6, 7]),
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": numpy.array(
[0, 1, 2]),
"2010": numpy.array(
[0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}},
"supply-demand adjustment": {
"savings": {
cls.adjust_key2: {
"2009": 0, "2010": 0}},
"total": {
cls.adjust_key2: {
"2009": 100, "2010": 100}}}},
"mseg_out_break": {}}}}
cls.compete_meas4 = {
"name": "sample compete measure r4",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["ASHP"],
"technology_type": {"primary": "supply", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}},
"mseg_out_break": {}}}}
cls.compete_meas5 = {
"name": "sample compete measure r5",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["single family home"],
"end_use": {"primary": ["cooling"], "secondary": None},
"technology": ["ASHP"],
"technology_type": {"primary": "supply", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 30, "2010": 30}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 15, "2010": 15}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'multi family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 30, "2010": 30}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 15, "2010": 15}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.adjust_key2: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'multi family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.adjust_key2: {
"b1": {"2009": -0.95, "2010": -0.95},
"b2": {"2009": -0.10, "2010": -0.10}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {},
"original energy (competed and captured)": {},
"adjusted energy (total captured)": {},
"adjusted energy (competed and captured)": {}}}},
"mseg_out_break": {}}}}
cls.measures_all = [run.Measure(cls.handyvars, **x) for x in [
cls.compete_meas1, copy.deepcopy(cls.compete_meas2),
cls.compete_meas3, copy.deepcopy(cls.compete_meas4),
copy.deepcopy(cls.compete_meas5)]]
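# Group the point value measures by the type of technology they
# apply to (demand-side windows vs. supply-side ASHP)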
cls.measures_demand = cls.measures_all[0:2]
cls.measures_supply = cls.measures_all[2:5]
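# Record the contributing mseg key(s) on which each group of
# competing measures overlaps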
cls.measures_overlap1 = {
"measures": cls.measures_all[2:5],
"keys": [[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'supply', 'ASHP', 'existing'))],
[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'supply', 'ASHP', 'existing'))],
[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'supply', 'ASHP', 'existing'))]]}
cls.measures_overlap2 = {
"measures": cls.measures_all[0:2],
"keys": [[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'demand', 'windows', 'existing'))],
[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'demand', 'windows', 'existing'))]]}
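# Initialize an engine instance for the point value test measures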
cls.a_run = run.Engine(cls.handyvars, cls.measures_all)
# Set information needed to finalize point value test measure
# consumer metrics
consumer_metrics_final = [{
"stock cost": {
"residential": {
"2009": 95,
"2010": 95},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -150,
"2010": -150},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -150,
"2010": -50},
"commercial": {
"2009": None,
"2010": None}}},
{
"stock cost": {
"residential": {
"2009": 120,
"2010": 120},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -400,
"2010": -400},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -50,
"2010": -50},
"commercial": {
"2009": None,
"2010": None}}},
{
"stock cost": {
"residential": {
"2009": 95,
"2010": 95},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -150,
"2010": -150},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -150,
"2010": -50},
"commercial": {
"2009": None,
"2010": None}}},
{
"stock cost": {
"residential": {
"2009": 120,
"2010": 120},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -400,
"2010": -400},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -50,
"2010": -50},
"commercial": {
"2009": None,
"2010": None}}},
{
"stock cost": {
"residential": {
"2009": 100,
"2010": 100},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -200,
"2010": -200},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -100,
"2010": -100},
"commercial": {
"2009": None,
"2010": None}}}]
# Adjust/finalize point value test measure consumer metrics
for ind, m in enumerate(cls.a_run.measures):
m.consumer_metrics['anpv'] = consumer_metrics_final[ind]
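# Instantiate Measure objects for the array (distribution) test
# run, substituting the array-valued variants of measures 1 and 3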
cls.measures_all_dist = [run.Measure(cls.handyvars, **x) for x in [
cls.compete_meas1_dist, copy.deepcopy(cls.compete_meas2),
cls.compete_meas3_dist, copy.deepcopy(cls.compete_meas4),
copy.deepcopy(cls.compete_meas5)]]
cls.measures_demand_dist = cls.measures_all_dist[0:2]
cls.measures_supply_dist = cls.measures_all_dist[2:5]
cls.supply_demand_adjust1_dist = cls.measures_all_dist[0:2]
cls.supply_demand_adjust2_dist = cls.measures_all_dist[2:5]
cls.measures_overlap1_dist = {
"measures": cls.measures_all_dist[2:5],
"keys": [[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'supply', 'ASHP', 'existing'))],
[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'supply', 'ASHP', 'existing'))],
[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'supply', 'ASHP', 'existing'))]]}
cls.measures_overlap2_dist = {
"measures": cls.measures_all_dist[0:2],
"keys": [[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'demand', 'windows', 'existing'))],
[str(('primary', 'AIA_CZ1', 'single family home',
'electricity (grid)',
'cooling', 'demand', 'windows', 'existing'))]]}
cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist)
# Set information needed to finalize array test measure consumer
# metrics
consumer_metrics_final_dist = [{
"stock cost": {
"residential": {
"2009": 95,
"2010": 95},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": numpy.array([-150, -200, -100]),
"2010": numpy.array([-150, -200, -100])},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": numpy.array([-150, -200, -100]),
"2010": numpy.array([-50, -100, -10])},
"commercial": {
"2009": None,
"2010": None}}},
{
"stock cost": {
"residential": {
"2009": 120,
"2010": 120},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -400,
"2010": -400},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -50,
"2010": -50},
"commercial": {
"2009": None,
"2010": None}}},
{
"stock cost": {
"residential": {
"2009": numpy.array([95, 100, 90]),
"2010": numpy.array([95, 100, 90])},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -150,
"2010": -150},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -150,
"2010": -50},
"commercial": {
"2009": None,
"2010": None}}},
{
"stock cost": {
"residential": {
"2009": 120,
"2010": 120},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -400,
"2010": -400},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -50,
"2010": -50},
"commercial": {
"2009": None,
"2010": None}}},
{
"stock cost": {
"residential": {
"2009": 100,
"2010": 100},
"commercial": {
"2009": None,
"2010": None}},
"energy cost": {
"residential": {
"2009": -200,
"2010": -200},
"commercial": {
"2009": None,
"2010": None}},
"carbon cost": {
"residential": {
"2009": -100,
"2010": -100},
"commercial": {
"2009": None,
"2010": None}}}]
# Adjust/finalize array test measure consumer metrics
for ind, m in enumerate(cls.a_run_dist.measures):
m.consumer_metrics['anpv'] = consumer_metrics_final_dist[ind]
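# Define the master microsegments expected for each of the five
# point value test measures after competition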
cls.measures_master_msegs_out = [{
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 2.23, "2010": 2.23}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 1.11, "2010": 1.11}}},
"energy": {
"total": {
"baseline": {"2009": 2.227001, "2010": 2.227001},
"efficient": {"2009": 1.670251, "2010": 1.670251}},
"competed": {
"baseline": {"2009": 1.113501, "2010": 1.113501},
"efficient": {"2009": 0.5567503, "2010": 0.5567503}}},
"carbon": {
"total": {
"baseline": {"2009": 3.340502, "2010": 3.340502},
"efficient": {"2009": 2.227001, "2010": 2.227001}},
"competed": {
"baseline": {"2009": 1.670251, "2010": 1.670251},
"efficient": {"2009": 0.5567503, "2010": 0.5567503}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 2.227001, "2010": 2.227001},
"efficient": {"2009": 1.113501, "2010": 1.113501}},
"competed": {
"baseline": {"2009": 1.113501, "2010": 1.113501},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 2.227001, "2010": 2.227001},
"efficient": {"2009": 1.670251, "2010": 1.670251}},
"competed": {
"baseline": {"2009": 1.113501, "2010": 1.113501},
"efficient": {"2009": 0.5567503, "2010": 0.5567503}}},
"carbon": {
"total": {
"baseline": {"2009": 3.340502, "2010": 3.340502},
"efficient": {"2009": 2.227001, "2010": 2.227001}},
"competed": {
"baseline": {"2009": 1.670251, "2010": 1.670251},
"efficient": {"2009": 0.5567503, "2010": 0.5567503}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 17.77, "2010": 17.77}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 8.89, "2010": 8.89}}},
"energy": {
"total": {
"baseline": {"2009": 27.77300, "2010": 27.77300},
"efficient": {"2009": 20.82975, "2010": 20.82975}},
"competed": {
"baseline": {"2009": 13.88650, "2010": 13.88650},
"efficient": {"2009": 6.943250, "2010": 6.943250}}},
"carbon": {
"total": {
"baseline": {"2009": 41.65950, "2010": 41.65950},
"efficient": {"2009": 27.77300, "2010": 27.77300}},
"competed": {
"baseline": {"2009": 20.82975, "2010": 20.82975},
"efficient": {"2009": 6.943250, "2010": 6.943250}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 17.77300, "2010": 17.77300},
"efficient": {"2009": 8.886499, "2010": 8.886499}},
"competed": {
"baseline": {"2009": 8.886499, "2010": 8.886499},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 27.77300, "2010": 27.77300},
"efficient": {"2009": 20.82975, "2010": 20.82975}},
"competed": {
"baseline": {"2009": 13.88650, "2010": 13.88650},
"efficient": {"2009": 6.943250, "2010": 6.943250}}},
"carbon": {
"total": {
"baseline": {"2009": 41.65950, "2010": 41.65950},
"efficient": {"2009": 27.77300, "2010": 27.77300}},
"competed": {
"baseline": {"2009": 20.82975, "2010": 20.82975},
"efficient": {"2009": 6.943250, "2010": 6.943250}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 1.73, "2010": 1.73}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0.87, "2010": 0.87}}},
"energy": {
"total": {
"baseline": {"2009": 1.73179114, "2010": 1.73179114},
"efficient": {"2009": 1.29884336, "2010": 1.29884336}},
"competed": {
"baseline": {"2009": 0.865895571, "2010": 0.865895571},
"efficient": {"2009": 0.432947785, "2010": 0.432947785}}},
"carbon": {
"total": {
"baseline": {"2009": 2.59768671, "2010": 2.59768671},
"efficient": {"2009": 1.73179114, "2010": 1.73179114}},
"competed": {
"baseline": {"2009": 1.29884336, "2010": 1.29884336},
"efficient": {"2009": 0.432947785, "2010": 0.432947785}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 1.73179114, "2010": 1.73179114},
"efficient": {
"2009": 0.865895571, "2010": 0.865895571}},
"competed": {
"baseline": {"2009": 0.865895571, "2010": 0.865895571},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 1.73179114, "2010": 1.73179114},
"efficient": {
"2009": 1.29884336, "2010": 1.29884336}},
"competed": {
"baseline": {
"2009": 0.865895571, "2010": 0.865895571},
"efficient": {
"2009": 0.432947785, "2010": 0.432947785}}},
"carbon": {
"total": {
"baseline": {
"2009": 2.59768671, "2010": 2.59768671},
"efficient": {
"2009": 1.73179114, "2010": 1.73179114}},
"competed": {
"baseline": {
"2009": 1.29884336, "2010": 1.29884336},
"efficient": {
"2009": 0.432947785, "2010": 0.432947785}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 16.04, "2010": 16.04}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 8.02, "2010": 8.02}}},
"energy": {
"total": {
"baseline": {"2009": 26.04455, "2010": 26.04455},
"efficient": {"2009": 19.53341, "2010": 19.53341}},
"competed": {
"baseline": {"2009": 13.02227, "2010": 13.02227},
"efficient": {"2009": 6.511136, "2010": 6.511136}}},
"carbon": {
"total": {
"baseline": {"2009": 39.06682, "2010": 39.06682},
"efficient": {"2009": 26.04455, "2010": 26.04455}},
"competed": {
"baseline": {"2009": 19.53341, "2010": 19.53341},
"efficient": {"2009": 6.511136, "2010": 6.511136}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 16.04455, "2010": 16.04455},
"efficient": {"2009": 8.022273, "2010": 8.022273}},
"competed": {
"baseline": {"2009": 8.022273, "2010": 8.022273},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 26.04455, "2010": 26.04455},
"efficient": {"2009": 19.53341, "2010": 19.53341}},
"competed": {
"baseline": {"2009": 13.02227, "2010": 13.02227},
"efficient": {"2009": 6.511136, "2010": 6.511136}}},
"carbon": {
"total": {
"baseline": {"2009": 39.06682, "2010": 39.06682},
"efficient": {"2009": 26.04455, "2010": 26.04455}},
"competed": {
"baseline": {"2009": 19.53341, "2010": 19.53341},
"efficient": {"2009": 6.511136, "2010": 6.511136}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 22.22, "2010": 22.22}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 11.11, "2010": 11.11}}},
"energy": {
"total": {
"baseline": {"2009": 42.22366, "2010": 42.22366},
"efficient": {"2009": 31.66775, "2010": 31.66775}},
"competed": {
"baseline": {"2009": 21.11183, "2010": 21.11183},
"efficient": {"2009": 10.55592, "2010": 10.55592}}},
"carbon": {
"total": {
"baseline": {"2009": 63.33550, "2010": 63.33550},
"efficient": {"2009": 42.22366, "2010": 42.22366}},
"competed": {
"baseline": {"2009": 31.66775, "2010": 31.66775},
"efficient": {"2009": 10.55592, "2010": 10.55592}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 22.22366, "2010": 22.22366},
"efficient": {"2009": 11.11183, "2010": 11.11183}},
"competed": {
"baseline": {"2009": 11.11183, "2010": 11.11183},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 42.22366, "2010": 42.22366},
"efficient": {"2009": 31.66775, "2010": 31.66775}},
"competed": {
"baseline": {"2009": 21.11183, "2010": 21.11183},
"efficient": {"2009": 10.55592, "2010": 10.55592}}},
"carbon": {
"total": {
"baseline": {"2009": 63.33550, "2010": 63.33550},
"efficient": {"2009": 42.22366, "2010": 42.22366}},
"competed": {
"baseline": {"2009": 31.66775, "2010": 31.66775},
"efficient": {"2009": 10.55592, "2010": 10.55592}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}}]
cls.measures_master_msegs_out_dist = [{
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": numpy.array([2.23, 9.77, 0.02]),
"2010": numpy.array([2.23, 9.77, 0.02])}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {
"2009": numpy.array([1.11, 4.89, 0.01]),
"2010": numpy.array([1.11, 4.89, 0.01])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
2.227001, 9.770226, 0.01926735]),
"2010": numpy.array([
2.227001, 9.770226, 0.01926735])},
"efficient": {
"2009": numpy.array([
1.670251, 7.816181, 0.01637724]),
"2010": numpy.array([
1.670251, 7.816181, 0.01637724])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.113501, 4.885113, 0.009633673]),
"2010": numpy.array([
1.113501, 4.885113, 0.009633673])},
"efficient": {
"2009": numpy.array([
0.5567503, 2.931068, 0.006743571]),
"2010": numpy.array([
0.5567503, 2.931068, 0.006743571])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
3.340502, 14.65534, 0.02890102]),
"2010": numpy.array([
3.340502, 14.65534, 0.02890102])},
"efficient": {
"2009": numpy.array([
2.227001, 10.25874, 0.02119408]),
"2010": numpy.array([
2.227001, 10.25874, 0.02119408])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.670251, 7.32767, 0.01445051]),
"2010": numpy.array([
1.670251, 7.32767, 0.01445051])},
"efficient": {
"2009": numpy.array([
0.5567503, 2.931068, 0.006743571]),
"2010": numpy.array([
0.5567503, 2.931068, 0.006743571])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
2.227001, 9.770226, 0.01926735]),
"2010": numpy.array([
2.227001, 9.770226, 0.01926735])},
"efficient": {
"2009": numpy.array([
1.113501, 4.885113, 0.009633673]),
"2010": numpy.array([
1.113501, 4.885113, 0.009633673])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.113501, 4.885113, 0.009633673]),
"2010": numpy.array([
1.113501, 4.885113, 0.009633673])},
"efficient": {
"2009": numpy.array([0, 0, 0]),
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
2.227001, 9.770226, 0.01926735]),
"2010": numpy.array([
2.227001, 9.770226, 0.01926735])},
"efficient": {
"2009": numpy.array([
1.670251, 7.816181, 0.01637724]),
"2010": numpy.array([
1.670251, 7.816181, 0.01637724])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.113501, 4.885113, 0.009633673]),
"2010": numpy.array([
1.113501, 4.885113, 0.009633673])},
"efficient": {
"2009": numpy.array([
0.5567503, 2.931068, 0.006743571]),
"2010": numpy.array([
0.5567503, 2.931068, 0.006743571])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
3.340502, 14.65534, 0.02890102]),
"2010": numpy.array([
3.340502, 14.65534, 0.02890102])},
"efficient": {
"2009": numpy.array([
2.227001, 10.25874, 0.02119408]),
"2010": numpy.array([
2.227001, 10.25874, 0.02119408])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.670251, 7.32767, 0.01445051]),
"2010": numpy.array([
1.670251, 7.32767, 0.01445051])},
"efficient": {
"2009": numpy.array([
0.5567503, 2.931068, 0.006743571]),
"2010": numpy.array([
0.5567503, 2.931068, 0.006743571])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {
"2009": numpy.array([17.77, 10.23, 19.98]),
"2010": numpy.array([17.77, 10.23, 19.98])}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": numpy.array([8.89, 5.11, 9.99]),
"2010": numpy.array([8.89, 5.11, 9.99])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
27.77300, 20.22977, 29.98073]),
"2010": numpy.array([
27.77300, 20.22977, 29.98073])},
"efficient": {
"2009": numpy.array([
20.82975, 15.17233, 22.48555]),
"2010": numpy.array([
20.82975, 15.17233, 22.48555])}},
"competed": {
"baseline": {
"2009": numpy.array([
13.88650, 10.11489, 14.99037]),
"2010": numpy.array([
13.88650, 10.11489, 14.99037])},
"efficient": {
"2009": numpy.array([
6.943250, 5.057443, 7.495183]),
"2010": numpy.array([
6.943250, 5.057443, 7.495183])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
41.65950, 30.34466, 44.97110]),
"2010": numpy.array([
41.65950, 30.34466, 44.97110])},
"efficient": {
"2009": numpy.array([
27.77300, 20.22977, 29.98073]),
"2010": numpy.array([
27.77300, 20.22977, 29.98073])}},
"competed": {
"baseline": {
"2009": numpy.array([
20.82975, 15.17233, 22.48555]),
"2010": numpy.array([
20.82975, 15.17233, 22.48555])},
"efficient": {
"2009": numpy.array([
6.943250, 5.057443, 7.495183]),
"2010": numpy.array([
6.943250, 5.057443, 7.495183])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
17.77300, 10.22977, 19.98073]),
"2010": numpy.array([
17.77300, 10.22977, 19.98073])},
"efficient": {
"2009": numpy.array([
8.886499, 5.114887, 9.990366]),
"2010": numpy.array([
8.886499, 5.114887, 9.990366])}},
"competed": {
"baseline": {
"2009": numpy.array([
8.886499, 5.114887, 9.990366]),
"2010": numpy.array([
8.886499, 5.114887, 9.990366])},
"efficient": {
"2009": numpy.array([0, 0, 0]),
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
27.77300, 20.22977, 29.98073]),
"2010": numpy.array([
27.77300, 20.22977, 29.98073])},
"efficient": {
"2009": numpy.array([
20.82975, 15.17233, 22.48555]),
"2010": numpy.array([
20.82975, 15.17233, 22.48555])}},
"competed": {
"baseline": {
"2009": numpy.array([
13.88650, 10.11489, 14.99037]),
"2010": numpy.array([
13.88650, 10.11489, 14.99037])},
"efficient": {
"2009": numpy.array([
6.943250, 5.057443, 7.495183]),
"2010": numpy.array([
6.943250, 5.057443, 7.495183])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
41.65950, 30.34466, 44.97110]),
"2010": numpy.array([
41.65950, 30.34466, 44.97110])},
"efficient": {
"2009": numpy.array([
27.77300, 20.22977, 29.98073]),
"2010": numpy.array([
27.77300, 20.22977, 29.98073])}},
"competed": {
"baseline": {
"2009": numpy.array([
20.82975, 15.17233, 22.48555]),
"2010": numpy.array([
20.82975, 15.17233, 22.48555])},
"efficient": {
"2009": numpy.array([
6.943250, 5.057443, 7.495183]),
"2010": numpy.array([
6.943250, 5.057443, 7.495183])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": numpy.array([1.73, 0.02, 9.60]),
"2010": numpy.array([1.73, 0.02, 9.60])}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {
"2009": numpy.array([0.87, 0.01, 4.80]),
"2010": numpy.array([0.87, 0.01, 4.80])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])},
"efficient": {
"2009": numpy.array([
1.29884336, 0.01356626, 7.20249116]),
"2010": numpy.array([
1.29884336, 0.01356626, 7.20249116])}},
"competed": {
"baseline": {
"2009": numpy.array([
0.865895571, 0.009044176, 4.801660776]),
"2010": numpy.array([
0.865895571, 0.009044176, 4.801660776])},
"efficient": {
"2009": numpy.array([
0.432947785, 0.004522088, 2.400830388]),
"2010": numpy.array([
0.432947785, 0.004522088, 2.400830388])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
2.59768671, 0.02713253, 14.40498233]),
"2010": numpy.array([
2.59768671, 0.02713253, 14.40498233])},
"efficient": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.29884336, 0.01356626, 7.20249116]),
"2010": numpy.array([
1.29884336, 0.01356626, 7.20249116])},
"efficient": {
"2009": numpy.array([
0.432947785, 0.004522088, 2.400830388]),
"2010": numpy.array([
0.432947785, 0.004522088, 2.400830388])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])},
"efficient": {
"2009": numpy.array([
0.865895571, 0.01085301, 6.722325]),
"2010": numpy.array([
0.865895571, 0.01085301, 6.722325])}},
"competed": {
"baseline": {
"2009": numpy.array([
0.865895571, 0.009044176, 4.801660776]),
"2010": numpy.array([
0.865895571, 0.009044176, 4.801660776])},
"efficient": {
"2009": numpy.array([
0, 0.001808835, 1.920664]),
"2010": numpy.array([
0, 0.001808835, 1.920664])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])},
"efficient": {
"2009": numpy.array([
1.29884336, 0.01356626, 7.20249116]),
"2010": numpy.array([
1.29884336, 0.01356626, 7.20249116])}},
"competed": {
"baseline": {
"2009": numpy.array([
0.865895571, 0.009044176, 4.801660776]),
"2010": numpy.array([
0.865895571, 0.009044176, 4.801660776])},
"efficient": {
"2009": numpy.array([
0.432947785, 0.004522088, 2.400830388]),
"2010": numpy.array([
0.432947785, 0.004522088, 2.400830388])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
2.59768671, 0.02713253, 14.40498233]),
"2010": numpy.array([
2.59768671, 0.02713253, 14.40498233])},
"efficient": {
"2009": numpy.array([
1.73179114, 0.01808835, 9.60332155]),
"2010": numpy.array([
1.73179114, 0.01808835, 9.60332155])}},
"competed": {
"baseline": {
"2009": numpy.array([
1.29884336, 0.01356626, 7.20249116]),
"2010": numpy.array([
1.29884336, 0.01356626, 7.20249116])},
"efficient": {
"2009": numpy.array([
0.432947785, 0.004522088, 2.400830388]),
"2010": numpy.array([
0.432947785, 0.004522088, 2.400830388])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {
"2009": numpy.array([16.04, 17.30, 10.29]),
"2010": numpy.array([16.04, 17.30, 10.29])}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": numpy.array([8.02, 8.65, 5.14]),
"2010": numpy.array([8.02, 8.65, 5.14])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
26.04455, 27.29736, 20.29000]),
"2010": numpy.array([
26.04455, 27.29736, 20.29000])},
"efficient": {
"2009": numpy.array([
19.53341, 20.47302, 15.21750]),
"2010": numpy.array([
19.53341, 20.47302, 15.21750])}},
"competed": {
"baseline": {
"2009": numpy.array([
13.02227, 13.64868, 10.14500]),
"2010": numpy.array([
13.02227, 13.64868, 10.14500])},
"efficient": {
"2009": numpy.array([
6.511136, 6.824341, 5.072499]),
"2010": numpy.array([
6.511136, 6.824341, 5.072499])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
39.06682, 40.94604, 30.43499]),
"2010": numpy.array([
39.06682, 40.94604, 30.43499])},
"efficient": {
"2009": numpy.array([
26.04455, 27.29736, 20.29000]),
"2010": numpy.array([
26.04455, 27.29736, 20.29000])}},
"competed": {
"baseline": {
"2009": numpy.array([
19.53341, 20.47302, 15.21750]),
"2010": numpy.array([
19.53341, 20.47302, 15.21750])},
"efficient": {
"2009": numpy.array([
6.511136, 6.824341, 5.072499]),
"2010": numpy.array([
6.511136, 6.824341, 5.072499])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
16.04455, 17.29736, 10.29000]),
"2010": numpy.array([
16.04455, 17.29736, 10.29000])},
"efficient": {
"2009": numpy.array([
8.022273, 8.648681, 5.144998]),
"2010": numpy.array([
8.022273, 8.648681, 5.144998])}},
"competed": {
"baseline": {
"2009": numpy.array([
8.022273, 8.648681, 5.144998]),
"2010": numpy.array([
8.022273, 8.648681, 5.144998])},
"efficient": {
"2009": numpy.array([0, 0, 0]),
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
26.04455, 27.29736, 20.29000]),
"2010": numpy.array([
26.04455, 27.29736, 20.29000])},
"efficient": {
"2009": numpy.array([
19.53341, 20.47302, 15.21750]),
"2010": numpy.array([
19.53341, 20.47302, 15.21750])}},
"competed": {
"baseline": {
"2009": numpy.array([
13.02227, 13.64868, 10.14500]),
"2010": numpy.array([
13.02227, 13.64868, 10.14500])},
"efficient": {
"2009": numpy.array([
6.511136, 6.824341, 5.072499]),
"2010": numpy.array([
6.511136, 6.824341, 5.072499])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
39.06682, 40.94604, 30.43499]),
"2010": numpy.array([
39.06682, 40.94604, 30.43499])},
"efficient": {
"2009": numpy.array([
26.04455, 27.29736, 20.29000]),
"2010": numpy.array([
26.04455, 27.29736, 20.29000])}},
"competed": {
"baseline": {
"2009": numpy.array([
19.53341, 20.47302, 15.21750]),
"2010": numpy.array([
19.53341, 20.47302, 15.21750])},
"efficient": {
"2009": numpy.array([
6.511136, 6.824341, 5.072499]),
"2010": numpy.array([
6.511136, 6.824341, 5.072499])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {
"2009": numpy.array([22.22, 22.68, 20.11]),
"2010": numpy.array([22.22, 22.68, 20.11])}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {
"2009": numpy.array([11.11, 11.34, 10.05]),
"2010": numpy.array([11.11, 11.34, 10.05])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
42.22366, 42.68455, 40.10668]),
"2010": numpy.array([
42.22366, 42.68455, 40.10668])},
"efficient": {
"2009": numpy.array([
31.66775, 32.01341, 30.08001]),
"2010": numpy.array([
31.66775, 32.01341, 30.08001])}},
"competed": {
"baseline": {
"2009": numpy.array([
21.11183, 21.34227, 20.05334]),
"2010": numpy.array([
21.11183, 21.34227, 20.05334])},
"efficient": {
"2009": numpy.array([
10.55592, 10.67114, 10.02667]),
"2010": numpy.array([
10.55592, 10.67114, 10.02667])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
63.33550, 64.02682, 60.16002]),
"2010": numpy.array([
63.33550, 64.02682, 60.16002])},
"efficient": {
"2009": numpy.array([
42.22366, 42.68455, 40.10668]),
"2010": numpy.array([
42.22366, 42.68455, 40.10668])}},
"competed": {
"baseline": {
"2009": numpy.array([
31.66775, 32.01341, 30.08001]),
"2010": numpy.array([
31.66775, 32.01341, 30.08001])},
"efficient": {
"2009": numpy.array([
10.55592, 10.67114, 10.02667]),
"2010": numpy.array([
10.55592, 10.67114, 10.02667])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": numpy.array([
22.22366, 22.68455, 20.10668]),
"2010": numpy.array([
22.22366, 22.68455, 20.10668])},
"efficient": {
"2009": numpy.array([
11.11183, 11.34227, 10.05334]),
"2010": numpy.array([
11.11183, 11.34227, 10.05334])}},
"competed": {
"baseline": {
"2009": numpy.array([
11.11183, 11.34227, 10.05334]),
"2010": numpy.array([
11.11183, 11.34227, 10.05334])},
"efficient": {
"2009": numpy.array([0, 0, 0]),
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": numpy.array([
42.22366, 42.68455, 40.10668]),
"2010": numpy.array([
42.22366, 42.68455, 40.10668])},
"efficient": {
"2009": numpy.array([
31.66775, 32.01341, 30.08001]),
"2010": numpy.array([
31.66775, 32.01341, 30.08001])}},
"competed": {
"baseline": {
"2009": numpy.array([
21.11183, 21.34227, 20.05334]),
"2010": numpy.array([
21.11183, 21.34227, 20.05334])},
"efficient": {
"2009": numpy.array([
10.55592, 10.67114, 10.02667]),
"2010": numpy.array([
10.55592, 10.67114, 10.02667])}}},
"carbon": {
"total": {
"baseline": {
"2009": numpy.array([
63.33550, 64.02682, 60.16002]),
"2010": numpy.array([
63.33550, 64.02682, 60.16002])},
"efficient": {
"2009": numpy.array([
42.22366, 42.68455, 40.10668]),
"2010": numpy.array([
42.22366, 42.68455, 40.10668])}},
"competed": {
"baseline": {
"2009": numpy.array([
31.66775, 32.01341, 30.08001]),
"2010": numpy.array([
31.66775, 32.01341, 30.08001])},
"efficient": {
"2009": numpy.array([
10.55592, 10.67114, 10.02667]),
"2010": numpy.array([
10.55592, 10.67114, 10.02667])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}}]

def test_compete_res(self):
"""Test outcomes given valid sample measures w/ point value inputs."""
# Run the measure competition routine on sample demand-side measures
self.a_run.compete_res_primary(
self.measures_demand, self.adjust_key1, self.test_adopt_scheme)
# Remove any market overlaps across the supply and demand sides of
# heating and cooling
self.a_run.htcl_adj(
self.measures_demand, self.test_adopt_scheme, self.test_htcl_adj)
# Run the measure competition routine on sample supply-side measures
self.a_run.compete_res_primary(
self.measures_supply, self.adjust_key2, self.test_adopt_scheme)
# Remove any market overlaps across the supply and demand sides of
# heating and cooling
self.a_run.htcl_adj(
self.measures_supply, self.test_adopt_scheme, self.test_htcl_adj)
# Check updated competed master microsegments for each sample measure
# following competition/supply-demand overlap adjustments
for ind, _ in enumerate(self.a_run.measures):
self.dict_check(
self.measures_master_msegs_out[ind],
self.a_run.measures[ind].markets[self.test_adopt_scheme][
"competed"]["master_mseg"])

def test_compete_res_dist(self):
"""Test outcomes given valid sample measures w/ some array inputs."""
# Run the measure competition routine on sample demand-side measures
self.a_run_dist.compete_res_primary(
self.measures_demand_dist, self.adjust_key1,
self.test_adopt_scheme)
# Remove any market overlaps across the supply and demand sides of
# heating and cooling
self.a_run_dist.htcl_adj(
self.measures_demand_dist, self.test_adopt_scheme,
self.test_htcl_adj)
# Run the measure competition routine on sample supply-side measures
self.a_run_dist.compete_res_primary(
self.measures_supply_dist, self.adjust_key2,
self.test_adopt_scheme)
# Remove any market overlaps across the supply and demand sides of
# heating and cooling
self.a_run_dist.htcl_adj(
self.measures_supply_dist, self.test_adopt_scheme,
self.test_htcl_adj)
# Check updated competed master microsegments for each sample measure
# following competition/supply-demand overlap adjustments
for ind, _ in enumerate(self.a_run_dist.measures):
self.dict_check(
self.measures_master_msegs_out_dist[ind],
self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][
"competed"]["master_mseg"])


class ComCompeteTest(unittest.TestCase, CommonMethods):
"""Test 'compete_com_primary' and 'secondary_adj' functions.
Verify that 'compete_com_primary' correctly calculates primary market
shares and updates master microsegments for a series of competing
commercial measures; and that 'secondary_adj' correctly adjusts any
secondary markets associated with these primary market microsegments.

Attributes:
handyvars (object): Useful variables across the class.
test_adopt_scheme (string): Sample consumer adoption scheme.
overlap_key (string): First sample string for competed primary market
microsegment key chain being tested.
overlap_key_scnd (string): Second sample string for secondary market
microsegment key chain being tested.
secnd_adj_key (string): Key used to link primary and secondary market
microsegments (by climate, building type, structure type).
compete_meas1 (dict): Sample commercial supply-side lighting measure 1.
compete_meas2 (dict): Sample commercial supply-side lighting measure 2.
compete_meas3 (dict): Sample commercial supply-side lighting measure 3.
compete_meas2_dist (dict): Alternative version of sample commercial
    supply-side lighting measure 2 that includes lists of stock cost
    input values instead of point values.
measures_all (list): List of all competing measures with point
value inputs.
measures_secondary (list): Subset of 'measures_all' with secondary
microsegments to adjust.
a_run (object): Analysis engine object incorporating all
    'measures_all' objects.
measures_all_dist (list): List of competing measures including
some measures with array inputs.
measures_secondary_dist (list): Subset of 'measures_all_dist' with
secondary microsegments to adjust.
a_run_dist (object): Analysis engine object incorporating all
    'measures_all_dist' objects.
measures_overlap (dict): List of supply-side Measure objects and
associated contributing microsegment keys that overlap with
'measures_demand' Measure objects.
measure_master_msegs_out (dict): Master market microsegments
that should be generated for each Measure object in 'measures_all'
following competition and supply-demand overlap adjustments.
measure_master_msegs_out_dist (dict): Master market microsegments
that should be generated for each Measure object in
'measures_all_dist' following competition and supply-demand overlap
adjustments.
"""

@classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
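# Assume no retrofits of the existing stock and a two-year (2009-2010)
# modeling horizon for these tests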
cls.handyvars.retro_rate = 0
cls.handyvars.aeo_years = ["2009", "2010"]
cls.test_adopt_scheme = "Max adoption potential"
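# Primary and secondary market microsegment key chains used to test
# overlaps across the competing commercial measures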
cls.overlap_key = str(
('primary', 'AIA_CZ1', 'assembly', 'electricity (grid)',
'lighting', 'reflector (LED)', 'existing'))
cls.overlap_key_scnd = str(
('secondary', 'AIA_CZ1', 'assembly', 'electricity (grid)',
'cooling', 'demand', 'lighting gain', 'existing'))
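# Key linking primary and secondary market microsegments (by climate
# zone, building type, and structure type)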
cls.secnd_adj_key = str(('AIA_CZ1', 'assembly', 'existing'))
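# Sample commercial supply-side lighting measure 1 (point value inputs)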
cls.compete_meas1 = {
"name": "sample compete measure c1",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["assembly"],
"end_use": {
"primary": ["lighting"],
"secondary": None},
"technology": ["reflector (LED)"],
"technology_type": {
"primary": "supply", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 20, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 30, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 10, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 40, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 10, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}}}}
cls.compete_meas2 = {
"name": "sample compete measure c2",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["assembly"],
"end_use": {
"primary": ["lighting"],
"secondary": ["heating", "secondary heating", "cooling"]},
"technology": ["reflector (LED)"],
"technology_type": {
"primary": "supply", "secondary": "demand"},
"market_entry_year": 2010,
"market_exit_year": None,
"yrs_on_mkt": ["2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 0, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
cls.overlap_key_scnd: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 10}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},
cls.overlap_key_scnd: {
"rate distribution": {}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 0, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
cls.overlap_key_scnd: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 10}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},
cls.overlap_key_scnd: {
"rate distribution": {}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}}}}
cls.compete_meas2_dist = {
"name": "sample compete measure c2 dist",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["assembly"],
"end_use": {
"primary": ["lighting"],
"secondary": ["heating", "secondary heating", "cooling"]},
"technology": ["reflector (LED)"],
"technology_type": {
"primary": "supply", "secondary": "demand"},
"market_entry_year": 2010,
"market_exit_year": None,
"yrs_on_mkt": ["2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": 20,
"2010": numpy.array([10, 12, 14])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": 10,
"2010": numpy.array([0, 2, 4])}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 0,
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0,
"2010": numpy.array(
[0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
cls.overlap_key_scnd: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10,
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 5,
"2010": numpy.array([
0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},
cls.overlap_key_scnd: {
"rate distribution": {}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}},
"supply-demand adjustment": {
"savings": {},
"total": {}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 20}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {
"2009": 20,
"2010": numpy.array([10, 12, 14])}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {
"2009": 10,
"2010": numpy.array([0, 2, 4])}}},
"energy": {
"total": {
"baseline": {"2009": 40, "2010": 40},
"efficient": {"2009": 40, "2010": 30}},
"competed": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 10}}},
"carbon": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 60, "2010": 40}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 10}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 0,
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0,
"2010": numpy.array(
[0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
cls.overlap_key_scnd: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 0, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 20, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 30, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 15, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10,
"2010": numpy.array(
[5, 6, 7])}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 5,
"2010": numpy.array([
0, 1, 2])}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 20, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 10, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 30, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 15, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}},
cls.overlap_key_scnd: {
"rate distribution": {}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}},
"supply-demand adjustment": {
"savings": {},
"total": {}}},
"mseg_out_break": {}}}}
cls.compete_meas3 = {
"name": "sample compete measure c3",
"climate_zone": ["AIA_CZ1"],
"bldg_type": ["assembly"],
"end_use": {
"primary": ["lighting"],
"secondary": None},
"technology": ["reflector (LED)"],
"technology_type": {
"primary": "supply", "secondary": None},
"market_entry_year": 2009,
"market_exit_year": None,
"yrs_on_mkt": ["2009", "2010"],
"markets": {
"Technical potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 30, "2010": 30}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 15, "2010": 15}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'multi family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}},
"Max adoption potential": {
"master_mseg": {
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 30, "2010": 30}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 15, "2010": 15}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 60, "2010": 60},
"efficient": {"2009": 45, "2010": 45}},
"competed": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 15, "2010": 15}}},
"carbon": {
"total": {
"baseline": {"2009": 90, "2010": 90},
"efficient": {"2009": 60, "2010": 60}},
"competed": {
"baseline": {"2009": 45, "2010": 45},
"efficient": {"2009": 15, "2010": 15}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
"mseg_adjust": {
"contributing mseg keys and values": {
cls.overlap_key: {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1}},
str(('primary', 'AIA_CZ2', 'single family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
str(('primary', 'AIA_CZ2', 'multi family home',
'electricity (grid)', 'lighting',
'reflector (LED)')): {
"stock": {
"total": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 10, "2010": 10}},
"competed": {
"all": {"2009": 5, "2010": 5},
"measure": {"2009": 5, "2010": 5}}},
"energy": {
"total": {
"baseline": {"2009": 20, "2010": 20},
"efficient": {"2009": 15, "2010": 15}},
"competed": {
"baseline": {"2009": 10, "2010": 10},
"efficient": {"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {"2009": 30, "2010": 30},
"efficient": {"2009": 20, "2010": 20}},
"competed": {
"baseline": {"2009": 15, "2010": 15},
"efficient": {"2009": 5, "2010": 5}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}},
"competed": {
"baseline": {
"2009": 5, "2010": 5},
"efficient": {
"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {
"2009": 20, "2010": 20},
"efficient": {
"2009": 15, "2010": 15}},
"competed": {
"baseline": {
"2009": 10, "2010": 10},
"efficient": {
"2009": 5, "2010": 5}}},
"carbon": {
"total": {
"baseline": {
"2009": 30, "2010": 30},
"efficient": {
"2009": 20, "2010": 20}},
"competed": {
"baseline": {
"2009": 15, "2010": 15},
"efficient": {
"2009": 5, "2010": 5}}}},
"lifetime": {
"baseline": {"2009": 1, "2010": 1},
"measure": 1},
"sub-market scaling": 1},
"competed choice parameters": {
cls.overlap_key: {
"rate distribution": {
"2009": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4],
"2010": [
0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.4]}}},
"secondary mseg adjustments": {
"market share": {
"original energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"original energy (competed and captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (total captured)": {
cls.secnd_adj_key: {"2009": 0, "2010": 0}},
"adjusted energy (competed and captured)": {
cls.secnd_adj_key: {
"2009": 0, "2010": 0}}}}},
"mseg_out_break": {}}}}
cls.measures_all = [run.Measure(
cls.handyvars, **x) for x in [
copy.deepcopy(cls.compete_meas1), cls.compete_meas2,
copy.deepcopy(cls.compete_meas3)]]
cls.measures_secondary = [cls.measures_all[1]]
# Instantiate engine object based on above measures
cls.a_run = run.Engine(cls.handyvars, cls.measures_all)
# Set information needed to finalize array test measure consumer
# metrics
consumer_metrics = [{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 100, "rate 2": 110,
"rate 3": 120, "rate 4": 130,
"rate 5": 140, "rate 6": 150,
"rate 7": 160},
"2010": {
"rate 1": 100, "rate 2": 110,
"rate 3": 120, "rate 4": 130,
"rate 5": 140, "rate 6": 150,
"rate 7": 160}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -350, "rate 2": -60,
"rate 3": -70, "rate 4": -380,
"rate 5": -390, "rate 6": -150,
"rate 7": -400},
"2010": {
"rate 1": -350, "rate 2": -60,
"rate 3": -70, "rate 4": -380,
"rate 5": -390, "rate 6": -150,
"rate 7": -400}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -40, "rate 2": -50,
"rate 3": -55, "rate 4": -60,
"rate 5": -65, "rate 6": -70,
"rate 7": -75},
"2010": {
"rate 1": -40, "rate 2": -50,
"rate 3": -55, "rate 4": -60,
"rate 5": -65, "rate 6": -70,
"rate 7": -75}}}},
{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 85, "rate 2": 90, "rate 3": 95,
"rate 4": 100, "rate 5": 105,
"rate 6": 110, "rate 7": 115},
"2010": {
"rate 1": 85, "rate 2": 90, "rate 3": 95,
"rate 4": 100, "rate 5": 105,
"rate 6": 110, "rate 7": 115}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -435, "rate 2": -440,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -370},
"2010": {
"rate 1": -435, "rate 2": -440,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -370}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -135, "rate 2": -140,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -170},
"2010": {
"rate 1": -135, "rate 2": -140,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -170}}}},
{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 50, "rate 2": 60, "rate 3": 70,
"rate 4": 80, "rate 5": 90, "rate 6": 100,
"rate 7": 110},
"2010": {
"rate 1": 50, "rate 2": 60, "rate 3": 70,
"rate 4": 80, "rate 5": 90, "rate 6": 100,
"rate 7": 110}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -190, "rate 2": -195,
"rate 3": -190,
"rate 4": -205, "rate 5": -180,
"rate 6": -230,
"rate 7": -200},
"2010": {
"rate 1": -190, "rate 2": -195,
"rate 3": -190,
"rate 4": -205, "rate 5": -180,
"rate 6": -230,
"rate 7": -200}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -90, "rate 2": -95,
"rate 3": -100,
"rate 4": -105, "rate 5": -110,
"rate 6": -115,
"rate 7": -120},
"2010": {
"rate 1": -90, "rate 2": -95,
"rate 3": -100,
"rate 4": -105, "rate 5": -110,
"rate 6": -115,
"rate 7": -120}}}}]
# Adjust/finalize point value test measure consumer metrics
for ind, m in enumerate(cls.a_run.measures):
m.consumer_metrics['anpv'] = consumer_metrics[ind]
cls.measures_all_dist = [run.Measure(
cls.handyvars, **x) for x in [
copy.deepcopy(cls.compete_meas1),
cls.compete_meas2_dist,
copy.deepcopy(cls.compete_meas3)]]
cls.measures_secondary_dist = [cls.measures_all_dist[1]]
cls.a_run_dist = run.Engine(cls.handyvars, cls.measures_all_dist)
# Set information needed to finalize array test measure consumer
# metrics
consumer_metrics_dist = [{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 100, "rate 2": 110,
"rate 3": 120, "rate 4": 130,
"rate 5": 140, "rate 6": 150,
"rate 7": 160},
"2010": {
"rate 1": 100, "rate 2": 110,
"rate 3": 120, "rate 4": 130,
"rate 5": 140, "rate 6": 150,
"rate 7": 160}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -350, "rate 2": -60,
"rate 3": -70, "rate 4": -380,
"rate 5": -390, "rate 6": -150,
"rate 7": -400},
"2010": {
"rate 1": -350, "rate 2": -60,
"rate 3": -70, "rate 4": -380,
"rate 5": -390, "rate 6": -150,
"rate 7": -400}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -40, "rate 2": -50,
"rate 3": -55, "rate 4": -60,
"rate 5": -65, "rate 6": -70,
"rate 7": -75},
"2010": {
"rate 1": -40, "rate 2": -50,
"rate 3": -55, "rate 4": -60,
"rate 5": -65, "rate 6": -70,
"rate 7": -75}}}},
{
"stock cost": {
"residential": {
"2009": None,
"2010": None
},
"commercial": {
"2009": None,
"2010": numpy.array([
{
"rate 1": 85, "rate 2": 90, "rate 3": 95,
"rate 4": 100, "rate 5": 105,
"rate 6": 110, "rate 7": 115},
{
"rate 1": 205, "rate 2": 100, "rate 3": 105,
"rate 4": 110, "rate 5": 115,
"rate 6": 120, "rate 7": 125},
{
"rate 1": 105, "rate 2": 110, "rate 3": 115,
"rate 4": 120, "rate 5": 125,
"rate 6": 10, "rate 7": 135}])}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -435, "rate 2": -440,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -370},
"2010": {
"rate 1": -435, "rate 2": -440,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -370}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -135, "rate 2": -140,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -170},
"2010": {
"rate 1": -135, "rate 2": -140,
"rate 3": -145,
"rate 4": -150, "rate 5": -155,
"rate 6": -160,
"rate 7": -170}}}},
{
"stock cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": 50, "rate 2": 60, "rate 3": 70,
"rate 4": 80, "rate 5": 90, "rate 6": 100,
"rate 7": 110},
"2010": {
"rate 1": 50, "rate 2": 60, "rate 3": 70,
"rate 4": 80, "rate 5": 90, "rate 6": 100,
"rate 7": 110}}},
"energy cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -190, "rate 2": -195,
"rate 3": -190,
"rate 4": -205, "rate 5": -180,
"rate 6": -230,
"rate 7": -200},
"2010": {
"rate 1": -190, "rate 2": -195,
"rate 3": -190,
"rate 4": -205, "rate 5": -180,
"rate 6": -230,
"rate 7": -200}}},
"carbon cost": {
"residential": {
"2009": None, "2010": None},
"commercial": {
"2009": {
"rate 1": -90, "rate 2": -95,
"rate 3": -100,
"rate 4": -105, "rate 5": -110,
"rate 6": -115,
"rate 7": -120},
"2010": {
"rate 1": -90, "rate 2": -95,
"rate 3": -100,
"rate 4": -105, "rate 5": -110,
"rate 6": -115,
"rate 7": -120}}}}]
# Adjust/finalize point value test measure consumer metrics
for ind, m in enumerate(cls.a_run_dist.measures):
m.consumer_metrics['anpv'] = consumer_metrics_dist[ind]
cls.measures_master_msegs_out = [{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 17, "2010": 12}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 8.5, "2010": 6}}},
"energy": {
"total": {
"baseline": {"2009": 34, "2010": 24},
"efficient": {"2009": 25.5, "2010": 18}},
"competed": {
"baseline": {"2009": 17, "2010": 12},
"efficient": {"2009": 8.5, "2010": 6}}},
"carbon": {
"total": {
"baseline": {"2009": 51, "2010": 36},
"efficient": {"2009": 34, "2010": 24}},
"competed": {
"baseline": {"2009": 25.5, "2010": 18},
"efficient": {"2009": 8.5, "2010": 6}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 17, "2010": 12},
"efficient": {"2009": 8.5, "2010": 6}},
"competed": {
"baseline": {"2009": 8.5, "2010": 6},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 34, "2010": 24},
"efficient": {"2009": 25.5, "2010": 18}},
"competed": {
"baseline": {"2009": 17, "2010": 12},
"efficient": {"2009": 8.5, "2010": 6}}},
"carbon": {
"total": {
"baseline": {"2009": 51, "2010": 36},
"efficient": {"2009": 34, "2010": 24}},
"competed": {
"baseline": {"2009": 25.5, "2010": 18},
"efficient": {"2009": 8.5, "2010": 6}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {"2009": 0, "2010": 16}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {"2009": 0, "2010": 8}}},
"energy": {
"total": {
"baseline": {"2009": 0, "2010": 24},
"efficient": {"2009": 0, "2010": 18}},
"competed": {
"baseline": {"2009": 0, "2010": 12},
"efficient": {"2009": 0, "2010": 6}}},
"carbon": {
"total": {
"baseline": {"2009": 0, "2010": 36},
"efficient": {"2009": 0, "2010": 24}},
"competed": {
"baseline": {"2009": 0, "2010": 18},
"efficient": {"2009": 0, "2010": 6}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 10, "2010": 16},
"efficient": {"2009": 20, "2010": 8}},
"competed": {
"baseline": {"2009": 5, "2010": 8},
"efficient": {"2009": 10, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 0, "2010": 24},
"efficient": {"2009": 0, "2010": 18}},
"competed": {
"baseline": {"2009": 0, "2010": 12},
"efficient": {"2009": 0, "2010": 6}}},
"carbon": {
"total": {
"baseline": {"2009": 0, "2010": 36},
"efficient": {"2009": 0, "2010": 24}},
"competed": {
"baseline": {"2009": 0, "2010": 18},
"efficient": {"2009": 0, "2010": 6}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 30, "2010": 30},
"measure": {"2009": 23, "2010": 22}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {"2009": 11.5, "2010": 11}}},
"energy": {
"total": {
"baseline": {"2009": 46, "2010": 44},
"efficient": {"2009": 34.5, "2010": 33}},
"competed": {
"baseline": {"2009": 23, "2010": 22},
"efficient": {"2009": 11.5, "2010": 11}}},
"carbon": {
"total": {
"baseline": {"2009": 69, "2010": 66},
"efficient": {"2009": 46, "2010": 44}},
"competed": {
"baseline": {"2009": 34.5, "2010": 33},
"efficient": {"2009": 11.5, "2010": 11}}},
"cost": {
"stock": {
"total": {
"baseline": {"2009": 23, "2010": 22},
"efficient": {"2009": 11.5, "2010": 11}},
"competed": {
"baseline": {"2009": 11.5, "2010": 11},
"efficient": {"2009": 0, "2010": 0}}},
"energy": {
"total": {
"baseline": {"2009": 46, "2010": 44},
"efficient": {"2009": 34.5, "2010": 33}},
"competed": {
"baseline": {"2009": 23, "2010": 22},
"efficient": {"2009": 11.5, "2010": 11}}},
"carbon": {
"total": {
"baseline": {"2009": 69, "2010": 66},
"efficient": {"2009": 46, "2010": 44}},
"competed": {
"baseline": {"2009": 34.5, "2010": 33},
"efficient": {"2009": 11.5, "2010": 11}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}}]
cls.measures_master_msegs_out_dist = [{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {
"2009": 17,
"2010": numpy.array([12, 13, 16])}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}},
"energy": {
"total": {
"baseline": {
"2009": 34,
"2010": numpy.array([24, 26, 32])},
"efficient": {
"2009": 25.5,
"2010": numpy.array([18, 19.5, 24])}},
"competed": {
"baseline": {
"2009": 17,
"2010": numpy.array([12, 13, 16])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}},
"carbon": {
"total": {
"baseline": {
"2009": 51,
"2010": numpy.array([36, 39, 48])},
"efficient": {
"2009": 34,
"2010": numpy.array([24, 26, 32])}},
"competed": {
"baseline": {
"2009": 25.5,
"2010": numpy.array([18.0, 19.5, 24.0])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 17,
"2010": numpy.array([12, 13, 16])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6, 6.5, 8])}},
"competed": {
"baseline": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])},
"efficient": {
"2009": 0,
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": 34,
"2010": numpy.array([24, 26, 32])},
"efficient": {
"2009": 25.5,
"2010": numpy.array([18, 19.5, 24])}},
"competed": {
"baseline": {
"2009": 17,
"2010": numpy.array([12, 13, 16])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}},
"carbon": {
"total": {
"baseline": {
"2009": 51,
"2010": numpy.array([36, 39, 48])},
"efficient": {
"2009": 34,
"2010": numpy.array([24, 26, 32])}},
"competed": {
"baseline": {
"2009": 25.5,
"2010": numpy.array([18.0, 19.5, 24.0])},
"efficient": {
"2009": 8.5,
"2010": numpy.array([6.0, 6.5, 8.0])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {"2009": 20, "2010": 20},
"measure": {
"2009": 0,
"2010": numpy.array([16, 15, 13])}},
"competed": {
"all": {"2009": 10, "2010": 10},
"measure": {
"2009": 0,
"2010": numpy.array([8.0, 7.5, 6.5])}}},
"energy": {
"total": {
"baseline": {
"2009": 0,
"2010": numpy.array([24, 20, 12])},
"efficient": {
"2009": 0,
"2010": numpy.array([18, 15, 9])}},
"competed": {
"baseline": {
"2009": 0,
"2010": numpy.array([12, 10, 6])},
"efficient": {
"2009": 0,
"2010": numpy.array([6, 5, 3])}}},
"carbon": {
"total": {
"baseline": {
"2009": 0,
"2010": numpy.array([36, 30, 18])},
"efficient": {
"2009": 0,
"2010": numpy.array([24, 20, 12])}},
"competed": {
"baseline": {
"2009": 0,
"2010": numpy.array([18, 15, 9])},
"efficient": {
"2009": 0,
"2010": numpy.array([6, 5, 3])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 10,
"2010": numpy.array([16, 15, 13])},
"efficient": {
"2009": 20,
"2010": numpy.array([8, 9, 9.1])}},
"competed": {
"baseline": {
"2009": 5,
"2010": numpy.array([8.0, 7.5, 6.5])},
"efficient": {
"2009": 10,
"2010": numpy.array([0, 1.5, 2.6])}}},
"energy": {
"total": {
"baseline": {
"2009": 0,
"2010": numpy.array([24, 20, 12])},
"efficient": {
"2009": 0,
"2010": numpy.array([18, 15, 9])}},
"competed": {
"baseline": {
"2009": 0,
"2010": numpy.array([12, 10, 6])},
"efficient": {
"2009": 0,
"2010": numpy.array([6, 5, 3])}}},
"carbon": {
"total": {
"baseline": {
"2009": 0,
"2010": numpy.array([36, 30, 18])},
"efficient": {
"2009": 0,
"2010": numpy.array([24, 20, 12])}},
"competed": {
"baseline": {
"2009": 0,
"2010": numpy.array([18, 15, 9])},
"efficient": {
"2009": 0,
"2010": numpy.array([6, 5, 3])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}},
{
"stock": {
"total": {
"all": {
"2009": 30, "2010": 30},
"measure": {
"2009": 23,
"2010": numpy.array([22, 22, 21])}},
"competed": {
"all": {"2009": 15, "2010": 15},
"measure": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}},
"energy": {
"total": {
"baseline": {
"2009": 46,
"2010": numpy.array([44, 44, 42])},
"efficient": {
"2009": 34.5,
"2010": numpy.array([33, 33, 31.5])}},
"competed": {
"baseline": {
"2009": 23,
"2010": numpy.array([22, 22, 21])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}},
"carbon": {
"total": {
"baseline": {
"2009": 69,
"2010": numpy.array([66, 66, 63])},
"efficient": {
"2009": 46,
"2010": numpy.array([44, 44, 42])}},
"competed": {
"baseline": {
"2009": 34.5,
"2010": numpy.array([33.0, 33.0, 31.5])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}},
"cost": {
"stock": {
"total": {
"baseline": {
"2009": 23,
"2010": numpy.array([22, 22, 21])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11, 11, 10.5])}},
"competed": {
"baseline": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])},
"efficient": {
"2009": 0,
"2010": numpy.array([0, 0, 0])}}},
"energy": {
"total": {
"baseline": {
"2009": 46,
"2010": numpy.array([44, 44, 42])},
"efficient": {
"2009": 34.5,
"2010": numpy.array([33, 33, 31.5])}},
"competed": {
"baseline": {
"2009": 23,
"2010": numpy.array([22, 22, 21])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}},
"carbon": {
"total": {
"baseline": {
"2009": 69,
"2010": numpy.array([66, 66, 63])},
"efficient": {
"2009": 46,
"2010": numpy.array([44, 44, 42])}},
"competed": {
"baseline": {
"2009": 34.5,
"2010": numpy.array([33.0, 33.0, 31.5])},
"efficient": {
"2009": 11.5,
"2010": numpy.array([11.0, 11.0, 10.5])}}}},
"lifetime": {"baseline": {"2009": 1, "2010": 1},
"measure": 1}}]
def test_compete_com(self):
"""Test outcomes given sample measures w/ point value inputs."""
# Run measure competition routine on sample measures
self.a_run.compete_com_primary(
self.measures_all, self.overlap_key, self.test_adopt_scheme)
# Run secondary microsegment adjustments on sample measure
self.a_run.secondary_adj(
self.measures_secondary, self.overlap_key_scnd,
self.secnd_adj_key, self.test_adopt_scheme)
# Check updated competed master microsegments for each sample measure
# following competition/secondary microsegment adjustments
for ind, d in enumerate(self.a_run.measures):
self.dict_check(
self.measures_master_msegs_out[ind],
self.a_run.measures[ind].markets[self.test_adopt_scheme][
"competed"]["master_mseg"])
def test_compete_com_dist(self):
"""Test outcomes given valid sample measures w/ some array inputs."""
# Run measure competition routine on sample measures
self.a_run_dist.compete_com_primary(
self.measures_all_dist, self.overlap_key, self.test_adopt_scheme)
# Run secondary microsegment adjustments on sample measure
self.a_run_dist.secondary_adj(
self.measures_secondary_dist, self.overlap_key_scnd,
self.secnd_adj_key, self.test_adopt_scheme)
# Check updated competed master microsegments for each sample measure
# following competition/secondary microsegment adjustments
for ind, d in enumerate(self.a_run_dist.measures):
self.dict_check(
self.measures_master_msegs_out_dist[ind],
self.a_run_dist.measures[ind].markets[self.test_adopt_scheme][
"competed"]["master_mseg"])
class NumpyConversionTest(unittest.TestCase, CommonMethods):
"""Test the operation of the 'convert_to_numpy' function.
Verify that the function converts terminal/leaf node lists in a dict to
numpy arrays.
Attributes:
handyvars (object): Useful variables across the class.
sample_measure (object): Sample measure data with lists to convert.
"""
@classmethod
def setUpClass(cls):
"""Define objects/variables for use across all class functions."""
base_dir = os.getcwd()
cls.handyvars = run.UsefulVars(base_dir, run.UsefulInputFiles())
cls.sample_measure = {
"market_entry_year": None,
"market_exit_year": None,
"markets": {
"Technical potential": {
"key 1": {
"nested key 1":
[1, 2, 3, 4, 5],
"nested key 2": 5},
"key 2": 10.8},
"Max adoption potential": {
"key 1": {
"nested key 1":
[0.5, 0.2, 0.3, 0.4, 0.5],
"nested key 2": 2},
"key 2": 5.8}}}
def test_numpy_convert(self):
"""Test for correct function output given valid input."""
# Instantiate measure
measure_instance = run.Measure(self.handyvars, **self.sample_measure)
# Test for correct data types in measure markets attribute
for adopt_scheme in self.handyvars.adopt_schemes:
for comp_scheme in ["uncompeted", "competed"]:
tested_data = \
measure_instance.markets[adopt_scheme][comp_scheme]
self.assertTrue(
all([isinstance(x, y) for x, y in zip([
tested_data["key 1"]["nested key 1"],
tested_data["key 1"]["nested key 2"],
tested_data["key 2"]], [numpy.ndarray, int, float])]))
# Offer external code execution (include all lines below this point in all
# test files)
def main():
"""Trigger default behavior of running all test fixtures in the file."""
unittest.main()
if __name__ == "__main__":
main()
| 2.09375 | 2 |
gocd_tools/cli/secrets.py | rasmunk/gocd-tools | 0 | 12799797 | def add_secrets_groups(parser):
_ = parser.add_argument_group(title="Secrets arguments")
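# Usage sketch (illustrative only; the argparse wiring below is assumed, and
# the group currently registers no options of its own):
#   import argparse
#   parser = argparse.ArgumentParser()
#   add_secrets_groups(parser)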
| 1.53125 | 2 |
tap_mailchimp/streams/reports_email_activity.py | icebug/tap-mailchimp | 0 | 12799798 | <gh_stars>0
from tap_mailchimp.streams.base import BaseStream
import singer
from datetime import datetime
from dateutil.parser import parse
LOGGER = singer.get_logger()
class ReportsEmailActivityStream(BaseStream):
API_METHOD = "GET"
TABLE = "reports_email_activity"
response_key = "emails"
def sync_data(self):
LOGGER.info("Syncing data for {}".format(self.TABLE))
        total_campaigns = 100  # provisional; overwritten by total_items from the first response
count = 1000
offset = 0
campaign_ids = []
while offset < total_campaigns:
campaign_params = {
"count": count,
"offset": offset,
"since_send_time": (parse(self.config.get('start_date'))).isoformat(),
"sort_field": "send_time",
"sort_dir": "ASC"
}
response = self.client.make_request(path='/campaigns', method='GET', params=campaign_params)
total_campaigns = response['total_items']
data = response['campaigns']
campaign_ids += list(map(lambda x: x['id'], data))
offset += count
LOGGER.info('Number of campaigns: {}'.format(len(campaign_ids)))
operations = []
for campaign_id in campaign_ids:
operations.append(
{
'method': self.API_METHOD,
'path': '/reports/{}/email-activity'.format(campaign_id),
'operation_id': campaign_id,
'params': {
'since': self.get_start_date(self.TABLE).isoformat(),
'exclude_fields': '_links,emails._links'
}
}
)
self.batch_sync_data(operations)
def get_stream_data(self, response, operation_id=None):
transformed = []
for record in response[self.response_key]:
for activity in record.get('activity', []):
new_activity = dict(record)
del new_activity['activity']
for key, value in activity.items():
new_activity[key] = value
new_activity = self.transform_record(new_activity)
new_activity['report_date'] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
transformed.append(new_activity)
return transformed
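# Shape sketch (illustrative; the field names below are assumed examples of a
# Mailchimp email-activity payload, not values taken from this module):
#   response = {"emails": [{"email_address": "a@b.co",
#                           "activity": [{"action": "open"},
#                                        {"action": "click"}]}]}
# get_stream_data() emits one flattened record per activity entry, copying the
# parent email fields onto it and stamping a fresh `report_date`.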
| 2.296875 | 2 |
generic_testing/isclose.py | steverpalmer/GenericTesting | 0 | 12799799 | #!/usr/bin/env python3
# Copyright 2021 <NAME>
"""Extension to math.isclose and cmath.isclose."""
import cmath
import logging
import math
import numbers
LOG = logging.getLogger("isclose")
try:
import version as _version
if not _version.version.is_backwards_compatible_with("1.0.0"):
raise ImportError
except ImportError:
_version = type("_version", (object,), {"Version": lambda self, s: s})()
__all__ = ("version", "isclose", "IsClose")
version = _version.Version("1.1.0")
def isclose(a, b, **kwargs) -> bool:
"""polymorphic, parameterized isclose.
>>> isclose(1.0, 1.0)
True
>>> isclose(0.0, 1.0)
False
>>> isclose(1.0j, 1.0j)
True
>>> isclose(-1.0j, 1.0j)
False
"""
type_a = type(a)
type_b = type(b)
if type_a != type_b and issubclass(type_b, type_a):
x, y = b, a
else:
x, y = a, b
result = NotImplemented
try:
result = x.isclose(y, **kwargs)
except Exception:
pass
if result is NotImplemented:
try:
result = y.isclose(x, **kwargs)
except Exception:
pass
if result is NotImplemented:
rel_tol = kwargs.get("rel_tol", None)
abs_tol = kwargs.get("abs_tol", None)
try:
if isinstance(a, numbers.Real) and isinstance(b, numbers.Real):
result = math.isclose(
float(a),
float(b),
rel_tol=isclose.default_rel_tol
if rel_tol is None
else float(rel_tol),
abs_tol=isclose.default_abs_tol
if abs_tol is None
else float(abs_tol),
)
elif isinstance(a, numbers.Complex) and isinstance(b, numbers.Complex):
result = cmath.isclose(
complex(a),
complex(b),
rel_tol=isclose.default_rel_tol
if rel_tol is None
else float(rel_tol),
abs_tol=isclose.default_abs_tol
if abs_tol is None
else float(abs_tol),
)
elif a is b or a == b:
result = True
else:
difference = abs(a - b)
abs_result = abs_tol is not None and difference <= abs_tol
rel_result = rel_tol is not None and difference <= rel_tol * max(
abs(a), abs(b)
)
result = abs_result or rel_result
except Exception:
pass
if result is NotImplemented and not kwargs.get("return_NotImplemented", None):
raise TypeError(f"cannot compare {a!r} and {b!r}")
return result
isclose.default_rel_tol = 1e-9
isclose.default_abs_tol = 0.0
class IsClose:
"""Allows pre-defined closeness on polymorphic isclose."""
def __init__(self, **kwargs) -> None:
self._kwargs = kwargs
@property
def kwargs(self):
return self._kwargs
def __call__(self, a, b) -> bool:
"""Apply IsClose().
>>> myisclose = IsClose()
>>> myisclose(1.0, 1.0)
True
"""
return isclose(a, b, **self._kwargs)
def close(self):
"""close function.
>>> myisclose = IsClose()
>>> callable(myisclose.close)
True
"""
return self
def notclose(self):
"""not close function.
>>> myisclose = IsClose()
>>> callable(myisclose.notclose)
True
"""
return lambda a, b: not self(a, b)
def much_less_than(self):
"""definitely less function."""
return lambda a, b: a < b and not self(a, b)
def less_than_or_close(self):
"""less or close function."""
return lambda a, b: a < b or self(a, b)
def much_greater_than(self):
"""definitely greater function."""
return lambda a, b: a > b and not self(a, b)
def greater_than_or_close(self):
"""greater or close function."""
return lambda a, b: a > b or self(a, b)
if __name__ == "__main__":
import doctest
doctest.testmod()
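    # Illustrative usage sketch (not part of the original doctests; the
    # tolerance below is an assumed value chosen only to exercise the API):
    within_ppm = IsClose(rel_tol=1e-6)
    assert within_ppm(1.0, 1.0 + 1e-7)
    assert within_ppm.notclose()(1.0, 2.0)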
| 2.578125 | 3 |
Comportamentais/Template Method/main.py | DionVitor/design_pattern | 0 | 12799800 | <gh_stars>0
from abc import ABC, abstractmethod
def print_abstract(string):
print(f'\033[31m{string}\033[0;0m')
def print_concrete(string):
print(f'\033[32m{string}\033[0;0m')
class AbstractClass(ABC):
def template_method(self):
self.operation_one()
self.required_operation_one()
self.operation_two()
self.hook1()
self.required_operation_two()
self.operation_three()
self.hook2()
@staticmethod
def operation_one():
        print_abstract('| Abstract class | I am executing operation 1.')
@staticmethod
def operation_two():
        print_abstract('| Abstract class | I am executing operation 2.')
@staticmethod
def operation_three():
        print_abstract('| Abstract class | I am executing operation 3.')
@abstractmethod
def required_operation_one(self):
pass
@abstractmethod
def required_operation_two(self):
pass
def hook1(self):
pass
def hook2(self):
pass
class ConcreteClass1(AbstractClass):
    def required_operation_one(self):
        print_concrete('| Concrete class 1 | Required operation 1 implemented.')
    def required_operation_two(self):
        print_concrete('| Concrete class 1 | Required operation 2 implemented.')
class ConcreteClass2(AbstractClass):
    def required_operation_one(self):
        print_concrete('| Concrete class 2 | Required operation 1 implemented.')
    def required_operation_two(self):
        print_concrete('| Concrete class 2 | Required operation 2 implemented.')
    def hook1(self):
        print_concrete('| Concrete class 2 | Hook 1 implemented.')
def run(concrete_class): # Must receive a subclass of AbstractClass!
concrete_class.template_method()
if __name__ == '__main__':
run(ConcreteClass1())
print('')
run(ConcreteClass2())
| 3.171875 | 3 |
server/danesfield_server/workflow_steps/run_danesfield_imageless.py | Kitware/Danesfield-App | 25 | 12799801 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc. and Contributors
# Distributed under the Apache License, 2.0 (apache.org/licenses/LICENSE-2.0)
# See accompanying Copyright.txt and LICENSE files for details
###############################################################################
import os
import tempfile
from typing import Dict
from danesfield_server.algorithms.generate_point_cloud import ResultRunDockerCommand
from danesfield_server.workflow import DanesfieldWorkflowException
from docker.types import DeviceRequest
from girder.models.collection import Collection
from girder.models.folder import Folder
from girder.models.user import User
from girder_worker.docker.tasks import docker_run
from girder_worker.docker.transforms.girder import (
GirderUploadVolumePathToFolder,
)
from girder_worker.docker.transforms import BindMountVolume, VolumePath
from danesfield_server.algorithms.common import (
addJobInfo,
createDockerRunArguments,
createGirderClient,
)
from ..constants import DanesfieldStep, DockerImage
from ..workflow_step import DanesfieldWorkflowStep
from ..workflow_utilities import getWorkingSet
from ..models.workingSet import WorkingSet
class RunDanesfieldImageless(DanesfieldWorkflowStep):
"""
    Step that runs the imageless Danesfield workflow on a previously
    generated point cloud.
Supports the following options:
- aoiBBox (required)
"""
def __init__(self):
super(RunDanesfieldImageless, self).__init__("Imageless")
self.addDependency(DanesfieldStep.GENERATE_POINT_CLOUD)
def run(self, jobInfo, outputFolder):
gc = createGirderClient(jobInfo.requestInfo)
baseWorkingSet: Dict = getWorkingSet(DanesfieldStep.INIT, jobInfo)
# Get point cloud working set
pointCloudWorkingSet: Dict = getWorkingSet(
DanesfieldStep.GENERATE_POINT_CLOUD, jobInfo
)
core3dCollection = Collection().createCollection(
name="core3d",
creator=User().getAdmins().next(),
description="",
public=True,
reuseExisting=True,
)
modelsFolder = Folder().findOne(
{
"parentId": core3dCollection["_id"],
"name": "models",
}
)
if modelsFolder is None:
raise DanesfieldWorkflowException(
"Models folder has not been created and populated"
)
# Download models folder
models_folder = tempfile.mkdtemp()
modelsFolderVolume = BindMountVolume(models_folder, models_folder)
gc.downloadFolderRecursive(modelsFolder["_id"], models_folder)
# Get single file, there will only be one
point_cloud_path = tempfile.mktemp(suffix=".las")
pointCloudFile = self.getFiles(pointCloudWorkingSet)[0]
gc.downloadFile(str(pointCloudFile["_id"]), point_cloud_path)
pointCloudFileVolume = BindMountVolume(point_cloud_path, point_cloud_path)
# Create output dir
outputDir = tempfile.mkdtemp()
outputDirVolume = BindMountVolume(host_path=outputDir, container_path=outputDir)
# Create config file
config_file, config_file_path = tempfile.mkstemp(suffix=".ini")
configFileVolume = BindMountVolume(config_file_path, config_file_path)
with open(config_file, "w") as in_config_file:
# Configure paths
paths_section = (
"[paths]\n"
+ f"p3d_fpath = {point_cloud_path}\n"
+ f"work_dir = {outputDir}\n"
# Supply empty dir so no errors are generated
+ f"rpc_dir = {tempfile.mkdtemp()}\n"
)
in_config_file.write(f"{paths_section}\n")
# Set name prefix for output files
aoi_section = (
"[aoi]\n" + f"name = {baseWorkingSet['name'].replace(' ', '_')}"
)
in_config_file.write(f"{aoi_section}\n")
# Ground sample distancy of output imagery in meters per pixel
# Default is 0.25
params_section = "[params]\n" + "gsd = 0.25\n"
in_config_file.write(f"{params_section}\n")
# Parameters for the roof geon extraction step
roof_section = (
"[roof]\n"
+ f"model_dir = {models_folder}/Columbia Geon Segmentation Model\n"
+ "model_prefix = dayton_geon"
)
in_config_file.write(f"{roof_section}\n")
# Ensure folder exists
existing_folder_id = baseWorkingSet.get("output_folder_id")
if existing_folder_id is None:
output_folder = Folder().createFolder(
parent=core3dCollection,
parentType="collection",
name=f"(Imageless) {baseWorkingSet['name']}",
reuseExisting=True,
)
existing_folder_id = output_folder["_id"]
baseWorkingSet["output_folder_id"] = output_folder["_id"]
WorkingSet().save(baseWorkingSet)
containerArgs = [
"python",
"/danesfield/tools/run_danesfield.py",
config_file_path,
]
resultHooks = [
# - Fix output folder permissions
ResultRunDockerCommand(
DockerImage.DANESFIELD,
command=["chown", "-R", f"{os.getuid()}:{os.getgid()}", outputDir],
volumes=outputDirVolume._repr_json_(),
),
# Upload results
GirderUploadVolumePathToFolder(
VolumePath(".", volume=outputDirVolume),
existing_folder_id,
),
]
asyncResult = docker_run.delay(
device_requests=[DeviceRequest(count=-1, capabilities=[["gpu"]])],
shm_size="8G",
volumes=[
pointCloudFileVolume,
configFileVolume,
outputDirVolume,
modelsFolderVolume,
],
**createDockerRunArguments(
image=f"{DockerImage.DANESFIELD}:latest",
containerArgs=containerArgs,
jobTitle=f"Run imageless workflow on [{baseWorkingSet['name']}]",
jobType=self.name,
user=jobInfo.requestInfo.user,
resultHooks=resultHooks,
),
)
# Add info for job event listeners
job = asyncResult.job
job = addJobInfo(
job,
jobId=jobInfo.jobId,
stepName=self.name,
workingSetId=baseWorkingSet["_id"],
)
return job
| 1.65625 | 2 |
api_v1/migrations/0001_initial.py | crazy-coding/django-rest-swagger-master | 1 | 12799802 | <gh_stars>1-10
# Generated by Django 3.1.1 on 2020-09-23 07:05
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=254)),
('permission_page', models.TextField(null=True)),
],
options={
'db_table': 'group',
},
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('email', models.EmailField(max_length=254, unique=True)),
('name', models.CharField(max_length=254, null=True)),
('password', models.CharField(max_length=128)),
('is_admin', models.BooleanField(default=False)),
('is_active', models.BooleanField(default=True)),
('date_joined', models.DateField(auto_now=True)),
('group', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='api_v1.group')),
],
options={
'db_table': 'user',
},
),
]
| 1.953125 | 2 |
xos/xos/urls.py | wathsalav/xos | 0 | 12799803 | from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# This is the generated API
from xosapi import *
from core.views.legacyapi import LegacyXMLRPC
from core.views.services import ServiceGridView
#from core.views.analytics import AnalyticsAjaxView
from core.models import *
from rest_framework import generics
from core.dashboard.sites import SitePlus
from django.http import HttpResponseRedirect
#from core.xoslib import XOSLibDataView
admin.site = SitePlus()
admin.autodiscover()
def redirect_to_apache(request):
""" bounce a request back to the apache server that is running on the machine """
apache_url = "http://%s%s" % (request.META['HOSTNAME'], request.path)
return HttpResponseRedirect(apache_url)
urlpatterns = patterns('',
# Examples:
url(r'^stats', 'core.views.stats.Stats', name='stats'),
url(r'^observer', 'core.views.observer.Observer', name='observer'),
url(r'^serviceGrid', ServiceGridView.as_view(), name='serviceGrid'),
url(r'^docs/', include('rest_framework_swagger.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^', include(admin.site.urls)),
#url(r'^profile/home', 'core.views.home'),
# url(r'^admin/xoslib/(?P<name>\w+)/$', XOSLibDataView.as_view(), name="xoslib"),
url(r'^xmlrpc/legacyapi/$', 'core.views.legacyapi.LegacyXMLRPC', name='xmlrpc'),
# url(r'^analytics/(?P<name>\w+)/$', AnalyticsAjaxView.as_view(), name="analytics"),
url(r'^files/', redirect_to_apache),
#Adding in rest_framework urls
url(r'^xos/', include('rest_framework.urls', namespace='rest_framework')),
# XOSLib rest methods
url(r'^xoslib/', include('core.xoslib.methods', namespace='xoslib')),
) + get_REST_patterns()
| 1.992188 | 2 |
EKMRC/build_graph_concepts/build_conceptnet/retrieve_1hop.py | yyHaker/EKMRC-is-your-need | 4 | 12799804 | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : retrieve_1hop.py
@Author : yyhaker
@Contact : <EMAIL>
@Time : 2020/04/07 16:33:58
'''
"""
Retrieve from the knowledge graph: for a given token, retrieve three parts:
1. sub-graph
    (1) retrieve the triples whose head or tail contains the token, and build sub-graph G
2. sub-graph triples
3. core_entity
"""
import sys
sys.path.append(".")
import random
import pickle
import argparse
import os
import nltk
import logging
import string
from tqdm import tqdm
from nltk.corpus import wordnet as wn
from multiprocessing import Pool
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
PROCESSES = 60
def extract_en_triples(conceptnet_path):
"""检索出所有英文的三元组"""
en_triples = []
with open(conceptnet_path, 'r', encoding="utf-8") as f:
for line in f.readlines():
ls = line.split('\t')
if ls[2].startswith('/c/en/') and ls[3].startswith('/c/en/'):
"""
Some preprocessing:
- Remove part-of-speech encoding.
- Split("/")[-1] to trim the "/c/en/" and just get the entity name, convert all to
- Lowercase for uniformity.
"""
rel = ls[1].split("/")[-1].lower()
head = del_pos(ls[2]).split("/")[-1].lower()
tail = del_pos(ls[3]).split("/")[-1].lower()
if not head.replace("_", "").replace("-", "").isalpha():
continue
if not tail.replace("_", "").replace("-", "").isalpha():
continue
triple = (head, rel, tail)
en_triples.append(triple)
return en_triples
def extract_triples(conceptnet_path):
"""检索出conceptnet中的三元组"""
conceptnet_triples = []
with open(conceptnet_path, 'r', encoding="utf-8") as f:
for line in f.readlines():
ls = line.split(",")
head = ls[0].strip()
rel = ls[1].strip()
tail = ls[2].strip()
triple = (head, rel, tail)
conceptnet_triples.append(triple)
return conceptnet_triples
# def build_mapping(triples, entity_path, relation_path):
# """build mapping of entities and triples"""
# entity2id = {}
# relation2id = {}
# for triple in triples:
# head, rel, tail = triple[0], triple[1], triple[2]
# if head not in entity2id.keys():
# entity2id[head] = len(entity2id)
# if tail not in entity2id.keys():
# entity2id[tail] = len(entity2id)
# if rel not in relation2id.keys():
# relation2id[rel] = len(relation2id)
# with open(entity_path, 'w') as f_e:
# for entity, idx in entity2id.items():
# f_e.write(entity + " " + str(idx))
# f_e.write('\n')
# with open(relation_path, 'w') as f_r:
# for relation, idx in relation2id.items():
# f_r.write(relation + " " + str(idx))
# f_r.write('\n')
# id2entity = {v:k for k,v in entity2id.items()}
# id2relation = {v:k for k,v in relation2id.items()}
# return entity2id, id2entity, relation2id, id2relation
def get_concept_mapping(entity_path, relation_path):
"""read entity and relation mapping file"""
entity2id = {}
relation2id = {}
with open(entity_path, 'r', encoding="utf-8") as f:
for line in f.readlines():
ls = line.split(" ")
# pass first line
if len(ls) <= 1:
continue
entity = ls[0].strip()
idx = int(ls[1].strip())
entity2id[entity] = idx
with open(relation_path, 'r', encoding="utf-8") as f:
for line in f.readlines():
ls = line.split(" ")
# pass first line
if len(ls) <= 1:
continue
rel = ls[0].strip()
idx = int(ls[1].strip())
relation2id[rel] = idx
return entity2id, relation2id
def search_triples(token, conceptnet_triples, limit=20):
"""检索出头或者尾部包含该词的三元组"""
triples = []
core_entitys = set()
# search triples
for triple in conceptnet_triples:
head, rel, tail = triple[0], triple[1], triple[2]
if token in head.split("_") or token in tail.split("_"):
triples.append(triple)
# limit retrieved knowledge here
if len(triples) > limit:
break
if token in head.split("_"):
core_entitys.add(head)
if token in tail.split("_"):
core_entitys.add(tail)
# define core entity, choose the shortest
core_entitys = list(core_entitys)
if len(core_entitys) != 0:
min_len = len(core_entitys[0])
min_entity = core_entitys[0]
for entity in core_entitys:
if len(entity) < min_len:
min_len = len(entity)
min_entity = entity
core_entity = min_entity
else:
core_entity = None
return triples, core_entity
def search_triple_neighbor(cur_triple, conceptnet_triples):
"""检索出三元组的相邻的三元组"""
neighbor_triples = []
cur_head, cur_rel, cur_tail = cur_triple[0], cur_triple[1], cur_triple[2]
for triple in conceptnet_triples:
if triple == cur_triple:
continue
head, rel, tail = triple[0], triple[1], triple[2]
if cur_head == head or cur_head == tail or cur_tail == head or cur_tail == tail:
neighbor_triples.append(triple)
return neighbor_triples
def build_graph(triples):
"""连接相同的实体构建子图, 返回子图G"""
# x : [num_nodes, num_node_features]
# edge : [2, num_edges]
# edge_attr : [num_edges, num_edge_features]
nodes = []
edges = []
edges_attr = []
token_triples = []
for triple in triples:
head, rel, tail = triple[0], triple[1], triple[2]
# remove empty entity triple
if head == "" or head == " ":
continue
if tail == "" or tail == " ":
continue
# add nodes
if head not in nodes:
nodes.append(head)
if tail not in nodes:
nodes.append(tail)
# add edge
edges.append([head, tail])
edges.append([tail, head])
edges_attr.append(rel)
edges_attr.append(rel)
token_triples.append(triple)
assert len(edges) == len(edges_attr)
return nodes, edges, edges_attr, token_triples
def build_graph_for_token(token, conceptnet_triples):
"""根据给定的token,构建子图"""
contained_triples, core_entity = search_triples(token, conceptnet_triples)
nodes, edges, edges_attr, token_triples = build_graph(contained_triples)
return nodes, edges, edges_attr, token_triples, core_entity
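# Illustrative sketch (the triples below are assumed toy rows, not real
# ConceptNet data):
#   triples = [("dog", "isa", "animal"), ("dog", "relatedto", "pet_dog")]
#   nodes, edges, edges_attr, graph_triples, core = \
#       build_graph_for_token("dog", triples)
#   # nodes -> ["dog", "animal", "pet_dog"]
#   # edges -> [["dog", "animal"], ["animal", "dog"],
#   #           ["dog", "pet_dog"], ["pet_dog", "dog"]]
#   # edges_attr -> ["isa", "isa", "relatedto", "relatedto"]
#   # core -> "dog" (the shortest entity containing the token)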
def retrieve_tokens_graph(index, token_part, conceptnet_triples, stopwords, args):
"""retrieve tokens graph"""
logger.info("begin run function {} at process {}".format(retrieve_tokens_graph, os.getpid()))
token2datas = {}
for token in tqdm(token_part):
if token in set(string.punctuation):
logger.info('{} is punctuation, skipped!'.format(token))
# punctuation_cnt += 1
continue
if args.no_stopwords and token in stopwords:
logger.info('{} is stopword, skipped!'.format(token))
# stopword_cnt += 1
continue
if args.ignore_length > 0 and len(token) <= args.ignore_length:
logger.info('{} is too short, skipped!'.format(token))
continue
# build graph for token here
nodes, edges, edges_attr, token_triples, core_entity = build_graph_for_token(token, conceptnet_triples)
token2data = {}
token2data["sub_graph"] = (nodes, edges, edges_attr)
token2data["graph_triples"] = token_triples
token2data["core_entity"] = core_entity
token2datas[token] = token2data
with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(index)), 'wb') as fout:
pickle.dump(token2datas, fout)
logger.info('Finished dumping retrieved token graphs {}'.format(index))
def del_pos(s):
"""
Deletes part-of-speech encoding from an entity string, if present.
:param s: Entity string.
:return: Entity string with part-of-speech encoding removed.
"""
if s.endswith("/n") or s.endswith("/a") or s.endswith("/v") or s.endswith("/r"):
s = s[:-2]
return s
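# e.g. del_pos("/c/en/dog/n") -> "/c/en/dog"; strings without a part-of-speech
# suffix pass through unchanged.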
def retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb):
"""retrieve entity and relation embeddings"""
entity2emb = {}
relation2emb = {}
for token, data in token2datas.items():
graph_triples = data["graph_triples"]
for triple in graph_triples:
head, rel, tail = triple[0], triple[1], triple[2]
if head not in entity2emb:
entity2emb[head] = entity_emb[entity2id[head]]
if rel not in relation2emb:
relation2emb[rel] = relation_emb[relation2id[rel]]
if tail not in entity2emb:
entity2emb[tail] = entity_emb[entity2id[tail]]
return entity2emb, relation2emb
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--train_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/train.tokenization.cased.data',
help='token file of train set')
parser.add_argument('--eval_token', type=str, default='EKMRC/data/ReCoRD_tokenization/tokens_self/dev.tokenization.cased.data',
help='token file of dev set')
parser.add_argument('--conceptnet_path', type=str, default='EKMRC/data/conceptnet/conceptNet_process.txt', help='conceptnet triple path')
parser.add_argument('--entity_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2id.txt', help="entity2id path")
parser.add_argument('--relation_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2id.txt', help="relation2id path")
parser.add_argument('--entity_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity_emb.pkl', help="entity emb path")
parser.add_argument('--relation_emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/rel_emb.pkl', help="relation emb path")
parser.add_argument('--entity2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/entity2emb.txt', help="entity2emb path")
parser.add_argument('--relation2emb_path', type=str, default='EKMRC/build_graph_concepts/concept_embs/relation2emb.txt', help='relation2emb path')
parser.add_argument('--output_dir', type=str, default='EKMRC/build_graph_concepts/retrieve_result/one_hop', help='output directory')
parser.add_argument('--no_stopwords', action='store_true', default=True, help='ignore stopwords')
parser.add_argument('--ignore_length', type=int, default=0, help='ignore words with length <= ignore_length')
args = parser.parse_args()
# load ConceptNet here
logger.info("Begin loading concept triples...")
conceptnet_triples = extract_triples(args.conceptnet_path)
logger.info('Finished loading concept english triples.')
logger.info("sample five triples...")
for i in range(5):
triple = random.choice(conceptnet_triples)
logger.info(triple)
# # build mappings of entities and relations(all ConceptNet)
# entity2id, id2entity, relation2id, id2relation = build_mapping(conceptnet_triples, args.entity_path, args.relation_path)
# logger.info("Finished mapping of relations and entities.")
# get concept mapping
logger.info("get concept mapping...")
entity2id, relation2id = get_concept_mapping(args.entity_path, args.relation_path)
# load pickled samples
logger.info('Begin to load tokenization results...')
train_samples = pickle.load(open(args.train_token, 'rb'))
dev_samples = pickle.load(open(args.eval_token, 'rb'))
logger.info('Finished loading tokenization results.')
# build token set
all_token_set = set()
for sample in train_samples + dev_samples:
for token in sample['query_tokens'] + sample['document_tokens']:
all_token_set.add(token)
logger.info('Finished making tokenization results into token set.')
# load stopwords
stopwords = set(nltk.corpus.stopwords.words('english'))
logger.info('Finished loading stopwords list.')
# mk directory
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# retrive neighbor triples and build sub-graph
logger.info('Begin to retrieve neighbor triples and build sub-graph...')
# token2graph = dict()
# stopword_cnt = 0
# punctuation_cnt = 0
all_token_set = list(all_token_set)
# split all_token_set to processes parts and deal with multi-processing
all_token_parts = []
part_token_nums = int(len(all_token_set) / PROCESSES)
for i in range(PROCESSES):
if i != PROCESSES - 1:
cur_token_set = all_token_set[i * part_token_nums: (i+1) * part_token_nums]
else:
cur_token_set = all_token_set[i * part_token_nums: ]
all_token_parts.append(cur_token_set)
# multi-processing
logger.info("Begin to deal with {} processes...".format(PROCESSES))
p = Pool(PROCESSES)
for i, part in enumerate(all_token_parts):
p.apply_async(retrieve_tokens_graph, args=(i, part, conceptnet_triples, stopwords, args,))
p.close()
p.join()
logger.info("all processes done!")
# combine all results
logger.info('Finished retrieving token graphs, combine all result...')
token2datas = {}
for i in range(PROCESSES):
with open(os.path.join(args.output_dir, 'retrived_token_graphs_{}.data'.format(i)), 'rb') as fin:
token2data = pickle.load(fin)
token2datas.update(token2data)
logger.info("combine all results done!")
logger.info('{} / {} tokens retrieved at lease 1 graph.'.format(len(token2datas), len(all_token_set)))
with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'wb') as fout:
pickle.dump(token2datas, fout)
logger.info('Finished dumping retrieved token graphs.')
# with open(os.path.join(args.output_dir, 'retrived_token_graphs_1hop.data'), 'rb') as f_in:
# token2datas = pickle.load(f_in)
logger.info("save retrieved entity and relation embeddings...")
with open(args.entity_emb_path, 'rb') as f1:
entity_emb = pickle.load(f1)
with open(args.relation_emb_path, 'rb') as f2:
relation_emb = pickle.load(f2)
entity2emb, relation2emb = retrieved_entity_rel_emb(token2datas, entity2id, relation2id, entity_emb, relation_emb)
with open(args.entity2emb_path, 'w', encoding='utf-8') as f:
for entity, emb in entity2emb.items():
assert len(emb) == 100
if entity == "" or entity == " ":
logger.info("empty entity: {}".format(entity))
f.write(entity + " " + " ".join(map(str, emb)) + "\n")
with open(args.relation2emb_path, 'w', encoding="utf-8") as f:
for rel, emb in relation2emb.items():
assert len(emb) == 100
f.write(rel + " " + " ".join(map(str, emb)) + "\n")
logger.info("For all KG, {}/{} retrieved entities used, {}/{} retrieved relations used.".format(
len(entity2emb), len(entity_emb), len(relation2emb), len(relation_emb)))
if __name__ == '__main__':
main() | 2.203125 | 2 |
fauth/face.py | shadownetz/fauth | 0 | 12799805 | <gh_stars>0
import base64
import face_recognition
from django.core.files.base import ContentFile
from django.core.files import File
from django.conf import settings
class FauthImage:
def __init__(self, dataURI: str, *, name: str = 'temp'):
self.image_uri = dataURI
self.image_format, self.image_str = dataURI.split(';base64,')
self.image_ext = self.image_format.split('/')[-1]
self.image_dir = settings.FAUTH_IMAGE_DIR + f'/image.{self.image_ext}'
self.default_name = f'{name}.{self.image_ext}'
def get_file(self) -> File:
image = ContentFile(base64.b64decode(self.image_str), name=self.default_name)
return File(image)
def get_path(self) -> str:
try:
file = open(self.image_dir, 'xb')
except FileExistsError:
file = open(self.image_dir, 'wb')
file.write(base64.b64decode(self.image_str))
file.close()
return self.image_dir
def get_face_locations_from_base64(base64String: str) -> list:
fauthImage = FauthImage(base64String)
# image_path = fauthImage.get_path() # image dir
image_file = fauthImage.get_file() # image file object
image = face_recognition.load_image_file(image_file)
face_locations = face_recognition.face_locations(image)
return face_locations
def compare_faces(image1, image2) -> dict:
first_image = face_recognition.load_image_file(image1)
first_image_encodings = face_recognition.face_encodings(first_image)
second_image = face_recognition.load_image_file(image2)
second_image_encodings = face_recognition.face_encodings(second_image)
    # Only compare when a face was detected in both images
if second_image_encodings and first_image_encodings:
return {
'result': face_recognition.compare_faces([first_image_encodings[0]], second_image_encodings[0], 0.4),
'message': ''
}
return {
'result': [],
'message': 'No face detected!'
}
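# Usage sketch (illustrative; the data URI and file names below are assumed):
#   locations = get_face_locations_from_base64("data:image/png;base64,<...>")
#   outcome = compare_faces("known.jpg", "candidate.jpg")
#   # outcome["result"] is a one-element list of booleans when both images
#   # contain a detectable face; otherwise it is [] and outcome["message"]
#   # explains the failure.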
| 2.625 | 3 |
engine/utils/xml_json_process.py | torrotitans/torro_community | 1 | 12799806 | <filename>engine/utils/xml_json_process.py
#!/usr/bin/python
# -*- coding: UTF-8 -*
import json
from json import JSONDecodeError
import xmltodict
from common.common_response_code import response_code
def xml_to_json(xml_str):
"""
:param xml_str:
:return:
"""
xml_parse = xmltodict.parse(xml_str)
json_str = json.dumps(xml_parse, indent=1).replace('\\', '\\\\')
return json_str
def json_to_xml(json_str):
"""
:param json_str:
:return:
"""
xml_str = xmltodict.unparse(json_str, pretty=1)
return xml_str
def is_none(request_param):
"""
:param request_param:
:return:
"""
if isinstance(request_param, list):
for index, a in enumerate(request_param):
if isinstance(a, str):
b = request_param.copy()
if a == None:
del b[index]
else:
c = a.copy()
for k, v in c.items():
if v == None:
del a[k]
if isinstance(v, list):
b = v.copy()
for index, a in enumerate(b):
if a == None:
del v[index]
if isinstance(request_param, dict):
c = request_param.copy()
for k, v in c.items():
if v == None:
del request_param[k]
if isinstance(v, list):
b = v.copy()
for index, a in enumerate(b):
if a == None:
del v[index]
return request_param
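# Minimal usage sketch (illustrative; the sample payloads below are assumed):
if __name__ == '__main__':
    sample_xml = "<user><name>torro</name></user>"
    as_json = xml_to_json(sample_xml)
    back_to_xml = json_to_xml(json.loads(as_json))
    cleaned = is_none({"a": 1, "b": None, "c": [1, None, 2]})
    # cleaned -> {"a": 1, "c": [1, 2]}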
| 2.984375 | 3 |
demo/plugins/bootstrap_button/cms_plugins.py | andrewschoen/django-cms-demo | 7 | 12799807 | from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from django.utils.translation import ugettext as _
from django.conf import settings
from models import BootstrapButtonPlugin
class BootstrapButtonPlugin(CMSPluginBase):
model = BootstrapButtonPlugin
name = _("Button")
text_enabled = True
render_template = "plugins/bootstrap_button.html"
def render(self, context, instance, placeholder):
if instance.mailto:
link = u"mailto:%s" % _(instance.mailto)
elif instance.url:
link = _(instance.url)
elif instance.page_link:
link = instance.page_link.get_absolute_url()
else:
link = ""
context.update({
'link': link,
'size': instance.button_size,
'type': instance.button_type,
'label': instance.label,
'new_window': instance.new_window,
})
return context
def icon_src(self, instance):
return settings.STATIC_URL + u"cms/images/plugins/link.png"
plugin_pool.register_plugin(BootstrapButtonPlugin) | 1.921875 | 2 |
test_model/models.py | Lairion/defaultproject | 0 | 12799808 | from django.db import models
# Create your models here.
class Category(models.Model):
"""
Description: Model Description
"""
name = models.CharField(max_length=50)
class Meta:
pass
class Skill(models.Model):
"""
Description: Model Description
"""
name = models.CharField(max_length=50)
category = models.ForeignKey('Category', on_delete=models.CASCADE)
class Meta:
pass | 2.515625 | 3 |
test/test_images.py | mwang87/SMART_NMR | 0 | 12799809 | <reponame>mwang87/SMART_NMR
import sys
sys.path.insert(0, "..")
import smart_utils
import os
import glob
def test_draw():
test_files = glob.glob("Data/*")
for test_file in test_files:
print(test_file)
smart_utils.draw_nmr(test_file, "{}.png".format(os.path.basename(test_file))) | 2.734375 | 3 |
modneat/modneat_settings.py | katomasahiro10/modpy | 0 | 12799810 | # nn parameters
INPUT_NUM = 2
OUTPUT_NUM = 1
NORMAL_NUM_UPPER_LIMIT = 5
NORMAL_NUM_LOWER_LIMIT = 2
MODULATION_NUM_UPPER_LIMIT = 2
MODULATION_NUM_LOWER_LIMIT = 2
NEURON_NUM_UPPER_LIMIT = 5
CONNECTION_NUM_UPPER_LIMIT = 10
CONNECTION_NUM_LOWER_LIMIT = 1
WEIGHT_UPPER_LIMIT = 1.0
WEIGHT_LOWER_LIMIT = -1.0
BIAS_UPPER_LIMIT = 1.0
BIAS_LOWER_LIMIT = -1.0
EVOLUTION_PARAM_UPPER_LIMIT = 1.0
EVOLUTION_PARAM_LOWER_LIMIT = -1.0
EPSIRON_LOWER_LIMIT = 0.01
EPSIRON_UPPER_LIMIT = 1.0
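
# Example (illustrative, not part of the original API): a mutated weight can be
# clamped back into its allowed range with
#     w = max(WEIGHT_LOWER_LIMIT, min(WEIGHT_UPPER_LIMIT, w))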
| 1.726563 | 2 |
evaluate_recordings/config.py | technologiestiftung/otc-toolkit | 4 | 12799811 | from os.path import dirname, abspath, join
DIR_PATH = dirname(abspath(__file__))
OTC_TOOLKIT_PATH = abspath(join(DIR_PATH, '..'))
PATH_TO_RECORDINGS = "data"
STATIONS = ['ecdf', 'citylab']
BOARDS = ['nano', 'tx2', 'xavier']
COUNTER_LINE_COORDS = {'ecdf':
# {'ecdf-lindner': {"point1": {"x": 718, "y": 173}, Coords from first run, bad lines
# "point2": {"x": 702, "y": 864}},
# "cross": {"point1": {"x": 515, "y": 494},
# "point2": {"x": 932, "y": 377}}},
{"bundesstrasse": {"point1": {"x": 1046, "y": 132}, "point2": {"x": 1211, "y": 226}},
"lindner": {"point1": {"x": 393, "y": 166}, "point2": {"x": 718, "y": 72}},
"walking_bundesstrasse": {"point1": {"x": 1104, "y": 200}, "point2": {"x": 975, "y": 258}},
"walking_lindner": {"point1": {"x": 568, "y": 150}, "point2": {"x": 642, "y": 235}}},
# 'citylab':
# {"point1": {"x": 34, "y": 740}, "point2": {"x": 1433,
# "y": 103}}
"citylab": {
"platzderluftbruecke": {"point1": {"x": 541, "y": 445}, "point2": {"x": 960, "y": 179}}}
}
# tx2: same line for both directions going across two lanes
CLASSES = ["car", "truck", "bicycle", "bus", "motorbike"]
# CLASSES = ["car", "truck", "person", "bus"] # changed for second ecdf-recording
# COUNTER_LINE_NAMES = {
# "ecdf": {"a4ad8491-c790-4078-9092-94ac1e3e0b46": "ecdf-lindner", "882e3178-408a-4e3e-884f-d8d2290b47f0": "cross"}}
COUNTER_LINE_NAMES = {"ecdf": {
"c9f71c06-6baf-47c3-9ca2-4c26676b7336": "bundesstrasse",
"6c393a8f-a84f-4e31-8670-bfeb9e1cfadc": "lindner",
"240885bb-636e-41f2-8448-bfcdbabd42b5": "walking_bundesstrasse",
"25b11f4a-0d23-4878-9050-5b5a06834adc": "walking_lindner"
},
"citylab": {"a7317e7a-85da-4f08-8efc-4e90a2a2b2b8": "platzderluftbruecke"}
}
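
# Illustrative sketch (not part of the original config): which side of a counter
# line a detection centroid falls on follows from the sign of the 2-D cross
# product; the sample point below is a made-up value.
if __name__ == "__main__":
    _line = COUNTER_LINE_COORDS["citylab"]["platzderluftbruecke"]
    _x1, _y1 = _line["point1"]["x"], _line["point1"]["y"]
    _x2, _y2 = _line["point2"]["x"], _line["point2"]["y"]
    _px, _py = 700, 300  # hypothetical detection centroid
    _side = (_x2 - _x1) * (_py - _y1) - (_y2 - _y1) * (_px - _x1)
    print(_side)  # the sign flips when the centroid crosses the line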
| 2.09375 | 2 |
examples/management/get_user.py | ZygusPatryk/amqpstorm | 140 | 12799812 | <gh_stars>100-1000
from amqpstorm import management
if __name__ == '__main__':
# If using a self-signed certificate, change verify=True to point at your CA bundle.
# You can disable certificate verification for testing by passing in verify=False.
API = management.ManagementApi('https://rmq.amqpstorm.io:15671', 'guest',
'guest', verify=True)
API.user.create('my_user', 'password')
# Get a user
print(API.user.get('my_user'))
    # Clean up the test user again.
    API.user.delete('my_user')

    # Getting a user that does not exist throws an ApiError.
try:
API.user.get('NOT_FOUND')
except management.ApiError as why:
if why.error_code == 404:
print('User not found')
| 2.0625 | 2 |
setup.py | hurali97/makeReact | 0 | 12799813 | <gh_stars>0
import re
from setuptools import setup
version = re.search(
    r'^__version__\s*=\s*"(.*)"',
    open('makeReact/script.py').read(),
    re.M
).group(1)
with open("README.md", "r") as f:
long_description = f.read()
setup(
name = "makeReact",
packages = ["makeReact"],
entry_points = {
"console_scripts": ['makeReact = makeReact.script:main']
},
version = version,
description = "makeReact is a python package which helps react and react-native developer to speed-up their develoment process.",
long_description=long_description,
long_description_content_type="text/markdown",
author = "<NAME>",
author_email = "<EMAIL>",
) | 1.601563 | 2 |
tests/test_archive_operations.py | jpenney/pdar | 3 | 12799814 | <reponame>jpenney/pdar<gh_stars>1-10
# This file is part of pdar.
#
# Copyright 2011 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tests
import pdar
import os
class ArchiveTest(tests.ArchiveTestCase):
def test_0001_basics(self):
'''ensure PDArchive was created, and contains patches'''
self.assertIsNotNone(self.pdarchive)
self.assertGreater(len(self.pdarchive.patches),0)
def test_0002_targets(self):
'''validate correct targets based on dataset'''
targets = [patch.target for patch in self.pdarchive.patches]
changed_files = self.changed_files
self.assertItemsNotIn(self.same_files, targets,
"unchanged files should not have patches in "
"pdar")
self.assertItemsIn(changed_files, targets,
"all modified files should have patches in "
"pdar")
        self.assertEqual(len(changed_files), len(targets),
                         "number of modified items should match "
                         "number of patches in pdar")
def test_0003_digest_values(self):
'''validate `orig_digest` does not ever match `dest_digest`'''
for entry in self.pdarchive.patches:
self.assertNotEqual(entry.orig_digest, entry.dest_digest)
def test_0003_digest_orig(self):
'''validate `orig_digest` against files'''
for entry in self.pdarchive.patches:
path = os.path.join(self.orig_dir, entry.target)
self.assertFileHashEqual(
path, entry.orig_digest, entry.hash_type,
'orig hash mismatch: %s (%s): %s'
% (entry.target, entry.type_code, str(entry.__dict__)))
def test_0003_digest_dest(self):
'''validate `dest_digest` against files'''
for entry in self.pdarchive.patches:
path = os.path.join(self.mod_dir, entry.target)
self.assertFileHashEqual(
path, entry.dest_digest, entry.hash_type,
'dest hash mismatch: %s (%s): %s'
% (entry.target, entry.type_code, str(entry.__dict__)))
def test_0004_apply_archive(self):
'''Apply in memory pdar and validate results
- clone original dataset
- apply loaded pdar file to cloned dataset
- filecmp.cmpfiles against destination dataset
'''
self._test_apply_pdarchive(self.pdarchive)
class ArchiveFileTest(tests.ArchiveFileTestCase):
def test_0001_basics(self):
'''ensure pdar file was written to disk'''
self.assertTrue(os.path.exists(self.pdarchive_path))
class LoadedArchiveFileTest(tests.ArchiveFileTestCase):
def setUp(self):
super(LoadedArchiveFileTest, self).setUp()
self._loaded_pdarchive = self.load_pdarchive()
@property
def loaded_pdarchive(self):
return self._loaded_pdarchive
def test_0001_basics(self):
'''Ensure pdar file was loaded'''
self.assertIsNotNone(self.loaded_pdarchive)
self.assertTrue(isinstance(self.loaded_pdarchive, pdar.PDArchive))
def test_0002_count(self):
'''Compare number of entries'''
self.assertEqual(len(self.pdarchive.patches),
len(self.loaded_pdarchive.patches))
def test_0003_targets(self):
        '''Compare `target` values for each entry'''
self.assertItemsEqual(
[entry.target for entry in self.loaded_pdarchive.patches],
[entry.target for entry in self.pdarchive.patches])
def test_0003_orig_digests(self):
'''Compare `orig_digest` values for each entry'''
self.assertItemsEqual(
[entry.orig_digest for entry in self.loaded_pdarchive.patches],
[entry.orig_digest for entry in self.pdarchive.patches])
def test_0003_dest_digests(self):
'''Compare `dest_digest` values for each entry'''
self.assertItemsEqual(
[entry.dest_digest for entry in self.loaded_pdarchive.patches],
[entry.dest_digest for entry in self.pdarchive.patches])
def test_0004_apply_archive(self):
'''Apply loaded pdar file and validate results
- clone original dataset
- apply loaded pdar file to cloned dataset
- filecmp.cmpfiles against destination dataset
'''
self._test_apply_pdarchive(self.loaded_pdarchive)
if __name__ == "__main__":
tests.main()
| 2.21875 | 2 |
Dataset/Leetcode/valid/56/149.py | kkcookies99/UAST | 0 | 12799815 | class Solution(object):
def XXX(self, intervals):
"""
        Easy win: sort first, then decide from the relation between each new
        interval and the last interval in the result whether to merge or append.
"""
if not intervals: return []
intervals = sorted(intervals)
res = [intervals[0]]
for i in range(1, len(intervals)):
if intervals[i][0] <= res[-1][1]:
res[-1] = [res[-1][0],max(res[-1][1], intervals[i][1])]
else:
res.append(intervals[i])
return res
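
# Illustrative check (not in the original snippet):
# Solution().XXX([[1,3],[2,6],[8,10],[15,18]]) -> [[1,6],[8,10],[15,18]]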
| 3.25 | 3 |
experiments/examplereader.py | abdelabdalla/deepmind-research | 0 | 12799816 | <reponame>abdelabdalla/deepmind-research
import functools
import json
import os
import tensorflow as tf
from learning_to_simulate import reading_utils
def _read_metadata(data_path):
with open(os.path.join(data_path, 'metadata.json'), 'rt') as fp:
return json.loads(fp.read())
data_path = "/tmp/WaterDrop"
metadata = _read_metadata(data_path)
ds = tf.data.TFRecordDataset([os.path.join(data_path, 'test.tfrecord')])
ds = ds.map(functools.partial(
reading_utils.parse_serialized_simulation_example, metadata=metadata))
n = ds.make_one_shot_iterator().get_next()
sess = tf.Session()
end = sum(1 for _ in tf.python_io.tf_record_iterator(os.path.join(data_path, 'test.tfrecord')))
value = []
for i in range(0, end):
print(str(i))
v = sess.run(n)
value.append(v)
| 2.53125 | 3 |
serving/keras_load_saved_model.py | oushu1zhangxiangxuan1/HolmesNER | 0 | 12799817 | <reponame>oushu1zhangxiangxuan1/HolmesNER
# import tensorflow as tf
from tensorflow.python.keras.saving.saved_model.load import KerasObjectLoader
from tensorflow.python.saved_model.load import load_internal
from tensorflow.python.keras.saving.saved_model.load import RevivedModel
from tensorflow.python.keras.saving import saving_utils
from tensorflow.python.saved_model import loader_impl
model_path = 'output/saved_model/cls/1599723701'
loader_impl.parse_saved_model(model_path)
model = load_internal(model_path,
tags=['serve'], loader_cls=KerasObjectLoader)
if not isinstance(model, RevivedModel):
raise RuntimeError("Can not load model")
if model._training_config is None:
raise RuntimeError("Model _training_config is None")
model.compile(
**saving_utils.compile_args_from_training_config(model._training_config))
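# NOTE (assumption): the empty lists below are placeholders; the real model
# expects four properly shaped input arrays.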
test_data = [[], [], [], []]
model.predict(test_data)
| 2.3125 | 2 |
update.py | lilshim/collective-actions-in-tech | 0 | 12799818 | import os
import textwrap
import argparse
import pandas as pd
from pathlib import Path
from utils.action import Action, Actions
from utils.markdown import (
update_markdown_document,
SUMMARY_ID,
MarkdownData,
MarkdownDocument,
)
from utils.files import FileClient
README = Path(
os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, "README.md"))
)
CSV = Path(
os.path.realpath(os.path.join(os.path.abspath(__file__), os.pardir, "actions.csv"))
)
def _get_parser():
parser = argparse.ArgumentParser(
description=textwrap.dedent(
"""
This script is used to:
- clean up files under /actions
- export the actions to a csv
- export the actions to the readme
"""
),
epilog=textwrap.dedent(
"""
# Update files in action folder
$ python update.py --files-cleanup
# Update actions.csv based on files
$ python update.py --files-to-csv
# Update README.md based on files
$ python update.py --files-to-readme
"""
),
formatter_class=argparse.RawDescriptionHelpFormatter,
)
parser.add_argument(
"--files-to-csv",
action="store_true",
help="Update data.csv based on the action folder."
)
parser.add_argument(
"--files-to-readme",
action="store_true",
help="Update the table in the README.md based on the action folder."
)
parser.add_argument(
"--files-cleanup",
action="store_true",
help="Update the action folder by cleaning it up and sorting it."
)
parser.add_argument(
"--csv-to-files",
action="store_true",
help="Update the action folder from the actions.csv."
)
args = parser.parse_args()
return args
def update_files_from_csv():
print(f"Updating files in the /actions folder from actions.csv...")
df = pd.read_csv(CSV)
actions = Actions.read_from_df(df)
actions.to_files()
def update_files():
print(f"Updating files in the /actions folder...")
fc = FileClient()
files = fc.get_all_files()
actions = Actions.read_from_files(files)
actions.to_files()
def update_csv_from_files():
print(f"Updating actions.csv from files in the /actions folder...")
fc = FileClient()
files = fc.get_all_files()
actions = Actions.read_from_files(files)
df = actions.to_df()
df.to_csv(CSV)
def update_readme_from_files():
print(f"Updating README.md from files in the /actions folder...")
fc = FileClient()
files = fc.get_all_files()
actions = Actions.read_from_files(files)
actions.sort()
readme = Path(README)
md_document = readme.read_text()
md_document = update_markdown_document(md_document, Actions.action_id, actions)
readme.write_text(md_document)
if __name__ == "__main__":
args = _get_parser()
if args.files_cleanup:
update_files()
if args.files_to_csv:
update_csv_from_files()
if args.files_to_readme:
update_readme_from_files()
if args.csv_to_files:
update_files_from_csv()
| 2.984375 | 3 |
main.py | Hyrtsi/opencv-tools | 0 | 12799819 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.widgets import Slider
import cv2 as cv
FILE_NAME = 'res/mountain-and-lake.jpg'
# https://matplotlib.org/3.3.1/gallery/widgets/slider_demo.html
# https://sodocumentation.net/matplotlib/topic/6983/animations-and-interactive-plotting
# img:
#   image in RGB
#
# satadj:
#   1.0 means no change; below 1.0 desaturates toward greyscale,
#   and around 1.5 is already extremely strong
def saturate(img, satadj):
imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype("float32")
(h, s, v) = cv.split(imghsv)
s = s*satadj
s = np.clip(s,0,255)
imghsv = cv.merge([h,s,v])
imgrgb = cv.cvtColor(imghsv.astype("uint8"), cv.COLOR_HSV2RGB)
# assume: return rgb
return imgrgb
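
# e.g. saturate(img, 1.2) returns a mildly more saturated copy (illustrative).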
def brightness(img, exp_adj):
imghsv = cv.cvtColor(img, cv.COLOR_RGB2HSV).astype("float32")
(h, s, v) = cv.split(imghsv)
v = v*exp_adj
v = np.clip(v,0,255)
imghsv = cv.merge([h,s,v])
imgrgb = cv.cvtColor(imghsv.astype("uint8"), cv.COLOR_HSV2RGB)
# assume: return rgb
return imgrgb
def plt_hist(ax, img, color):
colors = ['b', 'g', 'r']
k = colors.index(color)
histogram = cv.calcHist([img],[k],None,[256],[0,256])
plt_handle, = ax.plot(histogram, color=color)
return plt_handle
def main():
fig, ax = plt.subplots(1, 2,figsize=(27.0,27.0))
ax1 = ax[0] # The histogram
ax2 = ax[1] # The image
ax2.set_xlim(0.0,1280.0)
fig.suptitle('Image toner', fontsize=16)
# Calculate the initial value for the image
img = cv.imread(cv.samples.findFile(FILE_NAME)) # assume: BGR
img = cv.cvtColor(img, cv.COLOR_BGR2RGB) # plt assumes RGB
# Draw the image
# Take the handle for later
imobj = ax2.imshow(img)
# Axes for the saturation and brightness
ax_sat = plt.axes([0.25, .03, 0.50, 0.02])
ax_exp = plt.axes([0.25, 0.01, 0.50, 0.02])
# Slider
sat_slider = Slider(ax_sat, 'Saturation', 0, 20, valinit=1)
exp_slider = Slider(ax_exp, 'Brightness', -10, 10, valinit=1)
# Histogram
colors = ('r', 'g', 'b')
lines = []
for k,color in enumerate(colors):
histogram = cv.calcHist([img],[k],None,[256],[0,256])
line, = ax1.plot(histogram,color=color)
lines.append(line)
def update_sat(val):
newimg = img
# update image
newimg = saturate(newimg, val)
newimg = brightness(newimg, exp_slider.val)
imobj.set_data(newimg)
# update also the histogram
colors = ('r', 'g', 'b')
for k,color in enumerate(colors):
histogram = cv.calcHist([newimg],[k],None,[256],[0,256])
lines[k].set_ydata(histogram)
# redraw canvas while idle
fig.canvas.draw_idle()
def update_exp(val):
newimg = img
newimg = saturate(newimg, sat_slider.val)
newimg = brightness(newimg, val)
imobj.set_data(newimg)
# update also the histogram
        colors = ('r', 'g', 'b')
for k,color in enumerate(colors):
histogram = cv.calcHist([newimg],[k],None,[256],[0,256])
lines[k].set_ydata(histogram)
# redraw canvas while idle
fig.canvas.draw_idle()
# call update function on slider value change
sat_slider.on_changed(update_sat)
exp_slider.on_changed(update_exp)
plt.show()
main()
| 3.3125 | 3 |
mirari/SV/migrations/0006_auto_20190310_1346.py | gcastellan0s/mirariapp | 0 | 12799820 | # Generated by Django 2.0.5 on 2019-03-10 19:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('SV', '0005_auto_20190305_0116'),
]
operations = [
migrations.RemoveField(
model_name='cut',
name='user',
),
migrations.AlterField(
model_name='cut',
name='serial',
field=models.IntegerField(default=1),
),
migrations.AlterField(
model_name='ticketproducts',
name='alias',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AlterField(
model_name='ticketproducts',
name='ieps',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='ticketproducts',
name='iva',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='ticketproducts',
name='price',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='ticketproducts',
name='productName',
field=models.CharField(blank=True, max_length=250, null=True),
),
migrations.AlterField(
model_name='ticketproducts',
name='quantity',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='ticketproducts',
name='total',
field=models.FloatField(default=0),
),
]
| 1.515625 | 2 |
miscellaneous/old_code/resources/template.py | wehriam/awspider | 2 | 12799821 | from twisted.web.resource import Resource
from genshi.template import TemplateLoader
import os
import cStringIO, gzip
class TemplateResource(Resource):
isLeaf = True
def __init__(self, path = None):
self.path = path
        self.loader = TemplateLoader(
            search_path=[os.path.join(os.path.dirname(__file__), '../web_templates')],
            auto_reload=True)
def render_GET(self, request):
if self.path is not None:
content = self._render_template( self.path.replace("docs/", "") + ".genshi" )
else:
content = self._render_template( request.path.replace("docs/", "").strip("/") + ".genshi" )
content = content.replace("\t", "")
encoding = request.getHeader("accept-encoding")
if encoding and "gzip" in encoding:
zbuf = cStringIO.StringIO()
zfile = gzip.GzipFile(None, 'wb', 9, zbuf)
if isinstance( content, unicode ):
zfile.write( unicode(content).encode("utf-8") )
elif isinstance( content, str ):
zfile.write( unicode(content, 'utf-8' ).encode("utf-8") )
else:
zfile.write( unicode(content).encode("utf-8") )
zfile.close()
request.setHeader("Content-encoding","gzip")
return zbuf.getvalue()
else:
return content
def _render_template(self, template, data=None):
if data is None:
data = {}
t = self.loader.load( template )
return t.generate( data=data ).render('xhtml', doctype='xhtml')
| 2.09375 | 2 |
496_Next-Greater-Element-I.py | Coalin/Daily-LeetCode-Exercise | 3 | 12799822 | # Method I:
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
res = []
for num in nums1:
index = 0
cur_num_index = 0
while index <= len(nums2)-1:
if nums2[index] == num:
cur_num_index = index
break
else:
index += 1
# print(cur_num_index)
while cur_num_index <= len(nums2)-1:
if nums2[cur_num_index] > num:
res.append(nums2[cur_num_index])
break
elif cur_num_index == len(nums2)-1:
res.append(-1)
break
else:
cur_num_index += 1
# print(res)
return res
# Method II: monotonic stack + lookup dict, O(len(nums1) + len(nums2)) time.
class Solution:
def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:
if not nums1:
return []
res = []
my_stack = []
my_dict = {}
my_stack.append(nums2[0])
for num in nums2[1:]:
while my_stack:
if num > my_stack[-1]:
my_dict[my_stack.pop()] = num
else:
break
my_stack.append(num)
for key in my_stack:
my_dict[key] = -1
for i in nums1:
res.append(my_dict[i])
return res
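
# Illustrative check (not in the original snippet):
# Solution().nextGreaterElement([4,1,2], [1,3,4,2]) -> [-1, 3, -1]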
| 3.28125 | 3 |
main_exe.py | pkudba/SCL | 7 | 12799823 | <gh_stars>1-10
import os
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
import yaml
from tqdm.auto import tqdm
from tensorboardX import SummaryWriter
from torch.utils.data.dataloader import DataLoader
| 1.226563 | 1 |
boost/libs/iterator/doc/generate.py | randolphwong/mcsema | 1,155 | 12799824 | <reponame>randolphwong/mcsema<filename>boost/libs/iterator/doc/generate.py
#!/usr/bin/python
# Copyright <NAME> 2004. Use, modification and distribution is
# subject to the Boost Software License, Version 1.0. (See accompanying
# file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#
# Generate html, TeX, and PDF versions of all the source files
#
import os
import sys
from syscmd import syscmd
from sources import sources
if 0:
for s in sources:
syscmd('boosthtml %s' % s)
else:
extensions = ('html', 'pdf')
if len(sys.argv) > 1:
extensions = sys.argv[1:]
all = [ '%s.%s' % (os.path.splitext(s)[0],ext)
for ext in extensions
for s in sources
]
print 'make %s' % ' '.join(all)
syscmd('make %s' % ' '.join(all))
| 2.125 | 2 |
np_processor/processor/np_ssd_post.py | laobadao/TF_VS_Caffe | 0 | 12799825 | import numpy as np
from platformx.plat_tensorflow.tools.processor.np_utils import shape_utils, \
anchor_generator_builder, box_list_ops, box_list, box_coder_builder, post_processing_builder, \
visualization_utils as vis_util
from platformx.plat_tensorflow.tools.processor.np_utils import standard_fields as fields
from platformx.plat_tensorflow.tools.processor import model_config
import config
from PIL import Image
import matplotlib
matplotlib.use('Agg')
from platformx.plat_tensorflow.tools.processor.np_utils import label_map_util
from scipy import misc
import os
BOX_ENCODINGS = 'box_encodings'
CLASS_PREDICTIONS_WITH_BACKGROUND = 'class_predictions_with_background'
BASE_BoxEncodingPredictor = "_BoxEncodingPredictor"
BASE_ClassPredictor = "_ClassPredictor"
PPN_BoxPredictor_0 = "WeightSharedConvolutionalBoxPredictor_BoxPredictor"
PPN_ClassPredictor_0 = "WeightSharedConvolutionalBoxPredictor_ClassPredictor"
BASE_PPN_BoxPredictor = "_BoxPredictor"
BASE_PPN_ClassPredictor = "WeightSharedConvolutionalBoxPredictor"
PATH_TO_LABELS = config.cfg.POSTPROCESSOR.PATH_TO_LABELS
def run_ssd_tf_post(preprocessed_inputs, result_middle=None):
boxes_encodings_np = []
classes_predictions_with_background_np = []
feature_maps_np = []
for i in range(6):
for key, value in result_middle.items():
if str(i) + BASE_BoxEncodingPredictor in key:
print(str(i) + BASE_BoxEncodingPredictor + ": ", value.shape)
boxes_encodings_np.append(value)
break
if i == 0:
if PPN_BoxPredictor_0 in key:
print("PPN_BoxPredictor_0:", value.shape)
boxes_encodings_np.append(value)
break
else:
if str(i) + BASE_PPN_BoxPredictor in key:
print(str(i) + BASE_PPN_BoxPredictor, value.shape)
boxes_encodings_np.append(value)
break
for key, value in result_middle.items():
if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor not in key:
print(str(i) + BASE_ClassPredictor+ ": ", value.shape)
classes_predictions_with_background_np.append(value)
break
if i == 0:
if PPN_ClassPredictor_0 in key:
print(PPN_ClassPredictor_0 + ":", value.shape)
classes_predictions_with_background_np.append(value)
break
else:
if str(i) + BASE_ClassPredictor in key and BASE_PPN_ClassPredictor in key:
print(str(i) + BASE_ClassPredictor + ":", value.shape)
classes_predictions_with_background_np.append(value)
break
for key, value in result_middle.items():
if "FeatureExtractor" in key and "fpn" not in key:
print("key {} value {}".format(key, value.shape))
feature_maps_np.append(value)
if len(feature_maps_np) < 1:
key_dict = {}
for key, value in result_middle.items():
if "FeatureExtractor" in key and "fpn"in key:
key_dict[key] = value.shape[1]
sorted_key_dict = sorted(key_dict.items(), key=lambda x: x[1], reverse=True)
for key, value in sorted_key_dict:
feature_maps_np.append(result_middle[key])
input_shape = preprocessed_inputs.shape
true_image_shapes = np.array([input_shape[1], input_shape[2], input_shape[3]], dtype=np.int32)
true_image_shapes = true_image_shapes.reshape((1, 3))
post_result = post_deal(boxes_encodings_np, classes_predictions_with_background_np, feature_maps_np,
preprocessed_inputs,
true_image_shapes)
show_detection_result(post_result)
return post_result
def show_detection_result(result):
print("PATH_TO_LABELS:", PATH_TO_LABELS)
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
# NUM_CLASSES
NUM_CLASSES = config.cfg.POSTPROCESSOR.NUM_CLASSES
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES,
use_display_name=True)
category_index = label_map_util.create_category_index(categories)
result['detection_classes'] = result[
'detection_classes'][0].astype(np.uint8)
result['detection_boxes'] = result['detection_boxes'][0]
result['detection_scores'] = result['detection_scores'][0]
img_dir = config.cfg.PREPROCESS.IMG_LIST
file_list = os.listdir(img_dir)
IMG_PATH = os.path.join(img_dir, file_list[0])
print("IMG_PATH:", IMG_PATH)
image = Image.open(IMG_PATH)
image_np = load_image_into_numpy_array(image)
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
result['detection_boxes'],
result['detection_classes'],
result['detection_scores'],
category_index,
instance_masks=result.get('detection_masks'),
use_normalized_coordinates=True,
line_thickness=8)
# IMAGE_SIZE = (12, 8)
# plt.figure(figsize=IMAGE_SIZE)
misc.imsave('detection_result_ssd.png', image_np)
def load_image_into_numpy_array(image):
(im_width, im_height) = image.size
return np.array(image.getdata()).reshape(
(im_height, im_width, 3)).astype(np.uint8)
def post_deal(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None,
true_image_shapes=None):
"""
    SSD model post-processor.
:param boxes_encodings:
:param classes_predictions_with_background:
:param feature_maps:
:param preprocessed_inputs:
:param true_image_shapes:
:return:
"""
prediction_dict, anchors = last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps,
preprocessed_inputs)
postprocessed_tensors = postprocess(anchors, prediction_dict, true_image_shapes)
return _add_output_tensor_nodes(postprocessed_tensors)
def _add_output_tensor_nodes(postprocessed_tensors):
print("------------------ _add_output_tensor_nodes ------------------")
detection_fields = fields.DetectionResultFields
label_id_offset = 1
boxes = postprocessed_tensors.get(detection_fields.detection_boxes)
scores = postprocessed_tensors.get(detection_fields.detection_scores)
classes = postprocessed_tensors.get(
detection_fields.detection_classes) + label_id_offset
keypoints = postprocessed_tensors.get(detection_fields.detection_keypoints)
masks = postprocessed_tensors.get(detection_fields.detection_masks)
num_detections = postprocessed_tensors.get(detection_fields.num_detections)
if isinstance(num_detections, list):
num_detections = num_detections[0]
elif isinstance(num_detections, float):
num_detections = int(num_detections)
elif isinstance(num_detections, np.ndarray):
num_detections = int(num_detections[0])
print("=============== num_detections :", num_detections)
outputs = {}
print("scores:", scores)
scores = scores.flatten()
    # TODO: read the zero/one padding behaviour below from the config file instead of hard-coding it
if scores.shape[0] < 100:
raw_shape = 100
else:
raw_shape = scores.shape[0]
scores_1 = scores[0:num_detections]
print("scores_1:", scores_1)
scores_2 = np.zeros(shape=raw_shape - num_detections)
scores = np.hstack((scores_1, scores_2))
scores = np.reshape(scores, (1, scores.shape[0]))
outputs[detection_fields.detection_scores] = scores
classes = classes.flatten()
classes_1 = classes[0:num_detections]
print("classes_1:", classes_1)
classes_2 = np.ones(shape=raw_shape - num_detections)
classes = np.hstack((classes_1, classes_2))
classes = np.reshape(classes, (1, classes.shape[0]))
outputs[detection_fields.detection_classes] = classes
boxes_1 = boxes[:, 0:num_detections]
print("boxes_1:", boxes_1)
boxes_2 = np.zeros(shape=(1, raw_shape - num_detections, 4))
boxes = np.hstack((boxes_1, boxes_2))
outputs[detection_fields.detection_boxes] = boxes
outputs[detection_fields.num_detections] = num_detections
if keypoints is not None:
outputs[detection_fields.detection_keypoints] = keypoints
if masks is not None:
outputs[detection_fields.detection_masks] = masks
return outputs
def last_predict_part(boxes_encodings, classes_predictions_with_background, feature_maps, preprocessed_inputs=None):
print("------------------ last_predict_part ------------------")
"""Predicts unpostprocessed tensors from input tensor.
This function takes an input batch of images and runs it through the forward
    pass of the network to yield unpostprocessed predictions.
A side effect of calling the predict method is that self._anchors is
populated with a box_list.BoxList of anchors. These anchors must be
constructed before the postprocess or loss functions can be called.
Args:
boxes_encodings:
classes_predictions_with_background:
feature_maps:
preprocessed_inputs: a [batch, height, width, channels] image tensor.
true_image_shapes: int32 tensor of shape [batch, 3] where each row is
of the form [height, width, channels] indicating the shapes
of true images in the resized images, as resized images can be padded
with zeros.
"""
anchor_generator = anchor_generator_builder.build()
num_predictions_per_location_list = anchor_generator.num_anchors_per_location()
# print("num_predictions_per_location_list:", num_predictions_per_location_list)
prediction_dict = post_processor(boxes_encodings, classes_predictions_with_background,
feature_maps, num_predictions_per_location_list)
image_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_inputs)
feature_map_spatial_dims = get_feature_map_spatial_dims(
feature_maps)
anchors_list = anchor_generator.generate(
feature_map_spatial_dims,
im_height=image_shape[1],
im_width=image_shape[2])
anchors = box_list_ops.concatenate(anchors_list)
box_encodings = np.concatenate(prediction_dict['box_encodings'], axis=1)
if box_encodings.ndim == 4 and box_encodings.shape[2] == 1:
box_encodings = np.squeeze(box_encodings, axis=2)
class_predictions_with_background = np.concatenate(
prediction_dict['class_predictions_with_background'], axis=1)
predictions_dict = {
'preprocessed_inputs': preprocessed_inputs,
'box_encodings': box_encodings,
'class_predictions_with_background':
class_predictions_with_background,
'feature_maps': feature_maps,
'anchors': anchors.get()
}
return predictions_dict, anchors
def get_feature_map_spatial_dims(feature_maps):
"""Return list of spatial dimensions for each feature map in a list.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i].
Returns:
a list of pairs (height, width) for each feature map in feature_maps
"""
feature_map_shapes = [
shape_utils.combined_static_and_dynamic_shape(
feature_map) for feature_map in feature_maps
]
return [(shape[1], shape[2]) for shape in feature_map_shapes]
def post_processor(boxes_encodings, classes_predictions_with_background, image_features,
num_predictions_per_location_list):
print("------------------ post_processor ------------------")
"""Computes encoded object locations and corresponding confidences.
Args:
image_features: A list of float tensors of shape [batch_size, height_i,
width_i, channels_i] containing features for a batch of images.
num_predictions_per_location_list: A list of integers representing the
number of box predictions to be made per spatial location for each
feature map.
Returns:
box_encodings: A list of float tensors of shape
[batch_size, num_anchors_i, q, code_size] representing the location of
the objects, where q is 1 or the number of classes. Each entry in the
list corresponds to a feature map in the input `image_features` list.
class_predictions_with_background: A list of float tensors of shape
[batch_size, num_anchors_i, num_classes + 1] representing the class
predictions for the proposals. Each entry in the list corresponds to a
feature map in the input `image_features` list.
"""
box_encodings_list = []
class_predictions_list = []
for (image_feature,
num_predictions_per_location,
box_encodings,
class_predictions_with_background) in zip(image_features,
num_predictions_per_location_list,
boxes_encodings,
classes_predictions_with_background):
combined_feature_map_shape = image_feature.shape
box_code_size = config.cfg.POSTPROCESSOR.BOX_CODE_SIZE
new_shape = np.stack([combined_feature_map_shape[0],
combined_feature_map_shape[1] *
combined_feature_map_shape[2] *
num_predictions_per_location,
1, box_code_size])
box_encodings = np.reshape(box_encodings, new_shape)
box_encodings_list.append(box_encodings)
num_classes = config.cfg.POSTPROCESSOR.NUM_CLASSES
num_class_slots = num_classes + 1
class_predictions_with_background = np.reshape(
class_predictions_with_background,
np.stack([combined_feature_map_shape[0],
combined_feature_map_shape[1] *
combined_feature_map_shape[2] *
num_predictions_per_location,
num_class_slots]))
class_predictions_list.append(class_predictions_with_background)
return {BOX_ENCODINGS: box_encodings_list,
CLASS_PREDICTIONS_WITH_BACKGROUND: class_predictions_list}
def postprocess(anchors, prediction_dict, true_image_shapes):
print("------------------ postprocess ------------------")
if ('box_encodings' not in prediction_dict or
'class_predictions_with_background' not in prediction_dict):
raise ValueError('prediction_dict does not contain expected entries.')
preprocessed_images = prediction_dict['preprocessed_inputs']
box_encodings = prediction_dict['box_encodings']
box_encodings = box_encodings
class_predictions = prediction_dict['class_predictions_with_background']
detection_boxes, detection_keypoints = _batch_decode(anchors, box_encodings)
detection_boxes = detection_boxes
detection_boxes = np.expand_dims(detection_boxes, axis=2)
non_max_suppression_fn, score_conversion_fn = post_processing_builder.build(model_config.SSD)
detection_scores_with_background = score_conversion_fn(class_predictions)
detection_scores = detection_scores_with_background[0:, 0:, 1:]
additional_fields = None
if detection_keypoints is not None:
additional_fields = {
fields.BoxListFields.keypoints: detection_keypoints}
(nmsed_boxes, nmsed_scores, nmsed_classes, _, nmsed_additional_fields,
num_detections) = non_max_suppression_fn(
detection_boxes,
detection_scores,
clip_window=_compute_clip_window(
preprocessed_images, true_image_shapes),
additional_fields=additional_fields)
detection_dict = {
fields.DetectionResultFields.detection_boxes: nmsed_boxes,
fields.DetectionResultFields.detection_scores: nmsed_scores,
fields.DetectionResultFields.detection_classes: nmsed_classes,
fields.DetectionResultFields.num_detections:
float(num_detections)
}
if (nmsed_additional_fields is not None and
fields.BoxListFields.keypoints in nmsed_additional_fields):
detection_dict[fields.DetectionResultFields.detection_keypoints] = (
nmsed_additional_fields[fields.BoxListFields.keypoints])
return detection_dict
def _compute_clip_window(preprocessed_images, true_image_shapes):
resized_inputs_shape = shape_utils.combined_static_and_dynamic_shape(
preprocessed_images)
true_heights, true_widths, _ = np.split(true_image_shapes, 3, axis=1)
padded_height = float(resized_inputs_shape[1])
padded_width = float(resized_inputs_shape[2])
    clipped_window = np.stack(
        [np.zeros_like(true_heights), np.zeros_like(true_widths),
         true_heights / padded_height, true_widths / padded_width], axis=1)
    return clipped_window.reshape(1, -1)
def _batch_decode(anchors, box_encodings):
"""Decodes a batch of box encodings with respect to the anchors.
Args:
box_encodings: A float32 tensor of shape
[batch_size, num_anchors, box_code_size] containing box encodings.
Returns:
decoded_boxes: A float32 tensor of shape
[batch_size, num_anchors, 4] containing the decoded boxes.
decoded_keypoints: A float32 tensor of shape
[batch_size, num_anchors, num_keypoints, 2] containing the decoded
keypoints if present in the input `box_encodings`, None otherwise.
"""
combined_shape = shape_utils.combined_static_and_dynamic_shape(
box_encodings)
batch_size = combined_shape[0]
tiled_anchor_boxes = np.tile(
np.expand_dims(anchors.get(), 0), [batch_size, 1, 1])
tiled_anchors_boxlist = box_list.BoxList(
np.reshape(tiled_anchor_boxes, [-1, 4]))
box_coder = box_coder_builder.build("faster_rcnn_box_coder")
decoded_boxes = box_coder.decode(
np.reshape(box_encodings, [-1, box_coder.code_size]),
tiled_anchors_boxlist)
decoded_keypoints = None
if decoded_boxes.has_field(fields.BoxListFields.keypoints):
decoded_keypoints = decoded_boxes.get_field(
fields.BoxListFields.keypoints)
num_keypoints = decoded_keypoints.get_shape()[1]
decoded_keypoints = np.reshape(
decoded_keypoints,
np.stack([combined_shape[0], combined_shape[1], num_keypoints, 2]))
decoded_boxes = np.reshape(decoded_boxes.get(), np.stack(
[combined_shape[0], combined_shape[1], 4]))
return decoded_boxes, decoded_keypoints
| 2.015625 | 2 |
simulations/simulations.py | ashkanbashiri/data_driven_intersection_control | 1 | 12799826 | from vissim_utils.sim import generate_phase_times, run_simulation, printProgressBar
import sys
import time
import datetime
import multiprocessing
import win32com.client as com
import os
def main():
global start_time
start_time = time.time()
fair_scheme = 'fair'
relative_scheme = 'relative'
schemes = [fair_scheme,relative_scheme]
n_phase = 4
c_times = list(xrange(50,155,5))#CYCLE TIMES = [50,55,...,150] seconds
    sum_flows = list(xrange(35,101,5)) # TOTAL_FLOWS = [35, 40, ..., 100]% of Saturation Flow Rate
sum_flows = [float(x)/100 for x in sum_flows]
    lost_times = list(xrange(4,32,4)) # LOST_TIMES = [4, 8, ..., 28] seconds
flow_ratios = [ [0.25, .25, .25, .25], [0.1, 0.4, 0.1, 0.4], [0.2, 0.3, 0.2, 0.3], [0.1, 0.1, 0.4, 0.4],
[0.1, .1, .1, .7], [0.2, 0.2, 0.5, 0.1]]
first_time = 1
num_simulations = len(sum_flows)*len(lost_times)*len(c_times)*len(flow_ratios)*2
print "Starting to run %d simulations" %(num_simulations)
sim_number = 0
last_checkpoint = 1692+489+187+432+1436+324+2657+219+2334+555+1499+637+416+62+336+111+\
99+131+1457+508+183+603+583+1479+503+1890+407+90+1329
global Vissim
close_vis = False
restart_vissim = False
for sum_flow in sum_flows:
for lost_time in lost_times:
for ct in c_times:
for i in xrange(len(flow_ratios)):
ratios = [x * sum_flow for x in flow_ratios[i][:]]
for scheme_number in xrange(2):
_progress(sim_number, num_simulations)
phase_times = generate_phase_times(ct,sum_flow,flow_ratios[i][:],lost_time,n_phase,scheme=schemes[scheme_number])
if sim_number>last_checkpoint:
if close_vis is True:
restart_vissim = True
else:
restart_vissim = False
if sim_number%100 == 0:
close_vis = True
else:
close_vis = False
run_simulation(ct,phase_times,lost_time,ratios,first_time, 'results_sep08_30.csv',
close_vissim=close_vis, reset_vissim=restart_vissim)
first_time = 0
sim_number +=1
    sys.stdout.write('\rAll simulations Completed Successfully!')
def _progress(count, total_size):
global start_time
elapsed_time = int(time.time() - start_time)
sys.stdout.write('\r%.2f%% Completed, ' % (float(count) / float(total_size) * 100.0) +
'\tElapsed Time: {}'.format(str(datetime.timedelta(seconds=elapsed_time))))
sys.stdout.flush()
if __name__ == "__main__":
main()
'''
while not done:
p = multiprocessing.Process(target=run_simulation, args=(ct,phase_times,
lost_time,ratios,first_time,
'results_sep08_2.csv'))
p.start()
p.join(60)
if p.is_alive():
print "Vissim is Not Responding..."
print "Terminating run #{}".format(sim_number)
p.terminate()
continue
#p.join()
else:
first_time=0
done = True
''' | 2.390625 | 2 |
lib/data/graph_dataset/structural_dataset.py | shamim-hussain/egt_pytorch | 10 | 12799827 | <gh_stars>1-10
import numpy as np
import numba as nb
from .graph_dataset import GraphDataset
NODE_FEATURES_OFFSET = 128
EDGE_FEATURES_OFFSET = 8
@nb.njit
def floyd_warshall(A):
n = A.shape[0]
D = np.zeros((n,n), dtype=np.int16)
for i in range(n):
for j in range(n):
if i == j:
pass
elif A[i,j] == 0:
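                # 510 acts as an "infinity" sentinel that still fits in int16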
D[i,j] = 510
else:
D[i,j] = 1
for k in range(n):
for i in range(n):
for j in range(n):
old_dist = D[i,j]
new_dist = D[i,k] + D[k,j]
if new_dist < old_dist:
D[i,j] = new_dist
return D
@nb.njit
def preprocess_data(num_nodes, edges, node_feats, edge_feats):
node_feats = node_feats + np.arange(1,node_feats.shape[-1]*NODE_FEATURES_OFFSET+1,
NODE_FEATURES_OFFSET,dtype=np.int16)
edge_feats = edge_feats + np.arange(1,edge_feats.shape[-1]*EDGE_FEATURES_OFFSET+1,
EDGE_FEATURES_OFFSET,dtype=np.int16)
A = np.zeros((num_nodes,num_nodes),dtype=np.int16)
E = np.zeros((num_nodes,num_nodes,edge_feats.shape[-1]),dtype=np.int16)
for k in range(edges.shape[0]):
i,j = edges[k,0], edges[k,1]
A[i,j] = 1
E[i,j] = edge_feats[k]
D = floyd_warshall(A)
return node_feats, D, E
class StructuralDataset(GraphDataset):
def __init__(self,
distance_matrix_key = 'distance_matrix',
feature_matrix_key = 'feature_matrix',
**kwargs):
super().__init__(**kwargs)
self.distance_matrix_key = distance_matrix_key
self.feature_matrix_key = feature_matrix_key
def __getitem__(self, index):
item = super().__getitem__(index)
num_nodes = int(item[self.num_nodes_key])
edges = item.pop(self.edges_key)
node_feats = item.pop(self.node_features_key)
edge_feats = item.pop(self.edge_features_key)
node_feats, dist_mat, edge_feats_mat = preprocess_data(num_nodes, edges, node_feats, edge_feats)
item[self.node_features_key] = node_feats
item[self.distance_matrix_key] = dist_mat
item[self.feature_matrix_key] = edge_feats_mat
return item
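
# Illustrative sketch (not part of the original module): preprocess a toy,
# directed 3-node path graph; the feature shapes below are assumptions.
if __name__ == "__main__":
    toy_edges = np.array([[0, 1], [1, 2]], dtype=np.int64)
    toy_node_feats = np.zeros((3, 2), dtype=np.int16)
    toy_edge_feats = np.zeros((2, 1), dtype=np.int16)
    feats, dist, edge_mat = preprocess_data(3, toy_edges, toy_node_feats, toy_edge_feats)
    print(dist)  # hop counts; pairs with no directed path are marked 510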
| 2.125 | 2 |
dev/tools/docs/run_doctests.py | awillats/brian2 | 674 | 12799828 | <gh_stars>100-1000
import os
import sphinx
os.chdir('../../../docs_sphinx')
sphinx.main(['sphinx-build', '-b', 'doctest', '.', '../docs', '-D',
'exclude_patterns=reference'])
| 1.320313 | 1 |
mass_flask_core/tests/__init__.py | mass-project/mass_server | 8 | 12799829 | from .flask_test_case import FlaskTestCase
__all__ = [
'FlaskTestCase'
]
| 1.117188 | 1 |
biolearns/metrics/discriminant.py | huangzhii/biolearns | 7 | 12799830 | # Copyright 2020 <NAME>. All rights reserved
# Created on Tue Feb 11 12:29:35 2020
# Author: <NAME>, Purdue University
#
#
# The original code came with the following disclaimer:
#
# This software is provided "as-is". There are no expressed or implied
# warranties of any kind, including, but not limited to, the warranties
# of merchantability and fitness for a given application. In no event
# shall Zhi Huang be liable for any direct, indirect, incidental,
# special, exemplary or consequential damages (including, but not limited
# to, loss of use, data or profits, or business interruption) however
# caused and on any theory of liability, whether in contract, strict
# liability or tort (including negligence or otherwise) arising in any way
# out of the use of this software, even if advised of the possibility of
# such damage.
#
import numpy as np
def fisher_discriminant(H, label):
'''
Parameters
----------
H : Real-valued matrix with columns indicating samples.
label : Class indices.
Returns
-------
E_D : Real scalar value indicating fisher discriminant.
Notes
-----
    This Fisher discriminant is equation (3 a,b) in
    https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3495075 :
    E_D = sum_k N_k*(mu_rk - mu_r)^2 / sum((H - mu_rkn)^2)
    The samples are first sorted by label in ascending order and the same
    ordering is applied to the columns of H; otherwise the denominator would
    be computed against a misaligned mu_rkn.
References
----------
.. [1] <NAME>, <NAME>, Amari SI. A new discriminant NMF algorithm and its
application to the extraction of subtle emotional differences in speech.
Cognitive neurodynamics. 2012 Dec 1;6(6):525-35.
'''
order = np.argsort(label)
H = H[:,order]
label = label[order]
numerator, denominator = 0, 0
mu_rkn = np.zeros((H.shape[0], 0))
mu_r_all = 1/H.shape[1] * np.sum(H, axis = 1)
for k in np.unique(label):
N_k = np.sum(k == label)
mu_rk_block = np.zeros((0, N_k))
for r in range(H.shape[0]):
mu_r = mu_r_all[r]
mu_rk = 1/N_k * np.sum(H[r, k == label])
mu_rk_block = np.concatenate((mu_rk_block, np.array([mu_rk] * N_k).reshape(1,N_k)), axis = 0)
numerator += N_k * (mu_rk - mu_r) ** 2
mu_rkn = np.concatenate((mu_rkn, mu_rk_block), axis = 1)
denominator = np.sum((H - mu_rkn)**2)
E_D = numerator / denominator
return E_D | 2.140625 | 2 |
tests/trinity/core/p2p-proto/test_server.py | jin10086/py-evm | 0 | 12799831 | <filename>tests/trinity/core/p2p-proto/test_server.py
import asyncio
import pytest
import socket
from eth_keys import keys
from cancel_token import CancelToken
from eth.chains.ropsten import RopstenChain, ROPSTEN_GENESIS_HEADER
from eth.db.chain import ChainDB
from eth.db.backends.memory import MemoryDB
from p2p.auth import HandshakeInitiator, _handshake
from p2p.peer import (
PeerPool,
)
from p2p.kademlia import (
Node,
Address,
)
from trinity.protocol.eth.peer import ETHPeer
from trinity.server import Server
from tests.p2p.auth_constants import eip8_values
from tests.trinity.core.dumb_peer import DumbPeer
from tests.trinity.core.integration_test_helpers import FakeAsyncHeaderDB
def get_open_port():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
port = get_open_port()
NETWORK_ID = 99
SERVER_ADDRESS = Address('127.0.0.1', udp_port=port, tcp_port=port)
RECEIVER_PRIVKEY = keys.PrivateKey(eip8_values['receiver_private_key'])
RECEIVER_PUBKEY = RECEIVER_PRIVKEY.public_key
RECEIVER_REMOTE = Node(RECEIVER_PUBKEY, SERVER_ADDRESS)
INITIATOR_PRIVKEY = keys.PrivateKey(eip8_values['initiator_private_key'])
INITIATOR_PUBKEY = INITIATOR_PRIVKEY.public_key
INITIATOR_ADDRESS = Address('127.0.0.1', get_open_port() + 1)
INITIATOR_REMOTE = Node(INITIATOR_PUBKEY, INITIATOR_ADDRESS)
class MockPeerPool:
is_full = False
connected_nodes = {}
def __init__(self):
self._new_peers = asyncio.Queue()
async def start_peer(self, peer):
self.connected_nodes[peer.remote] = peer
self._new_peers.put_nowait(peer)
def is_valid_connection_candidate(self, node):
return True
def __len__(self):
return len(self.connected_nodes)
async def next_peer(self):
return await self._new_peers.get()
def get_server(privkey, address, peer_class):
base_db = MemoryDB()
headerdb = FakeAsyncHeaderDB(base_db)
chaindb = ChainDB(base_db)
chaindb.persist_header(ROPSTEN_GENESIS_HEADER)
chain = RopstenChain(base_db)
server = Server(
privkey,
address.tcp_port,
chain,
chaindb,
headerdb,
base_db,
network_id=NETWORK_ID,
peer_class=peer_class,
)
return server
@pytest.fixture
async def server():
server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, ETHPeer)
await asyncio.wait_for(server._start_tcp_listener(), timeout=1)
yield server
server.cancel_token.trigger()
await asyncio.wait_for(server._close_tcp_listener(), timeout=1)
@pytest.fixture
async def receiver_server_with_dumb_peer():
server = get_server(RECEIVER_PRIVKEY, SERVER_ADDRESS, DumbPeer)
await asyncio.wait_for(server._start_tcp_listener(), timeout=1)
yield server
server.cancel_token.trigger()
await asyncio.wait_for(server._close_tcp_listener(), timeout=1)
@pytest.mark.asyncio
async def test_server_incoming_connection(monkeypatch, server, event_loop):
# We need this to ensure the server can check if the peer pool is full for
# incoming connections.
monkeypatch.setattr(server, 'peer_pool', MockPeerPool())
use_eip8 = False
token = CancelToken("<PASSWORD>")
initiator = HandshakeInitiator(RECEIVER_REMOTE, INITIATOR_PRIVKEY, use_eip8, token)
reader, writer = await initiator.connect()
# Send auth init message to the server, then read and decode auth ack
aes_secret, mac_secret, egress_mac, ingress_mac = await _handshake(
initiator, reader, writer, token)
initiator_peer = ETHPeer(
remote=initiator.remote, privkey=initiator.privkey, reader=reader,
writer=writer, aes_secret=aes_secret, mac_secret=mac_secret,
egress_mac=egress_mac, ingress_mac=ingress_mac, headerdb=server.headerdb,
network_id=NETWORK_ID)
# Perform p2p/sub-proto handshake, completing the full handshake and causing a new peer to be
# added to the server's pool.
await initiator_peer.do_p2p_handshake()
await initiator_peer.do_sub_proto_handshake()
# wait for peer to be processed
await asyncio.wait_for(server.peer_pool.next_peer(), timeout=1)
assert len(server.peer_pool.connected_nodes) == 1
receiver_peer = list(server.peer_pool.connected_nodes.values())[0]
assert isinstance(receiver_peer, ETHPeer)
assert initiator_peer.sub_proto is not None
assert initiator_peer.sub_proto.name == receiver_peer.sub_proto.name
assert initiator_peer.sub_proto.version == receiver_peer.sub_proto.version
assert receiver_peer.privkey == RECEIVER_PRIVKEY
@pytest.mark.asyncio
async def test_peer_pool_connect(monkeypatch, event_loop, receiver_server_with_dumb_peer):
started_peers = []
async def mock_start_peer(peer):
nonlocal started_peers
started_peers.append(peer)
monkeypatch.setattr(receiver_server_with_dumb_peer, '_start_peer', mock_start_peer)
# We need this to ensure the server can check if the peer pool is full for
# incoming connections.
monkeypatch.setattr(receiver_server_with_dumb_peer, 'peer_pool', MockPeerPool())
pool = PeerPool(DumbPeer, FakeAsyncHeaderDB(MemoryDB()), NETWORK_ID, INITIATOR_PRIVKEY, tuple())
nodes = [RECEIVER_REMOTE]
await pool.connect_to_nodes(nodes)
# Give the receiver_server a chance to ack the handshake.
await asyncio.sleep(0.1)
assert len(started_peers) == 1
assert len(pool.connected_nodes) == 1
# Stop our peer to make sure its pending asyncio tasks are cancelled.
await list(pool.connected_nodes.values())[0].cancel()
| 1.84375 | 2 |
src/inpainting.py | DehuiYan/tumorDetection | 0 | 12799832 | <gh_stars>0
#!/usr/bin/env python
# coding=utf-8
'''
Aggregate generated patches into a tumor (cancer) region and embed it into a normal tile.
'''
import os
import cv2
import random
import tools
import numpy as np
import makevocxml
inputGenedir = '../../mydata/dcgan/dcgan_micro_512/'
inputNordir = '../../mydata/dcgan/normal_part/'
outputdir = '../../mydata/dcgan/virtual_dataset/'
outputbboxdir = '../../mydata/dcgan/virtual_dataset_bbox/'
outputxmldir = '../../mydata/dcgan/virtual_dataset_xml/'
outputmaskdir = '../../mydata/dcgan/virtual_dataset_mask/'
tools.mkdir(outputdir)
tools.mkdir(outputbboxdir)
tools.mkdir(outputxmldir)
tools.mkdir(outputmaskdir)
def make_region(gene_list, w_num, h_num):
vstack = []
for i in range(h_num):
hstack = []
for j in range(w_num):
img = cv2.imread(inputGenedir+gene_list[i*w_num+j])
hstack.append(img)
image = np.concatenate(hstack, axis=1)
vstack.append(image)
img_region = np.concatenate(vstack)
return img_region
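
# e.g. with 64x64 patches, w_num=4 and h_num=2 yield a 128x256 RGB region (illustrative).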
def inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number):
x = random.randint(0, nor_size-gene_size*2)
y = random.randint(0, nor_size-gene_size*2)
w_num_max = (nor_size-x)/gene_size
w_num = random.randint(1, w_num_max)
w = w_num * gene_size
h_num_max = (nor_size-y)/gene_size
h_num = random.randint(1, h_num_max)
h = h_num * gene_size
gene_list = random.sample(gene_all_list, w_num*h_num)
nor_list = random.sample(nor_all_list, 1)
img_region = make_region(gene_list, w_num, h_num)
img_part = cv2.imread(inputNordir+nor_list[0])
img_part[y:y+h, x:x+w] = img_region
number += 1
cv2.imwrite(outputdir+str(number)+'.jpg', img_part)
img_part_bbox = cv2.rectangle(img_part, (x,y), (x+w,y+h), (0,255,0), 8)
cv2.imwrite(outputbboxdir+str(number)+'.jpg', img_part_bbox)
bbox_label = []
bbox_label.append([x,y,x+w,y+h,w*h,1, 'Tumor'])
makevocxml.makexml(outputxmldir, number, img_part.shape, bbox_label)
mask = np.zeros((nor_size,nor_size,1), np.uint8)
mask_region = np.zeros((h,w,1), np.uint8)
mask_region[:] = 255
mask[y:y+h, x:x+w] = mask_region
cv2.imwrite(outputmaskdir+str(number)+'.jpg', mask)
return number
if __name__ == "__main__":
gene_size = 64
nor_size = 512
number = 0
total = 20
gene_all_list = []
nor_all_list = []
for parents, dirnames, filenames in os.walk(inputGenedir):
for f in filenames:
gene_all_list.append(f)
for parents, dirnames, filenames in os.walk(inputNordir):
for f in filenames:
nor_all_list.append(f)
for i in range(total):
number = inpainting(gene_all_list, nor_all_list, gene_size, nor_size, number)
| 2.0625 | 2 |
test/test_violence.py | yanzhicong/VAE-GAN | 33 | 12799833 | import os
import sys
sys.path.append('.')
sys.path.append('../')
import numpy as np
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from dataset.violence import Violence
from dataset.tianchi_guangdong_defect import TianChiGuangdongDefect
if __name__ == '__main__':
config = {
"output shape" : [224, 224, 3],
"mil" : False,
"use cache" : True,
"one hot" : True,
"show warning" : True
}
dataset = TianChiGuangdongDefect(config)
indices = dataset.get_image_indices('trainval')
print(len(indices))
img_list = []
for ind in indices:
img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')
# print(label)
dataset.time1 = 0.0
dataset.count = 0
print("")
print("")
print("round 2")
print("")
print("")
for ind in indices:
img, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')
# print(label)
# if img is not None:
# plt.figure(0)
# plt.clf()
# plt.imshow(img)
# plt.pause(1)
config = {
"output shape" : [224, 224, 3],
}
dataset = TianChiGuangdongDefect(config)
indices = dataset.get_image_indices('trainval')
# for ind in indices:
# img_bag, label = dataset.read_image_by_index(ind, phase='trainval', method='supervised')
# print(label)
# if img_bag is not None:
# plt.figure(0)
# plt.clf()
# row = 4
# col = int(len(img_bag) / row)
# print(len(img_bag), row, col)
# for i in range(row):
# for j in range(col):
# plt.subplot(row, col, i * col+j+1)
# plt.imshow(img_bag[i*col+j])
# plt.pause(3)
| 2.578125 | 3 |
rayonix_detector.py | bopopescu/Lauecollect | 0 | 12799834 | <gh_stars>0
from __future__ import with_statement
"""
Remote control of the MAR CCD detector, using <NAME>'s sample remote
control server program "marccd_server_socket" with TCP port number 2222.
Usage example: ccd = rayonix_detector("marccd043.cars.aps.anl.gov:2222")
The server is started from the MarCCD software from the Remote Control
control panel with the second parameter ("Server command" or "Device Database
Server") set to "/home/marccdsource/servers/marccd_server_socket", and the third parameter
("Server Arguments" or "Personal Name") set to "2222".
Or, alternatively, from the command line by the command "hsserver_lagacy".
The server understands the following commands:
start - Puts the CCD to integration mode, no reply
readout,0,filename - Reads out the detector, corrects the image and saves it to a file
no reply
readout,1 - reads a new background image, no reply
get_state - reply is an integer containing six 4-bit fields
bits 0-3: state: 0=idle,8=busy
bits 4-7: acquire
bits 8-11: read
bits 12-15: correct
bits 16-19: write
bits 20-23: dezinger
Each filed contains a 4-bit code, with the following meaning:
0=idle, 1=queued, 2=executing, 4=error
The exception is the 'state' field, which has only 0=idle and 8=busy.
writefile,<filename>,1 - Save the last read image, no reply
set_bin,8,8 - Use 512x512-pixel bin mode, no reply
set_bin,2,2 - Use full readout mode (2048x2048 pixels), no reply
(The 1x1 bin mode with 4096x4096 pixels is not used, because the point-spread
function of the fiber optic taper is large compared to the pixel size)
get_bin - reply is two integer numbers, e.g. "2,2"
get_size_bkg - reply is the pixel dimensions of the current background image, e.g. "2048,2048"
Reference: Rayonix HS detector manual 0.3e
Chapter 9: The Legacy Remote Mode for HS Detector Control
Author: <NAME>
Date created: 2013-09-20
Date last modified: 2018-06-101
"""
__version__ = "4.0.1" # default name "rayonix_detector" may be overridden in subclass
from logging import debug,info,warn,error
import socket
from time import sleep,time
from thread import allocate_lock
class Rayonix_Detector(object):
"""This is to remote control the MAR CCD detector
Using remote protocol version 1"""
name = "rayonix_detector"
from persistent_property import persistent_property
ip_address = persistent_property("ip_address","mx340hs.cars.aps.anl.gov:2222")
ignore_first_trigger = persistent_property("ignore_first_trigger",True)
def __init__(self,name=None):
"""name: used for IP address, in case there is more than one detector"""
if name is not None: self.name = name
self.timeout = 1.0
# This is to make the query method multi-thread safe.
self.lock = allocate_lock()
        # If this flag is set, 'start' automatically reads a background image
        # if there is no valid background image.
self.auto_bkg = True
# Whether to save corrected or raw images.
self.save_raw = False
        # For triggered image acquisition
# 0: the rising edge of the trigger initiates frame transfer/readout
# 1: rising edge starts acquisition,
# falling edge initiates frame transfer/readout
self.bulb_mode = 0
# Keep track of when the detector was last read.
self.last_read = 0.0
# Verbose logging: record every command and reply in /tmp/rayonix_detector.log
self.verbose_logging = True
@property
def connected(self):
from tcp_client import connected
return connected(self.ip_address)
online = connected
def write(self,command):
"""Sends a comman that does not generate a reply"""
from tcp_client import write
write(self.ip_address,command)
def query(self,command):
"""Send a command that generates a reply.
Return the reply"""
self.log("query %r" % command)
from tcp_client import query
return query(self.ip_address,command)
def state_code(self):
"""Status information as integer"""
reply = self.query("get_state").strip("\n\0")
if reply == "": return 0
try: status = int(eval(reply))
except Exception,message:
self.log_error("command 'get_state' generated bad reply %r: %s" % (reply,message))
return 0
# bit 8 and 9 of the state code tell whether the task status of "read"
# is either "queued" or "executing"
if (status & 0x00000300) != 0: self.last_read = time()
return status
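    # Illustrative helper (an addition for clarity, not part of the original
    # Rayonix protocol wrapper): unpack the integer returned by 'get_state'
    # into the six 4-bit task fields described in the module docstring.
    def state_fields(self,status):
        """Sketch: map each field name to its 4-bit code, assuming the
        documented field order (state, acquire, read, correct, write,
        dezinger). E.g. state_fields(0x20)['acquire'] == 2 ('executing')."""
        names = ("state","acquire","read","correct","write","dezinger")
        return dict((name,(status >> 4*i) & 0xF) for i,name in enumerate(names))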
def is_idle (self):
try: status = self.state_code()
except: return True
        # bit mask 0x0444444F masks out the 'state' field and the error flags
if (status & ~0x0444444F) == 0: return True
else: return False
def is_integrating (self):
"tells whether the chip is integrating mode (not reading, not clearing)"
# "acquire" field is "executing"
if not self.connected: return True
return ((self.state_code() & 0x00000020) != 0)
def is_reading (self):
"tells whether the chip is currently being read out"
# bit 8 and 9 of the state code tell whether the task status of "read"
# is either "queued" or "executing"
return ((self.state_code() & 0x00000300) != 0)
def is_correcting (self):
"tells whether the chip is currently being read out"
# bit 8 and 9 of the state code tell whether the task status of "correct"
# is either "queued" or "executing"
return ((self.state_code() & 0x00003000) != 0)
def state(self):
"""Status information as string: idle,integating,reading,writing"""
try: status = self.state_code()
except: return ""
        # bit mask 0x0444444F masks out the 'state' field and the error flags
if (status & ~0x0444444F) == 0: return "idle"
t = []
if (status & 0x0000000F) == 6: t+= ["unavailable"]
if (status & 0x0000000F) == 7: t+= ["error"]
if (status & 0x0000000F) == 8: t+= ["busy"]
if (status & 0x00000010) != 0: t+= ["integrate queued"]
if (status & 0x00000020) != 0: t+= ["integrating"]
if (status & 0x00000040) != 0: t+= ["integrate error"]
if (status & 0x00000100) != 0: t+= ["read queued"]
if (status & 0x00000200) != 0: t+= ["reading"]
if (status & 0x00000400) != 0: t+= ["read error"]
if (status & 0x00001000) != 0: t+= ["correct queued"]
if (status & 0x00002000) != 0: t+= ["correcting"]
if (status & 0x00004000) != 0: t+= ["correct error"]
if (status & 0x00010000) != 0: t+= ["write queued"]
if (status & 0x00020000) != 0: t+= ["writing"]
if (status & 0x00040000) != 0: t+= ["write error"]
if (status & 0x00100000) != 0: t+= ["dezinger queued"]
if (status & 0x00200000) != 0: t+= ["dezingering"]
if (status & 0x00400000) != 0: t+= ["dezinger error"]
if (status & 0x01000000) != 0: t+= ["series queued"]
if (status & 0x02000000) != 0: t+= ["acquiring series"]
if (status & 0x04000000) != 0: t+= ["series error"]
state = ",".join(t)
return state
def start(self,wait=True):
"""Puts the detector into integration mode by stopping the continuous
clearing.
In case the CCD readout is in progess, execution is delayed until the
last readout is finished.
This also acquires a background image, in case there is no valid background
image (after startup or binning changed).
wait: The is a 0.2 s delay until te detectror enters "integrating" state,
(maybe for the clearing to stop?)
When wait=False, do no wait for this to happen.
"""
##t0 = time()
# Wait for the readout of the previous image to finish.
while self.is_reading():
sleep(0.05)
        # Work-around for a bug where the detector remains in "reading" state
# forever. <NAME> 27 Mar 2014
##if time()-t0 > 2.0: self.abort()
# Make sure there is a valid background image. Otherwise, the image
# correction will fail.
if self.auto_bkg: self.update_bkg()
self.write("start")
if not wait: return
while not self.is_integrating() and self.connected: sleep (0.05)
def abort(self):
"""Cancel series acquiation mode"""
self.write("abort")
def readout(self,filename=None):
"""Reads the detector.
If a filename is given, the image is saved as a file.
The image file is written in background as a pipelined operation.
The function returns immediately.
The pathname of the file is interpreted in file system of the server,
not locally.
        If 'save_raw' is true (default: false), the raw image data is saved
        rather than the corrected image.
"""
if filename != None: self.make_directory(filename)
if not self.save_raw:
if filename != None:
self.write("readout,0,"+remote(filename))
else: self.write("readout,0")
else:
if filename != None:
self.write("readout,3,"+remote(filename))
else: self.write("readout,3")
##while not self.is_reading(): sleep(0.05)
self.last_read = time()
def readout_and_save_raw(self,filename):
"""Reads the detector and saves the uncorrected image as a file.
The image file is written in background as a pipelined operation.
The function returns immediately.
The pathname of the file is interpreted in file system of the server,
not locally.
"""
self.make_directory(filename)
self.write("readout,3,"+remote(filename))
self.last_read = time()
def readout_raw(self):
"Reads the detector out without correcting and displaying the image."
self.write("readout,3")
self.last_read = time()
def save_image(self,filename):
"""Saves the last read image to a file.
The pathname of the file is interpreted in file system of the server,
not locally.
"""
self.make_directory(filename)
self.write("writefile,"+remote(filename)+",1")
def save_raw_image(self,filename):
"""Saves the last read image without spatial and uniformity correction
to a file.
The pathname of the file is interpreted in file system of the server,
not locally.
"""
self.make_directory(filename)
self.write("writefile,"+remote(filename)+",0")
def acquire_images_triggered(self,filenames):
"""Acquire a series of images timed by an external hardware
trigger signal.
filenames: list of absolute pathnames. Directory part must be
valid pathname on file system of the Rayonix computer"""
# The detector will ignore an "acquire_images_triggered" command if not
# in "idle" state.
if not self.state() == "idle": self.abort()
while self.state() != "idle": sleep(0.05)
# The "start_series_triggered" command does not allow a list of filenames
# to be specified, but uses auto-generated filenames instead.
# As a work-araound generated a series of symbilic link complying to the
# naming scheme imposed by the 'start_series_triggered' command that
# point ot the real filenames. When the rayonix softawre tries to save
# an image the symblix link redirects is to create an image with
# the specified name.
from os.path import dirname,relpath,islink,exists
from os import symlink,remove
from shutil import rmtree
directory = common_topdir(filenames)
tempdir = directory+"/.rayonix_temp"
try: rmtree(tempdir)
except: pass
makedirs(tempdir)
for i in range(0,len(filenames)):
link = tempdir+"/%06d.rx" % (i+1)
if islink(link) or exists(link): remove(link)
try: pathname = relpath(filenames[i],tempdir)
except Exception,msg:
error("Relative path of %r with respect to %r: %s" %
(filenames[i],tempdir,msg))
pathname = filenames[i]
try: symlink(pathname,link)
except Exception,msg:
error("Cannot create of %r to %r: %s" % (pathname,link,msg))
if not exists(dirname(filenames[i])): makedirs(dirname(filenames[i]))
self.start_series_triggered(len(filenames),tempdir+"/",".rx",6)
# Save location of image files for other applications
from DB import dbput
dbput("rayonix_detector_images.filenames",repr(filenames))
def start_series_triggered(self,n_frames,filename_base,
filename_suffix=".rx",number_field_width=6):
"""Acquire a series of images timed by an exteranal hardware
trigger signal
filename_base: Directory part must be valid pathname on file system of
the Rayonix computer
filename_suffix: including the dot (.)
number_field_width: number of digits for the filename sequence number,
e.g. 6 for 'test000001.rx'"""
# Make sure the directory to write the image to exists.
from os.path import dirname
directory = dirname(filename_base)
makedirs(directory)
filename_base = remote(filename_base)
        # If already in series acquisition mode, cancel it.
if not self.state() == "idle": self.abort()
while self.state() != "idle": sleep(0.05)
# Need a valid background image before starting acquisition.
if self.auto_bkg: self.update_bkg()
if self.bulb_mode == 0 and not self.ignore_first_trigger:
            # The detector software does not save the first image, which is a
            # bad image, when using triggered frame transfer mode. However (as
            # of Jul 2014, version 0.3.10), the detector still requires 11
            # trigger pulses to acquire 10 images.
            # Workaround: Software-trigger the detector once after starting a series.
self.trigger_signal_type = "Software"
# start_series,n_frames,first_frame_number=1,integration_time=0,
# interval_time=0,frame_trigger_type,series_trigger_type=0,
# filename_base,filename_suffix,number_field_width
# 0 = not triggered, 1= triggered frame transfer, 2 = bulb mode, 3 = LCLS mode
frame_trigger_type = 2 if self.bulb_mode else 1
self.write("start_series,%d,1,0,0,%d,0,%s,%s,%d" %
(n_frames,frame_trigger_type,filename_base,filename_suffix,number_field_width))
while self.state() != "acquiring series": sleep(0.05)
if self.bulb_mode == 0 and not self.ignore_first_trigger:
self.trigger()
# Wait for the first (suppressed) image readout to complete.
sleep(self.readout_time)
self.trigger_signal_type = "Opto"
def trigger(self):
"""Software-trigger the detector"""
self.write("trigger,0.001")
while "busy" in self.state(): sleep(0.05)
def get_trigger_signal_type(self):
"""'Opto','Opto Inverted','CMOS Pulldown','CMOS Pullup',
'CMOS Pulldown Inerted','CMOS Pullup Inerted''Software'"""
return self.query("get_trigger_signal_type")
def set_trigger_signal_type(self,value):
self.write("set_trigger_signal_type,%s" % value)
while "busy" in self.state(): sleep(0.05)
trigger_signal_type = property(get_trigger_signal_type,set_trigger_signal_type)
def get_bin_factor(self):
try: return int(self.query("get_bin").split(",")[0])
except: return
def set_bin_factor(self,n):
if self.bin_factor == n: return
if not self.state() == "idle": self.abort()
while self.state() != "idle": sleep(0.05)
self.write("set_bin,"+str(n)+","+str(n))
# After a bin factor change it takes about 2 s before the new
# bin factor is read back.
t = time()
while self.get_bin_factor() != n and time()-t < 3: sleep (0.1)
bin_factor = property(get_bin_factor,set_bin_factor,
doc="Readout X and Y bin factor")
def read_bkg(self):
"""Reads a fresh the backgound image, which is substracted from every image after
readout before the correction is applied.
"""
if not self.is_idle(): self.abort()
while not self.is_idle(): sleep(0.05)
self.write("readout,1") # read the CCD and stores the result as background
while not self.is_idle(): sleep(0.05)
self.last_read = time()
def image_size(self):
"""Width and height of the image in pixels at the current bin mode"""
try: return int(self.query("get_size").split(",")[0])
except: return 0
def filesize(self,bin_factor):
"""Image file size in bytes including headers
bin_facor: 2,4,8,16"""
image_size = 7680/bin_factor # MS340HS
headersize = 4096
image_nbytes = 2*image_size**2
filesize = headersize+image_nbytes
return filesize
def bkg_image_size(self): # does not work with protocol v1 (timeout)
"""Width and height of the current background image in pixels.
This value is important to know if the bin factor is changed.
        If the background image does not have the same number of pixels
        as the last read image, the correction as well as saving to file will fail.
At startup, the background image is empty and this value is 0.
"""
try: return int(self.query("get_size_bkg").split(",")[0])
except: return 0
def update_bkg(self):
"""Updates the backgound image if needed, for instance after the server has
been restarted or after the bin factor has been changed.
"""
if not self.bkg_valid(): self.read_bkg()
def bkg_valid(self):
"""Does detector software have a the backgound image for the current
bin mode, which is substracted from every image after readout before
the correction is applied."""
return self.bkg_image_size() == self.image_size()
    # Verbose logging is controlled by the 'verbose_logging' attribute set in
    # __init__; this class-level flag appears to be unused.
    logging = False
@property
def readout_time(self):
"""Estimated readout time in seconds. Changes with 'bin_factor'."""
safetyFactor = 1
from numpy import nan
# Readout rate in frames per second as function of bin factor:
readout_rate = {1: 2, 2: 10, 3: 15, 4: 25, 5: 40, 6: 60, 8: 75, 10: 120}
bin_factor = self.bin_factor
if bin_factor in readout_rate: read_time = 1.0/readout_rate[bin_factor]
else: read_time = nan
return read_time*safetyFactor
def make_directory(self,filename):
"""Make sure that the directory of teh given filename exists by create it,
if necessary."""
if filename is None or filename == "": return
from os.path import dirname
directory = dirname(filename)
if directory == "": return
makedirs(directory)
def log_error(self,message):
"""For error messages.
Display the message and append it to the error log file.
If verbose logging is enabled, it is also added to the transcript."""
from sys import stderr
if len(message) == 0 or message[-1] != "\n": message += "\n"
t = timestamp()
stderr.write("%s: %s: %s" % (t,self.ip_address,message))
file(self.error_logfile,"a").write("%s: %s" % (t,message))
self.log(message)
def log(self,message):
"""For non-critical messages.
Append the message to the transcript, if verbose logging is enabled."""
if not self.verbose_logging: return
if len(message) == 0 or message[-1] != "\n": message += "\n"
t = timestamp()
file(self.logfile,"a").write("%s: %s" % (t,message))
def get_error_logfile(self):
"""File name error messages."""
from tempfile import gettempdir
return gettempdir()+"/rayonix_detector_error.log"
error_logfile = property(get_error_logfile)
def get_logfile(self):
"""File name for transcript if verbose logging is enabled."""
from tempfile import gettempdir
return gettempdir()+"/rayonix_detector.log"
logfile = property(get_logfile)
def timestamp():
"""Current date and time as formatted ASCCI text, precise to 1 ms"""
from datetime import datetime
timestamp = str(datetime.now())
    return timestamp[:-3] # omit microseconds
def remote(pathname):
"""This converts the pathname of a file on a network file server from
    the local format to the format used on the MAR CCD computer.
e.g. "//id14bxf/data" in Windows maps to "/net/id14bxf/data" on Unix"""
if not pathname: return pathname
end = "/" if pathname.endswith("/") else ""
# Try to expand a Windows drive letter to a UNC name.
try:
import win32wnet
# Convert "J:/anfinrud_0811/Data" to "J:\anfinrud_0811\Data".
pathname = pathname.replace("/","\\")
pathname = win32wnet.WNetGetUniversalName(pathname)
except: pass
# Convert separators from DOS style to UNIX style.
pathname = pathname.replace("\\","/")
if pathname.find("//") == 0: # //server/share/directory/file
parts = pathname.split("/")
if len(parts) >= 4:
server = parts[2] ; share = parts[3]
path = ""
for part in parts[4:]: path += part+"/"
path = path.rstrip("/")
pathname = "/net/"+server+"/"+share+"/"+path
if not pathname.endswith(end): pathname += end
return pathname
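# Example of remote()'s behaviour (illustrative, derived from the docstring
# and the parsing above):
#   remote("//id14bxf/data/test.mccd") -> "/net/id14bxf/data/test.mccd"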
def makedirs(pathname):
"""Create a directory, or make sure that the directory is world-writable"""
    # This is a workaround for a problem caused by the Rayonix software running
# under a different user id on the Rayonix control computer, compared
# to the beamline control computer, so directories created via NFS on the
# control machine might not be writable on the Rayonix computer.
# E.g. user id 10660(xppopr) on "xpp-daq", versus user id 500(hsuser)
# on "con-ics-xpp-rayonix"
from os import makedirs,umask,chmod
from os.path import exists
from sys import stderr
if exists(pathname) and not iswritable(pathname):
try: chmod(pathname,0777)
except Exception,details: stderr.write("chmod: %r: %r" % (pathname,details))
if not exists(pathname):
umask(0000)
try: makedirs(pathname)
except Exception,details: stderr.write("makedirs: %r: %r" % (pathname,details))
def iswritable(pathname):
"""Is file or folder writable?"""
from os import access,W_OK
return access(pathname,W_OK)
def common_topdir(filenames):
"""filenames: list of strings"""
from os.path import dirname
if len(filenames) == 0: return []
if len(filenames) == 1: return dirname(filenames[0])
for level in range(1,4):
dirnames = []
for pathname in filenames:
for i in range(0,level): pathname = dirname(pathname)
dirnames += [pathname]
if all([n == dirnames[0] for n in dirnames]): break
pathname = filenames[0]
for i in range(0,level): pathname = dirname(pathname)
return pathname
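# Example (illustrative): common_topdir(["/tmp/a/1.mccd", "/tmp/a/2.mccd"])
# walks up one directory level, finds both parents equal, and returns "/tmp/a".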
rayonix_detector = Rayonix_Detector()
if __name__ == "__main__": # for testing
from pdb import pm
import logging
logging.basicConfig(level=logging.DEBUG,format="%(asctime)s: %(message)s")
self = rayonix_detector # for debugging
filenames = ["/tmp/test_%03d.mccd" % (i+1) for i in range(0,10)]
print('rayonix_detector.ip_address = %r' % rayonix_detector.ip_address)
print('')
print('rayonix_detector.bin_factor')
print('rayonix_detector.acquire_images_triggered(filenames)')
| 2.5625 | 3 |
apps/poll/admin.py | CasualGaming/studlan | 9 | 12799835 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Poll, PollOption, PollParticipant, PollTranslation
class PollTranslationInlineAdmin(admin.StackedInline):
verbose_name = _(u'poll translation')
verbose_name_plural = _(u'poll translations')
model = PollTranslation
max_num = len(settings.LANGUAGES)
extra = 1
class PollOptionInlineAdmin(admin.StackedInline):
verbose_name = _(u'poll option')
verbose_name_plural = _(u'poll options')
model = PollOption
extra = 1
class PollAdmin(admin.ModelAdmin):
inlines = [PollTranslationInlineAdmin, PollOptionInlineAdmin]
list_display = ['__unicode__', 'lan']
class PollParticipantAdmin(admin.ModelAdmin):
model = PollParticipant
readonly_fields = ['poll', 'user', 'option']
admin.site.register(Poll, PollAdmin)
admin.site.register(PollParticipant, PollParticipantAdmin)
| 1.867188 | 2 |
build-hooks/hook-browse_utils.py | rezgar/pythonista-chromeless | 1 | 12799836 | from PyInstaller.utils.hooks import collect_data_files
datas = collect_data_files('cli', include_py_files=True) | 1.242188 | 1 |
log/migrations/0001_initial.py | zyayoung/lab-item-tracking | 4 | 12799837 | # Generated by Django 2.1.1 on 2019-02-09 01:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('login', '0004_user_settings'),
]
operations = [
migrations.CreateModel(
name='Log',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('time', models.DateTimeField(auto_now_add=True, verbose_name='操作时间')),
('obj', models.TextField(blank=True, null=True, verbose_name='操作对象')),
('result', models.TextField(blank=True, null=True, verbose_name='操作结果')),
('operator', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='login.User', verbose_name='操作人')),
],
options={
'verbose_name': '操作记录',
'verbose_name_plural': '操作记录',
'ordering': ['-time'],
},
),
]
| 1.882813 | 2 |
examples/run.py | GregoireDelannoy/udon | 1 | 12799838 | <gh_stars>1-10
import getopt
import logging
import sys
import time
import udon.log
import udon.run
pidfile = None
opts, args = getopt.getopt(sys.argv[1:], "p:")
for opt, arg in opts:
if opt == '-p':
pidfile = arg
if args:
if args[0] == 'stop':
udon.run.stop(pidfile)
elif args[0] == 'kill':
udon.run.kill(pidfile)
else:
udon.log.init(foreground = False, level = "DEBUG")
udon.run.daemon(pidfile)
logging.info("starting")
for i in range(20):
logging.info("%d...", i)
time.sleep(1)
logging.info("done")
| 2.21875 | 2 |
service1.py | theneon-Hacker/main_numProperty | 0 | 12799839 | <reponame>theneon-Hacker/main_numProperty
from itertools import dropwhile
def roman(n):
    if 0 < n <= 3999:
ones = ["","I","II","III","IV","V","VI","VII","VIII","IX"]
tens = ["","X","XX","XXX","XL","L","LX","LXX","LXXX","XC"]
hunds = ["","C","CC","CCC","CD","D","DC","DCC","DCCC","CM"]
thounds = ["","M","MM","MMM","MMMM"]
t = thounds[n // 1000]
h = hunds[n // 100 % 10]
te = tens[n // 10 % 10]
o = ones[n % 10]
return t + h + te + o
else:
return " - "
def romanize(num):
if num < 0 or num > 3999:
return "Ваше число нельзя представить в римской системе счисления"
else:
return f"Число в римской системе счисления: {roman(num)}"
def bitize(num):
return f"Число в двоичной системе счисления: {str(bin(num))[2:]}"
def print_all(numData, num):
print('Число {}:'.format(num), end='\n\t')
for k, item in enumerate(numData):
print(numData[k], end='\n\t')
print('')
isS = rct = unus = isEx = suf = False
def formData(numData, MC, num):
global rct, unus, isEx
global isS, suf
if 'простое' in numData[1]:
isS = True
elif 'составное' in numData[1]:
isS = False
if ',' in numData:
rct = True
elif ',' not in numData:
rct = False
if '.' in numData:
unus = True
elif '.' not in numData:
unus = False
if '>' in numData:
isEx = True
if '>' not in numData:
isEx = False
if '<' in numData:
suf = True
else:
suf = False
datapiece = f'''
"Число %d":
"{MC.dividers()}",
"Число простое": {str(isS).lower()},
"Число является прямоугольным": {str(rct).lower()},
"Число - необычное": {str(unus).lower()},
"%s",
"Число избыточное": {str(isEx).lower()},
"Число недостаточное": {str(suf).lower()},
"{MC.repr_pow2()}",
"{MC.repr_sqrt2()}",
"Число в римской системе счисления": {romanize(num)[35:]},
"Число в двоичной системе счисления": {str(bin(num))[2:]}.
''' % (num, MC.smooth())
return datapiece
def check_savings(file, patterns_list):
with open(file, 'r') as f:
try:
all_ = f.read()
all_ = ''.join(all_)
all_ = all_.split('\n')
all_ = reversed(all_)
for line in all_:
for elem in patterns_list:
if elem in line: continue
else:
break
else:
character_map = {
}
for j in patterns_list[1:]:
character_map.update({ord(j): ''})
line = line.translate(character_map)
line = line.replace(f"{patterns_list[0]}", '')
line = line.lstrip(' \n').rstrip(' \n')
line = line.split(", ")
line = [i for i in map(int, line)]
return line
assert False
except:
return []
if __name__ == '__main__':
pass
| 3.015625 | 3 |
src/daemon/initialisation.py | 0CT3T/Daemon_Home_Integration | 1 | 12799840 | <reponame>0CT3T/Daemon_Home_Integration
from importlib.machinery import SourceFileLoader
from daemon.Configuration.Modele import *
from daemon.Configuration.configuration import configuration
#############################################################
#
# INITIALISATION
#############################################################
lobjet = {}      # dict of module class instances, keyed by name
lmodrules = []   # list of rule modules
config = configuration()
# LOAD the module configuration file
with open(JSONdirectory + "module.json", "r") as fichier:
JSON = fichier.read()
config.load(JSON)
# import the classes and instantiate the objects
for item in config.getlitem():
temp = getattr(SourceFileLoader(item,Moduledirectory +item+".py").load_module(), item)
lobjet[item] = temp()
| 1.8125 | 2 |
day10.py | kdrag0n/aoc2021 | 2 | 12799841 | #!/usr/bin/env python3
import sys
def ints(itr):
return [int(i) for i in itr]
with open(sys.argv[1], 'r') as f:
file_lines = [l for l in f.read().strip().split('\n')]
in_nums = []
total = 0
result = 0
other = 0
opn_to_cls = {'(': ')', '[': ']', '{': '}', '<': '>'}
cls_to_opn = {v: k for k, v in opn_to_cls.items()}
scores = {
')': 3,
']': 57,
'}': 1197,
'>': 25137
}
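# Example (from the puzzle statement): the corrupted line
# '{([(<{}[<>[]}>{[]{[(<()>' fails on '}' where ']' was expected,
# contributing scores['}'] == 1197 to the total.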
for l in file_lines:
    stk = []
    syms = list(l)
    err = False
    for sym in syms:
        if sym in list('([{<'):
            stk += [opn_to_cls[sym]]
        elif len(stk) and sym in cls_to_opn:
            if stk[-1] == sym:
                stk.pop()
            else:
                err = True
                break
    # the first mismatched closing bracket makes the whole line corrupted
    if err:
        total += scores[sym]
print(f'Total: {total}')
print(f'Result: {result}')
print(f'Other: {other}')
| 3.546875 | 4 |
fHDHR/originwrapper/origin_channels_standin.py | deathbybandaid/fHDHR_NewsOn | 2 | 12799842 | <gh_stars>1-10
class OriginChannels_StandIN():
def __init__(self):
pass
def get_channels(self):
return []
def get_channel_stream(self, chandict):
return None
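# This stand-in presumably mirrors the origin-channels plugin interface with
# empty results, so the wrapper can fall back on it when no real origin
# implementation is available.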
| 1.609375 | 2 |
data_wrangling/legacy_code/frame_times_crawler.py | alexmitchell/file_manipulations | 0 | 12799843 | <gh_stars>0
import os
import numpy as np
from pathlib import Path
# From Helpyr
import data_loading
from helpyr_misc import nsplit
from helpyr_misc import ensure_dir_exists
from logger import Logger
from crawler import Crawler
class FrameTimesCrawler (Crawler):
# The FrameTimesCrawler navigates through the backup data drives and
    # collects all the image names. The names are timestamps for when the
    # images were taken, and can therefore be used to figure out the frame
    # rate for any particular second. Saves the image times as .npy files.
def __init__(self, destination_dir, log_filepath="./log-files/frame-name-crawler.txt"):
logger = Logger(log_filepath, default_verbose=True)
Crawler.__init__(self, logger)
self.mode_dict['collect_frame_times'] = self.collect_frame_times
self.set_target_names('*.tif')
self.destination_dir = destination_dir
ensure_dir_exists(destination_dir, self.logger)
def end(self):
Crawler.end(self)
self.logger.end_output()
def collect_frame_times(self):
self.collect_names(verbose_file_list=False)
print()
paths = self.file_list
# Get the run parameters and frame times. Store in dict for now.
self.logger.write(f"Extracting run info")
time_dict = {}
n_paths = len(paths)
print_perc = lambda p: print(f"{p:4.0%}", end='\r')
i_tracker = 0
for i, path in enumerate(paths):
if (i / n_paths) >= i_tracker:
print_perc(i_tracker)
i_tracker += 0.1
_, exp_code, step, period, file = nsplit(path, 4)
time_str, ext = file.rsplit('.', 1)
key = (exp_code, step, period)
if key in time_dict:
time_dict[key].append(np.float(time_str))
else:
time_dict[key] = [np.float(time_str)]
self.logger.write(f"Writing times files")
self.logger.increase_global_indent()
npy_paths = []
for key in time_dict:
(exp_code, step, period) = key
times = np.sort(np.array(time_dict[key]))
self.logger.write(f"Found {len(times)} images for {exp_code} {step} {period}")
times_filename = f"{exp_code}_{step}_{period}_frame_times.npy"
times_filepath = os.path.join(self.destination_dir, times_filename)
np.save(times_filepath, times)
npy_paths.append(times_filepath)
self.logger.decrease_global_indent()
npy_list_filepath = os.path.join(self.destination_dir, 'npy_list.txt')
npy_path = Path(npy_list_filepath)
if npy_path.is_file():
# Add to existing records. Ignores duplicates.
with npy_path.open() as fid:
existing_paths = fid.read().splitlines()
self.logger.write(f"Adding {len(time_dict)} files to {len(existing_paths)} existing files.")
npy_paths = list(filter(None, set(existing_paths) | set(npy_paths)))
else:
self.logger.write(f"{len(time_dict)} files written")
with npy_path.open('w') as fid:
fid.write('\n'.join(npy_paths))
self.logger.write("Done!")
if __name__ == "__main__":
destination_dir = '/home/alex/feed-timing/data/extracted-lighttable-results/frame-times'
crawler = FrameTimesCrawler(destination_dir)
exp_root = '/run/media/alex/Alex4/lighttable-data'
crawler.set_root(exp_root)
crawler.run()
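    # Possible follow-up (illustrative; the file name below is an assumption
    # following the '<exp>_<step>_<period>_frame_times.npy' pattern used above):
    #   times = np.load(destination_dir + '/3A_rising_t20_frame_times.npy')
    #   rates = 1.0 / np.diff(times)  # instantaneous frame rate in Hz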
| 2.609375 | 3 |
Conversation chatbot/dialogue_manager.py | kirilcvetkov92/MNIST-Classifier- | 2 | 12799844 | import os
from sklearn.metrics.pairwise import pairwise_distances_argmin
from chatterbot import ChatBot
from chatterbot.trainers import ListTrainer
from chatbot import *
from utils import *
import tensorflow as tf
class ThreadRanker(object):
def __init__(self, paths):
self.word_embeddings, self.embeddings_dim = load_embeddings(paths['WORD_EMBEDDINGS'])
self.thread_embeddings_folder = paths['THREAD_EMBEDDINGS_FOLDER']
def __load_embeddings_by_tag(self, tag_name):
embeddings_path = os.path.join(self.thread_embeddings_folder, tag_name + ".pkl")
thread_ids, thread_embeddings = unpickle_file(embeddings_path)
return thread_ids, thread_embeddings
def get_best_thread(self, question, tag_name):
""" Returns id of the most similar thread for the question.
The search is performed across the threads with a given tag.
"""
thread_ids, thread_embeddings = self.__load_embeddings_by_tag(tag_name)
# HINT: you have already implemented a similar routine in the 3rd assignment.
question_vec = question_to_vec(question, self.word_embeddings, self.embeddings_dim)
best_thread = pairwise_distances_argmin(
X=question_vec.reshape(1, -1),
Y=thread_embeddings,
metric='cosine'
)
return thread_ids[best_thread[0]]
class DialogueManager(object):
def __init__(self, paths):
print("Loading resources...")
self.create_chitchat_bot()
def create_chitchat_bot(self):
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.1)
self.sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
self.model = Seq2SeqModel(vocab_size=len(word2id),
embeddings_size=300,
hidden_size=128,
max_iter=20,
start_symbol_id=word2id['[^]'],
end_symbol_id=word2id['[$]'],
padding_symbol_id=word2id['[#]'])
saver = tf.train.Saver()
saver.restore(self.sess, 'checkpoints/model_four_691')
def generate_answer(self, question):
# Pass question to chitchat_bot to generate a response.
response = self.model.get_response(self.sess, question)
return response
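# Minimal usage sketch (illustrative; assumes the checkpoint under
# 'checkpoints/model_four_691' and the seq2seq vocabulary are available):
#   manager = DialogueManager(paths={})
#   print(manager.generate_answer("what time is it"))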
| 2.453125 | 2 |
trim.py | watsoncm/PruneSeg | 0 | 12799845 | <reponame>watsoncm/PruneSeg
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Trims weights on a pruned model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import logging
import os
import shutil
import sys
import collections
# https://github.com/tensorflow/tensorflow/issues/2034#issuecomment-220820070
import numpy as np
import tensorflow as tf
flags = tf.app.flags
FLAGS = flags.FLAGS
sys.path.insert(1, 'incl')
import tensorvision.train as train
import tensorvision.analyze as ana
import tensorvision.utils as utils
import tensorvision.core as core
from evaluation import kitti_test
flags.DEFINE_string('RUN', 'KittiSeg_pretrained',
'Modifier for model parameters.')
flags.DEFINE_string('hypes', 'hypes/KittiSeg.json',
'File storing model parameters.')
flags.DEFINE_string('name', None,
'Append a name Tag to run.')
flags.DEFINE_string('project', None,
'Append a name Tag to run.')
if 'TV_SAVE' in os.environ and os.environ['TV_SAVE']:
tf.app.flags.DEFINE_boolean(
'save', True, ('Whether to save the run. In case --nosave (default) '
'output will be saved to the folder TV_DIR_RUNS/debug, '
'hence it will get overwritten by further runs.'))
else:
tf.app.flags.DEFINE_boolean(
'save', True, ('Whether to save the run. In case --nosave (default) '
'output will be saved to the folder TV_DIR_RUNS/debug '
'hence it will get overwritten by further runs.'))
segmentation_weights_url = ("ftp://mi.eng.cam.ac.uk/"
"pub/mttt2/models/KittiSeg_pretrained.zip")
def maybe_download_and_extract(runs_dir):
logdir = os.path.join(runs_dir, FLAGS.RUN)
if os.path.exists(logdir):
# weights are downloaded. Nothing to do
return
if not FLAGS.RUN == 'KittiSeg_pretrained':
return
import zipfile
download_name = utils.download(segmentation_weights_url, runs_dir)
logging.info("Extracting KittiSeg_pretrained.zip")
zipfile.ZipFile(download_name, 'r').extractall(runs_dir)
return
def main(_):
utils.set_gpus_to_use()
try:
import tensorvision.train
import tensorflow_fcn.utils
except ImportError:
logging.error("Could not import the submodules.")
logging.error("Please execute:"
"'git submodule update --init --recursive'")
exit(1)
with open(tf.app.flags.FLAGS.hypes, 'r') as f:
logging.info("f: %s", f)
hypes = json.load(f)
utils.load_plugins()
if 'TV_DIR_RUNS' in os.environ:
runs_dir = os.path.join(os.environ['TV_DIR_RUNS'],
'KittiSeg')
else:
runs_dir = 'RUNS'
utils.set_dirs(hypes, tf.app.flags.FLAGS.hypes)
utils._add_paths_to_sys(hypes)
train.maybe_download_and_extract(hypes)
maybe_download_and_extract(runs_dir)
logging.info("Trimming weights.")
logdir = os.path.join(runs_dir, FLAGS.RUN)
modules = utils.load_modules_from_hypes(hypes)
with tf.Graph().as_default():
# build the graph based on the loaded modules
with tf.name_scope("Queues"):
queue = modules['input'].create_queues(hypes, 'train')
tv_graph = core.build_training_graph(hypes, queue, modules)
# prepare the tv session
with tf.Session().as_default():
tv_sess = core.start_tv_session(hypes)
sess = tv_sess['sess']
saver = tv_sess['saver']
cur_step = core.load_weights(logdir, sess, saver)
if cur_step is None:
logging.warning("Loaded global_step is None.")
logging.warning("This could mean,"
" that no weights have been loaded.")
logging.warning("Starting Training with step 0.")
cur_step = 0
with tf.name_scope('Validation'):
tf.get_variable_scope().reuse_variables()
image_pl = tf.placeholder(tf.float32)
image = tf.expand_dims(image_pl, 0)
image.set_shape([1, None, None, 3])
inf_out = core.build_inference_graph(hypes, modules,
image=image)
tv_graph['image_pl'] = image_pl
tv_graph['inf_out'] = inf_out
        # prepare the tv session
image_pl = tf.placeholder(tf.float32)
image = tf.expand_dims(image_pl, 0)
image.set_shape([1, None, None, 3])
inf_out = core.build_inference_graph(hypes, modules,
image=image)
# Create a session for running Ops on the Graph.
trim_dir = 'RUNS/trimmed'
shutil.copytree(logdir, trim_dir)
shutil.copy(tf.app.flags.FLAGS.hypes,
os.path.join(trim_dir, 'model_files', 'hypes.json'))
sess = tf.Session()
saver = tf.train.Saver()
core.load_weights(trim_dir, sess, saver)
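        # Kernel-level pruning: for every masked weight tensor in the layers
        # selected via hypes['layer_pruning'], rank the output kernels by
        # their L1 norm and zero out the weakest 'layer_sparsity' fraction.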
for weight in tf.contrib.model_pruning.get_masks():
if any([layer in weight.name for layer in hypes['layer_pruning']['layers']]):
weight_value = tv_sess['sess'].run(weight)
kernel_count = int(weight_value.shape[3] * hypes['layer_pruning']['layer_sparsity'])
l1_values = np.sum(np.abs(weight_value), axis=(0, 1, 2))
toss_kernels = l1_values.argsort()[:kernel_count]
weight_value[:, :, :, toss_kernels] = 0
assign_op = tf.assign(weight, tf.constant(weight_value))
tv_sess['sess'].run(assign_op)
checkpoint_path = os.path.join(trim_dir, 'model.ckpt')
tv_sess['saver'].save(sess, checkpoint_path, global_step=cur_step)
train.continue_training(trim_dir)
if __name__ == '__main__':
tf.app.run()
| 2.09375 | 2 |
src/modules/catalog/domain/services.py | Ermlab/python-ddd | 308 | 12799846 | # from seedwork.domain.services import DomainService
# from seedwork.domain.value_objects import UUID
# from .entities import Listing, Seller
# from .repositories import ListingRepository
# from .rules import (
# ListingMustBeInDraftState,
# SellerMustBeEligibleForAddingNextListing,
# )
# class CatalogService:
# def publish_listing(self, listing: Listing, seller: Seller):
# self.check_rule(ListingMustBeInDraftState(listing.status))
# self.check_rule(SellerMustBeEligibleForAddingNextListing(seller))
# listing.publish()
| 2.0625 | 2 |
ndv_ru.py | nonameists/puls_test | 0 | 12799847 | <reponame>nonameists/puls_test
import json
import re
import requests
from bs4 import BeautifulSoup as soup
DICT_KEYS = ['complex', 'type', 'phase', 'building', 'section', 'price_base',
'price_finished', 'price_sale', 'price_finished_sale', 'area',
'number', 'number_on_site', 'rooms', 'floor', 'in_sale',
'sale_status', 'finished', 'currency', 'ceil', 'article',
'finishing_name', 'furniture', 'furniture_price', 'plan',
'feature', 'view', 'euro_planning', 'sale', 'discount_percent',
'discount', 'comment']
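# Each parsed object (flat or parking space) is represented by a dict with
# this fixed key set; fields a given listing does not provide remain None.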
class NdvParser:
def __init__(self):
self.session = requests.Session()
self.base_url = 'https://www.ndv.ru'
self.base_url_flats = 'https://www.ndv.ru/novostrojki/flats'
self.new_buildings_url = 'https://www.ndv.ru/novostrojki'
self.parser_dict = dict.fromkeys(DICT_KEYS)
self.objects_list = self._get_new_buildings(self.new_buildings_url)
def get_flats_data(self):
"""
        Fetches data on flats for sale in new buildings.
        Returns a list of dicts with flat data.
:return: list of dicts
"""
        # the initial list of objects that will be returned
objects = []
raw_data = self.session.get(self.base_url_flats).content
content = soup(raw_data, 'html.parser')
        # Look for a paginator on the page
pages = self._find_pagination(content)
if pages:
for i in range(1, pages+1):
page_url = self.base_url_flats + f'?page={i}'
raw_data = self.session.get(page_url).content
content = soup(raw_data, 'html.parser')
                # extend the initial list with this page's results
objects.extend(self._write_flats_data(content))
else:
objects = self._write_flats_data(content)
return objects
def get_parking_data(self):
"""
        Fetches data on parking spaces for sale.
        Returns a list of dicts with parking-space data.
:return: list of dicts
"""
objects = []
        # Iterate over the list of housing complexes
for item in self.objects_list:
            # take the complex name and the link to its page; append /parking to the URL
location, url = item
url += '/parking'
answer = self.session.get(url)
            # check whether any parking spaces are on sale; if not, take the next complex
if answer.status_code == 404:
continue
raw_data = answer.content
content = soup(raw_data, 'html.parser')
            # Find the "Show n offers" button and extract the number of offers on sale
row = content.find('a', id='NewBuildingComplexUpdateButton').get_text(strip=True)
number = int(re.search('(?P<number>\d+)', row).group())
            # The page exists, but there are currently 0 offers; take the next complex
if not number:
continue
            # Look for a paginator on the page
pages = self._find_pagination(content)
if pages:
for i in range(1, pages+1):
page_url = url + f'?page={i}'
raw_data = self.session.get(page_url).content
content = soup(raw_data, 'html.parser')
                    # extend the initial list with this page's results
objects.extend(self._write_parking_data(content, location))
else:
objects.extend(self._write_parking_data(content, location))
return objects
def get_full_data(self, json_file=None):
"""
        Parses data on flats in new buildings plus data on parking spaces.
        Writes the collected data to a JSON file.
:return: list of dicts - if json_file=None
:return: json_file - if json_file=True
"""
print('Starting data parsing...')
flats = self.get_flats_data()
parking = self.get_parking_data()
data_result = flats + parking
if json_file is None:
return data_result
else:
with open('ndv_ru.json', 'w') as file:
json.dump(data_result, file)
print('Success')
def _get_new_buildings(self, url):
"""
        Returns a list of tuples with a housing complex name and its URL.
:param url: str
:return: list of tuples
[('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')]
"""
objects = []
raw_data = self.session.get(url).content
content = soup(raw_data, 'html.parser')
        # Look for a paginator on the page
pages = self._find_pagination(content)
if pages:
for i in range(1, pages + 1):
                # append ?page=n to the URL
page_url = self.new_buildings_url + f'?page={i}'
raw_data = self.session.get(page_url).content
content = soup(raw_data, 'html.parser')
                # extend the initial list with this page's results
objects.extend(self._get_objects(content))
else:
objects = self._get_objects(content)
return objects
def _get_objects(self, data):
"""
        Takes a bs4.BeautifulSoup object as input.
        Looks up the complex name, the region and the link to the complex page.
:param data: bs4.BeautifulSoup
:return: list of tuples
[('Мкр. «Мегаполис»(Москва, ВАО, Салтыковская улица 8с22)','/novostrojki/zhk/mkr-megapolis')]
"""
output = []
raw_data = data.find_all('div', {'class': 'tile__content'})
for item in raw_data:
name = item.select_one('a', {'class': 'tile__name'}).text.strip()
location = item.find('span', {'class': 'tile__location'}).get_text().strip()
urn = item.select_one('a', {'class': 'tile__name'}).get('href')
output.append((name + f'({location})', self.base_url + urn))
return output
def _find_pagination(self, data):
"""
        Takes a bs4.BeautifulSoup object as input.
        Searches for a paginator; if present, returns the number of the last page.
:param data: bs4.BeautifulSoup
:return: int last page number or False
"""
pages = data.findAll('a', {'class': 'move-to-page'})
if pages:
last_page = int(pages[-2].text)
return last_page
return False
def _get_image(self, data):
"""
        Parses the flat floor plan.
        Takes a bs4.element.Tag as input. Searches for the div with class
        tile__image and extracts the URL with a regular expression.
:param data: bs4.element.Tag
:return: str (image src url)
"""
try:
plan = data.find('div', class_='tile__image')['data-deskstop']
plan = re.search("url\('(?P<url>\S+)'\)", plan).group('url')
if plan == '/img/new-design/no-image.svg':
return None
return plan
except AttributeError:
return None
def _get_complex(self, data):
"""
        Looks up the housing complex name and its region
:param data: bs4.element.Tag
:return: str
"""
try:
complex = data.find(
'a',
class_='tile__resale-complex--link js_tile_complex_link'
).get_text(
strip=True
)
location = data.find('span', class_='tile__location').get_text(strip=True)
complex += f'({location})'
return complex
except AttributeError:
return None
def _get_phase(self, data):
"""
        Looks up the construction phase
:param data: bs4.element.Tag
:return: str
"""
try:
phase = data.find('span', class_='tile__row--resale_date').get_text(strip=True)
return phase
except AttributeError:
return None
def _price_base(self, data):
"""
        Looks up the flat price
:param data: bs4.element.Tag
:return: str
"""
try:
price_base = data.find('span', class_='tile__price').get_text(strip=True)
price_base = int(''.join(price_base.split()[:3]))
return price_base
except AttributeError:
return None
def _get_complex_item(self, data):
"""
        Looks up information about a flat:
        the building, section, floor and flat number.
        Returns a dict with the keys ['section', 'floor', 'number', 'building']
:param data: bs4.element.Tag
:return: dict
"""
keys = ('section', 'floor', 'number', 'building')
result = dict.fromkeys(keys)
info = data.find_all('div', class_='tile__in-complex-item')
for item in info:
title = item.select_one('.tile__in-complex-title').get_text(strip=True).lower()
value = item.select_one('.tile__in-complex-value').get_text(strip=True)
if title == 'корпус':
result['building'] = value
elif title == 'секция':
result['section'] = value
elif title == 'этаж':
result['floor'] = value
elif title == 'номер':
result['number'] = value
return result
def _get_dimentions(self, data):
"""
        Looks up the number of rooms in the flat and its area, and determines
        whether the object is an apartment or a flat
:param data: bs4.element.Tag
:return: dict
"""
result = dict()
name = data.find('a', {'class': 'tile__name'}).get_text(strip=True)
result['area'] = float(name.split()[-1].replace('м²', '').replace(',', '.'))
if 'студия' in name.split()[0].lower():
result['rooms'] = 'studio'
else:
result['rooms'] = int(name.split('-')[0])
if 'апартамент' in name.lower():
result['type'] = 'apartment'
else:
result['type'] = 'flat'
return result
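    # Illustrative example (the exact listing title format is an assumption):
    # a title like "2-комнатная квартира 54,3м²" yields
    # {'area': 54.3, 'rooms': 2, 'type': 'flat'}.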
def _write_flats_data(self, data):
"""
        Writes data about a single flat into a dict.
        Takes a bs4.BeautifulSoup object as input.
:param data: bs4.BeautifulSoup
:return: list of dict
"""
result = []
        # Find the individual listing objects on the page
raw_data = data.find_all('div', class_='tile__link js-tile-link')
        # loop over each listing
for item in raw_data:
            # Take a copy of the template dict whose keys we will fill with data
output = self.parser_dict.copy()
            # store the complex name and its region
output['complex'] = self._get_complex(item)
            # store the construction phase
output['phase'] = self._get_phase(item)
            # store the price
output['price_base'] = self._price_base(item)
            # store the link to the object's floor plan
output['plan'] = self._get_image(item)
            # update the dict keys with the building, section, floor and flat number
output.update(self._get_complex_item(item))
            # update the dict keys with the rooms, area and flat type
output.update(self._get_dimentions(item))
            # append the dict to the list that will be returned
result.append(output)
return result
def _write_parking_data(self, data, location):
"""
Метод для записи данных о отдельном парковочном месте
На вход принимает объект класса bs4.BeautifulSoup
:param data: bs4.BeautifulSoup
:param location: str
:return: list of dicts
"""
result = []
        # Find the individual parking-space objects on the complex page
raw_data = data.find_all('a', class_='flats-table__row table-body--row')
        # loop over each parking space
for item in raw_data:
            # Take a copy of the template dict whose keys we will fill with data
output = self.parser_dict.copy()
            # store the complex name and region
output['complex'] = location
            # store the parking-space data (area, building, section, floor, plan)
output.update(self._get_parking_info(item))
            # append the dict to the list that will be returned
result.append(output)
return result
def _get_parking_info(self, data):
"""
Метод для парсинга данных о парковочном месте
:param data: bs4.element.Tag
:return: dict
"""
plan_img = None
price_base = None
price_sale = None
building = None
area = None
section = None
floor = None
number = None
urn = data.get('href')
parking_url = self.base_url + urn
parking_data = soup(self.session.get(parking_url).content, 'html.parser')
        # look up the parking-space number
raw_number = parking_data.find('meta', {'content': '10'})
if raw_number:
number = raw_number.previous.strip().split()[1].replace('№', '')
else:
try:
number = parking_data.find('h1', class_='title').get_text(strip=True).split()[2]
except AttributeError:
pass
        # look up the link to the plan
try:
plan_div = parking_data.find('div', {'id': 'plans_layout'})
plan_img = plan_div.find('img').get('src')
except AttributeError:
pass
        # look up the price (including the discounted price)
try:
price_base = parking_data.find('span', class_='card__info-prices__price').get_text(strip=True)
price_base = int(price_base.split('руб.')[0].replace(' ', ''))
except AttributeError:
try:
price_base = parking_data.find('span', class_='card__info-prices__old').get_text(strip=True)
price_base = int(price_base.split('руб.')[0].replace(' ', ''))
price_sale = parking_data.find(
'span',
class_='card__info-prices__price card__info-prices--red'
).get_text(strip=True)
price_sale = int(price_sale.split('руб.')[0].replace(' ', ''))
except AttributeError:
pass
        # parse the parking-space data (area, building, section, floor)
parking_div_info = parking_data.find('div', class_='card__info-row card__info-row--settings')
parking_div_data = parking_div_info.find_all('div', class_='card__info-params__number')
        # parse the area
try:
raw_area = parking_div_data[0].get_text(strip=True).split()[0]
area = float(raw_area.replace(',', '.'))
except (AttributeError, IndexError):
pass
        # parse the building
try:
building = parking_div_data[1].get_text(strip=True)
except (AttributeError, IndexError):
pass
        # parse the section
try:
section = parking_div_data[2].get_text(strip=True)
except (AttributeError, IndexError):
pass
        # parse the floor
try:
floor = parking_div_data[3].get_text(strip=True)
except (AttributeError, IndexError):
pass
output_dict = {
'number': number,
'building': building,
'area': area,
'price_sale': price_sale,
'price_base': price_base,
'type': 'parking',
'plan': plan_img,
'section': section,
'floor': floor
}
return output_dict
if __name__ == '__main__':
ndv = NdvParser()
# Запускаем парсер на квартиры и машиноместа.
# Данные записываются в json файл
ndv.get_full_data(json_file=True)
| 2.796875 | 3 |
learn-to-code-with-python/32-PROJECT-Texas-Hold-Em-Poker/tests/test_card.py | MaciejZurek/python_practicing | 0 | 12799848 | import unittest
from poker.card import Card
class CardTest(unittest.TestCase):
def test_has_rank(self):
card = Card(rank = "Queen", suit = "Hearts")
self.assertEqual(card.rank, "Queen")
def test_has_suit(self):
card = Card(rank = "2", suit = "Clubs")
self.assertEqual(card.suit, "Clubs")
def test_knows_its_rank_index(self):
card = Card(rank = "Jack", suit = "Hearts")
self.assertEqual(card.rank_index, 9)
def test_has_string_representation_with_rank_and_suit(self):
card = Card("5", "Diamonds")
self.assertEqual(str(card), "5 of Diamonds")
def test_has_technical_representation(self):
card = Card("5", "Diamonds")
self.assertEqual(repr(card), "Card('5', 'Diamonds')")
def test_card_has_four_possible_suit_options(self):
self.assertEqual(
Card.SUITS,
("Hearts", "Clubs", "Spades", "Diamonds")
)
def test_card_has_thirteen_possible_rank_options(self):
self.assertEqual(
Card.RANKS,
(
"2", "3", "4", "5", "6", "7", "8", "9", "10",
"Jack", "Queen", "King", "Ace"
)
)
def test_card_only_allows_for_valid_rank(self):
with self.assertRaises(ValueError):
Card(rank = "Two", suit = "Hearts")
def test_card_only_allows_for_valid_suit(self):
with self.assertRaises(ValueError):
Card(rank = "2", suit = "Dots")
def test_can_create_standard_52_cards(self):
cards = Card.create_standard_52_cards()
self.assertEqual(len(cards), 52)
self.assertEqual(
cards[0],
Card(rank = "2", suit = "Hearts")
)
self.assertEqual(
cards[-1],
Card(rank = "Ace", suit = "Diamonds")
)
def test_figures_out_if_two_cards_are_equal(self):
self.assertEqual(
Card(rank = "2", suit = "Hearts"),
Card(rank = "2", suit = "Hearts")
)
def test_card_can_sort_itself_with_another_one(self):
queen_of_spades = Card(rank = "Queen", suit = "Spades")
king_of_spades = Card(rank = "King", suit = "Spades")
evaluation = queen_of_spades < king_of_spades
self.assertEqual(
evaluation,
True,
"The sort algorithm is not sorting the lower card first"
)
def test_sorts_cards(self):
two_of_spades = Card(rank = "2", suit = "Spades")
five_of_diamonds = Card(rank = "5", suit = "Diamonds")
five_of_hearts = Card(rank = "5", suit = "Hearts")
eight_of_hearts = Card(rank = "8", suit = "Hearts")
ace_of_clubs = Card(rank = "Ace", suit = "Clubs")
unsorted_cards = [
five_of_hearts,
five_of_diamonds,
two_of_spades,
ace_of_clubs,
eight_of_hearts
]
unsorted_cards.sort()
self.assertEqual(
unsorted_cards,
[
two_of_spades,
five_of_diamonds,
five_of_hearts,
eight_of_hearts,
ace_of_clubs
]
)
| 3.75 | 4 |
tests/test_generics.py | danpoland/pyramid-restful | 18 | 12799849 | <filename>tests/test_generics.py
from unittest import TestCase, mock
from pyramid import testing
from pyramid.httpexceptions import HTTPNotFound
from sqlalchemy import create_engine, Column, String, Integer
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm.query import Query
from marshmallow import Schema, fields
from pyramid_restful import generics
from pyramid_restful.filters import FieldFilter
engine = create_engine('sqlite://')
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String)
class UserSchema(Schema):
id = fields.Integer()
name = fields.String()
class UserAPIView(generics.GenericAPIView):
model = User
schema_class = UserSchema
pagination_class = mock.Mock()
filter_classes = (FieldFilter,)
filter_fields = (User.name,)
class UserOverrideView(generics.GenericAPIView):
model = User
lookup_column = (User, 'id')
def get_query(self):
return self.request.dbsession.query(self.model)
def get_schema_class(self, *args, **kwargs):
return UserSchema
def get_dbsession():
Session = sessionmaker()
Session.configure(bind=engine)
return Session()
class GenericAPIViewTests(TestCase):
@classmethod
def setUpClass(cls):
Base.metadata.create_all(engine)
cls.dbsession = get_dbsession()
user = User(id=1, name='testing')
user2 = User(id=2, name='testing 2')
cls.dbsession.add(user)
cls.dbsession.add(user2)
cls.dbsession.commit()
@classmethod
def tearDownClass(cls):
cls.dbsession.close()
def setUp(self):
self.request = testing.DummyRequest()
self.request.dbsession = self.dbsession
def test_get_query_w_model(self):
view = UserAPIView()
view.request = self.request
query = view.get_query()
assert isinstance(query, Query)
def test_get_query_w_override(self):
view = UserOverrideView()
view.request = self.request
query = view.get_query()
assert isinstance(query, Query)
def test_missing_model(self):
view = generics.GenericAPIView()
view.request = self.request
self.assertRaises(AssertionError, view.get_query)
def test_get_object(self):
view = UserAPIView()
view.request = self.request
view.lookup_url_kwargs = {'id': 1}
instance = view.get_object()
assert isinstance(instance, User)
assert instance.id == 1
assert instance.name == 'testing'
def test_get_object_override(self):
view = UserOverrideView()
view.request = self.request
view.lookup_url_kwargs = {'id': 1}
instance = view.get_object()
assert isinstance(instance, User)
assert instance.id == 1
assert instance.name == 'testing'
def test_get_object_not_found(self):
view = UserAPIView()
view.request = self.request
view.lookup_url_kwargs = {'id': 3}
self.assertRaises(HTTPNotFound, view.get_object)
def test_get_schema(self):
view = UserAPIView()
view.request = self.request
schema = view.get_schema()
assert isinstance(schema, UserSchema)
assert schema.context['request'] == self.request
def test_override_get_schema(self):
view = UserOverrideView()
view.request = self.request
schema = view.get_schema()
assert isinstance(schema, UserSchema)
assert schema.context['request'] == self.request
def test_filter_query(self):
view = UserAPIView()
self.request.params = {'filter[name]': 'testing'}
view.request = self.request
results = view.filter_query(view.get_query()).all()
assert len(results) == 1
assert results[0].id == 1
def test_filter_query_empty(self):
view = UserAPIView()
self.request.params = {'filter[name]': 'testing3'}
view.request = self.request
results = view.filter_query(view.get_query()).all()
assert len(results) == 0
def test_paginate_query(self):
view = UserAPIView()
view.request = self.request
query = view.get_query()
view.paginate_query(query)
assert view.paginator.paginate_query.call_count == 1
def test_no_paginator(self):
view = UserOverrideView()
view.request = self.request
query = view.get_query()
        assert view.paginate_query(query) is None
def test_get_paginated_response(self):
view = UserAPIView()
view.request = self.request
view.get_paginated_response({})
assert view.paginator.get_paginated_response.call_count == 1
class ConcreteGenericAPIViewsTest(TestCase):
def test_create_api_view_post(self):
class MockCreateApiView(generics.CreateAPIView):
def create(self, request, *args, **kwargs):
self.called = True
self.call_args = (request, args, kwargs)
view = MockCreateApiView()
data = ('test request', ('test arg',), {'test_kwarg': 'test'})
view.post('test request', 'test arg', test_kwarg='test')
assert view.called is True
assert view.call_args == data
def test_list_api_view_get(self):
class MockListApiView(generics.ListAPIView):
def list(self, request, *args, **kwargs):
self.called = True
self.call_args = (request, args, kwargs)
view = MockListApiView()
data = ('test request', ('test arg',), {'test_kwarg': 'test'})
view.get('test request', 'test arg', test_kwarg='test')
assert view.called is True
assert view.call_args == data
def test_retrieve_api_view_get(self):
class MockRetrieveApiView(generics.RetrieveAPIView):
def retrieve(self, request, *args, **kwargs):
self.called = True
self.call_args = (request, args, kwargs)
view = MockRetrieveApiView()
data = ('test request', ('test arg',), {'test_kwarg': 'test'})
view.get('test request', 'test arg', test_kwarg='test')
assert view.called is True
assert view.call_args == data
def test_destroy_api_view_delete(self):
class MockDestroyApiView(generics.DestroyAPIView):
def destroy(self, request, *args, **kwargs):
self.called = True
self.call_args = (request, args, kwargs)
view = MockDestroyApiView()
data = ('test request', ('test arg',), {'test_kwarg': 'test'})
view.delete('test request', 'test arg', test_kwarg='test')
assert view.called is True
assert view.call_args == data
def test_update_api_view_partial_update(self):
class MockUpdateApiView(generics.UpdateAPIView):
def partial_update(self, request, *args, **kwargs):
self.partial_called = True
self.partial_call_args = (request, args, kwargs)
def update(self, request, *args, **kwargs):
self.called = True
self.call_args = (request, args, kwargs)
view = MockUpdateApiView()
data = ('test request', ('test arg',), {'test_kwarg': 'test'})
view.patch('test request', 'test arg', test_kwarg='test')
assert view.partial_called is True
assert view.partial_call_args == data
view.put('test request', 'test arg', test_kwarg='test')
        assert view.called is True
        assert view.call_args == data
def test_list_create_api_view(self):
class MockListCreateApiView(generics.ListCreateAPIView):
def list(self, request, *args, **kwargs):
self.list_called = True
self.list_call_args = (request, args, kwargs)
def create(self, request, *args, **kwargs):
self.called = True
self.call_args = (request, args, kwargs)
view = MockListCreateApiView()
data = ('test request', ('test arg',), {'test_kwarg': 'test'})
view.get('test request', 'test arg', test_kwarg='test')
assert view.list_called is True
assert view.list_call_args == data
view.post('test request', 'test arg', test_kwarg='test')
assert view.called is True
assert view.call_args == data
def test_retrieve_update_api_view_get(self):
class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView):
def retrieve(self, request, *args, **kwargs):
self.called = True
self.call_args = (request, args, kwargs)
view = MockRetrieveUpdateApiView()
data = ('test request', ('test arg',), {'test_kwarg': 'test'})
view.get('test request', 'test arg', test_kwarg='test')
assert view.called is True
assert view.call_args == data

    def test_retrieve_update_api_view_put(self):
        class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView):
            def update(self, request, *args, **kwargs):
                self.called = True
                self.call_args = (request, args, kwargs)

        view = MockRetrieveUpdateApiView()
        data = ('test request', ('test arg',), {'test_kwarg': 'test'})
        view.put('test request', 'test arg', test_kwarg='test')
        assert view.called is True
        assert view.call_args == data

    def test_retrieve_update_api_view_patch(self):
        class MockRetrieveUpdateApiView(generics.RetrieveUpdateAPIView):
            def partial_update(self, request, *args, **kwargs):
                self.called = True
                self.call_args = (request, args, kwargs)

        view = MockRetrieveUpdateApiView()
        data = ('test request', ('test arg',), {'test_kwarg': 'test'})
        view.patch('test request', 'test arg', test_kwarg='test')
        assert view.called is True
        assert view.call_args == data

    def test_retrieve_destroy_api_view_get(self):
        class MockRetrieveDestroyApiView(generics.RetrieveDestroyAPIView):
            def retrieve(self, request, *args, **kwargs):
                self.called = True
                self.call_args = (request, args, kwargs)

        view = MockRetrieveDestroyApiView()
        data = ('test request', ('test arg',), {'test_kwarg': 'test'})
        view.get('test request', 'test arg', test_kwarg='test')
        assert view.called is True
        assert view.call_args == data

    def test_retrieve_destroy_api_view_delete(self):
        class MockRetrieveDestroyApiView(generics.RetrieveDestroyAPIView):
            def destroy(self, request, *args, **kwargs):
                self.called = True
                self.call_args = (request, args, kwargs)

        view = MockRetrieveDestroyApiView()
        data = ('test request', ('test arg',), {'test_kwarg': 'test'})
        view.delete('test request', 'test arg', test_kwarg='test')
        assert view.called is True
        assert view.call_args == data
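
    # The combined view exposes all four handlers; each should dispatch to
    # its corresponding action.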
    def test_retrieve_update_destroy_api_view(self):
        class MockRetrieveUpdateDestroyAPIView(generics.RetrieveUpdateDestroyAPIView):
            def retrieve(self, request, *args, **kwargs):
                self.r_called = True
                self.r_call_args = (request, args, kwargs)

            def destroy(self, request, *args, **kwargs):
                self.d_called = True
                self.d_call_args = (request, args, kwargs)

            def update(self, request, *args, **kwargs):
                self.u_called = True
                self.u_call_args = (request, args, kwargs)

            def partial_update(self, request, *args, **kwargs):
                self.p_called = True
                self.p_call_args = (request, args, kwargs)

        view = MockRetrieveUpdateDestroyAPIView()
        data = ('test request', ('test arg',), {'test_kwarg': 'test'})
        view.get('test request', 'test arg', test_kwarg='test')
        view.delete('test request', 'test arg', test_kwarg='test')
        view.put('test request', 'test arg', test_kwarg='test')
        view.patch('test request', 'test arg', test_kwarg='test')
        assert view.r_called is True
        assert view.r_call_args == data
        assert view.d_called is True
        assert view.d_call_args == data
        assert view.u_called is True
        assert view.u_call_args == data
        assert view.p_called is True
        assert view.p_call_args == data
| 2.3125 | 2 |
emLam/corpus/corenlp_preprocessor.py | DavidNemeskey/emLam | 2 | 12799850 | <reponame>DavidNemeskey/emLam<gh_stars>1-10
#!/usr/bin/env python3
"""A preprocessor that invokes a Stanford CoreNLP server for analysis."""
from __future__ import absolute_import, division, print_function

from emLam.corenlp import CoreNLP
from emLam.corpus.preprocessor_base import Preprocessor


class CoreNlpPreprocessor(Preprocessor):
    """A preprocessor that invokes a Stanford CoreNLP server for analysis."""
    NAME = 'CoreNLP'
    DESCRIPTION = 'CoreNLP preprocessor'

    def __init__(self, corenlp_props, max_length=10000):
        self.corenlp_props = corenlp_props
        self.max_length = max_length
        self.corenlp = None

    def initialize(self):
        """
        The CoreNLP server is initialized here so that it is only created
        once, in the worker process, not in the main one.
        """
        if not self.corenlp:
            self.corenlp = CoreNLP(self.corenlp_props)

    def cleanup(self):
        if self.corenlp:
            del self.corenlp
            self.corenlp = None
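
    # preprocess() emits one token per line as tab-separated fields, with
    # sentences separated by empty lines (a CoNLL-like layout).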
    def preprocess(self, input_stream, output_stream):
        for chunk, parsed in enumerate(self.__parse_with_corenlp(input_stream)):
            if chunk > 0:
                # Preserve the empty sentence separator line between chunks
                print(u'', file=output_stream)
            print(u'\n\n'.join(u'\n'.join(u'\t'.join(token) for token in sent)
                               for sent in parsed),
                  file=output_stream)

    def __parse_with_corenlp(self, input_stream):
        """
        Parses the input with CoreNLP. This generator is called from
        preprocess(). It reads batches of text from input_stream and yields
        each parsed chunk.
        """
        text = ''
        for txt in input_stream:
            text += txt
            if len(text) > self.max_length:
                yield self.corenlp.parse(text)
                text = ''
        if text:
            yield self.corenlp.parse(text)
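
    # The '%' placeholder in the properties file name is replaced with the
    # process id, so each worker process gets its own CoreNLP server.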
    @classmethod
    def instantiate(cls, process_id=1, **kwargs):
        mod_args = dict(kwargs)
        mod_args['corenlp_props'] = kwargs['corenlp_props'].replace(
            '%', str(process_id))
        if process_id > 1 and mod_args['corenlp_props'] == kwargs['corenlp_props']:
            raise ValueError('At least as many CoreNLP servers must be '
                             'specified as there are processes.')
        return super(CoreNlpPreprocessor, cls).instantiate(process_id,
                                                           **mod_args)
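
# A minimal usage sketch (illustrative, not part of the module). It assumes
# that the Preprocessor base class forwards the remaining keyword arguments
# to __init__, and that the properties file name contains a '%' placeholder
# for the process id, as instantiate() above expects:
#
#     pre = CoreNlpPreprocessor.instantiate(
#         process_id=2, corenlp_props='corenlp-%.props', max_length=10000)
#     pre.initialize()
#     with open('input.txt') as inf, open('output.tsv', 'w') as outf:
#         pre.preprocess(inf, outf)
#     pre.cleanup()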
| 3.234375 | 3 |