content (stringlengths 0–894k) | type (stringclasses 2 values) |
---|---|
#!/usr/bin/python
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# Created by: Anderson Brito
# Email: [email protected]
#
# singleChain.py -> This code splits a multichain PDB file into its
# multiple individual chains, saving them as output.
#
# Usage: python singleChain.py workingDirectory pdbFile
#
# Release date: 30/12/2017
# Last update: 30/12/2017
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
from Bio.PDB import PDBParser, PDBIO
from sys import argv
import os
dir = argv[1]
inFile = argv[2]
newname = '_chain'
io = PDBIO()
pdb = PDBParser().get_structure(newname, dir + inFile)
for chain in pdb.get_chains():
io.set_structure(chain)
io.save(dir + inFile.split('.')[0] + newname + chain.get_id() + ".pdb")
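# Each chain is written as <input basename>_chain<chain ID>.pdb; for example
# (hypothetical filename), 1abc.pdb with chains A and B yields 1abc_chainA.pdb
# and 1abc_chainB.pdb in the same working directory.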
| python |
import random as python_random
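# safe_sample_edges samples up to sample_size edges that can be removed without
# disconnecting the graph: it first restricts the input to an (approximate)
# giant component if the graph is disconnected, then repeatedly marks random
# candidate edges, walks the remaining 'keep' edges, restores any candidate
# that reconnects the walk to an unvisited region, and collects the rest as
# removable ('remove') edges. Returns the (possibly reduced) node and edge
# sets plus the sampled removable edges.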
def safe_sample_edges(nodes, edges, sample_size):
edges = set(edges)
nodes = list(nodes)
edge_label = {}
node2edges = {node : [] for node in nodes}
for edge in edges:
node2edges[edge[0]].append(edge)
node2edges[edge[1]].append(edge)
edge_label[edge] = 'keep'
def walk(source, visited):
queue = set()
if source not in visited:
queue.add(source)
while len(queue) > 0:
current = queue.pop()
visited.add(current)
for edge in node2edges[current]:
if edge_label[edge] == 'keep':
if edge[0] == current:
added = edge[1]
else:
added = edge[0]
if added not in visited:
queue.add(added)
# choose the giant component
visited = set()
walk(python_random.choice(nodes), visited)
if len(visited) != len(nodes):
print('Graph is disconnected, will try to choose the giant component')
while len(visited) < 0.8 * len(nodes):
visited = set()
walk(python_random.choice(nodes), visited)
print('visited %d out of %d nodes' % (len(visited), len(nodes)))
edges = set([edge for edge in edges if edge[0] in visited and edge[1] in visited])
nodes = list(visited)
node2edges = {node : [] for node in nodes}
for edge in edges:
node2edges[edge[0]].append(edge)
node2edges[edge[1]].append(edge)
edge_label[edge] = 'keep'
sampled_edges = set()
iteration = 0
while len(sampled_edges) < sample_size:
candidates = python_random.sample(list(edges - sampled_edges), sample_size - len(sampled_edges))
for edge in candidates:
edge_label[edge] = 'candidate'
visited = set()
source = python_random.choice(nodes)
while len(visited) < len(nodes):
assert(source not in visited)
walk(source, visited)
for edge in candidates:
if edge_label[edge] == 'candidate':
if edge[0] not in visited and edge[1] in visited:
edge_label[edge] = 'keep'
source = edge[0]
break
elif edge[1] not in visited and edge[0] in visited:
edge_label[edge] = 'keep'
source = edge[1]
break
elif edge[0] in visited and edge[1] in visited:
edge_label[edge] = 'remove'
else:
pass
for edge in edges:
if edge_label[edge] == 'remove':
sampled_edges.add(edge)
assert(edge_label[edge] != 'candidate')
print('Iteration %d, sampled edges %d' % (iteration, len(sampled_edges)))
iteration += 1
return nodes, edges, sampled_edges
| python |
import json
import os
from pb.homing_motor import HomingMotor, build_from_config, build
def init_motors(config: dict) -> tuple:
try:
x = build_from_config(config, 'x')
except RuntimeError:
x = build("x", dir_pin=5, step_pin=6, ms1_pin=26, ms2_pin=19, ms3_pin=13, sensor_pin=24,
max_steps=770, inverted=False, pulse_delay=.001)
try:
y = build_from_config(config, 'y')
except RuntimeError:
y = build("y", dir_pin=27, step_pin=22, ms1_pin=9, ms2_pin=10, ms3_pin=11, sensor_pin=23,
max_steps=905, inverted=False)
try:
z = build_from_config(config, 'z')
except RuntimeError:
z = build("z", dir_pin=1, step_pin=12, ms1_pin=21, ms2_pin=20, ms3_pin=16, sensor_pin=25,
max_steps=4000, inverted=True, pulse_delay=.00001)
if 'position' not in config:
position = {}
else:
position = config['position']
for motor in [z, x, y]:
pos_key = motor.get_name()
if pos_key in position:
motor.set_pos(position[pos_key])
else:
print('{} position unknown. Calibrating...'.format(motor.get_name()))
count = motor.go_home()
print('{} moved {}/{} steps back to find MIN'
.format(motor.get_name(), count, motor.get_step_size()))
position[pos_key] = 0
return x, y, z
def read_config():
try:
home = os.path.expanduser('~/')
with open(home + '.plotbot.json', 'r') as f:
data = json.load(f)
except FileNotFoundError:
data = {}
write_config(data)
return data
def write_config(data):
home_dir = os.path.expanduser('~/')
with open(home_dir + '.plotbot.json', 'w') as f:
json.dump(data, f, indent=4, sort_keys=True)
f.write('\n')
def save(config: dict, x: HomingMotor, y: HomingMotor, z: HomingMotor):
position = {}
for m in [x, y, z]:
position[m.get_name()] = m.get_pos()
config[m.get_name()] = m.get_config()
config['position'] = position
write_config(config)
def named_point(config: dict, motor_name: str, point_name: str):
points = config['named-points'][motor_name]
return points[point_name]
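# A minimal usage sketch (an assumption, not part of the original module):
# read the saved config, restore or home the motors, then persist their state.
if __name__ == '__main__':
    _config = read_config()
    _x, _y, _z = init_motors(_config)
    save(_config, _x, _y, _z)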
| python |
from django.conf.urls import url
from dal_queryset_sequence.fields import QuerySetSequenceModelField
from queryset_sequence import QuerySetSequence
from dal_select2_queryset_sequence.widgets import QuerySetSequenceSelect2
from dal_select2_queryset_sequence.views import Select2QuerySetSequenceAutoView
class Select2GenericForeignKeyModelField(QuerySetSequenceModelField):
"""
Field that generate automatically the view for the QuerySetSequenceSelect2 widget
"""
def __init__(self, *args, model_choice=None, field_id=None, **kwargs):
self.field_id = field_id if field_id else id(self)
if model_choice:
self.model_choice = model_choice
models_queryset = [model[0].objects.all() for model in model_choice]
kwargs['queryset'] = QuerySetSequence(*models_queryset)
super().__init__(*args, **kwargs)
def as_url(self, form):
url_name = '{}_autocomp_{}'.format(form.__name__, self.field_id)
self.widget = QuerySetSequenceSelect2(url_name)
# generate the class to work with multiple gfk (can't work on instance level)
AutoView = type('Autoview{}{}'.format(form.__name__, self.field_id),
(Select2QuerySetSequenceAutoView,),
{'model_choice': self.model_choice}) # send to the view the model and filter list
return url(r'^{}_{}_autocomp$'.format(form.__name__, self.field_id),
AutoView.as_view(), name=url_name)
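# A hedged usage sketch (Product/Article and CommentForm are hypothetical, not
# part of this module): the field is declared on a form, and the URL returned
# by as_url() is appended to urlpatterns so the autocomplete view for the
# generated QuerySetSequence gets registered.
#
#     class CommentForm(forms.Form):
#         commented_object = Select2GenericForeignKeyModelField(
#             model_choice=[(Product, 'name'), (Article, 'title')])
#
#     field = CommentForm.base_fields['commented_object']
#     urlpatterns = [field.as_url(CommentForm)]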
| python |
##
# \file data_anonymizer.py
#
# \author Michael Ebner ([email protected])
# \date Dec 2016
#
# Import libraries
import string
import random
import datetime
import os
import re
try:
    import cPickle
except ImportError:  # Python 3
    import pickle as cPickle
# Import modules
import pysitk.python_helper as ph
class DataAnonymizer(object):
def __init__(self,
dictionary=None,
identifiers=None,
prefix_identifiers="",
filenames=None):
self._dictionary = dictionary
self._identifiers = identifiers
self._prefix_identifiers = prefix_identifiers
self._filenames = filenames
##
# Generate identifiers
# \date 2016-12-06 18:30:56+0000
#
# \param self The object
# \param length The length
#
# \return { description_of_the_return_value }
#
def generate_identifiers(self, randomized=False):
if self._filenames is None:
raise ValueError("Filenames are not set yet")
# Create random identifier based on string
if randomized:
# Define amount of digits of random identifier
digits = 4
self._identifiers = [None] * len(self._filenames)
for j in range(0, len(self._filenames)):
self._identifiers[j] = ''.join(random.choice(
string.ascii_uppercase + string.digits)
for i in range(digits))
# Identifier based on alphabet
else:
# ['a', 'b', 'c', ...]
alphabet_str = list(string.ascii_lowercase)
# Set identifiers
self._identifiers = alphabet_str[0:len(self._filenames)]
##
# Sets/Gets the identifiers.
# \date 2016-12-06 18:29:49+0000
#
def set_identifiers(self, identifiers):
self._identifiers = identifiers
def get_identifiers(self):
return self._identifiers
def read_nifti_filenames_from_directory(self, directory):
pattern = r"([a-zA-Z0-9_]+)[.](nii\.gz|nii)"
p = re.compile(pattern)
filenames = [p.match(f).group(1)
for f in os.listdir(directory) if p.match(f)]
self._filenames = filenames
##
# Sets/Gets filenames
# \date 2016-12-06 18:29:59+0000
#
def set_filenames(self, filenames):
self._filenames = filenames
def get_filenames(self):
return self._filenames
##
# Set/Get the identifier prefix
# \date 2016-12-06 18:30:19+0000
#
def set_prefix_identifiers(self, prefix_identifiers):
self._prefix_identifiers = prefix_identifiers
def get_prefix_identifiers(self):
return self._prefix_identifiers
##
# Sets/Gets dictionary
# \date 2016-12-06 18:29:59+0000
#
def set_dictionary(self, dictionary):
self._dictionary = dictionary
def get_dictionary(self):
return self._dictionary
##
# Generate a random dictionary based on given filenames and identifiers
# \date 2016-12-06 18:33:32+0000
#
# \param self The object
# \post self._dictionary created
#
def generate_randomized_dictionary(self):
self._dictionary = {}
if len(self._filenames) != len(self._identifiers):
raise ValueError("Length of filenames does not match identifiers")
# Shuffle identifiers
random.shuffle(self._identifiers)
# Create dictionary
for i in range(0, len(self._filenames)):
basename = os.path.basename(self._filenames[i])
filename, ext = ph.strip_filename_extension(basename)
# Update identifier including the prefix
self._identifiers[i] = "%s%s.%s" % (
self._prefix_identifiers,
self._identifiers[i],
ext
)
# Create dictionary
self._dictionary[self._identifiers[i]] = basename
##
# Writes a dictionary.
# \date 2016-12-06 19:26:22+0000
#
# \param self The object
# \param path_to_file The path to file (".o" extension)
# \param filename_backup The filename backup
# \param verbose The verbose
#
def write_dictionary(self,
path_to_file,
filename_backup=None,
verbose=False):
directory = os.path.dirname(path_to_file)
filename, ext = ph.strip_filename_extension(
os.path.basename(path_to_file))
ph.create_directory(directory)
# Write backup file (human readable)
if filename_backup is None:
    path_to_file_backup = os.path.join(
        directory, "%s_backup_human_readable.txt" % filename)
else:
    path_to_file_backup = os.path.join(directory, filename_backup)
# Save randomized dictionary
f = open(path_to_file, 'wb')
cPickle.dump(self._dictionary, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
date = ph.get_current_date()
time = ph.get_current_time()
file_handle = open(path_to_file_backup, "w")
text = "## Randomized Dictionary " + date + " " + time + "\n"
file_handle.write(text)
file_handle.close()
# Print in an alphabetical order
keys = sorted(self._dictionary.keys())
for i in range(0, len(self._filenames)):
file_handle = open(path_to_file_backup, "a")
text = keys[i] + " : " + self._dictionary[keys[i]] + "\n"
file_handle.write(text)
file_handle.close()
if verbose:
print("\t%s : %s" % (keys[i], self._dictionary[keys[i]]))
ph.print_info("Anonymization dictionary written to '%s'" %
path_to_file)
##
# Reads a dictionary.
# \date 2016-12-06 19:35:51+0000
#
# \param self The object
# \param path_to_file The path to file
#
def read_dictionary(self, path_to_file):
# Read dictionary
f = open(path_to_file, 'rb')
self._dictionary = cPickle.load(f)
f.close()
# Retrieve identifiers and filenames
self._identifiers = list(self._dictionary.keys())
##
# Print dictionary line by line
# \date 2016-12-06 19:47:12+0000
#
# \param self The object
#
def print_dictionary(self):
# Print in an alphabetical order
print("Content of current dictionary:")
keys = sorted(self._dictionary.keys())
for i in range(0, len(self._filenames)):
print("\t%s : %s" % (keys[i], self._dictionary[keys[i]]))
def anonymize_files(self, dir_output):
ph.create_directory(dir_output)
filenames_in = [os.path.basename(f) for f in self._filenames]
for i in range(0, len(self._filenames)):
filename_anonymized = self._identifiers[i]
filename_original = self._dictionary[
self._identifiers[i]]
try:
index = filenames_in.index(filename_original)
except ValueError:
raise IOError(
"Given filenames (--filenames) do not match the ones given in the dictionary")
path_to_file_anon = os.path.join(dir_output, filename_anonymized)
cmd = "cp -p "
cmd += self._filenames[index] + " "
cmd += path_to_file_anon + " "
# print(cmd)
ph.execute_command(cmd)
##
# Reveals the anonymization and adds the original filename next to the
# encryption.
# \date 2016-12-06 20:27:23+0000
#
# \param self The object
# \param directory The directory
# \param filename_extension The filename extension
#
# \return revealed filenames as list of strings
#
def reveal_anonymized_files(self, directory):
ph.create_directory(directory)
filenames_revealed = []
for i in range(0, len(self._filenames)):
basename_anonymized = os.path.basename(self._filenames[i])
filename_anonymized = ph.strip_filename_extension(basename_anonymized)[0]
try:
basename_revealed = self._dictionary[basename_anonymized]
except KeyError:
raise IOError("Dictionary does not match given (anonymized) filenames")
filename_revealed = "%s_%s" % (
filename_anonymized, basename_revealed)
# filename_anonymized = self._identifiers[i] + filename_extension
# filename_revealed = self._identifiers[i] + "_" + \
# self._dictionary[self._identifiers[i]] + filename_extension
# filename_revealed = re.sub("_masked_srr", "", filename_revealed)
# path_to_file_anon = os.path.join(directory, filename_anonymized)
path_to_file_reve = os.path.join(directory, filename_revealed)
# if not os.path.isfile(path_to_file_anon):
# print("%s: Nothing to reveal" % (filename_anonymized))
cmd = "cp -p "
cmd += self._filenames[i] + " "
cmd += path_to_file_reve + " "
# print(cmd)
ph.execute_command(cmd)
filenames_revealed.append(filename_revealed)
return filenames_revealed
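# A rough usage sketch of the intended call order (paths are hypothetical):
#
#     anonymizer = DataAnonymizer(prefix_identifiers="subject_")
#     anonymizer.read_nifti_filenames_from_directory("/path/to/nifti/")
#     anonymizer.generate_identifiers(randomized=True)
#     anonymizer.generate_randomized_dictionary()
#     anonymizer.write_dictionary("/path/to/dictionary.o")
#     anonymizer.anonymize_files("/path/to/anonymized/")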
| python |
#!/usr/bin/env python3
import contextlib
import functools
import re
import itertools
import argparse
import os
import io
import copy
import json
from importlib import resources
from collections import UserDict
from typing import Optional, Sequence, Mapping, Any, IO
# TODO is this actually safe?
import mktcmenu_schemas
import jsonschema
import yaml
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import SafeLoader as Loader, SafeDumper as Dumper
yaml_load = functools.partial(yaml.load, Loader=Loader)
yaml_dump = functools.partial(yaml.dump, Dumper=Dumper, default_flow_style=False)
RE_AUTOID_DELIM = re.compile(r'[\W_]+')
RE_CPP_NAME = re.compile(r'^[A-Za-z_][A-Za-z0-9_]*$')
DESC_SUFFIX = '.tcmdesc.yaml'
MAP_SUFFIX = '.tcmmap.yaml'
SRC_HEADER = '''
/**
* Automatically managed by mktcmenu.
*
* DO NOT manually edit this file. Changes made in this file will be overwritten
* on next descriptor generation.
*/
'''.lstrip()
def parse_args():
p = argparse.ArgumentParser()
p.add_argument('desc', help='Menu descriptor file (*.tcmdesc.yaml).')
p.add_argument('-e', '--eeprom-map', help='Override EEPROM mapping file location (defaults to <descriptor basename without suffix>.tcmmap.yaml).')
p.add_argument('-c', '--eeprom-capacity', type=int, help='Set EEPROM capacity (only used during initialization/defragmentation of the mapping file).')
p.add_argument('-o', '--output-dir', help='Output directory (defaults to <descriptor dirname>/gen).')
p.add_argument('-s', '--source-dir', default='.', help='C++ source directory (defaults to .).')
p.add_argument('-i', '--include-dir', default='.', help='Include directory (defaults to .).')
p.add_argument('-p', '--pgmspace', action='store_true', default=False, help='Enable pgmspace support for some Arduino platforms (e.g. avr8 and esp8266).')
return p, p.parse_args()
# C++ code emitter helpers
def emit_cppdef(buf, name, type_, is_static=False, is_const=False, is_constexpr=False, is_extern=False, nmemb=-1, init=False, extra_decl=tuple()):
extern_kw = 'extern ' if is_extern else ''
static_kw = 'static ' if is_static else ''
const_kw = 'const ' if is_const else ''
constexpr_kw = 'constexpr ' if is_constexpr else ''
extra_decl_str = f' {" ".join(extra_decl)}' if len(extra_decl) != 0 else ''
if nmemb < 0:
nmemb_str = ''
elif nmemb == 0:
nmemb_str = '[]'
else:
nmemb_str = f'[{nmemb}]'
buf.write(f'{extern_kw}{static_kw}{constexpr_kw}{const_kw}{type_} {name}{nmemb_str}{extra_decl_str}{" = " if init else ""}')
def emit_cppeol(buf):
buf.write(';\n')
@contextlib.contextmanager
def emit_cppobjarray(buf, multiline=False):
buf.write('{')
buf.write('\n' if multiline else ' ')
try:
yield buf
finally:
buf.write('\n' if multiline else ' ')
buf.write('}')
def emit_cppindent(buf, level=1):
buf.write(' ' * level)
def cppstr(str_):
str_escaped = str(str_).replace('"', r'\"')
return f'"{str_escaped}"'
class EEPROMMap(UserDict):
def __init__(self, capacity=0xffff, reserve=0):
super().__init__()
super().__setitem__('_reserved', {'offset': 0, 'size': 2})
self._auto_index = 2
self.capacity = capacity
self.varstore_bar = self.capacity - reserve
self.spare_segments = {}
@property
def auto_index(self):
return self._auto_index
def auto_allocate(self, name, size):
max_space = min(self.varstore_bar, self.capacity)
offset = self._auto_index
if offset >= 0xffff or offset+size > 0xffff:
raise RuntimeError('EEPROM address space exhausted. Please run defragmentation and bump EEPROM mapping version.')
elif offset >= max_space or offset+size >= max_space:
raise RuntimeError('No space left on EEPROM. Please run defragmentation and bump EEPROM mapping version.')
allocated = {'offset': offset, 'size': size}
super().__setitem__(name, allocated)
self._auto_index += size
return allocated
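# Allocation model: the first two bytes are reserved (the '_reserved' entry set
# in __init__), and auto_allocate() hands out consecutive offsets starting at
# _auto_index, raising once the spare area above varstore_bar or the 0xffff
# address limit would be reached.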
def check_consistency(self):
pass # TODO perform intersection to find holes/overlaps/oob allocations
@classmethod
def load(cls, fmap: IO[str]):
data = yaml_load(fmap)
obj = cls()
obj.capacity = data['capacity']
obj.varstore_bar = data['varstore-bar']
obj._auto_index = data['auto-index']
if 'vars' in data:
obj.data.clear()
obj.data.update(data['vars'])
if 'spare-segments' in data:
obj.spare_segments.update(data['spare-segments'])
return obj
def save(self, fmap: IO[str]):
data = {
'capacity': self.capacity,
'varstore-bar': self.varstore_bar,
'auto-index': self._auto_index,
'vars': self.data,
}
if len(self.spare_segments) != 0:
data['spare-segments'] = self.spare_segments
yaml_dump(data, fmap)
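# A *.tcmmap.yaml file written by save() then looks roughly like this
# (illustrative values only):
#
#     auto-index: 4
#     capacity: 1024
#     vars:
#       _reserved:
#         offset: 0
#         size: 2
#       Brightness:
#         offset: 2
#         size: 2
#     varstore-bar: 1024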
# Data model for menu entries
class MenuBaseType:
auto_index = 1
serializable = False
cpp_type_prefix = ''
render_callback_parent = ''
def __init__(self, props, alias):
v = functools.partial(self._validate_entry, props)
self._global_index = MenuBaseType.auto_index
MenuBaseType.auto_index += 1
self.id_ = v('id')
self.id_suffix = v('id-suffix')
self.name = v('name', required=True)
self.persistent = v('persistent', default=False)
self.read_only = v('read-only', default=False)
self.local_only = v('local-only', default=False)
self.visible = v('visible', default=True)
self.callback = v('callback')
@staticmethod
def _validate_entry(props, key, required=False, default=None, extra_validation=None):
if required and key not in props:
raise ValueError(f'Required property {key} is missing.')
if required:
value = props[key]
else:
value = props.get(key, default)
if extra_validation is not None:
extra_validation(value)
return value
def emit_code(self, ctx: 'CodeEmitterContext'):
raise NotImplementedError()
def get_serialized_size(self):
raise NotImplementedError()
def get_type_name(self):
raise NotImplementedError()
def emit_default_flags_block(self, buf, namespace: Sequence["MenuBaseType"]):
id_ = self.generate_id()
ns_id = ''.join(ns.generate_id() for ns in namespace)
menu_name = f'menu{ns_id}{id_}'
emit_cppindent(buf, level=1)
if self.read_only:
buf.write(f'{menu_name}.setReadOnly(true);')
if self.local_only:
buf.write(f'{menu_name}.setLocalOnly(true);')
if not self.visible:
buf.write(f'{menu_name}.setVisible(false);')
def emit_simple_static_menu_item(self, ctx: 'CodeEmitterContext', minfo_extra: Sequence[Any], menu_item_extra: Sequence[Any], cpp_type_prefix: Optional[str] = None, cpp_type_prefix_minfo: Optional[str] = None, next_entry_namespace: Sequence["MenuBaseType"] = None):
eeprom_offset = self.find_or_allocate_eeprom_space(ctx.eeprom_map)
id_ = self.generate_id()
ns_id = ''.join(ns.generate_id() for ns in ctx.namespace)
if next_entry_namespace is None:
next_ns_id = ns_id
else:
next_ns_id = ''.join(ns.generate_id() for ns in next_entry_namespace)
minfo_name = f'minfo{ns_id}{id_}'
menu_name = f'menu{ns_id}{id_}'
cpp_type_prefix = self.__class__.cpp_type_prefix if cpp_type_prefix is None else cpp_type_prefix
cpp_type_prefix_minfo = cpp_type_prefix if cpp_type_prefix_minfo is None else cpp_type_prefix_minfo
minfo_type = f'{cpp_type_prefix_minfo}MenuInfo'
menu_type = f'{cpp_type_prefix}MenuItem'
next_name = f'menu{next_ns_id}{ctx.next_entry.generate_id()}' if ctx.next_entry is not None else None
next_name_ref = f'&{next_name}' if next_name is not None else 'nullptr'
minfo_builtin = (cppstr(self.name), self._global_index, hex(eeprom_offset),)
menu_item_first = (f'&{minfo_name}',)
menu_item_last = (next_name_ref,)
emit_cppdef(ctx.bufsrc, minfo_name, minfo_type, is_const=True, is_static=True, extra_decl=('PROGMEM', ) if ctx.use_pgmspace else tuple(), init=True)
with emit_cppobjarray(ctx.bufsrc):
ctx.bufsrc.write(', '.join(map(str, itertools.chain(minfo_builtin, minfo_extra))))
emit_cppeol(ctx.bufsrc)
emit_cppdef(ctx.bufsrc, menu_name, menu_type)
ctx.bufsrc.write(f'({", ".join(map(str, itertools.chain(menu_item_first, menu_item_extra, menu_item_last)))})')
emit_cppeol(ctx.bufsrc)
ctx.bufsrc.write('\n')
emit_cppdef(ctx.bufhdr, menu_name, menu_type, is_extern=True)
emit_cppeol(ctx.bufhdr)
return menu_name
def emit_simple_dynamic_menu_item(self, ctx: 'CodeEmitterContext', menu_item_extra: Sequence[Any], name_prefix: Optional[str] = None, cpp_type_prefix: Optional[str] = None, render_callback_parent: Optional[str] = None, global_index_order: str = 'after_callback', next_entry_namespace: Sequence["MenuBaseType"] = None, custom_callback_ref: Optional[str] = None):
# global_index_order: first, after_callback, na
eeprom_offset = self.find_or_allocate_eeprom_space(ctx.eeprom_map)
id_ = self.generate_id()
ns_id = ''.join(ns.generate_id() for ns in ctx.namespace)
if next_entry_namespace is None:
next_ns_id = ns_id
else:
next_ns_id = ''.join(ns.generate_id() for ns in next_entry_namespace)
menu_name = f'menu{name_prefix or ""}{ns_id}{id_}'
if custom_callback_ref is None:
render_callback_name = f'fn{ns_id}{id_}RtCall'
else:
render_callback_name = custom_callback_ref
cpp_type_prefix = self.__class__.cpp_type_prefix if cpp_type_prefix is None else cpp_type_prefix
render_callback_parent = self.__class__.render_callback_parent if render_callback_parent is None else render_callback_parent
menu_type = f'{cpp_type_prefix}MenuItem'
next_name = f'menu{next_ns_id}{ctx.next_entry.generate_id()}' if ctx.next_entry is not None else None
next_name_ref = f'&{next_name}' if next_name is not None else 'nullptr'
if global_index_order == 'after_callback':
menu_item_first = (render_callback_name, self._global_index, )
elif global_index_order == 'first':
menu_item_first = (self._global_index, render_callback_name, )
elif global_index_order == 'na':
menu_item_first = (render_callback_name, )
else:
raise ValueError(f'Invalid global_index_order {global_index_order}')
menu_item_last = (next_name_ref, )
if custom_callback_ref is None:
callback_factory_params = ', '.join(map(str, (
render_callback_name, render_callback_parent,
cppstr(self.name), hex(eeprom_offset), self.get_callback_ref()
)))
ctx.bufsrc.write(f'RENDERING_CALLBACK_NAME_INVOKE({callback_factory_params})\n')
emit_cppdef(ctx.bufsrc, menu_name, menu_type)
ctx.bufsrc.write(f'({", ".join(map(str, itertools.chain(menu_item_first, menu_item_extra, menu_item_last)))})')
emit_cppeol(ctx.bufsrc)
ctx.bufsrc.write('\n')
emit_cppdef(ctx.bufhdr, menu_name, menu_type, is_extern=True)
emit_cppeol(ctx.bufhdr)
return menu_name
def get_callback_ref(self):
return 'NO_CALLBACK' if self.callback is None or len(self.callback) == 0 else f'{self.callback}'
def generate_id(self):
if self.id_ is not None:
id_ = self.id_
else:
id_ = ''.join(w.capitalize() for w in RE_AUTOID_DELIM.split(self.name))
#id_ = f'{id_}{self.get_type_name()}{self.id_suffix if self.id_suffix is not None else ""}'
id_ = f'{id_}{self.id_suffix if self.id_suffix is not None else ""}'
return id_
def find_or_allocate_eeprom_space(self, eeprom_map: EEPROMMap):
id_ = self.generate_id()
if self.__class__.serializable and self.persistent and id_ in eeprom_map:
offsize = eeprom_map[id_]
if offsize['size'] == self.get_serialized_size():
return offsize['offset']
else:
# TODO maybe give a warning about this?
del eeprom_map[id_]
new_offsize = eeprom_map.auto_allocate(id_, self.get_serialized_size())
return new_offsize['offset']
elif self.persistent:
offsize = eeprom_map.auto_allocate(id_, self.get_serialized_size())
return offsize['offset']
else:
return 0xffff
def list_callbacks(self):
return {('on_change', self.callback)} if self.callback is not None else set()
class CodeEmitterContext:
def __init__(self, bufsrc: IO[str], bufhdr: IO[str], eeprom_map: EEPROMMap, namespace: Sequence[MenuBaseType], next_entry: MenuBaseType, use_pgmspace: bool):
self.bufsrc = bufsrc
self.bufhdr = bufhdr
self.eeprom_map = eeprom_map
self.namespace = namespace
self.next_entry = next_entry
self.use_pgmspace = use_pgmspace
class AnalogType(MenuBaseType):
serializable = True
cpp_type_prefix = 'Analog'
def __init__(self, props, alias):
super().__init__(props, alias)
v = functools.partial(self._validate_entry, props)
max_ = v('max', default=None)
min_ = v('min', default=None)
self.precision = v('precision', default=None)
self.offset = v('offset', default=None)
self.divisor = v('divisor', default=1)
self.unit = v('unit')
if self.offset is None and min_ is None:
self.offset = 0
elif self.offset is None:
self.offset = min_
elif self.offset is not None and min_ is not None:
raise ValueError('Offset and min are mutually exclusive.')
if self.precision is None and max_ is None:
raise ValueError(f'One of precision or max must be specified.')
elif self.precision is None:
self.precision = max_ - self.offset
elif self.precision is not None and max_ is not None:
raise ValueError('Precision and max are mutually exclusive.')
def get_serialized_size(self):
return 2
def get_type_name(self):
return 'I'
def emit_code(self, ctx: CodeEmitterContext):
self.emit_simple_static_menu_item(ctx, (
self.precision, self.get_callback_ref(), self.offset, self.divisor,
cppstr(self.unit) if self.unit is not None else cppstr(""),
), (
0,
))
class LargeNumberType(MenuBaseType):
serializable = True
cpp_type_prefix = 'EditableLargeNumber'
render_callback_parent = 'largeNumItemRenderFn'
def __init__(self, props, alias):
super().__init__(props, alias)
v = functools.partial(self._validate_entry, props)
self.decimal_places = v('decimal-places', default=0)
self.length = v('length', default=12)
self.signed = v('signed', default=False)
def get_serialized_size(self):
# TODO is this 7 or 8?
# https://github.com/davetcc/tcMenuLib/blob/3d4ae0621df020c3919e3512a5c33b9b5a1cef6f/src/EepromItemStorage.cpp#L37-L41
# The source shows 7 (sign byte+12 nibbles) but the editor shows 8
return 7
def get_type_name(self):
return f'LN'
def emit_code(self, ctx: CodeEmitterContext):
self.emit_simple_dynamic_menu_item(ctx, (
self.length, self.decimal_places, str(self.signed).lower(),
), global_index_order='after_callback')
class FloatType(MenuBaseType):
cpp_type_prefix = 'Float'
def __init__(self, props, alias):
super().__init__(props, alias)
v = functools.partial(self._validate_entry, props)
self.decimal_places = v('decimal-places', default=2)
def get_serialized_size(self):
raise ValueError('FloatType is not serializable')
def get_type_name(self):
return f'F'
def emit_code(self, ctx: CodeEmitterContext):
self.emit_simple_static_menu_item(ctx, (
self.decimal_places, self.get_callback_ref()
), tuple())
class EnumType(MenuBaseType):
serializable = True
cpp_type_prefix = 'Enum'
def __init__(self, props, alias):
super().__init__(props, alias)
v = functools.partial(self._validate_entry, props)
self.options = v('options', required=True)
def get_serialized_size(self):
return 2
def get_type_name(self):
return 'E'
def emit_code(self, ctx: CodeEmitterContext):
ns_id = ''.join(ns.generate_id() for ns in ctx.namespace)
enum_str_name = f'enumStr{ns_id}{self.generate_id()}'
# Write enum item strings
for i, str_ in enumerate(self.options):
emit_cppdef(ctx.bufsrc, f'{enum_str_name}_{i}', 'char', is_const=True, is_static=True, nmemb=0, init=True, extra_decl=('PROGMEM', ) if ctx.use_pgmspace else tuple())
ctx.bufsrc.write(cppstr(str_))
emit_cppeol(ctx.bufsrc)
nmemb = len(self.options)
emit_cppdef(ctx.bufsrc, enum_str_name, 'char * const', is_const=True, is_static=True, nmemb=nmemb, init=True, extra_decl=('PROGMEM', ) if ctx.use_pgmspace else tuple())
with emit_cppobjarray(ctx.bufsrc, multiline=True):
ctx.bufsrc.write(',\n'.join(f' {enum_str_name}_{i}' for i in range(nmemb)))
emit_cppeol(ctx.bufsrc)
# ew
self.emit_simple_static_menu_item(ctx, (
nmemb - 1, self.get_callback_ref(), enum_str_name,
) ,(0, ))
class ScrollChoiceType(MenuBaseType):
serializable = True
cpp_type_prefix = 'ScrollChoice'
render_callback_parent = 'enumItemRenderFn'
def __init__(self, props, alias):
super().__init__(props, alias)
v = functools.partial(self._validate_entry, props)
self.item_size = v('item-size', required=True)
self.items = v('items', required=True)
self.data_source = v('data-source', required=True, extra_validation=self._validate_data_source)
self._mode, self._address = self.data_source.split(':')
def get_serialized_size(self):
return 2
def get_type_name(self):
return 'SC'
@staticmethod
def _validate_data_source(ds):
_valid_entry = ('eeprom', 'array-in-eeprom', 'ram', 'array-in-ram', 'custom-renderfn')
ds_split = ds.split(':')
if len(ds_split) != 2:
raise ValueError(f'Invalid ScrollChoiceType data-source {ds} (expected exactly one ":" delimiter)')
mode, _address = ds_split
if mode not in _valid_entry:
raise ValueError(f'Invalid ScrollChoiceType mode {mode} (expecting one of {_valid_entry})')
def emit_code(self, ctx: CodeEmitterContext):
if self._mode in ('eeprom', 'array-in-eeprom'):
custom_callback = None
menu_item_extra = (0, ctx.eeprom_map.spare_segments[self._address], self.item_size, self.items)
elif self._mode in ('ram', 'array-in-ram'):
custom_callback = None
menu_item_extra = (0, self._address, self.item_size, self.items)
emit_cppdef(ctx.bufsrc, self._address, 'char *', is_const=True, is_extern=True)
emit_cppeol(ctx.bufsrc)
else:
custom_callback = self._address
menu_item_extra = (0, self.items)
self.emit_simple_dynamic_menu_item(ctx,
menu_item_extra, global_index_order='first',
custom_callback_ref=custom_callback)
def list_callbacks(self):
result = super().list_callbacks()
if self._mode == 'custom-renderfn':
result.add(('on_render', self._address))
return result
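# The data-source string pairs a mode with an address or reference, e.g.
# (illustrative values) 'array-in-eeprom:strings0', 'array-in-ram:choiceBuf'
# or 'custom-renderfn:fnMyChoiceRtCall'; the mode selects which constructor
# arguments and callbacks emit_code() generates above.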
class BooleanType(MenuBaseType):
serializable = True
cpp_type_prefix = 'Boolean'
def __init__(self, props, alias):
super().__init__(props, alias)
v = functools.partial(self._validate_entry, props)
_default = {
'boolean': 'true-false',
'bool': 'true-false',
'truefalse': 'true-false',
'switch': 'on-off',
'onoff': 'on-off',
'yesno': 'yes-no'
}
self.response = v('response', default=_default[alias], extra_validation=self._validate_response)
def get_serialized_size(self):
return 1
def get_type_name(self):
return 'B'
def emit_code(self, ctx: CodeEmitterContext):
_response_syms = {
'true-false': 'NAMING_TRUE_FALSE',
'on-off': 'NAMING_ON_OFF',
'yes-no': 'NAMING_YES_NO',
}
self.emit_simple_static_menu_item(ctx, (
1, self.get_callback_ref(), _response_syms[self.response],
) ,('false', ))
@staticmethod
def _validate_response(response):
_valid_entry = ('true-false', 'yes-no', 'on-off')
if response not in _valid_entry:
raise ValueError(f'Invalid BooleanType response {response} (expecting one of {_valid_entry})')
class SubMenuType(MenuBaseType):
cpp_type_prefix = 'Sub'
def __init__(self, props, alias):
super().__init__(props, alias)
v = functools.partial(self._validate_entry, props)
self.items = tuple(map(parse_tcdesc_yaml_object, v('items', required=True)))
self.auth = v('auth', default=False)
def get_serialized_size(self):
raise ValueError('SubMenuType is not serializable')
def get_type_name(self):
return f'M'
def emit_code(self, ctx: CodeEmitterContext):
# TODO
subctx = copy.copy(ctx)
subctx.namespace = ctx.namespace + (self, )
for i, subitem in enumerate(self.items):
subctx.next_entry = self.items[i+1] if len(self.items) > i+1 else None
subitem.emit_code(subctx)
backctx = copy.copy(ctx)
backctx.next_entry = self.items[0]
back_name = self.emit_simple_dynamic_menu_item(
backctx,
tuple(),
# Try to avoid name collision
name_prefix='back',
cpp_type_prefix='Back',
render_callback_parent='backSubItemRenderFn',
global_index_order='na',
next_entry_namespace=subctx.namespace,
)
self.emit_simple_static_menu_item(ctx, (
0, self.get_callback_ref(),
), (f'&{back_name}', ))
def list_callbacks(self):
callback_list = super().list_callbacks()
for item in self.items:
callback_list.update(item.list_callbacks())
return callback_list
class ActionType(MenuBaseType):
cpp_type_prefix = 'Action'
def get_serialized_size(self):
raise ValueError('ActionType is not serializable')
def emit_code(self, ctx: CodeEmitterContext):
# seriously having a codegen is not an excuse for inconsistent API design
self.emit_simple_static_menu_item(ctx, (
0, self.get_callback_ref(),
), tuple(), cpp_type_prefix_minfo='Any')
YAML_TAG_SUFFIXES: Mapping[str, MenuBaseType] = {
'analog': AnalogType,
'fixed': AnalogType,
'number': AnalogType,
'large-number': LargeNumberType,
'bcd': LargeNumberType,
'float': FloatType,
'enum': EnumType,
'option': EnumType,
'static-option': EnumType,
'scroll-choice': ScrollChoiceType,
'scroll': ScrollChoiceType,
'dynamic-option': ScrollChoiceType,
'boolean': BooleanType,
'bool': BooleanType,
'truefalse': BooleanType,
'switch': BooleanType,
'onoff': BooleanType,
'yesno': BooleanType,
'submenu': SubMenuType,
'menu': SubMenuType,
'action': ActionType,
# 'programmable-menu': ListType,
# 'list': ListType,
# 'multi-part': MultiPartType,
# 'struct': MultiPartType,
# 'str': MultiPartType,
# 'ipv4': MultiPartType,
# 'time-24h': MultiPartType,
# 'time-12h': MultiPartType,
# 'date': MultiPartType,
# 'color': ColorType,
# 'rgb': ColorType,
# 'rgba': ColorType,
}
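# For illustration, a descriptor item consumed by parse_tcdesc_yaml_object()
# might look like the following (field values are hypothetical; the
# authoritative schema is tcmdesc.schema.json):
#
#     items:
#       - type: analog
#         name: Volume
#         min: 0
#         max: 100
#         unit: '%'
#         persistent: true
#         callback: onVolumeChange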
#def tcdesc_multi_constructor(loader: yaml.Loader, tag_suffix, node):
# if tag_suffix in YAML_TAG_SUFFIXES:
# node_parsed = loader.construct_mapping(node)
# else:
# raise RuntimeError(f'Unknown TCMenu menu entry type {tag_suffix}')
# return YAML_TAG_SUFFIXES[tag_suffix](node_parsed, alias=tag_suffix)
#yaml.add_multi_constructor('!tcm/', tcdesc_multi_constructor, Loader=Loader)
def parse_tcdesc_yaml_object(obj: Mapping):
if obj['type'] in YAML_TAG_SUFFIXES:
constructor = YAML_TAG_SUFFIXES[obj['type']]
return constructor(obj, obj['type'])
else:
raise RuntimeError(f'Unknown TCMenu menu entry type {obj["type"]}')
# TODO change paths to path-like?
def do_codegen(desc_path: str, out_dir: str, source_dir: str, include_dir: str, instance_name: str, eeprom_map: EEPROMMap, use_pgmspace: bool):
# Load schema
with resources.open_text(mktcmenu_schemas, 'tcmdesc.schema.json') as f:
desc_schema = json.load(f)
full_source_dir = os.path.normpath(os.path.join(out_dir, source_dir))
full_include_dir = os.path.normpath(os.path.join(out_dir, include_dir))
os.makedirs(full_source_dir, exist_ok=True)
if full_source_dir != full_include_dir:
os.makedirs(full_include_dir, exist_ok=True)
menu_header_name = f'{instance_name}.h'
menu_source_name = f'{instance_name}_desc.cpp'
callback_header_name = f'{instance_name}_callback.h'
extra_header_name = f'{instance_name}_extra.h'
menu_header_path = os.path.join(out_dir, include_dir, menu_header_name)
menu_source_path = os.path.join(out_dir, source_dir, menu_source_name)
callback_header_path = os.path.join(out_dir, include_dir, callback_header_name)
extra_header_path = os.path.join(out_dir, include_dir, extra_header_name)
with open(desc_path, 'r') as f:
desc = yaml_load(f)
jsonschema.validate(desc, desc_schema)
bufsrc = io.StringIO()
bufhdr = io.StringIO()
namespace = tuple()
callback_list = set()
with open(menu_source_path, 'w') as bufsrc, open(menu_header_path, 'w') as bufhdr:
# Output header
bufsrc.write(SRC_HEADER)
bufhdr.write(SRC_HEADER)
bufsrc.write('\n')
bufhdr.write('\n')
# Output includes
if use_pgmspace:
bufsrc.write('#include <Arduino.h>\n')
bufsrc.write('#include <tcMenu.h>\n')
bufsrc.write(f'#include "{menu_header_name}"\n\n')
bufhdr.write('#pragma once\n')
bufhdr.write('#include <tcMenu.h>\n\n')
bufhdr.write(f'#include "{callback_header_name}"\n')
bufhdr.write(f'#include "{extra_header_name}"\n\n')
# Output application info
emit_cppdef(bufsrc, 'applicationInfo', 'ConnectorLocalInfo', is_const=True, extra_decl=('PROGMEM', ) if use_pgmspace else tuple(), init=True)
with emit_cppobjarray(bufsrc):
bufsrc.write(f'{cppstr(desc["name"])}, {cppstr(desc["uuid"])}')
emit_cppeol(bufsrc)
bufsrc.write('\n')
emit_cppdef(bufhdr, 'applicationInfo', 'ConnectorLocalInfo', is_const=True, is_extern=True)
emit_cppeol(bufhdr)
ctx = CodeEmitterContext(bufsrc, bufhdr, eeprom_map, namespace, None, use_pgmspace)
parsed_items = tuple(map(parse_tcdesc_yaml_object, desc['items']))
# Output menu descriptor
for i, item in enumerate(parsed_items):
ctx.next_entry = parsed_items[i+1] if len(parsed_items) > i+1 else None
item.emit_code(ctx)
callback_list.update(item.list_callbacks())
# Define a getter for the root of menu descriptor
bufhdr.write(f'constexpr MenuItem *getRootMenuItem() {{ return &menu{parsed_items[0].generate_id()}; }}\n')
bufhdr.write('\n')
# Define menu property initializer
emit_cppdef(bufsrc, 'setupMenuDefaults', 'void')
bufsrc.write('() ')
with emit_cppobjarray(bufsrc, multiline=True):
for item in parsed_items:
item.emit_default_flags_block(bufsrc, namespace)
emit_cppdef(bufhdr, 'setupMenuDefaults', 'void')
bufhdr.write('()')
emit_cppeol(bufhdr)
# Generate callback header
with open(callback_header_path, 'w') as bufcb:
bufcb.write(SRC_HEADER)
bufcb.write('\n')
bufcb.write('#pragma once\n')
bufcb.write('#include <tcMenu.h>\n')
bufcb.write('#include <stdint.h>\n\n')
callback_overlap_check = {}
for cb_type, cb_ref in callback_list:
if cb_ref in callback_overlap_check:
raise RuntimeError(f'Callback {cb_ref} conflicts with other callbacks.')
callback_overlap_check[cb_ref] = cb_type
if cb_type == 'on_change':
bufcb.write(f'void {cb_ref}(int id);\n')
elif cb_type == 'on_render':
bufcb.write(f'int {cb_ref}(RuntimeMenuItem* item, uint8_t row, RenderFnMode mode, char* buffer, int bufferSize);\n')
with open(extra_header_path, 'w') as bufext:
# TODO: Make this dynamic?
bufext.write(SRC_HEADER)
bufext.write('\n')
bufext.write('#pragma once\n')
bufext.write('#include <ScrollChoiceMenuItem.h>\n')
bufext.write('#include <EditableLargeNumberMenuItem.h>\n')
if __name__ == '__main__':
p, args = parse_args()
desc_dirname, desc_basename = os.path.split(args.desc)
is_standard_suffix = len(desc_basename) > len(DESC_SUFFIX) and desc_basename.endswith(DESC_SUFFIX)
desc_instance_name = desc_basename[:-len(DESC_SUFFIX)] if is_standard_suffix else os.path.splitext(desc_basename)[0]
out_dir = args.output_dir if args.output_dir is not None else os.path.join(desc_dirname, 'gen')
if args.eeprom_map is not None:
eeprom_map_file = args.eeprom_map
else:
eeprom_map_file = os.path.join(desc_dirname, f'{desc_instance_name}{MAP_SUFFIX}')
if os.path.isfile(eeprom_map_file):
with open(eeprom_map_file, 'r') as f:
eeprom_map = EEPROMMap.load(f)
if args.eeprom_capacity is not None and args.eeprom_capacity != eeprom_map.capacity:
print('WARNING: Ignoring --eeprom-capacity and using the capacity specified in the mapping file.')
else:
if args.eeprom_capacity is None:
p.error('--eeprom-capacity must be specified when initializing the mapping file.')
eeprom_map = EEPROMMap(args.eeprom_capacity)
do_codegen(args.desc, out_dir, args.source_dir, args.include_dir, desc_instance_name, eeprom_map, args.pgmspace)
with open(eeprom_map_file, 'w') as f:
eeprom_map.save(f)
| python |
#! /usr/bin/env python3
import subprocess
import sys
from config_loader import ConfigLoader
from write_tfvars import TfVarWriter
from setup_class_loader import load_class
"""
Setup.py sets up and runs the initial terraform deployment. It's broken into
3 parts:
1) Load and Validate Inputs
2) Run Setup scripts
3) Terraform Init/Plan/Apply
The script generates a .tfvars file that is used to deploy via terraform.
"""
###############################################################################
# Load and Validate Inputs
###############################################################################
## Load the Config and Definitions
config_loader = ConfigLoader()
is_valid, validation_errors = config_loader.load_config()
if not is_valid:
new_line = '\n\t'
exit(f"Found the following validation errors: {new_line}{f'{new_line}'.join(validation_errors)}")
###############################################################################
# Load Setup Class for the specific template directory
###############################################################################
template_dir = config_loader.get_template_dir()
Setup = load_class(template_dir)
template_setup = Setup(config_loader)
template_setup.setup_log_file()
current_user_function = subprocess.run([
"/bin/bash", "-c",
f"source cloud/azure/bin/lib.sh && azure::get_current_user_id"
], capture_output=True)
current_user = ""
if current_user_function.returncode == 0:
    current_user = current_user_function.stdout.decode("ascii")
image_tag = config_loader.get_config_var("IMAGE_TAG")
log_args = f"\"{image_tag}\" {current_user}"
try:
template_setup.pre_terraform_setup()
###############################################################################
# Terraform Init/Plan/Apply
###############################################################################
terraform_tfvars_path = f"{template_dir}/{config_loader.tfvars_filename}"
# Write the passthrough vars to a temporary file
tf_var_writter = TfVarWriter(terraform_tfvars_path)
conf_variables = config_loader.get_terraform_variables()
tf_var_writter.write_variables(conf_variables)
# Note that the -chdir means we use the relative paths for
# both the backend config and the var file
terraform_init_args = [
"terraform",
f"-chdir={template_dir}",
"init",
]
if config_loader.use_backend_config():
terraform_init_args.append(f"-backend-config={config_loader.backend_vars_filename}")
subprocess.check_call(terraform_init_args)
subprocess.check_call([
"terraform",
f"-chdir={template_dir}",
"apply",
f"-var-file={config_loader.tfvars_filename}"
])
###############################################################################
# Post Run Setup Tasks (if needed)
###############################################################################
if template_setup.requires_post_terraform_setup():
template_setup.post_terraform_setup()
subprocess.check_call([
"terraform",
f"-chdir={template_dir}",
"apply",
f"-var-file={config_loader.tfvars_filename}"
])
subprocess.run([
"/bin/bash", "-c",
f"source cloud/shared/bin/lib.sh && LOG_TEMPFILE={template_setup.log_file_path} log::deploy_succeeded {log_args}"
], check=True)
except Exception:
subprocess.run([
"/bin/bash", "-c",
f"source cloud/shared/bin/lib.sh && LOG_TEMPFILE={template_setup.log_file_path} log::deploy_failed {log_args}"
], check=True)
print("Deployment Failed :(", file=sys.stderr)
finally:
template_setup.cleanup()
| python |
def spam(divide_by):
return 42 / divide_by
print(spam(0))
"""
Traceback (most recent call last):
File "/Users/moqi/Documents/Code/automate-the-boring-stuff/c03/p053_zero_devide.py", line 5, in <module>
print(spam(0))
File "/Users/moqi/Documents/Code/automate-the-boring-stuff/c03/p053_zero_devide.py", line 2, in spam
return 42 / divide_by
ZeroDivisionError: division by zero
"""
| python |
class Line(object):
def __init__(self, line_num, line_real,line_altered):
self.num = line_num
self.real = line_real
self.altered = line_altered
def __repr__(self):
return str(self.num)+": "+self.real.rstrip()
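# e.g. repr(Line(3, "foo bar\n", "FOO BAR")) == "3: foo bar"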
| python |
## To use this example:
# curl -d '{"name": "John Doe"}' localhost:8000
from sanic import Sanic
from sanic.response import html
from jinja2 import Template
template = Template('Hello {{ name }}!')
app = Sanic(__name__)
#
# Async response:
# - rendered with a jinja2 template
#
@app.route('/')
async def test(request):
data = request.json
return html(template.render(**data))  # render the template page
app.run(host="0.0.0.0", port=8000)
| python |
# -*- coding=utf-8 -*-
import random
import os,pickle
import pygame
from globals import *
from matrix import Matrix
class VirtualHintBox(object):
pid = 0
block_manage=None
next_block= None
def __init__(self, pid, block_manage):
#print pid
self.pid = pid
self.block_manage = block_manage
def take_block(self):
block = self.next_block
if block is None: # make first block
block = self.block_manage.get_block(self.pid)
self.next_block = self.block_manage.get_block(self.pid)
return block
def paint(self):
pass
class HintBox(VirtualHintBox):
def __init__(self, bg, block_size, position, block_manage):
super(HintBox, self).__init__(0, block_manage)
self._bg = bg;
self._x, self._y, self._width, self._height = position
self._block_size = block_size
self._bgcolor = [0, 0, 0]
self.block_manage = block_manage
def paint(self):
mid_x = self._x + self._width / 2
pygame.draw.line(self._bg, self._bgcolor, [mid_x, self._y], [mid_x, self._y + self._height], self._width)
bz = self._block_size
if self.next_block:
arr = self.next_block.get_rect_arr()
minx, miny = arr[0]
maxx, maxy = arr[0]
for x, y in arr:
if x < minx: minx = x
if x > maxx: maxx = x
if y < miny: miny = y
if y > maxy: maxy = y
w = (maxx - minx) * bz
h = (maxy - miny) * bz
cx = self._width / 2 - w / 2 - minx * bz - bz / 2
cy = self._height / 2 - h / 2 - miny * bz - bz / 2
for rect in arr:
x, y = rect
pygame.draw.line(self._bg, self.next_block.color,
[self._x + x * bz + cx + bz / 2, self._y + cy + y * bz],
[self._x + x * bz + cx + bz / 2, self._y + cy + (y + 1) * bz], bz)
pygame.draw.rect(self._bg, [255, 255, 255],
[self._x + x * bz + cx, self._y + y * bz + cy, bz + 1, bz + 1], 1)
class ScoreBox(object):
total_score = 0
high_score = 0
db_file = 'tetris.db'
def __init__(self, bg, block_size, position):
self._bg = bg;
self._x, self._y, self._width, self._height = position
self._block_size = block_size
self._bgcolor = [0, 0, 0]
if os.path.exists(self.db_file): self.high_score = pickle.load(open(self.db_file, 'rb'))
def paint(self):
myfont = get_user_font(24)
white = 255, 255, 255
textImage = myfont.render(LanguageLib.instance().get_text('high') + ': %06d' % (self.high_score), True, white)
self._bg.blit(textImage, (self._x, self._y - 10))
textImage = myfont.render(LanguageLib.instance().get_text('score') + ':%06d' % (self.total_score), True, white)
self._bg.blit(textImage, (self._x, self._y + 20))
def add_score(self, score):
self.total_score += score
if self.total_score > self.high_score:
self.high_score = self.total_score
pickle.dump(self.high_score, open(self.db_file, 'wb+'))
class VirtualScoreBox(object):
total_score = 0
def __init__(self, bg, position):
self._bg = bg;
self._x, self._y, self._width, self._height = position
self._bgcolor = [0, 0, 0]
def paint(self):
myfont = get_user_font(16)
white = 255, 255, 255
textImage = myfont.render(LanguageLib.instance().get_text('player2 score') + ':%06d' % (self.total_score), True, white)
self._bg.blit(textImage, (self._x, self._y))
def add_score(self, score):
self.total_score += score
class Panel(object):
attack_num = 0
block_id = 0
rect_arr = []
moving_block = None
hint_box = None
score_box = None
def __init__(self, bg, block_size, position):
self._bg = bg;
self._x, self._y, self._width, self._height = position
self._block_size = block_size
self._bgcolor = [0, 0, 0]
self.block_id = 0
self.rect_arr = []
self.moving_block = None
def get_rect_matrix(self):
matrix = Matrix(ROW_COUNT, COL_COUNT)
for rect_info in self.rect_arr:
matrix.set_val(rect_info.x, rect_info.y, 1)
return matrix
def add_block(self, block):
#print block.get_rect_arr()
for x, y in block.get_rect_arr():
self.rect_arr.append(RectInfo(x, y, block.color))
#print len(self.rect_arr)
def create_move_block(self):
self.block_id += 1
block = self.hint_box.take_block()
# block = create_block()
block.move(COL_COUNT / 2 - 2, -2) # move block to top center
self.moving_block = block
def check_overlap(self, diffx, diffy, check_arr=None):
if check_arr is None: check_arr = self.moving_block.get_rect_arr()
for x, y in check_arr:
for rect_info in self.rect_arr:
if x + diffx == rect_info.x and y + diffy == rect_info.y:
return True
return False
def control_block(self, diffx, diffy):
if self.moving_block.can_move(diffx, diffy) and not self.check_overlap(diffx, diffy):
self.moving_block.move(diffx, diffy)
def change_block(self):
if self.moving_block:
new_arr = self.moving_block.change()
if new_arr and not self.check_overlap(0, 0, check_arr=new_arr):
self.moving_block.rect_arr = new_arr
def move_block(self):
if self.moving_block is None: self.create_move_block()
if self.moving_block.can_move(0, 1) and not self.check_overlap(0, 1):
self.moving_block.move(0, 1)
return 1
else:
self.add_block(self.moving_block)
self.check_clear()
for rect_info in self.rect_arr:
if rect_info.y < 0: return 9 # gameover
self.create_move_block()
return 2
def check_clear(self):
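# Bucket the settled rects by row; any row holding a full 10 rects is cleared,
# and every surviving rect is shifted down by the number of cleared rows below
# it. The score for the number of cleared lines is then added.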
tmp_arr = [[] for i in range(20)]
for rect_info in self.rect_arr:
if rect_info.y < 0: return
tmp_arr[rect_info.y].append(rect_info)
clear_num = 0
clear_lines = set([])
y_clear_diff_arr = [[] for i in range(20)]
for y in range(19, -1, -1):
if len(tmp_arr[y]) == 10:
clear_lines.add(y)
clear_num += 1
y_clear_diff_arr[y] = clear_num
if clear_num > 0:
new_arr = []
for y in range(19, -1, -1):
if y in clear_lines: continue
tmp_row = tmp_arr[y]
y_clear_diff = y_clear_diff_arr[y]
for rect_info in tmp_row:
# new_arr.append([x,y+y_clear_diff])
new_arr.append(RectInfo(rect_info.x, rect_info.y + y_clear_diff, rect_info.color))
self.rect_arr = new_arr
score = SCORE_MAP[clear_num - 1]
self.score_box.add_score(score)
def get_attach_num(self):
if self.score_box.total_score / 1000 > self.attack_num:
self.attack_num += 1
return 1
else:
return 0
def add_hinder(self):
hinder_lines = 2
for tmp in self.rect_arr:
tmp.y -= hinder_lines
for y in range(hinder_lines):
arr = list(range(10))
for i in range(5):
n = random.randint(0, len(arr) - 1)
arr.pop(n)
for x in arr:
self.rect_arr.append(RectInfo(x, 19 - y, [0, 0, 255]))
def paint(self):
mid_x = self._x + self._width / 2
pygame.draw.line(self._bg, self._bgcolor, [mid_x, self._y], [mid_x, self._y + self._height],
self._width)
bz = self._block_size
for rect_info in self.rect_arr:
x = rect_info.x
y = rect_info.y
pygame.draw.line(self._bg, rect_info.color, [self._x + x * bz + bz / 2, self._y + y * bz],
[self._x + x * bz + bz / 2, self._y + (y + 1) * bz], bz)
pygame.draw.rect(self._bg, [255, 255, 255], [self._x + x * bz, self._y + y * bz, bz + 1, bz + 1], 1)
if self.moving_block:
for rect in self.moving_block.get_rect_arr():
x, y = rect
pygame.draw.line(self._bg, self.moving_block.color, [self._x + x * bz + bz / 2, self._y + y * bz],
[self._x + x * bz + bz / 2, self._y + (y + 1) * bz], bz)
pygame.draw.rect(self._bg, [255, 255, 255], [self._x + x * bz, self._y + y * bz, bz + 1, bz + 1], 1)
self.score_box.paint()
self.hint_box.paint()
| python |
from app import celery
from celery.utils.log import get_task_logger
from bridge.bridge_manager import BridgeManager
from models.modelDetail import AiModelDetail
from models.receiveJobs import ReceiveJobs
from models.category import Category
from models.subcategory import SubCategory
from models.compliance import ShelfCompliance
from utilities.category_Detail import CategoryDetail
from utilities.category_Response import CategoryResponse
from utilities.brand_Response import BrandResponse
from utilities.complex_encoder import ComplexEncoder
from utilities.rectangle2 import Rectangle2
from utilities.point import Point
from utilities.geometery_operation import is_point_within_dist_of_rect
from utilities.geometery_operation import rectangle_contain
from utilities.compliance_meta import ComplianceMetaData
from utilities.constant import JOB_STATUS_DONE, JOB_STATUS_ERROR, JOB_STATUS_INSERTED, JOB_STATUS_PENDING, JOB_STATUS_COMMUNICATION_ERROR
from utilities.common import get_url
import requests
import json
logger = get_task_logger(__name__)
def build_shelf_compliance(model_response_json, shelf_compliance):
# collection of brand with coordinates
# sample data formate
# [item_or_brand_name, x, y, h, w]
brand_tags_xy_data = model_response_json["MetaData"]
print_debug_detail(f"{brand_tags_xy_data}")
compliance_collection = []
shelf_coordinate_object = None
for each_shelf in shelf_compliance:
compliance_items = each_shelf.complianceItem.split(",")
print_debug_info(f"Shelf Name and Tag:- {each_shelf.shelfName, each_shelf.shelfTag}")
#get main shelf coordinate detail
for single_item_coordinate in brand_tags_xy_data:
if single_item_coordinate[0] == each_shelf.shelfTag:
print_debug_info(f"Actual Shelf Name is:- {single_item_coordinate[0]}")
shelf_coordinate_object = single_item_coordinate
break
print_debug_detail(f"Shelf object -> {shelf_coordinate_object}")
if shelf_coordinate_object is not None:
#create shelf Rectangle object
#logger.info(f"{shelf_coordinate_object[2]} {float(shelf_coordinate_object[2]+10)}")
shelf_rectangle = Rectangle2(shelf_coordinate_object[1]-1,float(shelf_coordinate_object[2]-1),shelf_coordinate_object[3],shelf_coordinate_object[4])
#logger.info(f"finding shelf rectangle {shelf_rectangle.x,shelf_rectangle.y,shelf_rectangle.w,shelf_rectangle.h}")
find_item_inside_shelf = []
#search for each compliance item inside the shelf
for each_item_coordinate in brand_tags_xy_data:
predicted_item_name = each_item_coordinate[0]
print_debug_info(f"Inner item Name:- {predicted_item_name}")
#create searchable item Rectangle object
#find_rectangle = Rectangle(each_item_coordinate[1],each_item_coordinate[2],each_item_coordinate[3],each_item_coordinate[4])
#logger.info(f"item object coordinate -> {find_rectangle.x,find_rectangle.y,find_rectangle.w,find_rectangle.h}")
item_xy_point = Point(each_item_coordinate[1], each_item_coordinate[2])
print_debug_detail(f"Inner item x,y value {each_item_coordinate[1]}, {each_item_coordinate[2]}")
#perform search
is_rect_inside = is_point_within_dist_of_rect(shelf_rectangle, item_xy_point, dist=1)
print_debug_detail(f"Item found inside:- {is_rect_inside}")
if is_rect_inside:
find_item_inside_shelf.append(predicted_item_name)
print_debug_info(f"Inside item found length: {len(find_item_inside_shelf)}")
if len(find_item_inside_shelf) > 0:
#total compliance item formula using intersection of two sets
comp_list_as_set = set(compliance_items)
intersection = comp_list_as_set.intersection(find_item_inside_shelf)
final_intersected_compliance_items = list(intersection)
print_debug_info(f"compliance items list {final_intersected_compliance_items}")
total_compliance_items_count = len(final_intersected_compliance_items)
total_shelf_items_count = len(find_item_inside_shelf)
total_ratio = total_compliance_items_count / total_shelf_items_count
compliance_metadata = ComplianceMetaData(find_item_inside_shelf,
final_intersected_compliance_items,
each_shelf.shelfName,
each_shelf.shelfTag,
total_compliance_items_count,
total_shelf_items_count,
total_ratio,
each_shelf.complianceLevel)
compliance_collection.append(compliance_metadata)
else:
logger.info(f"No Compliance item found")
print_debug_detail(f"loop-end")
else:
logger.info(f"Shelf not found")
print_debug_detail(f"main-loop-end")
json_string = json.dumps([ob.__dict__ for ob in compliance_collection], cls=ComplexEncoder)
print_debug_detail(f"Compliance Json data")
print_debug_detail(f"{json_string}")
print_debug_info(f"exit from build_shelf_compliance")
return json_string
def build_analytics(category_detail_obj, model_response_json):
actual_group_data = None
actual_group_name = []
#build analytics information
category_response = []
#build topline information
topline_response = []
group_data = model_response_json['GroupData']
print_debug_info(f"length of group_data is {len(group_data)}")
for v in group_data:
actual_group_data = json.loads(v)
for each_key in actual_group_data:
actual_group_name.append(each_key['BRAND'])
for cat_obj in category_detail_obj:
tages = cat_obj.tages.split(",")
not_found_brand = list(set(tages)-set(actual_group_name))
found_brand = list(set(tages)-set(not_found_brand))
temp_tags_counter = []
for fb in found_brand:
ag_data_item = next(item for item in actual_group_data if item["BRAND"] == fb)
temp_tags_counter.append(BrandResponse(ag_data_item['BRAND'], ag_data_item['COUNT']))
for nfb in not_found_brand:
temp_tags_counter.append(BrandResponse(nfb,0))
if cat_obj.dataContainer == "Analytics":
print_debug_detail(" Is Analytics Type ")
category_response.append(CategoryResponse(cat_obj.category_name , cat_obj.subcategory_name, temp_tags_counter, cat_obj.show_type))
else:
print_debug_detail(" Is TopLine Type ")
topline_response.append(CategoryResponse(cat_obj.category_name , cat_obj.subcategory_name, temp_tags_counter, cat_obj.show_type))
json_string = json.dumps([ob.__dict__ for ob in category_response], cls=ComplexEncoder)
topline_json_string = json.dumps([ob.__dict__ for ob in topline_response], cls=ComplexEncoder)
print_debug_detail(f"Analytic Json data")
print_debug_detail(f"{json_string}")
print_debug_detail(f"Topline analytic Json data")
print_debug_detail(f"{topline_json_string}")
print_debug_info(f"exit from build_analytics")
return json_string, topline_json_string
def build_analytics_and_compliance(category_detail_obj, model_response, shelf_compliance):
# temp for dev or testing
#response_obj = requests.get("http://knowhow.markematics.net/ReceiveJobs/GetJobDetailById/2")
#logger.info(response_obj.text)
# for dev or testing
#model_response_json = json.loads(response_obj.text)
# for live
model_response_json = json.loads(model_response)
print_debug_detail("model_response json loaded")
print_debug_detail(f"{model_response_json}")
#build analytic json
print_debug_info("Calling build analytics")
analytic_json, topline_json_string = build_analytics(category_detail_obj, model_response_json)
#build compliance json
print_debug_info("Calling build compliance")
compliance_json = build_shelf_compliance(model_response_json, shelf_compliance)
# here rebuild the json object using [GroupData, UngroupData, BrandName, Compliance, Analytics] objects
print_debug_info("Compiling Compliance & Analytics Json response")
json_response = json.dumps({"GroupData":model_response_json['GroupData'],"UngroupData":model_response_json['UngroupData'],"BrandName":model_response_json['BrandName'],"Compliance":compliance_json,"Analytics":analytic_json,"Topline":topline_json_string})
print_debug_detail(json_response)
return json_response
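# Illustrative shape of the composed response (keys taken from the code above, values
# hypothetical): note that Compliance, Analytics and Topline are themselves JSON strings
# nested inside the outer JSON document.
# {"GroupData": [...], "UngroupData": [...], "BrandName": [...],
#  "Compliance": "[{...}]", "Analytics": "[{...}]", "Topline": "[{...}]"}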
def print_debug_info(data):
is_debug = True
if is_debug:
logger.info(data)
def print_debug_detail(data):
is_debug = True
if is_debug:
logger.info(data)
@celery.task()
def process_image(job_id, model_id, project_id):
model_detail_obj = None
received_job_obj = None
category_detail_obj = []
print_debug_info("process_image_call")
bridge = BridgeManager().get_Instance().get_Bridge()
print_debug_info("getting_model_detail_call")
model_details = bridge.get_db().get_session().query(AiModelDetail).filter(AiModelDetail.modelID == model_id)
for model in model_details:
print_debug_info(f"{model.id} {model.port} {model.url} {model.version} {model.modelJson} {model.status} {model.modelID}")
model_detail_obj = model
logger.info(model_detail_obj)
print_debug_info("getting_job_detail")
received_jobs = bridge.get_db().get_session().query(ReceiveJobs).filter(ReceiveJobs.id == job_id)
for job in received_jobs:
print_debug_info(f"{job.unProcessedImage} {job.uri}")
received_job_obj = job
logger.info(received_job_obj)
print_debug_info("category_and_subcategory_loading")
category_obj = bridge.get_db().get_session().query(Category).filter(Category.projectId == project_id)
print_debug_info("shelf_compliance_loading")
shelf_compliance_obj = bridge.get_db().get_session().query(ShelfCompliance).filter(ShelfCompliance.projectId == project_id)
for category in category_obj:
print_debug_info(f"{category.categoryName}")
sub_category_obj = bridge.get_db().get_session().query(SubCategory).filter(SubCategory.categoryId == category.id)
for sub_category in sub_category_obj:
print_debug_info(f"{sub_category.name}")
category_detail_obj.append(CategoryDetail(category.id, category.categoryName, category.dataContainer, category.categoryDescription, category.showType, sub_category.id, sub_category.name, sub_category.tages))
# temp dev or testing analytics
#build_analytics_and_compliance(category_detail_obj,"",shelf_compliance_obj)
print_debug_info("checking_pending_job_status")
if received_job_obj != None:
# Checking received job status
if received_job_obj.requestStatus.lower() == JOB_STATUS_INSERTED:#len(received_job_obj.requestStatus.lower()) > 0:
print_debug_info(received_job_obj.requestStatus)
print_debug_info(f"Updating status value from Inserted to Pending against {job_id}")
# Update received job status into PENDING
bridge.get_db().get_session().query(ReceiveJobs).filter_by(id = job_id).update({ReceiveJobs.requestStatus:JOB_STATUS_PENDING})
bridge.get_db().get_session().commit()
# Generating image processing request url
request_url = get_url(model_detail_obj.url, model_detail_obj.port, "upload-image")
print_debug_info(f"Generating image processing request url {request_url}")
try:
# Sending image to model for analysis
headers = {'Content-type': 'application/json'}
request_data = {'data_url':received_job_obj.uri,'job_id':job_id}
print_debug_info(f"Request data inside {request_data}")
response_obj = requests.post(request_url, data = json.dumps(request_data), headers=headers)
print_debug_info(response_obj.text)
if response_obj.status_code == 200:
# build live analytic
                    print_debug_info("> Sending Request for Compliance & Analytics Building")
analytic_data = build_analytics_and_compliance(category_detail_obj, response_obj.text, shelf_compliance_obj)
# Update received job status into DONE
bridge.get_db().get_session().query(ReceiveJobs).filter_by(id = job_id).update({ReceiveJobs.requestStatus:JOB_STATUS_DONE,ReceiveJobs.dataResponse:analytic_data})
bridge.get_db().get_session().commit()
elif response_obj.status_code == 400 or response_obj.status_code == 500:
# Update received job status into ERROR
bridge.get_db().get_session().query(ReceiveJobs).filter_by(id = job_id).update({ReceiveJobs.requestStatus:JOB_STATUS_ERROR,ReceiveJobs.dataResponse:response_obj.status_code})
bridge.get_db().get_session().commit()
except:
# Update received job status into ERROR
bridge.get_db().get_session().query(ReceiveJobs).filter_by(id = job_id).update({ReceiveJobs.requestStatus:JOB_STATUS_COMMUNICATION_ERROR,ReceiveJobs.dataResponse:"Communication Error"})
bridge.get_db().get_session().commit()
else:
            print_debug_info(f"Job not processed, current status: {received_job_obj.requestStatus}")
print_debug_info("updating_pending_job_status")
| python |
class Agent:
def __init__(self, size, velocity, k):
self.size = size
self.velocity = velocity
self.k = k
def model(self, q, t, u):
pass
def controller(self, q, qref, uref):
pass
def bloating(self, n):
pass
def run_model(self, q0, t, qref, uref):
pass | python |
from path import Path
| python |
"""
Task to orchestrate scaling for an ECS Service
"""
import boto3
from decorators import with_logging
ecs = boto3.client("ecs")
@with_logging
def handler(event, context):
cluster = event["Cluster"]
max_tasks = event["DeletionTasksMaxNumber"]
queue_size = event["QueueSize"]
service = event["DeleteService"]
desired_count = min(queue_size, max_tasks)
ecs.update_service(cluster=cluster, service=service, desiredCount=desired_count)
return desired_count
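# Illustrative example (hypothetical event values, not from the source):
# handler({"Cluster": "deletion-cluster", "DeletionTasksMaxNumber": 10,
#          "QueueSize": 3, "DeleteService": "deletion-service"}, None)
# would call ecs.update_service(..., desiredCount=3) and return 3, since min(3, 10) == 3.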
| python |
from .interactive import Interactive
from .hardcoded import Hardcoded
| python |
#
# Copyright (C) 2018 ETH Zurich and University of Bologna
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors: Germain Haugou, ETH ([email protected])
from bridge.default_debug_bridge import *
import time
JTAG_RISCV_IRLEN = 5
JTAG_RISCV_BYPASS = 0x1f
JTAG_SOC_CONFREG_ID = 6
JTAG_SOC_CONFREG = (JTAG_SOC_CONFREG_ID << 5) | (JTAG_RISCV_BYPASS << 0)
JTAG_SOC_CONFREG_WIDTH = 8 + 1
JTAG_SOC_IRLEN = 4
JTAG_IRLEN = JTAG_SOC_IRLEN + JTAG_RISCV_IRLEN
class vega_debug_bridge(debug_bridge):
def __init__(self, config, binaries=[], verbose=False):
super(vega_debug_bridge, self).__init__(config=config, binaries=binaries, verbose=verbose)
self.start_cores = False
self.first_reset = True
self.boot_mode = None
def reset(self, stop=True):
if self.first_reset:
# The first time, we need to wait enough time to let the voltage
# regulator converge
self.get_cable().chip_reset(True, 5000000)
self.first_reset = False
        # Reset the chip and tell it we want to load via jtag
# We keep the reset active until the end so that it sees
# the boot mode as soon as it boots from rom
# Use bootsel pad to tell boot code to stop
if stop:
self.get_cable().chip_config(1)
# Due to voltage convergence and so on we need to wait
# 200ms when the reset is low
#self.get_cable().chip_reset(True, 200000000)
self.get_cable().chip_reset(True, 100000000)
# It also takes some time before the JTAG is ready
self.get_cable().chip_reset(False, 4000000)
#self.get_cable().jtag_reset(True)
self.get_cable().jtag_reset(False)
return 0
def wait_eoc(self):
while True:
value = self.read_32(0x1a1040a0)
if (value >> 31) == 1:
return value & 0x7fffffff
time.sleep(0.1)
def jtag_hyper_boot(self):
self.get_cable().jtag_set_reg(JTAG_SOC_CONFREG, JTAG_SOC_CONFREG_WIDTH, ((((2 << 0) | (1<<3)) << 1) | 1) << 1, JTAG_IRLEN)
def jtag_mram_boot(self):
self.get_cable().jtag_set_reg(JTAG_SOC_CONFREG, JTAG_SOC_CONFREG_WIDTH, ((((2 << 0) | (2<<3)) << 1) | 1) << 1, JTAG_IRLEN)
def jtag_spim_boot(self):
self.get_cable().jtag_set_reg(JTAG_SOC_CONFREG, JTAG_SOC_CONFREG_WIDTH, ((((2 << 0) | (0<<3)) << 1) | 1) << 1, JTAG_IRLEN)
def load_jtag(self, binaries):
if self.verbose:
print ('Loading binary through jtag')
#if self.stop():
# return -1
# Load the binary through jtag
if self.verbose:
print ("Loading binaries")
for binary in binaries:
if self.load_elf(binary=binary):
return 1
return 0
def start(self):
# First stall the core
self.write_dmi(0x10, 0x00000001) # DMACTIVE
self.write_dmi(0x10, 0x03E00001) # HART SEL
self.write_dmi(0x10, 0x83E00001) # HALT REQ
# Wait until it is halted
while True:
status = self.read_dmi(0x11)
if ((status >> 9) & 1) == 1:
break
# Set PC
self.write_dmi(0x04, 0x1c008080) # PC into DATA0
self.write_dmi(0x17, 0x00230000 | 0x7b1) # Abstract cmd to set DPC
# Resume the core
self.write_dmi(0x10, 0x43E00001)
return 0
def clear(self):
self.get_cable().chip_config(0)
def wait_available(self):
boot_mode = 0
if self.boot_mode is not None:
boot_mode = (self.boot_mode << 1) | 1
# Loop until we see bit 0 becoming 1, this will indicate that the
# target is ready to accept bridge requests
while True:
reg_value = self.get_cable().jtag_get_reg(JTAG_SOC_CONFREG, JTAG_SOC_CONFREG_WIDTH, boot_mode, JTAG_IRLEN) >> 1
rt_req = (reg_value >> 1) & 0x7
if rt_req == 4 or rt_req == 1:
break
if self.verbose:
print ("Target is available")
def write_dmi(self, reg, value):
self.write_reg_int(reg, value, 4, 0) # DMACTIVE
def read_dmi(self, reg):
return self.read_reg_int(reg, 4, 0) # DMACTIVE
def stop(self):
return 0 | python |
from .ranges import * # NOQA
| python |
from spade.behaviour import OneShotBehaviour
from spade.message import Message
from driftage.base.conf import getLogger
class FastNotifyContacts(OneShotBehaviour):
_logger = getLogger("fast_notify_contacts")
    async def run(self):
        """Send the template body to every available contact."""
for contact in self.agent.available_contacts.copy():
msg = Message(
to=contact,
body=self.template.body
)
await self.send(msg)
self._logger.debug(f"Sent {self.template.body} to all contacts")
| python |
"""
vg plot command
make plot of flybys using SPICE data
To use, need SPICE kernels - download the following files and put them in the /kernels folder:
ftp://naif.jpl.nasa.gov/pub/naif/generic_kernels/lsk/naif0012.tls
ftp://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/satellites/a_old_versions/jup100.bsp
ftp://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/satellites/a_old_versions/sat132.bsp
ftp://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/satellites/a_old_versions/ura083.bsp
ftp://naif.jpl.nasa.gov/pub/naif/generic_kernels/spk/satellites/a_old_versions/nep016-6.bsp
ftp://naif.jpl.nasa.gov/pub/naif/generic_kernels/pck/pck00010.tpc
ftp://naif.jpl.nasa.gov/pub/naif/VOYAGER/kernels/spk/Voyager_1.a54206u_V0.2_merged.bsp
ftp://naif.jpl.nasa.gov/pub/naif/VOYAGER/kernels/spk/Voyager_2.m05016u.merged.bsp
"""
import os
import os.path
import math
import spiceypy as spice
import config
import lib
import libimg
import libspice
def loadSpice():
"""
load SPICE kernels (data files)
see above for sources
"""
spice.furnsh('kernels/naif0012.tls') # leap second data (5kb)
spice.furnsh('kernels/Voyager_1.a54206u_V0.2_merged.bsp') # voyager 1 data (6mb)
spice.furnsh('kernels/Voyager_2.m05016u.merged.bsp') # voyager 2 data (6mb)
spice.furnsh('kernels/jup100.bsp') # jupiter and satellite data (20mb)
spice.furnsh('kernels/sat132.bsp') # saturn and satellite data (63mb)
spice.furnsh('kernels/ura083.bsp') # uranus and satellite data (81mb)
spice.furnsh('kernels/nep016-6.bsp') # neptune and satellite data (9mb)
spice.furnsh('kernels/pck00010.tpc') # planetary constants (radius etc) (120kb)
def plotMap(flyby, positions, minPos):
"plot the map for the given flyby"
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
bodies = flyby.bodies
planet = bodies[0]
observer = bodies[1]
title = observer + ' at ' + planet
# title = observer + ' at ' + planet + ' (' + flyby.date[:4] + ')'
axisMax = flyby.axisMax # km
bgcolor = '0.05' # grayscale 0-1
labelcolor = '0.9' # grayscale 0-1
labelsize = 12 # pts
labeloffset = int(2 * axisMax / 25) # km
# set font etc
# see http://matplotlib.org/users/customizing.html
# mpl.rcParams['legend.fontsize'] = 10
# mpl.rcParams.update({'font.size': 22})
mpl.rcParams['font.size'] = 20
mpl.rcParams['font.family'] = 'Futura-Light'
#axes.titlesize : large # fontsize of the axes title
#axes.labelsize : medium # fontsize of the x any y labels
#xtick.labelsize : medium # fontsize of the tick labels
#legend.fontsize : large
#figure.dpi : 80 # figure dots per inch
mpl.rcParams['figure.dpi'] = 80 # figure dots per inch
mpl.rcParams['figure.figsize'] = (8.26,8) # figure size in inches
mpl.rcParams['figure.edgecolor'] = 'black' # figure edgecolor
mpl.rcParams['savefig.edgecolor'] = 'black' # figure edgecolor when saving
mpl.rcParams['savefig.dpi'] = 125
#savefig.facecolor : white # figure facecolor when saving
fig = plt.figure()
ax = fig.gca(projection='3d',axisbg=bgcolor)
ax.set_title(title,color='w')
# color of bodies, in order
moon = '#ff8000'
colors = ['r','g',moon,moon,moon,moon,moon,moon,moon]
dots = []
dotlabels = []
# draw planet
color = colors[0]
dot, = ax.plot([0],[0],[0],color+'o')
dots.append(dot)
dotlabels.append(bodies[0])
# draw orbit lines and voyager path
i = 0
for body in bodies:
rows = [row[i] for row in positions]
x = [row[0] for row in rows]
y = [row[1] for row in rows]
z = [row[2] for row in rows]
# draw line
linestyle = 'dotted' if body==observer else 'solid'
ax.plot(x, y, z, color='0.3', linestyle=linestyle)
# draw a dot for moon at closest approach
try:
pos = minPos[body]
x = [pos[0]]
y = [pos[1]]
z = [pos[2]]
color = colors[i]
dot, = ax.plot(x,y,z,color=color, marker='o')
dots.append(dot)
dotlabels.append(body)
except:
pass
i += 1
# add legend
# plt.legend(dots, dotlabels, numpoints=1)
# label axes
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
# make it a cube
# ax.set_xlim([-axisMax,axisMax])
# ax.set_ylim([-axisMax,axisMax])
# ax.set_zlim([-axisMax,axisMax])
cx,cy,cz = flyby.axisCenter
ax.set_xlim([cx-axisMax,cx+axisMax])
ax.set_ylim([cy-axisMax,cy+axisMax])
ax.set_zlim([cz-axisMax,cz+axisMax])
# label planet, voyager, moons
# labelcolor = 'w'
ax.text(labeloffset,labeloffset,labeloffset,planet,size=labelsize,color=labelcolor)
for key in minPos:
pos = minPos[key]
x = pos[0]
y = pos[1]
z = pos[2]
ax.text(x+labeloffset,y+labeloffset,z+labeloffset,key,size=labelsize,color=labelcolor)
# draw an arrow at end of voyager's trajectory to indicate direction
# from stackoverflow
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
class Arrow3D(FancyArrowPatch):
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
i = 1 # voyager
rows = [row[i] for row in positions]
x = [row[0] for row in rows]
y = [row[1] for row in rows]
z = [row[2] for row in rows]
dx=[x[-2],x[-1]]
dy=[y[-2],y[-1]]
dz=[z[-2],z[-1]]
a = Arrow3D(dx,dy,dz, mutation_scale=20, lw=1, arrowstyle="-|>", color="w")
ax.add_artist(a)
# each system will have its own view
azim, elev = flyby.azimuthElevation
ax.view_init(azim=azim, elev=elev)
# no axes
plt.axis('off')
# save image without white border
# see http://stackoverflow.com/questions/11837979/
# removing-white-space-around-a-saved-image-in-matplotlib
filename = 'plot-' + planet + '-' + observer.replace(' ','') + '.jpg'
filepath = lib.getFolder('plot') + filename
plt.savefig(filepath, bbox_inches='tight', pad_inches=0.0)
# plt.show()
def vgPlot():
"create plot for each system flyby"
loadSpice()
#. loop through these, save each file to stepxx_maps/map-Jupiter-Voyager1.jpg etc
#. crop each file when done to a square
#. vg titles could use these for titlepage for each system flyby
#. might as well draw info on maps here - Voyager 1 at Jupiter, date, etc - futura font
# note: azimuthElevation values were determined with the plot viewer
class Flyby:
bodies = None
date = None
ndays = None
axisMax = 1e6 # km
axisCenter = (0,0,0)
azimuthElevation = None
flybys = []
flyby = Flyby()
flyby.bodies = ['Jupiter', 'Voyager 1', 'Io', 'Europa', 'Ganymede', 'Callisto']
flyby.date = "1979-03-05"
flyby.ndays = 4
flyby.axisMax = 1e6 # km
flyby.axisCenter = (0.6e6,-0.2e6,0)
flyby.azimuthElevation = (-100,48)
flybys.append(flyby)
flyby = Flyby()
flyby.bodies = ['Saturn', 'Voyager 1','Titan','Enceladus','Rhea','Mimas','Tethys','Dione']
flyby.date = "1980-11-12"
flyby.ndays = 3
flyby.axisMax = 0.6e6 # km
flyby.axisCenter = (-0.4e6,-0.4e6,0)
flyby.azimuthElevation = (80,97)
flybys.append(flyby)
flyby = Flyby()
flyby.bodies = ['Jupiter', 'Voyager 2', 'Io', 'Europa', 'Ganymede', 'Callisto']
flyby.date = "1979-07-09"
flyby.ndays = 5
flyby.axisMax = 1e6 # km
flyby.axisCenter = (-0.2e6,0,0)
flyby.azimuthElevation = (102,107)
flybys.append(flyby)
flyby = Flyby()
flyby.bodies = ['Saturn','Voyager 2','Titan','Enceladus','Rhea','Mimas','Tethys','Dione']
flyby.date = "1981-08-26"
flyby.ndays = 2
flyby.axisMax = 0.6e6 # km
flyby.axisCenter = (-0.2e6,0.1e6,0)
flyby.azimuthElevation = (172,82)
flybys.append(flyby)
flyby = Flyby()
flyby.bodies = ['Uranus','Voyager 2','Ariel','Miranda','Oberon','Titania','Umbriel']
flyby.date = "1986-01-25"
flyby.ndays = 2
flyby.axisMax = 0.4e6 # km
flyby.azimuthElevation = (-82,-7)
flybys.append(flyby)
flyby = Flyby()
flyby.bodies = ['Neptune','Voyager 2','Triton'] # proteus not in kernels
flyby.date = "1989-08-25"
flyby.ndays = 2
flyby.axisMax = 1e6 # km
flyby.azimuthElevation = (-62,40)
flybys.append(flyby)
for flyby in flybys:
planet = flyby.bodies[0]
observer = flyby.bodies[1]
print 'Generating plot for %s at %s' % (observer, planet)
nsteps = 100 # plot density
# get ephemeris time around closest approach (seconds since J2000)
etClosest = int(spice.str2et(flyby.date))
etStart = int(etClosest - flyby.ndays * 24*60*60 / 2)
etEnd = int(etClosest + flyby.ndays * 24*60*60 / 2)
etStep = int((etEnd - etStart) / nsteps)
# initialize data structs
ets = []
positions = []
minDist = {}
minPos = {}
for body in flyby.bodies:
minDist[body] = 9e15
# loop over time range, get positions
for et in xrange(etStart, etEnd, etStep):
row = []
for body in flyby.bodies:
# get position of body (voyager or moon) relative to planet (eg Jupiter).
# position is an (x,y,z) coordinate in the given frame of reference.
frame = 'J2000'
abberationCorrection = 'NONE'
position, lightTime = spice.spkpos(planet, et, frame, abberationCorrection, body)
# save time and position to arrays
ets.append(et)
row.append(position)
# find closest approach of voyager to each body
if body==observer: # voyager
posVoyager = position # save for other bodies
# distance = int(libspice.getDistance(position))
# if distance < minDist[body]:
# minDist[body] = distance
# minPos[body] = position
elif body==planet:
pass
else:
# get distance to voyager, km
posToVoyager = position-posVoyager
distance = int(libspice.getDistance(posToVoyager))
if distance < minDist[body]:
minDist[body] = distance
minPos[body] = position
positions.append(row)
# make the map
plotMap(flyby, positions, minPos)
# all done - clean up the kernels
spice.kclear()
if __name__ == '__main__':
os.chdir('..')
vgPlot()
print 'done'
| python |
import codecs
import hashlib
import json
import os
import tempfile
import unittest
from pathlib import Path
import tifffile
import numpy as np
from slicedimage._compat import fspath
import slicedimage
from slicedimage import ImageFormat
from slicedimage._dimensions import DimensionNames
from tests.utils import build_skeleton_manifest
baseurl = Path(__file__).parent.resolve().as_uri()
class TestWrite(unittest.TestCase):
def test_write_tileset(self):
image = slicedimage.TileSet(
[DimensionNames.X, DimensionNames.Y, "ch", "hyb"],
{'ch': 2, 'hyb': 2},
{DimensionNames.Y: 120, DimensionNames.X: 80},
)
for hyb in range(2):
for ch in range(2):
tile = slicedimage.Tile(
{
DimensionNames.X: (0.0, 0.01),
DimensionNames.Y: (0.0, 0.01),
},
{
'hyb': hyb,
'ch': ch,
},
)
tile.numpy_array = np.zeros((120, 80))
tile.numpy_array[hyb, ch] = 1
image.add_tile(tile)
with tempfile.TemporaryDirectory() as tempdir:
with tempfile.NamedTemporaryFile(
suffix=".json", dir=tempdir, delete=False) as partition_file:
partition_file_path = Path(partition_file.name)
partition_doc = slicedimage.v0_0_0.Writer().generate_partition_document(
image, partition_file_path.as_uri())
writer = codecs.getwriter("utf-8")
json.dump(partition_doc, writer(partition_file))
loaded = slicedimage.Reader.parse_doc(
partition_file_path.name, partition_file_path.parent.as_uri())
for hyb in range(2):
for ch in range(2):
tiles = [_tile
for _tile in loaded.tiles(
lambda tile: (
tile.indices['hyb'] == hyb
and tile.indices['ch'] == ch))]
self.assertEqual(len(tiles), 1)
                    expected = np.zeros((120, 80))
expected[hyb, ch] = 1
self.assertEqual(tiles[0].numpy_array.all(), expected.all())
self.assertIsNotNone(tiles[0].sha256)
def test_write_collection(self):
image = slicedimage.TileSet(
[DimensionNames.X, DimensionNames.Y, "ch", "hyb"],
{'ch': 2, 'hyb': 2},
{DimensionNames.Y: 120, DimensionNames.X: 80},
)
for hyb in range(2):
for ch in range(2):
tile = slicedimage.Tile(
{
DimensionNames.X: (0.0, 0.01),
DimensionNames.Y: (0.0, 0.01),
},
{
'hyb': hyb,
'ch': ch,
},
)
tile.numpy_array = np.zeros((120, 80))
tile.numpy_array[hyb, ch] = 1
image.add_tile(tile)
collection = slicedimage.Collection()
collection.add_partition("fov002", image)
with tempfile.TemporaryDirectory() as tempdir:
with tempfile.NamedTemporaryFile(
suffix=".json", dir=tempdir, delete=False) as partition_file:
partition_file_path = Path(partition_file.name)
partition_doc = slicedimage.v0_0_0.Writer().generate_partition_document(
collection, partition_file_path.as_uri())
writer = codecs.getwriter("utf-8")
json.dump(partition_doc, writer(partition_file))
loaded = slicedimage.Reader.parse_doc(
partition_file_path.name, partition_file_path.parent.as_uri())
for hyb in range(2):
for ch in range(2):
tiles = [_tile
for _tile in loaded.tiles(
lambda tile: (
tile.indices['hyb'] == hyb
and tile.indices['ch'] == ch))]
self.assertEqual(len(tiles), 1)
                    expected = np.zeros((120, 80))
expected[hyb, ch] = 1
self.assertEqual(tiles[0].numpy_array.all(), expected.all())
self.assertIsNotNone(tiles[0].sha256)
def test_checksum_on_write(self):
"""
Generate a tileset consisting of a single TIFF tile. Load it and then write it back out
as a numpy tile, which should be written with different checksums. Then verify that the
numpy version can load without an error.
"""
# write the tiff file
with tempfile.TemporaryDirectory() as tempdir:
tempdir_path = Path(tempdir)
data = np.random.randint(0, 65535, size=(120, 80), dtype=np.uint16)
file_path = os.path.join(tempdir, "tile.tiff")
with tifffile.TiffWriter(file_path) as tiff:
tiff.save(data)
with open(file_path, "rb") as fh:
checksum = hashlib.sha256(fh.read()).hexdigest()
manifest = build_skeleton_manifest()
manifest['tiles'].append(
{
"coordinates": {
DimensionNames.X.value: [
0.0,
0.0001,
],
DimensionNames.Y.value: [
0.0,
0.0001,
]
},
"indices": {
"hyb": 0,
"ch": 0,
},
"file": "tile.tiff",
"format": "tiff",
"sha256": checksum,
},
)
with open(fspath(tempdir_path / "tileset.json"), "w") as fh:
fh.write(json.dumps(manifest))
image = slicedimage.Reader.parse_doc(
"tileset.json",
tempdir_path.as_uri(),
{"cache": {"size_limit": 0}}, # disabled
)
with tempfile.TemporaryDirectory() as output_tempdir:
with tempfile.NamedTemporaryFile(
suffix=".json", dir=output_tempdir, delete=False) as partition_file:
partition_file_path = Path(partition_file.name)
partition_doc = slicedimage.v0_0_0.Writer().generate_partition_document(
image, partition_file_path.as_uri())
writer = codecs.getwriter("utf-8")
json.dump(partition_doc, writer(partition_file))
partition_file.flush()
loaded = slicedimage.Reader.parse_doc(
partition_file_path.name, partition_file_path.parent.as_uri())
loaded.tiles()[0].numpy_array
def test_write_tiff(self):
image = slicedimage.TileSet(
dimensions=[DimensionNames.X, DimensionNames.Y, "ch", "hyb"],
shape={'ch': 2, 'hyb': 2},
default_tile_shape={DimensionNames.Y: 120, DimensionNames.X: 80},
)
for hyb in range(2):
for ch in range(2):
tile = slicedimage.Tile(
coordinates={
DimensionNames.X: (0.0, 0.01),
DimensionNames.Y: (0.0, 0.01),
},
indices={
'hyb': hyb,
'ch': ch,
},
)
tile.numpy_array = np.zeros((120, 80), dtype=np.uint32)
tile.numpy_array[hyb, ch] = 1
image.add_tile(tile)
with tempfile.TemporaryDirectory() as tempdir:
with tempfile.NamedTemporaryFile(
suffix=".json", dir=tempdir, delete=False) as partition_file:
partition_file_path = Path(partition_file.name)
# create the tileset and save it.
partition_doc = slicedimage.v0_0_0.Writer().generate_partition_document(
image, partition_file_path.as_uri(), tile_format=ImageFormat.TIFF)
writer = codecs.getwriter("utf-8")
json.dump(partition_doc, writer(partition_file))
partition_file.flush()
# construct a URL to the tileset we wrote, and load the tileset.
loaded = slicedimage.Reader.parse_doc(
partition_file_path.name, partition_file_path.parent.as_uri())
# compare the tiles we loaded to the tiles we set up.
for hyb in range(2):
for ch in range(2):
tiles = [_tile
for _tile in loaded.tiles(
lambda tile: (
tile.indices['hyb'] == hyb
and tile.indices['ch'] == ch))]
self.assertEqual(len(tiles), 1)
expected = np.zeros((120, 80), dtype=np.uint32)
expected[hyb, ch] = 1
self.assertEqual(tiles[0].numpy_array.all(), expected.all())
self.assertIsNotNone(tiles[0].sha256)
def test_multi_directory_write_collection(self):
"""Test that we can write collections with a directory hierarchy."""
image = slicedimage.TileSet(
["x", "y", "ch", "hyb"],
{'ch': 2, 'hyb': 2},
{'y': 120, 'x': 80},
)
for hyb in range(2):
for ch in range(2):
tile = slicedimage.Tile(
{
'x': (0.0, 0.01),
'y': (0.0, 0.01),
},
{
'hyb': hyb,
'ch': ch,
},
)
tile.numpy_array = np.zeros((120, 80))
tile.numpy_array[hyb, ch] = 1
image.add_tile(tile)
collection = slicedimage.Collection()
collection.add_partition("fov002", image)
def partition_path_generator(parent_toc_path, toc_name):
directory = parent_toc_path.parent / toc_name
directory.mkdir()
return directory / "{}.json".format(parent_toc_path.stem)
def tile_opener(tileset_path, tile, ext):
directory_path = tempfile.mkdtemp(dir=str(tileset_path.parent))
return tempfile.NamedTemporaryFile(
suffix=".{}".format(ext),
prefix="{}-".format(tileset_path.stem),
dir=directory_path,
delete=False,
)
with tempfile.TemporaryDirectory() as tempdir:
with tempfile.NamedTemporaryFile(
suffix=".json", dir=tempdir, delete=False) as partition_file:
partition_file_path = Path(partition_file.name)
partition_doc = slicedimage.v0_0_0.Writer().generate_partition_document(
collection, partition_file_path.as_uri(),
partition_path_generator=partition_path_generator,
tile_opener=tile_opener,
)
writer = codecs.getwriter("utf-8")
json.dump(partition_doc, writer(partition_file))
loaded = slicedimage.Reader.parse_doc(
partition_file_path.name, partition_file_path.parent.as_uri())
for hyb in range(2):
for ch in range(2):
tiles = [
_tile
for _tile in loaded.tiles(
lambda tile: (
tile.indices['hyb'] == hyb
and tile.indices['ch'] == ch))]
self.assertEqual(len(tiles), 1)
                    expected = np.zeros((120, 80))
expected[hyb, ch] = 1
self.assertEqual(tiles[0].numpy_array.all(), expected.all())
self.assertIsNotNone(tiles[0].sha256)
if __name__ == "__main__":
unittest.main()
| python |
{
"cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "import sqlalchemy\n",
    "from sqlalchemy.ext.automap import automap_base\n",
    "from sqlalchemy.orm import Session\n",
    "from sqlalchemy import create_engine\n",
    "\n",
    "from flask import Flask, jsonify, render_template\n",
    "from flask_sqlalchemy import SQLAlchemy\n",
"\n",
"app = Flask(__name__)"
]
},
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///db/bellybutton.sqlite\"\n",
    "db = SQLAlchemy(app)\n",
    "\n",
    "# New Model for Database\n",
"Base = automap_base()\n",
"# reflect the tables\n",
"Base.prepare(db.engine, reflect=True)\n",
"\n",
"# Save references to each table\n",
"Samples_Metadata = Base.classes.sample_metadata\n",
"Samples = Base.classes.samples"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@app.route(\"/\")\n",
"def index():\n",
" \"\"\"Return the homepage.\"\"\"\n",
" return render_template(\"index.html\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@app.route(\"/names\")\n",
"def names():\n",
" \"\"\"Return a list of sample names.\"\"\"\n",
"\n",
" # Use Pandas to perform the sql query\n",
" stmt = db.session.query(Samples).statement\n",
" df = pd.read_sql_query(stmt, db.session.bind)\n",
"\n",
" # Return a list of the column names (sample names)\n",
" return jsonify(list(df.columns)[2:])"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@app.route(\"/metadata/<sample>\")\n",
"def sample_metadata(sample):\n",
" \"\"\"Return the MetaData for a given sample.\"\"\"\n",
" sel = [\n",
" Samples_Metadata.sample,\n",
" Samples_Metadata.ETHNICITY,\n",
" Samples_Metadata.GENDER,\n",
" Samples_Metadata.AGE,\n",
" Samples_Metadata.LOCATION,\n",
" Samples_Metadata.BBTYPE,\n",
" Samples_Metadata.WFREQ,\n",
" ]"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"results = db.session.query(*sel).filter(Samples_Metadata.sample == sample).all()"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
" # Create a dictionary entry for each row of metadata information\n",
"sample_metadata = {}\n",
"for result in results:\n",
" sample_metadata[\"sample\"] = result[0]\n",
" sample_metadata[\"ETHNICITY\"] = result[1]\n",
" sample_metadata[\"GENDER\"] = result[2]\n",
" sample_metadata[\"AGE\"] = result[3]\n",
" sample_metadata[\"LOCATION\"] = result[4]\n",
" sample_metadata[\"BBTYPE\"] = result[5]\n",
" sample_metadata[\"WFREQ\"] = result[6]\n",
"\n",
"print(sample_metadata)\n",
"return jsonify(sample_metadata)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"@app.route(\"/samples/<sample>\")\n",
"def samples(sample):\n",
" \"\"\"Return `otu_ids`, `otu_labels`,and `sample_values`.\"\"\"\n",
" stmt = db.session.query(Samples).statement\n",
" df = pd.read_sql_query(stmt, db.session.bind)\n",
"\n",
" # Filter the data based on the sample number and\n",
" sample_data = df.loc[df[sample] > 1, [\"otu_id\", \"otu_label\", sample]]\n",
"\n",
" # Sort by sample\n",
" sample_data.sort_values(by=sample, ascending=False, inplace=True)\n",
"\n",
" # Format the data to send as json\n",
" data = {\n",
" \"otu_ids\": sample_data.otu_id.values.tolist(),\n",
" \"sample_values\": sample_data[sample].values.tolist(),\n",
" \"otu_labels\": sample_data.otu_label.tolist(),\n",
" }\n",
" return jsonify(data)\n",
"\n",
"\n",
"if __name__ == \"__main__\":\n",
" app.run()\n",
" "
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
" "
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
| python |
#!/usr/bin/env PYTHONHASHSEED=1234 python3
from analysis.utils import inspect
from frontend.utils import inspect # Overwrites!
'frontend' in inspect.__module__
print(inspect.__module__) | python |
try:
from local_settings import *
except ImportError:
pass
from azure.devops.connection import Connection
from msrest.authentication import BasicAuthentication
import threading, queue
class QueryResultStatus():
CHECKING = "Checking"
BUILD_COMPLETE = "Build Complete"
BUILD_IN_PROGRESS = "Building"
class QueryResult():
def __init__(self, result_status=QueryResultStatus.CHECKING):
self.status = result_status
self.last_build = None
self.latest_build = None
self.enable_dev = False
self.enable_stage = False
self.enable_prod = False
self.deploying_dev = False
self.deploying_stage = False
self.deploying_prod = False
self.dev_release = None
self.stage_release = None
self.prod_release = None
class Pipelines():
def __init__(self):
self._poll_thread = None
def get_status(self):
if self._poll_thread is None:
self._poll_thread = PollStatusThread(interval=10)
self._poll_thread.start()
return self._poll_thread._last_result
def approve(self, approve_env):
print("Approve env:" + approve_env)
# Get Release Client
connection = Connection(base_url=ORG_URL, creds=BasicAuthentication('', PAT))
rm_client = connection.clients.get_release_client()
approvals = (rm_client.get_approvals(project=PROJECT, type_filter="preDeploy")).value
releaseApproval = None
for a in approvals:
# print(a.release.name + " awaiting approval to " + a.release_environment.name)
if approve_env == a.release_environment.name:
# Approve this environment
approval = a
approval.status = "approved"
approval.comments = "Approved by DasDeployer big button"
releaseApproval = rm_client.update_release_approval(approval, PROJECT, approval.id)
print("Approved " + releaseApproval.release.name + " to " + releaseApproval.release_environment.name)
return releaseApproval
class PollStatusThread(threading.Thread):
def __init__(self, interval=10):
super(PollStatusThread, self).__init__()
self.daemon = True
self.stoprequest = threading.Event()
self.regularInterval = interval
self.delay = interval
self._connection = Connection(base_url=ORG_URL, creds=BasicAuthentication('', PAT))
self._build_client = self._connection.clients.get_build_client()
self._rm_client = self._connection.clients.get_release_client()
self._last_result = QueryResult()
def start(self):
self.stoprequest.clear()
super(PollStatusThread, self).start()
def stop(self, timeout=10):
self.stoprequest.set()
self.join(timeout)
def join(self, timeout=None):
super(PollStatusThread, self).join(timeout)
if self.is_alive():
assert timeout is not None
raise RuntimeError(
"PollStatusThread failed to die within %d seconds" % timeout)
def run(self):
while True:
# Wait a bit then poll the server again
result = QueryResult()
buildDef = self._build_client.get_definition(PROJECT, BUILD_PIPELINE_ID, include_latest_builds=True)
if buildDef.latest_completed_build.id == buildDef.latest_build.id:
result.status = QueryResultStatus.BUILD_COMPLETE
result.latest_build = buildDef.latest_build
result.last_build = buildDef.latest_completed_build
else:
# A build is in progress
result.status = QueryResultStatus.BUILD_IN_PROGRESS
result.latest_build = buildDef.latest_build
result.last_build = buildDef.latest_completed_build
# Figure out if we should enable approval toggles
# First see if any of the environments are deploying
for e in ENVIRONMENTS:
deployments = (self._rm_client.get_deployments(PROJECT, definition_id=RELEASE_ID, definition_environment_id=ENVIRONMENTS[e], top=1, deployment_status="all")).value
deploy_env = (deployments[0].deployment_status == "inProgress" or deployments[0].operation_status == "QueuedForAgent")
enable_env = (deployments[0].deployment_status == "inProgress" or deployments[0].deployment_status == "notDeployed")
if e == 'Dev':
result.enable_dev = enable_env
result.deploying_dev = deploy_env
result.dev_release = deployments[0].release
elif e == 'Stage':
result.enable_stage = enable_env
result.deploying_stage = deploy_env
result.stage_release = deployments[0].release
elif e == 'Prod':
result.enable_prod = enable_env
result.deploying_prod = deploy_env
result.prod_release = deployments[0].release
#if deploy_env:
# print(deployments[0])
# print(e + ": " + deployments[0].release.name + " - " + deployments[0].deployment_status + " q:" + deployments[0].queued_on.strftime("%Y-%m-%d %H:%M") )
if (self._last_result.status != result.status or
(self._last_result.latest_build is not None and
self._last_result.latest_build.last_changed_date != result.latest_build.last_changed_date
) or
self._last_result.enable_dev != result.enable_dev or
self._last_result.enable_stage != result.enable_stage or
self._last_result.enable_prod != result.enable_prod or
self._last_result.deploying_dev != result.deploying_dev or
self._last_result.deploying_stage != result.deploying_stage or
self._last_result.deploying_prod != result.deploying_prod
):
# Something has changed
print("change")
self._last_result = result
# At the end of the thread execution, wait a bit and then poll again
if self.stoprequest.wait(self.delay):
break
def pipemain():
# Create a connection to the org
connection = Connection(base_url=ORG_URL, creds=BasicAuthentication('', PAT))
# Get the build status
build_client = connection.clients.get_build_client()
buildDef = build_client.get_definition(PROJECT, BUILD_PIPELINE_ID, include_latest_builds=True)
if buildDef.latest_completed_build.id == buildDef.latest_build.id:
print("Build " + buildDef.latest_build.definition.name + " " + buildDef.latest_build.build_number + " " + buildDef.latest_completed_build.result)
else:
# A build is in progress
print("Build " + buildDef.latest_build.definition.name + " " + buildDef.latest_build.build_number + " " + buildDef.latest_completed_build.result + " (" + buildDef.latest_build.status + ")")
# Get Release Client
rm_client = connection.clients.get_release_client()
# See what environments we have and the status of their latest deployments
release = rm_client.get_release_definition(PROJECT, RELEASE_ID)
for e in release.environments:
deployments = (rm_client.get_deployments(PROJECT, definition_id=RELEASE_ID, definition_environment_id=e.id, top=1, deployment_status="all")).value
print(str(e.id) + " - " + e.name + ": " + deployments[0].release.name + " - " + deployments[0].deployment_status )
# Look up pending approvals
approvals = (rm_client.get_approvals(project=PROJECT, type_filter="preDeploy")).value
for a in approvals:
print(a.release.name + " awaiting approval to " + a.release_environment.name)
if len(approvals) > 0:
# Approve one of them
approval = approvals[0]
approval.status = "approved"
approval.comments = "Approved by DasDeployer"
releaseApproval = rm_client.update_release_approval(approval, PROJECT, approval.id)
print("Approved " + releaseApproval.release.name + " to " + releaseApproval.release_environment.name)
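# Hedged usage sketch (the environment name must match a stage configured in Azure
# DevOps; values here are illustrative only):
# pipelines = Pipelines()
# status = pipelines.get_status()   # first call starts the background PollStatusThread
# if status.enable_dev:
#     pipelines.approve("Dev")      # approves the pending pre-deploy gate for that stage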
| python |
import os
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import SafeConfigParser as ConfigParser
class ConfigParameter(object):
def __init__(self, name, value_type):
self.name = name
self.value_type = value_type
def __repr__(self):
return "ConfigParameter({!r}, {!r})".format(self.name, self.value_type)
def parse(self, section, config_parser):
if int == self.value_type:
return config_parser.getint(section, self.name)
if bool == self.value_type:
return config_parser.getboolean(section, self.name)
if float == self.value_type:
return config_parser.getfloat(section, self.name)
if list == self.value_type:
v = config_parser.get(section, self.name)
return v.split(" ")
return config_parser.get(section, self.name)
def interpret(self, config_dict):
value = config_dict.get(self.name)
if value is None:
raise Exception('Missing configuration item: ' + self.name)
try:
if str == self.value_type:
return str(value)
if int == self.value_type:
return int(value)
if bool == self.value_type:
if "true" == value.lower():
return True
elif "false" == value.lower():
return False
else:
raise Exception(self.name + " must be True or False")
if float == self.value_type:
return float(value)
if list == self.value_type:
return value.split(" ")
except Exception as e:
raise Exception("Error interpreting config item '{}' with value '{}' and type {}".format(
self.name, value, self.value_type))
raise Exception("Unexpected configuration type: " + repr(self.value_type))
def format(self, value):
if list == self.value_type:
return " ".join(value)
return str(value)
def write_pretty_params(f, config, params):
param_names = [p.name for p in params]
longest_name = max(len(name) for name in param_names)
param_names.sort()
params = dict((p.name, p) for p in params)
for name in param_names:
p = params[name]
f.write('{} = {}\n'.format(p.name.ljust(longest_name), p.format(getattr(config, p.name))))
class Config(object):
''' A simple container for user-configurable parameters of NEAT. '''
__params = [ConfigParameter('pop_size', int),
ConfigParameter('fitness_criterion', str),
ConfigParameter('fitness_threshold', float),
ConfigParameter('reset_on_extinction', bool)]
def __init__(self, genome_type, reproduction_type, species_set_type, stagnation_type, filename):
# Check that the provided types have the required methods.
assert hasattr(genome_type, 'parse_config')
assert hasattr(reproduction_type, 'parse_config')
assert hasattr(species_set_type, 'parse_config')
assert hasattr(stagnation_type, 'parse_config')
self.genome_type = genome_type
self.reproduction_type = reproduction_type
self.species_set_type = species_set_type
self.stagnation_type = stagnation_type
if not os.path.isfile(filename):
raise Exception('No such config file: ' + os.path.abspath(filename))
parameters = ConfigParser()
with open(filename) as f:
if hasattr(parameters, 'read_file'):
parameters.read_file(f)
else:
parameters.readfp(f)
# NEAT configuration
if not parameters.has_section('NEAT'):
raise RuntimeError("'NEAT' section not found in NEAT configuration file.")
for p in self.__params:
setattr(self, p.name, p.parse('NEAT', parameters))
# Parse type sections.
genome_dict = dict(parameters.items(genome_type.__name__))
self.genome_config = genome_type.parse_config(genome_dict)
species_set_dict = dict(parameters.items(species_set_type.__name__))
self.species_set_config = species_set_type.parse_config(species_set_dict)
stagnation_dict = dict(parameters.items(stagnation_type.__name__))
self.stagnation_config = stagnation_type.parse_config(stagnation_dict)
reproduction_dict = dict(parameters.items(reproduction_type.__name__))
self.reproduction_config = reproduction_type.parse_config(reproduction_dict)
def save(self, filename):
with open(filename, 'w') as f:
f.write('# The `NEAT` section specifies parameters particular to the NEAT algorithm\n')
f.write('# or the experiment itself. This is the only required section.\n')
f.write('[NEAT]\n')
write_pretty_params(f, self, self.__params)
f.write('\n[{0}]\n'.format(self.genome_type.__name__))
self.genome_type.write_config(f, self.genome_config)
f.write('\n[{0}]\n'.format(self.species_set_type.__name__))
self.species_set_type.write_config(f, self.species_set_config)
f.write('\n[{0}]\n'.format(self.stagnation_type.__name__))
self.stagnation_type.write_config(f, self.stagnation_config)
f.write('\n[{0}]\n'.format(self.reproduction_type.__name__))
self.reproduction_type.write_config(f, self.reproduction_config)
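# Hedged usage sketch (the neat-python type names and the config filename below are
# assumptions, not defined in this module):
# from neat import DefaultGenome, DefaultReproduction, DefaultSpeciesSet, DefaultStagnation
# config = Config(DefaultGenome, DefaultReproduction, DefaultSpeciesSet,
#                 DefaultStagnation, 'config-feedforward')
# config.save('config-feedforward-copy')  # writes the parsed sections back to disk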
| python |
from .vector import Vector
from pygame import Rect
import pygame
from .util import BASE_PATH
pygame.font.init()
DEMIBOLD_BIG = pygame.font.Font(BASE_PATH + '/../lightsouls/data/LucidaSansDemiBold.ttf', 20)
DEFAULT_COLOR = GREEN = (128, 255, 128, 0)
class Frame:
"""
Rectangular piece of the screen.
Manages relative positions of objects.
"""
def __init__(self, screen, rect, font=DEMIBOLD_BIG):
"""
rect - position and size of the frame in pixels (x, y, x, y)
"""
self.screen = screen
self.rect = rect
self.font = font
@property
def pos(self):
return Vector(self.rect.x, self.rect.y)
@property
def size(self):
return Vector(self.rect.width, self.rect.height)
def get_dest_rect(self, rect):
"""Calculate absolute position of the given rect."""
pos = self.pos + Vector(rect.x, rect.y)
return Rect(pos.x, pos.y, rect.width, rect.height)
def blit(self, bitmap, rect, sourcerect):
"""Copies graphics on the screen (quick)."""
destrect = self.get_dest_rect(rect)
self.screen.blit(bitmap, destrect, sourcerect)
def print_text(self, text, pos, font=DEMIBOLD_BIG, color=DEFAULT_COLOR):
"""Writes text on the screen."""
font = font or self.font
color = color or self.color
rendered = font.render(text, 1, color)
pos = self.pos + pos
self.screen.display.blit(rendered, tuple(pos))
def clear(self):
"""Clears the area in the frame."""
self.screen.blit(self.screen.background, self.rect, \
Rect(0, 0, self.size.x, self.size.y))
def __repr__(self):
return "[Frame '%s']"%(str(self.rect))
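# Hedged usage sketch (the screen object is assumed to provide the .blit, .background
# and .display members used above; values are illustrative):
# frame = Frame(screen, Rect(10, 10, 200, 100))
# frame.print_text("Hello", Vector(5, 5))   # rendered at absolute position (15, 15)
# frame.clear()                             # repaints the background over the frame area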
| python |
import pylab as pl
fig = pl.figure()
fig.subplots_adjust(bottom=0.025, left=0.025, top = 0.975, right=0.975)
pl.subplot(2, 1, 1)
pl.xticks(()), pl.yticks(())
pl.subplot(2, 3, 4)
pl.xticks(())
pl.yticks(())
pl.subplot(2, 3, 5)
pl.xticks(())
pl.yticks(())
pl.subplot(2, 3, 6)
pl.xticks(())
pl.yticks(())
pl.show()
| python |
from .FeatureDescriptionLabel import *
from .FeatureExtractionLogic import *
from .FeatureWidgets import *
| python |
'''
The np.npv() function estimates the present value for a given set of future cash
flows. The first input value is the discount rate, and the second input is an array of
future cash flows. Despite its name, np.npv() is not a true NPV function: it is
actually a PV function that treats the first cash flow as occurring today (time 0).
Excel's NPV function instead assumes the first cash flow happens at the end of the
first period, which is why the two results differ by a factor of (1 + rate).
'''
import scipy as sp
cashflows=[50,40,20,10,50]
npv=sp.npv(0.1,cashflows) #estimate NPV
npvrounded = round(npv,2)
# Note: the NPV calculated here is not consistent with Excel's NPV;
# the worked example below shows why.
print(npvrounded) | python |
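# Hedged worked example (manual arithmetic added for illustration, not part of the
# original script). If sp.npv follows numpy's convention, it should reproduce
# pv_numpy_style below; Excel's NPV instead returns pv_excel_style.
rate = 0.1
pv_numpy_style = sum(cf / (1 + rate) ** t for t, cf in enumerate(cashflows))  # ~144.56
pv_excel_style = pv_numpy_style / (1 + rate)                                  # ~131.41
print(round(pv_numpy_style, 2), round(pv_excel_style, 2))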
from tempfile import NamedTemporaryFile
import boto3
from rivet import inform, s3_path_utils
from rivet.s3_client_config import get_s3_client_kwargs
from rivet.storage_formats import get_storage_fn
def write(obj, path, bucket=None,
show_progressbar=True, *args, **kwargs):
"""
Writes an object to a specified file format and uploads it to S3.
Storage format is determined by file extension, to prevent
extension-less files in S3.
Args:
obj (object): The object to be uploaded to S3
path (str): The path to save obj to
bucket (str, optional): The S3 bucket to save 'obj' in
        show_progressbar (bool, default True): Whether to show a progress bar
Returns:
str: The full path to the object in S3, without the 's3://' prefix
"""
path = s3_path_utils.clean_path(path)
bucket = bucket or s3_path_utils.get_default_bucket()
bucket = s3_path_utils.clean_bucket(bucket)
filetype = s3_path_utils.get_filetype(path)
write_fn = get_storage_fn(filetype, 'write')
s3 = boto3.client('s3')
with NamedTemporaryFile(suffix='.' + filetype) as tmpfile:
inform('Writing object to tempfile...')
write_fn(obj, tmpfile, *args, **kwargs)
s3_kwargs = get_s3_client_kwargs(tmpfile.name, bucket,
operation='write',
show_progressbar=show_progressbar)
inform('Uploading to s3://{}/{}...'.format(bucket, path))
s3.upload_file(tmpfile.name, bucket, path, **s3_kwargs)
return '/'.join([bucket, path])
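# Hedged usage sketch (the bucket name, key, and availability of a 'csv' storage format
# are assumptions, not confirmed by this module):
# import pandas as pd
# df = pd.DataFrame({'a': [1, 2, 3]})
# write(df, 'models/latest/features.csv', bucket='my-data-bucket')
# # serializes via the writer returned by get_storage_fn('csv', 'write') and uploads
# # the result to s3://my-data-bucket/models/latest/features.csv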
def upload_file(local_file_path, path, bucket=None, show_progressbar=True):
"""
Uploads a file from local storage directly to S3
Args:
local_file_path (str): Location of the file to upload
path (str): The key the file is to be stored under in S3
bucket (str, optional): The S3 bucket to store the object in
        show_progressbar (bool, default True): Whether to show a progress bar
"""
bucket = bucket or s3_path_utils.get_default_bucket()
if local_file_path is None:
raise ValueError('A local file location must be provided.')
s3 = boto3.client('s3')
s3_kwargs = get_s3_client_kwargs(local_file_path, bucket,
operation='write',
show_progressbar=show_progressbar)
s3.upload_file(local_file_path, bucket, path, **s3_kwargs)
| python |
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
tf.random.set_seed(2021)
from models import DNMC, NMC, NSurv, MLP, train_model, evaluate_model
df = pd.read_csv('http://pssp.srv.ualberta.ca/system/predictors/datasets/000/000/032/original/All_Data_updated_may2011_CLEANED.csv?1350302245')
numrc_cols = df.nunique() > 2
df.loc[:, numrc_cols] = (df.loc[:, numrc_cols] - df.loc[:, numrc_cols].mean()) / df.loc[:, numrc_cols].std()
OUTCOMES = ['SURVIVAL', 'CENSORED']
X = df.drop(OUTCOMES, axis=1).sample(frac=1, random_state=2021)
X = X.values
print('There are', X.shape[1], 'features')
from generate_data import generate_semi_synthetic, generate_synth_censoring, onehot
### BEGIN COLLECTING RESULTS HERE ###
all_results = []
all_weight_results = []
LEARNING_RATE = 1e-3
BATCH_SIZE = 100
N_BINS = 10
MAX_EPOCHS = 500
lr = 0.03
DATATYPE = 'synth_censoring'
DEPENDENT_CENSORING = False
RESULTS_NAME = '../results/NACD_' + DATATYPE + '.csv'
assert DATATYPE in ['synth_censoring', 'synthetic', 'real']
# NOTE that we are skipping importance weights here.
for random_state in [2020, 2016, 2013]:
for num_distinct in [4, 8, 12, 16]:
num_shared = 20 - num_distinct
print('')
print('Starting runs with random state', random_state, 'and %i distinct features' % num_distinct)
print('')
if DATATYPE == 'synthetic':
synth = generate_semi_synthetic(
X, num_distinct, num_shared, N_BINS, random_state,
e_prob_spread=3.,
dependent_censoring=DEPENDENT_CENSORING)
elif DATATYPE == 'synth_censoring':
synth = generate_synth_censoring(
X, df['SURVIVAL'].values, 1 - df['CENSORED'].values,
num_distinct, N_BINS, random_state,
e_prob_spread=3.,
dependent_censoring=DEPENDENT_CENSORING)
x_train, x_val, x_test = X[:1500], X[1500:1900], X[1900:]
y = onehot(synth['y_disc'], ncategories=10)
y_train, y_val, y_test = y[:1500], y[1500:1900], y[1900:]
s_train, s_val, s_test = synth['s'][:1500], synth['s'][1500:1900], synth['s'][1900:]
e_train, e_val, e_test = synth['e'][:1500], synth['e'][1500:1900], synth['e'][1900:]
#for lr in np.logspace(-2, -1, 6):
# Run NMC
print('Running NMC with lr =', lr)
model = NMC(n_bins=N_BINS, lr=lr, dependent_censoring=DEPENDENT_CENSORING)
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
all_results.append(
evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state))
# Run NSurv
print('Running NSurv with lr =', lr)
model = NSurv(n_bins=N_BINS, lr=lr, dependent_censoring=DEPENDENT_CENSORING)
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
all_results.append(
evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state))
# Run MLP
print('Running MLP with lr =', lr)
model = MLP(lr=lr)
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
all_results.append(
evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state))
# Run DNMC
for ld in [1., 10.]:
print('Running DNMC (with Psi) with lr =', lr, 'and ld =', ld)
model = DNMC(n_bins=N_BINS, lr=lr, ld=ld, dependent_censoring=DEPENDENT_CENSORING)
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
all_results.append(
evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state))
print('Running DNMC (NO Psi) with lr =', lr, 'and ld =', ld)
model = DNMC(n_bins=N_BINS, lr=lr, ld=ld, include_psi=False)
train_model(
model, (x_train, y_train, s_train), (x_val, y_val, s_val),
MAX_EPOCHS, batch_size=BATCH_SIZE, learning_rate=LEARNING_RATE)
results = evaluate_model(
model, (x_test, y_test, s_test), e_test,
(synth['shared_features'], synth['tc_features'], synth['e_features']),
dataset='nacd', random_state=random_state)
results['model'] = 'DNMC_noPsi'
all_results.append(results)
pd.DataFrame(all_results).to_csv(RESULTS_NAME)
| python |
# test return statement
def f():
return
print(f())
def g():
return 1
print(g())
def f(x):
return 1 if x else 2
print(f(0), f(1))
print("PASS") | python |
from contextlib import suppress
import warnings
import urllib.parse
import calendar
from cromulent import model, vocab
from cromulent.model import factory
from cromulent.extract import extract_physical_dimensions
from pipeline.util.cleaners import ymd_to_datetime
factory.auto_id_type = 'uuid'
vocab.add_art_setter()
def add_crom_data(data: dict, what=None):
data['_CROM_FACTORY'] = factory
data['_LOD_OBJECT'] = what
return data
def get_crom_object(data: dict):
if data is None:
return None
return data.get('_LOD_OBJECT')
def remove_crom_object(data: dict):
with suppress(KeyError):
del data['_LOD_OBJECT']
del data['_CROM_FACTORY']
return data
class MakeLinkedArtRecord:
def set_referred_to_by(self, data, thing):
for notedata in data.get('referred_to_by', []):
if isinstance(notedata, tuple):
content, itype = notedata
if itype is not None:
if isinstance(itype, type):
note = itype(content=content)
elif isinstance(itype, object):
note = itype
note.content = content
else:
note = vocab.Note(content=content)
note.classified_as = itype
elif isinstance(notedata, model.BaseResource):
note = notedata
elif isinstance(notedata, str):
note = vocab.Note(content=notedata)
else:
note = notedata
thing.referred_to_by = note
def set_properties(self, data, thing):
'''
The following keys in `data` are handled to set properties on `thing`:
`referred_to_by`
`identifiers`
`names` - An array of arrays of one or two elements. The first element of each
array is a name string, and is set as the value of a `model.Name` for
`thing`. If there is a `dict` second element, its contents are used to
assert properties of the name:
- An array associated with the key `'referred_to_by'` will be used to
assert that the `LinguisticObject`s (or `dict`s representing a
`LinguisticObject`) refer to the name.
- A value associated with the key `'classified_as'` (either a
`model.Type` or a cromulent vocab class) will be asserted as the
classification of the `model.Name`.
Example data:
{
'names': [
['J. Paul Getty'],
[
'Getty',
{
'classified_as': model.Type(ident='http://vocab.getty.edu/aat/300404670', label='Primary Name'),
# or: 'classified_as': vocab.PrimaryName,
'referred_to_by': [
{'uri': 'tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:knoedler#K-ROW-1-2-3'},
model.LinguisticObject(ident='tag:getty.edu,2019:digital:pipeline:REPLACE-WITH-UUID:knoedler#K-ROW-1-7-10'),
]
}
]
]
}
'''
self.set_referred_to_by(data, thing)
for c in data.get('classified_as', []):
thing.classified_as = c
for identifier in data.get('identifiers', []):
if isinstance(identifier, tuple):
content, itype = identifier
if itype is not None:
if isinstance(itype, type):
ident = itype(ident='', content=content)
if not content:
warnings.warn(f'Setting empty identifier on {thing.id}')
elif isinstance(itype, object):
ident = itype
ident.content = content
if not content:
warnings.warn(f'Setting empty identifier on {thing.id}')
else:
ident = model.Identifier(ident='')
if not content:
warnings.warn(f'Setting empty identifier on {thing.id}')
ident.content = content
ident.classified_as = itype
else:
ident = identifier
# c = ident.content
thing.identified_by = ident
if not hasattr(thing, '_label') and 'label' in data:
setattr(thing, '_label', data['label'])
for namedata in data.get('names', []):
# namedata should take the form of:
# ["A. Name"]
# ["A. Name", {'referred_to_by': [{'uri': 'URI-OF-LINGUISTIC_OBJECT'}, model.LinguisticObject()]}]
if isinstance(namedata, tuple):
name, *properties = namedata
else:
name = namedata
properties = []
name_kwargs = {}
for props in properties:
if 'classified_as' in props:
cl = props['classified_as']
del props['classified_as']
name_kwargs['title_type'] = cl
n = set_la_name(thing, name, **name_kwargs)
self.set_lo_properties(n, *properties)
def set_lo_properties(self, n, *properties):
for props in properties:
assert isinstance(props, dict)
for ref in props.get('referred_to_by', []):
if isinstance(ref, dict):
if 'uri' in ref:
l = model.LinguisticObject(ident=ref['uri'])
                    elif 'uuid' in ref:
l = model.LinguisticObject(ident="urn:uuid:%s" % ref['uuid'])
else:
                        raise Exception(f'MakeLinkedArtRecord attempted to set a reference on name {n} with a non-identified reference: {ref}')
elif isinstance(ref, object):
l = ref
else:
                    raise Exception(f'MakeLinkedArtRecord attempted to set a reference on name {n} with an unrecognized reference type: {ref}')
n.referred_to_by = l
def __call__(self, data: dict):
if '_LOD_OBJECT' in data:
thing = data['_LOD_OBJECT']
else:
otype = data['object_type']
otypes = otype if isinstance(otype, list) else [otype]
kwargs = {}
if 'uri' in data:
kwargs['ident'] = data['uri']
elif 'uuid' in data:
kwargs['ident'] = "urn:uuid:%s" % data['uuid']
else:
raise Exception('MakeLinkedArtRecord called with a dictionary with neither uuid or uri member')
thing = vocab.make_multitype_obj(*otypes, **kwargs)
self.set_properties(data, thing)
return add_crom_data(data=data, what=thing)
def set_la_name(thing, value, title_type=None, set_label=False):
if value is None:
return None
if isinstance(value, tuple):
label, language = value
else:
label = value
language = None
if set_label:
if not label:
warnings.warn(f'Setting empty label on {thing.id}')
thing._label = label
name = model.Name(ident='', content=label)
if title_type is not None:
if isinstance(title_type, model.Type):
name.classified_as = title_type
else:
vocab.add_classification(name, title_type)
thing.identified_by = name
if language is not None:
name.language = language
return name
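# A small usage sketch for set_la_name(); the object and values below are made
# up for illustration. The value may be a plain string or a (string, language)
# tuple, e.g.:
#   set_la_name(obj, 'Mona Lisa', title_type=vocab.PrimaryName, set_label=True)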
class MakeLinkedArtLinguisticObject(MakeLinkedArtRecord):
# TODO: document the expected format of data['translations']
# TODO: document the expected format of data['identifiers']
def set_properties(self, data, thing):
super().set_properties(data, thing)
# TODO: this whole title_type thing isn't right. most of the identifiers below aren't titles
title_type = model.Type(ident='http://vocab.getty.edu/aat/300417193', label='Title')
name = None
if 'label' in data:
name = set_la_name(thing, data['label'], title_type, set_label=True)
for author in data.get('created_by', []):
thing.created_by = author
for a in data.get('used_for', []):
thing.used_for = a
for a in data.get('about', []):
thing.about = a
for t in data.get('translations', []):
n = set_la_name(thing, t, title_type)
if name is not None:
n.translation_of = name
for content, itype, notes in data.get('qualified_identifiers', []):
ident = itype(content=content)
if not content:
warnings.warn(f'Setting empty identifier on {thing.id}')
thing.identified_by = ident
for n in notes:
ident.referred_to_by = n
code_type = None # TODO: is there a model.Type value for this sort of code?
for c in data.get('classifications', []):
if isinstance(c, model.Type):
classification = c
else:
cid, label = c
name = model.Name()
name.classified_as = title_type
name.content = label
classification = model.Type(label=label)
if not label:
warnings.warn(f'Setting empty name on {classification.id}')
classification.identified_by = name
code = model.Identifier()
code.classified_as = code_type
if not cid:
warnings.warn(f'Setting empty identifier on {code.id}')
code.content = cid
classification.identified_by = code
thing.about = classification
for c in data.get('indexing', []):
if isinstance(c, tuple):
cid, label = c
name = model.Name()
name.classified_as = title_type
name.content = label
indexing = model.Type(label=label)
if not label:
warnings.warn(f'Setting empty name on {indexing.id}')
indexing.identified_by = name
code = model.Identifier()
code.classified_as = code_type
code.content = cid
if not cid:
warnings.warn(f'Setting empty identifier on {code.id}')
indexing.identified_by = code
else:
indexing = c
thing.about = indexing
parents = data.get('part_of', [])
for parent_data in parents:
parent = get_crom_object(parent_data)
thing.part_of = parent
children = data.get('part', [])
for child_data in children:
child = get_crom_object(child_data)
thing.part = child
for carrier in data.get('carried_by', []):
hmo = get_crom_object(carrier)
thing.carried_by = hmo
for dimension in data.get('dimensions', []):
thing.dimension = dimension
def __call__(self, data: dict):
if 'object_type' not in data or data['object_type'] == []:
data['object_type'] = model.LinguisticObject
return super().__call__(data)
class MakeLinkedArtHumanMadeObject(MakeLinkedArtRecord):
def set_properties(self, data, thing):
super().set_properties(data, thing)
title_type = model.Type(ident='http://vocab.getty.edu/aat/300417193', label='Title') # TODO: is this the right aat URI?
if 'label' in data:
set_la_name(thing, data['label'], title_type, set_label=True)
if 'title' in data:
# TODO: This needs to be a PrimaryName, not a Name classified as a Title
title = data['title']
if isinstance(title, str):
set_la_name(thing, title, title_type, set_label=True)
elif isinstance(title, (list, tuple)):
value, *properties = title
n = set_la_name(thing, value, title_type, set_label=True)
n.classified_as = title_type
self.set_lo_properties(n, *properties)
thing.identified_by = n
parents = data.get('part_of', [])
for parent_data in parents:
parent = get_crom_object(parent_data)
thing.part_of = parent
for carried in data.get('carries', []):
lo = get_crom_object(carried)
thing.carries = lo
for coll in data.get('member_of', []):
thing.member_of = coll
for annotation in data.get('annotations', []):
a = model.Annotation(ident='', content=annotation)
thing.carries = a
class MakeLinkedArtAbstract(MakeLinkedArtLinguisticObject):
pass
class MakeLinkedArtAgent(MakeLinkedArtRecord):
def set_properties(self, data, thing):
super().set_properties(data, thing)
with suppress(ValueError, TypeError):
ulan = int(data.get('ulan'))
if ulan:
thing.exact_match = model.BaseResource(ident=f'http://vocab.getty.edu/ulan/{ulan}')
if 'name' in data:
title_type = model.Type(ident='http://vocab.getty.edu/aat/300417193', label='Title')
name = data['name']
if name:
if isinstance(name, str):
set_la_name(thing, name, title_type, set_label=True)
elif isinstance(name, (list, tuple)):
value, *properties = name
n = model.Name(ident='', content=value)
n.classified_as = title_type
self.set_lo_properties(n, *properties)
thing.identified_by = n
for uri in data.get('exact_match', []):
thing.exact_match = uri
for sdata in data.get('sojourns', []):
label = sdata.get('label', 'Sojourn activity')
stype = sdata.get('type', model.Activity)
act = stype(ident='', label=label)
ts = get_crom_object(sdata.get('timespan'))
place = get_crom_object(sdata.get('place'))
act.timespan = ts
act.took_place_at = place
thing.carried_out = act
self.set_referred_to_by(sdata, act)
# Locations are names of residence places (P74 -> E53)
# XXX FIXME: Places are their own model
if 'places' in data:
for p in data['places']:
if isinstance(p, model.Place):
pl = p
elif isinstance(p, dict):
pl = get_crom_object(p)
else:
pl = model.Place(ident='', label=p)
#pl._label = p['label']
#nm = model.Name()
#nm.content = p['label']
#pl.identified_by = nm
#for s in p['sources']:
# l = model.LinguisticObject(ident="urn:uuid:%s" % s[1])
# l._label = _row_label(s[2], s[3], s[4])
# pl.referred_to_by = l
thing.residence = pl
class MakeLinkedArtOrganization(MakeLinkedArtAgent):
def set_properties(self, data, thing):
super().set_properties(data, thing)
with suppress(KeyError):
thing._label = str(data['label'])
for event in data.get('events', []):
thing.carried_out = event
for n in data.get('nationality', []):
thing.classified_as = n
if data.get('formation'):
b = model.Formation()
ts = model.TimeSpan(ident='')
if 'formation_clean' in data and data['formation_clean']:
if data['formation_clean'][0]:
ts.begin_of_the_begin = data['formation_clean'][0].strftime("%Y-%m-%dT%H:%M:%SZ")
if data['formation_clean'][1]:
ts.end_of_the_end = data['formation_clean'][1].strftime("%Y-%m-%dT%H:%M:%SZ")
verbatim = data['formation']
ts._label = verbatim
ts.identified_by = model.Name(ident='', content=verbatim)
b.timespan = ts
b._label = "Formation of %s" % thing._label
thing.formed_by = b
if data.get('dissolution'):
d = model.Dissolution()
ts = model.TimeSpan(ident='')
if 'dissolution_clean' in data and data['dissolution_clean']:
if data['dissolution_clean'][0]:
ts.begin_of_the_begin = data['dissolution_clean'][0].strftime("%Y-%m-%dT%H:%M:%SZ")
if data['dissolution_clean'][1]:
ts.end_of_the_end = data['dissolution_clean'][1].strftime("%Y-%m-%dT%H:%M:%SZ")
verbatim = data['dissolution']
ts._label = verbatim
ts.identified_by = model.Name(ident='', content=verbatim)
d.timespan = ts
d._label = "Dissolution of %s" % thing._label
thing.dissolved_by = d
def __call__(self, data: dict):
if 'object_type' not in data or data['object_type'] == []:
data['object_type'] = model.Group
return super().__call__(data)
class MakeLinkedArtAuctionHouseOrganization(MakeLinkedArtOrganization):
def __call__(self, data: dict):
if 'object_type' not in data or data['object_type'] == []:
data['object_type'] = vocab.AuctionHouseOrg
return super().__call__(data)
# XXX Reconcile with provenance.timespan_from_outer_bounds
def make_ymd_timespan(data: dict, start_prefix="", end_prefix="", label=""):
y = f'{start_prefix}year'
m = f'{start_prefix}month'
d = f'{start_prefix}day'
y2 = f'{end_prefix}year'
m2 = f'{end_prefix}month'
d2 = f'{end_prefix}day'
t = model.TimeSpan(ident='')
if not label:
label = ymd_to_label(data[y], data[m], data[d])
if y != y2:
lbl2 = ymd_to_label(data[y2], data[m2], data[d2])
label = f'{label} to {lbl2}'
t._label = label
if not label:
warnings.warn(f'Setting empty name on {t.id}')
t.identified_by = model.Name(ident='', content=label)
t.begin_of_the_begin = ymd_to_datetime(data[y], data[m], data[d])
t.end_of_the_end = ymd_to_datetime(data[y2], data[m2], data[d2], which="end")
return t
def ymd_to_label(year, month, day):
# Return monthname day year
if not year:
return "Unknown"
if not month:
return str(year)
if not isinstance(month, int):
try:
month = int(month)
month_name = calendar.month_name[month]
        except (ValueError, IndexError):
# Assume it's already a name of a month
month_name = month
else:
month_name = calendar.month_name[month]
if day:
return f'{month_name} {day}, {year}'
else:
return f'{month_name} {year}'
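# Illustrative outputs of ymd_to_label() (the dates are made up):
#   ymd_to_label(1885, 4, 15)      -> 'April 15, 1885'
#   ymd_to_label(1885, 4, None)    -> 'April 1885'
#   ymd_to_label(None, None, None) -> 'Unknown'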
class MakeLinkedArtPerson(MakeLinkedArtAgent):
def set_properties(self, data, who):
super().set_properties(data, who)
with suppress(KeyError):
who._label = str(data['label'])
for ns in ['aat_nationality_1', 'aat_nationality_2','aat_nationality_3']:
# add nationality
n = data.get(ns)
# XXX Strip out antique / modern anonymous as a nationality
if n:
if int(n) in [300310546,300264736]:
break
natl = vocab.Nationality(ident="http://vocab.getty.edu/aat/%s" % n)
who.classified_as = natl
natl._label = str(data[ns+'_label'])
else:
break
for n in data.get('nationality', []):
if isinstance(n, model.BaseResource):
who.classified_as = n
for n in data.get('occupation', []):
if isinstance(n, model.BaseResource):
who.classified_as = n
# nationality field can contain other information, but not useful.
# XXX Intentionally ignored but validate with GRI
if data.get('active_early') or data.get('active_late'):
act = vocab.Active()
ts = model.TimeSpan(ident='')
if data['active_early']:
ts.begin_of_the_begin = "%s-01-01:00:00:00Z" % (data['active_early'],)
ts.end_of_the_begin = "%s-01-01:00:00:00Z" % (data['active_early']+1,)
if data['active_late']:
ts.begin_of_the_end = "%s-01-01:00:00:00Z" % (data['active_late'],)
ts.end_of_the_end = "%s-01-01:00:00:00Z" % (data['active_late']+1,)
ts._label = "%s-%s" % (data['active_early'], data['active_late'])
act.timespan = ts
who.carried_out = act
for event in data.get('events', []):
who.carried_out = event
if data.get('birth'):
b = model.Birth()
ts = model.TimeSpan(ident='')
if 'birth_clean' in data and data['birth_clean']:
if data['birth_clean'][0]:
ts.begin_of_the_begin = data['birth_clean'][0].strftime("%Y-%m-%dT%H:%M:%SZ")
if data['birth_clean'][1]:
ts.end_of_the_end = data['birth_clean'][1].strftime("%Y-%m-%dT%H:%M:%SZ")
verbatim = data['birth']
ts._label = verbatim
ts.identified_by = model.Name(ident='', content=verbatim)
b.timespan = ts
b._label = "Birth of %s" % who._label
who.born = b
if data.get('death'):
d = model.Death()
ts = model.TimeSpan(ident='')
if 'death_clean' in data and data['death_clean']:
if data['death_clean'][0]:
ts.begin_of_the_begin = data['death_clean'][0].strftime("%Y-%m-%dT%H:%M:%SZ")
if data['death_clean'][1]:
ts.end_of_the_end = data['death_clean'][1].strftime("%Y-%m-%dT%H:%M:%SZ")
verbatim = data['death']
ts._label = verbatim
ts.identified_by = model.Name(ident='', content=verbatim)
d.timespan = ts
d._label = "Death of %s" % who._label
who.died = d
if 'contact_point' in data:
for p in data['contact_point']:
if isinstance(p, model.Identifier):
pl = p
elif isinstance(p, dict):
pl = get_crom_object(p)
else:
pl = model.Identifier(ident='', content=p)
who.contact_point = pl
def __call__(self, data: dict):
if 'object_type' not in data or data['object_type'] == []:
data['object_type'] = model.Person
return super().__call__(data)
class MakeLinkedArtPlace(MakeLinkedArtRecord):
TYPES = {
'city': vocab.instances['city'],
'province': vocab.instances['province'],
'state': vocab.instances['province'],
'country': vocab.instances['nation'],
'address': vocab.instances['address']
}
def __init__(self, base_uri=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self.base_uri = base_uri
def set_properties(self, data, thing):
name = data.get('name')
data.setdefault('names', [name])
super().set_properties(data, thing)
type_name = data.get('type', 'place').lower()
label = name
parent_data = data.get('part_of')
place_type = MakeLinkedArtPlace.TYPES.get(type_name)
parent = None
if parent_data:
parent_data = self(parent_data)
parent = get_crom_object(parent_data)
if label:
try:
label = f'{label}, {parent._label}'
except AttributeError:
print('*** NO LABEL IN PARENT:' + factory.toString(parent, False))
placeargs = {'label': label}
if data.get('uri'):
placeargs['ident'] = data['uri']
if place_type:
thing.classified_as = place_type
if not name:
warnings.warn(f'Place with missing name on {thing.id}')
if parent:
# print(f'*** Setting parent on place object: {parent}')
thing.part_of = parent
def __call__(self, data: dict):
if 'object_type' not in data or data['object_type'] == []:
data['object_type'] = model.Place
if self.base_uri and not data.get('uri'):
data['uri'] = self.base_uri + urllib.parse.quote(data['name'])
return super().__call__(data)
def make_la_place(data:dict, base_uri=None):
'''
Given a dictionary representing data about a place, construct a model.Place object,
assign it as the crom data in the dictionary, and return the dictionary.
The dictionary keys used to construct the place object are:
- name
- type (one of: 'City' or 'Country')
- part_of (a recursive place dictionary)
'''
TYPES = {
'city': vocab.instances['city'],
'province': vocab.instances['province'],
'state': vocab.instances['province'],
'country': vocab.instances['nation'],
'address': vocab.instances['address']
}
if data is None:
return None
type_name = data.get('type', 'place').lower()
name = data['name']
label = name
parent_data = data.get('part_of')
place_type = TYPES.get(type_name)
parent = None
if parent_data:
parent_data = make_la_place(parent_data, base_uri=base_uri)
parent = get_crom_object(parent_data)
label = f'{label}, {parent._label}'
placeargs = {'label': label}
if data.get('uri'):
placeargs['ident'] = data['uri']
elif base_uri:
data['uri'] = base_uri + urllib.parse.quote(label)
placeargs['ident'] = data['uri']
p = model.Place(**placeargs)
if place_type:
p.classified_as = place_type
if name:
p.identified_by = model.Name(ident='', content=name)
else:
warnings.warn(f'Place with missing name on {p.id}')
if parent:
p.part_of = parent
return add_crom_data(data=data, what=p)
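# A minimal sketch of the nested dict make_la_place() expects; the names and
# URI base are hypothetical:
#   make_la_place(
#       {'name': 'Paris', 'type': 'City', 'part_of': {'name': 'France', 'type': 'Country'}},
#       base_uri='tag:example.org,2019:place:')
# yields a model.Place labelled 'Paris, France', classified as a city and
# part_of a Place for France, stored under the '_LOD_OBJECT' key of the dict.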
class PopulateObject:
'''
Shared functionality for project-specific bonobo node sub-classes to populate
object records.
'''
@staticmethod
def populate_object_statements(data:dict, default_unit=None):
hmo = get_crom_object(data)
sales_record = get_crom_object(data.get('_record'))
format = data.get('format')
if format:
formatstmt = vocab.PhysicalStatement(ident='', content=format)
if sales_record:
formatstmt.referred_to_by = sales_record
hmo.referred_to_by = formatstmt
materials = data.get('materials')
if materials:
matstmt = vocab.MaterialStatement(ident='', content=materials)
if sales_record:
matstmt.referred_to_by = sales_record
hmo.referred_to_by = matstmt
dimstr = data.get('dimensions')
if dimstr:
dimstmt = vocab.DimensionStatement(ident='', content=dimstr)
if sales_record:
dimstmt.referred_to_by = sales_record
hmo.referred_to_by = dimstmt
for dim in extract_physical_dimensions(dimstr, default_unit=default_unit):
if sales_record:
dim.referred_to_by = sales_record
hmo.dimension = dim
else:
pass
# print(f'No dimension data was parsed from the dimension statement: {dimstr}')
| python |
import logging
import os
import random
from collections import defaultdict, namedtuple
from threading import Lock, Thread
from time import sleep
from consul import Consul
instance = namedtuple('serviceinstance', ['address', 'port'])
service = namedtuple('service', ['ts', 'instances'])
class ServiceInstance(instance):
def as_uri(self, scheme='http', path=""):
return "{0}://{1}:{2}/{3}".format(scheme, self.address, self.port, path)
class ServiceCatalog:
def __init__(self, host='localhost', port=8500, interval=30, env=os.environ):
self.online_mode = self._get_online_mode(env)
self.service_overrides = self._get_service_overrides(env)
self._lock = Lock()
self.cache = defaultdict(list)
if self.online_mode:
self.client = Consul(host=host, port=port, consistency='stale')
self.interval = interval
self.updater = Thread(name="Consul-update", target=self._update)
self.updater.daemon = True
self.updater.start()
def _get_online_mode(self, env):
"""
        Returns a flag indicating whether this library should run in online mode (talking to Consul)
        or in offline mode, in which case only environment variables are used to resolve service instances.
:return:
"""
offline_mode = env.get('SERVICECATALOG_OFFLINE_MODE', '0')
# online mode is by default, so it's only disabled
# when offline mode env. var is set to 1
return not offline_mode == '1'
def _get_service_overrides(self, env):
"""
Method returns a map of service_name=ServiceInstance(host, port) which is read from environment variables.
Eg. by setting these env. variables:
SERVICECATALOG_SERVICE_HOST_AVAILABILITY_VARNISH=http://varnish
SERVICECATALOG_SERVICE_PORT_AVAILABILITY_VARNISH=80
the service instance that will be returned for availability-varnish is ServiceInstance("http://varnish", 80).
The port 80 is default and will be returned if it's not specified in env. vars.
:param env:
:return:
"""
service_host_prefix = "SERVICECATALOG_SERVICE_HOST_"
service_port_prefix = "SERVICECATALOG_SERVICE_PORT_"
result = {}
hosts = {}
ports = {}
for key, value in env.items():
if key.startswith(service_host_prefix):
# this should turn "SERVICECATALOG_SERVICE_HOST_AVAILABILITY_VARNISH" into "availability-varnish"
service_name = key.replace(service_host_prefix, '').replace('_', '-').lower()
hosts[service_name] = value
elif key.startswith(service_port_prefix):
# this should turn "SERVICECATALOG_SERVICE_PORT_AVAILABILITY_VARNISH" into "availability-varnish"
service_name = key.replace(service_port_prefix, '').replace('_', '-').lower()
try:
ports[service_name] = int(value)
except Exception:
logging.error(f"Unsupported value {value} for {key} - should be number.")
raise
for service_name, host in hosts.items():
port = ports.get(service_name, 80)
result[service_name] = service(None, [ServiceInstance(host, port)])
return result
def fetch(self, name, index=None):
overriden_value = self.service_overrides.get(name)
if overriden_value:
return overriden_value
if not self.online_mode:
return service(index, [])
try:
idx, result = self.client.catalog.service(name, index=index)
return service(index, [
ServiceInstance(x['ServiceAddress'] or x["Address"],
x["ServicePort"]) for x in result
])
except Exception as e:
logging.error(
"Failed while fetching data for %s", name, exc_info=True)
def _update(self):
self._isrunning = True
while self._isrunning:
for k, v in self.cache.items():
service = self.fetch(k)
if service:
self._lock.acquire()
self.cache[k] = service
self._lock.release()
sleep(self.interval)
def stop(self):
self._isrunning = False
def __getitem__(self, name):
self._lock.acquire()
if not self.cache[name]:
logging.info(
"Adding new service `%s` to the service catalog" % name)
self.cache[name] = self.fetch(name)
result = random.choice(self.cache[name].instances)
self._lock.release()
if not result:
raise KeyError("Can't find service with name %s" % name)
return result
def all(self, name):
self._lock.acquire()
if not self.cache[name]:
logging.info(
"Adding new service `%s` to the service catalog" % name)
self.cache[name] = self.fetch(name)
self._lock.release()
return self.cache[name].instances
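# A minimal usage sketch of the offline mode described in the docstrings above.
# The service name, host and port are hypothetical and only illustrate the API.
if __name__ == '__main__':
    example_env = {
        'SERVICECATALOG_OFFLINE_MODE': '1',
        'SERVICECATALOG_SERVICE_HOST_AVAILABILITY_VARNISH': 'varnish.local',
        'SERVICECATALOG_SERVICE_PORT_AVAILABILITY_VARNISH': '8080',
    }
    catalog = ServiceCatalog(env=example_env)
    # Resolves purely from the env overrides; no Consul agent is contacted.
    print(catalog['availability-varnish'].as_uri(path='health'))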
| python |
from django.urls import path
from material.admin.sites import site
urlpatterns = [
path('', site.urls, name='base')
]
| python |
## this version of get_freq collects %AT-richness, gene expression data and SumFreq statistic on top of the data collated by get_freq.py
import pandas as pd
import numpy as np
## NOTE: All filenames are placeholders
raw = pd.read_csv("REDItools_processed_dedup-filt.genann.txt", header = 0, sep = "\t")
exp = pd.read_csv("Expression_Data/quant.sf", header=0, sep="\t")
at_richness = pd.read_csv("at_richness.txt", header=0, sep="\t")
gene_ann = pd.read_csv("Gene_Length_Data.txt", header = 0, sep = "\t")
counting = raw[["GeneID", "Frequency"]]
#print(test.head(5))
counting["NumLoci"] = 1
counting = counting.groupby("GeneID", as_index = False).sum()
counting = counting[counting["GeneID"] != "-"]
merged = pd.merge(counting, gene_ann, on = "GeneID")
merged = merged[["GeneID", "Frequency", "NumLoci", "Length", "TranscriptID"]]
merged["AvgFreq"] = merged["Frequency"]/merged["NumLoci"]
exp_merged = pd.merge(merged, exp, left_on="TranscriptID", right_on="Name")
exp_mergedClean = exp_merged[["GeneID", "Frequency", "NumLoci", "Length_x", "TranscriptID", "AvgFreq", "Name", "TPM", "NumReads"]]
exp_mergedClean.rename(columns = {"Frequency" : "SumFreq"}, inplace=True)
final_merged = pd.merge(exp_mergedClean, at_richness, on="TranscriptID")
final_merged = final_merged[["GeneID", "SumFreq", "NumLoci", "Length_x", "TranscriptID", "AvgFreq", "TPM", "NumReads", "%AT_Richness"]]
final_merged["SumFreq"] = final_merged["SumFreq"].round(decimals = 3)
final_merged["AvgFreq"] = final_merged["AvgFreq"].round(decimals = 3)
final_merged["%AT_Richness"] = final_merged["%AT_Richness"].round(decimals = 3)
final_merged["TPM"] = final_merged["TPM"].round(decimals = 3)
final_merged.rename(columns = {"Length_x" : "Length"}, inplace=True)
#print(final_merged.head(5))
final_merged.to_csv("Sample_getFreq.txt", sep = "\t", header = True, index = False)
| python |
"""
Pipeline code for training and evaluating the sentiment classifier.
We use the DeepMoji architecture here, see https://github.com/bfelbo/DeepMoji for details.
"""
import re
import codecs
import random
import numpy as np
import sys
import json
import argparse
import pandas as pd
import glob, os
import matplotlib.pylab as plt
sys.path.append("DeepMoji/deepmoji/")
from sentence_tokenizer import SentenceTokenizer
from model_def import deepmoji_architecture, load_specific_weights
from finetuning import load_benchmark, finetune
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.model_selection import train_test_split
MAX_LEN = 150
# def load_data(filename):
# f = codecs.open(filename, "r", "utf-8")
# data_pair = []
# for line in f:
# line = line.strip().split("\t")
# line = line.strip().split(",")
# data_pair.append((line[0], line[1]))
# return data_pair
def load_data(filename):
df = pd.read_csv(filename, sep="\t")
data_pair = []
for index, row in df.iterrows():
data_pair.append((row[0], row[1], row[2]))
return data_pair
def prepare_5fold(data_pair):
sind = 0
eind = 0
random.shuffle(data_pair)
fold_size = int(len(data_pair) / 5)
for fold in range(0, 5):
sind = eind
eind = sind + fold_size
train_pair = data_pair[0:sind] + data_pair[eind:len(data_pair)]
test_pair = data_pair[sind:eind]
yield (train_pair, test_pair)
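# Usage sketch for the generator above (kept for reference; the driver code
# below uses the pre-defined folds from the consolidated Excel sheet instead):
#   for train_pair, test_pair in prepare_5fold(data_pair):
#       ...  # train on train_pair, evaluate on test_pair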
def get_train_test_data(infile, dataset, fold):
    df_all = pd.read_excel(infile, sheet_name="Sheet1", usecols="S, AF, T, AX",
names=['dataset', 'oracle', 'text', 'id'])
# df_all.insert(loc=0, column="id", value=df_all.index + 1)
# df_all['id'] = df_all.index
df_all = df_all[['id', 'text', 'oracle', 'dataset']]
# print("length of all datasets %d" % len(df_all))
df_all.loc[df_all.oracle == 'o', 'oracle'] = '0'
df_all.loc[df_all.oracle == 'n', 'oracle'] = '-1'
df_all.loc[df_all.oracle == 'p', 'oracle'] = '1'
# print(df_all.columns)
dataset_df = df_all[df_all['dataset'].astype(str).str.lower().str.contains(dataset)]
# print("lenght of the dataset %s is : %d"% (dataset, len(dataset_df)))
dataset_test = dataset + "_test_" + str(fold)
if(dataset == "datasetlinjira"):
dataset_test = dataset + "_cleaned_test_" + str(fold)
test_df = dataset_df[dataset_df['dataset'].str.lower() == dataset_test]
test_ids = test_df['id'].tolist()
train_df = dataset_df[~dataset_df['id'].isin(test_ids)]
train_df = train_df.drop('dataset', axis = 1) # 0 means rows 1 means column
test_df = test_df.drop('dataset', axis = 1) # 0 means rows 1 means column
print("len of test_df %d and len of train_df %d"%(len(test_df), len(train_df)))
assert len(train_df) + len(test_df) == len(dataset_df)
train_pair = []
test_pair = []
for index, row in train_df.iterrows():
train_pair.append((row['id'], row['text'], row['oracle']))
for index, row in test_df.iterrows():
test_pair.append((row['id'], row['text'], row['oracle']))
# dataset_dir = "/home/mdabdullahal.alamin/alamin/sentiment/bert/dataset/"
# train_df.to_csv( dataset_dir + "train.tsv", sep='\t', index=False, header = None)
# test_df.to_csv( dataset_dir + "test.tsv", sep='\t', index=False, header = None)
return train_pair, test_pair
def get_train_test(infile, dataset, fold):
    train_pair, test_pair = get_train_test_data(infile=infile, dataset=dataset, fold=fold)
train_id = [p[0] for p in train_pair]
train_text = [str(p[1]) for p in train_pair]
train_label = [str(p[2]) for p in train_pair]
test_id = [p[0] for p in test_pair]
test_text = [str(p[1]) for p in test_pair]
test_label = [str(p[2]) for p in test_pair]
return train_id, train_text, train_label, test_id, test_text, test_label
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--model", type=str, required=True, choices=["SEntiMoji", "SEntiMoji-T", "SEntiMoji-G"], help="name of pretrained representation model")
parser.add_argument("--task", type=str.lower, required=True, choices=["sentiment", "emotion"], help="specify task (sentiment or emotion)")
parser.add_argument("--benchmark_dataset_name", type=str, required=False, choices=["Jira", "StackOverflow", "CodeReview", "JavaLib"], help="name of benchmark dataset")
parser.add_argument("--emotion_type", type=str.lower, required=False, default=None, choices=["anger", "love", "deva", "joy", "sad"], help="specify emotion dataset")
parser.add_argument("--use_own_dataset", action='store_true', help="whether use your own dataset or not")
parser.add_argument("--own_dataset_dir", type=str, required=False, default=None, help="directory of your train data file")
parser.add_argument("--own_dataset_file", type=str, required=False, default=None, help="file name of your train data file")
parser.add_argument("--sentisead", action='store_true', help="This will load code to run sentisead")
args = parser.parse_args()
print("args:")
d = args.__dict__
for key,value in d.items():
print("%s = %s"%(key,value))
# parse arguments
model_path = "../../model/representation_model/model_%s.hdf5" % args.model
vocab_path = "vocabulary/vocabulary_%s.json" % args.model
out_dir = "../../output/out/"
base_dir = "/home/mdabdullahal.alamin/alamin/sentiment/sentimoji"
# load vocabulary
with open(vocab_path, "r") as f_vocab:
vocabulary = json.load(f_vocab)
try:
# use provided dataset
if not args.use_own_dataset:
if args.benchmark_dataset_name is None:
raise ValueError("should provide benchmark dataset name")
if args.task == "sentiment":
# data_path = "../../data/benchmark_dataset/sentiment/%s.txt" % args.benchmark_dataset_name
data_path = "../../data/benchmark_dataset/sentiment/%s.tsv" % args.benchmark_dataset_name
label2index_path = "label2index/sentiment/label2index_%s.json" % args.benchmark_dataset_name
else:
trans_dict = {"Jira" : "JIRA", "StackOverflow" : "SO"}
if args.benchmark_dataset_name not in trans_dict:
raise ValueError("invalid dataset name for emotion task")
data_file_name = "%s_%s" % (trans_dict[args.benchmark_dataset_name ], args.emotion_type.upper())
data_path = "../../data/benchmark_dataset/emotion/%s/%s.txt" % (args.benchmark_dataset_name , data_file_name)
if args.emotion_type == 'deva':
if args.benchmark_dataset_name != "Jira":
raise ValueError("invalide dataset name for deva, requires Jira")
label2index_path = "label2index/emotion/label2index_5class.json"
else:
label2index_path = "label2index/emotion/label2index_2class.json"
# load data and label2index file
data_pair = load_data(data_path)
with open(label2index_path, "r") as f_label:
label2index = json.load(f_label)
index2label = {i: l for l, i in label2index.items()}
        elif args.sentisead:
print("=============== We are going to train SentiMoji against Sentisead dataset ==============")
label2index = {"0": 0, "1": 1, "-1": 2}
index2label = {i: l for l, i in label2index.items()}
# prepare your own data
else:
if args.own_dataset_dir is None or args.own_dataset_file is None:
raise ValueError("should specify your own dataset directory and filename")
# load data
data_path = "{}/{}".format(args.own_dataset_dir, args.own_dataset_file)
data_pair = load_data(data_path)
# generate label2index file
labels = set([pair[1] for pair in data_pair])
label2index = {}
for label in labels:
label2index[label] = len(label2index)
index2label = {i: l for l, i in label2index.items()}
label2index_path = "{}/{}".format(args.own_dataset_dir, "label2index.json")
with open(label2index_path, 'w') as f:
json.dump(label2index, f)
except RuntimeError as e:
print("Error:", repr(e))
# split 5 fold
# data_5fold = prepare_5fold(data_pair)
# sentence tokenizer (MAXLEN means the max length of input text)
st = SentenceTokenizer(vocabulary, MAX_LEN)
fold = 0
# print(label2index)
# 5 fold
# dataset = dataset.lower()
input_file = os.path.join(base_dir, "data", "Disa_ResultsConsolidatedWithEnsembleAssessment.xlsx")
datasets = ["DatasetLinJIRA", "BenchmarkUddinSO", "DatasetLinAppReviews",
"DatasetLinSO", "DatasetSenti4SDSO", "OrtuJIRA"]
# datasets = [ "OrtuJIRA"]
# dataset = "OrtuJIRA"
# model
# model = deepmoji_architecture(nb_classes=nb_classes,
# nb_tokens=nb_tokens,
# maxlen=MAX_LEN, embed_dropout_rate=0.25, final_dropout_rate=0.5, embed_l2=1E-6)
# # model.summary()
# # load pretrained representation model
# load_specific_weights(model, model_path, nb_tokens, MAX_LEN,
# exclude_names=["softmax"])
for dataset in datasets:
dataset = dataset.lower()
for fold in range(10):
# for item in data_5fold:
# prepare training, validation, testing set
# train_pair, test_pair = get_train_test_data(infile=input_file, dataset = dataset, fold=fold)
train_id, train_text, train_label, test_id, test_text, test_label = get_train_test(infile=input_file, dataset = dataset, fold=fold)
# print(type(train_text[0]))
train_X, _, _ = st.tokenize_sentences(train_text)
test_X, _, _ = st.tokenize_sentences(test_text)
train_y = np.array([label2index[l] for l in train_label])
test_y = np.array([label2index[l] for l in test_label])
nb_classes = len(label2index)
nb_tokens = len(vocabulary)
# use 20% of the training set for validation
train_X, val_X, train_y, val_y = train_test_split(train_X, train_y,
test_size=0.2, random_state=0)
# # model
model = deepmoji_architecture(nb_classes=nb_classes,
nb_tokens=nb_tokens,
maxlen=MAX_LEN, embed_dropout_rate=0.25, final_dropout_rate=0.5, embed_l2=1E-6)
# # model.summary()
# # load pretrained representation model
load_specific_weights(model, model_path, nb_tokens, MAX_LEN,
exclude_names=["softmax"])
#
# # train model
model, acc = finetune(model, [train_X, val_X, test_X], [train_y, val_y, test_y], nb_classes, 100,
method="chain-thaw", verbose=2, nb_epochs=1)
pred_y_prob = model.predict(test_X)
if nb_classes == 2:
pred_y = [0 if p < 0.5 else 1 for p in pred_y_prob]
else:
pred_y = np.argmax(pred_y_prob, axis=1)
# evaluation
print("*****************************************")
print("Fold %d" % fold)
accuracy = accuracy_score(test_y, pred_y)
print("Accuracy: %.3f" % accuracy)
# precision = precision_score(test_y, pred_y, average=None)
# recall = recall_score(test_y, pred_y, average=None)
# f1score = f1_score(test_y, pred_y, average=None)
labels = list(set(test_y))
precision = precision_score(test_y, pred_y, average=None, labels = labels)
recall = recall_score(test_y, pred_y, average=None, labels = labels)
f1score = f1_score(test_y, pred_y, average=None, labels = labels)
for index in range(0, len(labels)):
print("label: %s" % index2label[index])
print("Precision: %.3f, Recall: %.3f, F1 score: %.3f" % (precision[index], recall[index], f1score[index]))
print("*****************************************")
# save predict result
if not args.use_own_dataset:
if args.task == "sentiment":
save_name = "result_%s_%s_fold%d.txt" % (args.model, args.benchmark_dataset_name, fold)
elif args.task == "emotion":
save_name = "result_%s_%s_%s_fold%d.txt" % (args.model, args.benchmark_dataset_name, args.emotion_type, fold)
elif args.sentisead:
save_name = dataset +"_result_fold%d.txt" % fold
# os.path.join(dataset, save_name)
else:
save_name = "result_fold%d.txt" % fold
save_name = os.path.join(out_dir, save_name)
# if(not os.path.exists(save_name)):
# os.makedirs(save_name)
with open(save_name, "w", encoding="utf-8") as f:
for i in range(0, len(test_text)):
f.write("%s\t%s\t%s\t%s\r\n" % (test_id[i], test_text[i], index2label[pred_y[i]], test_label[i]))
print("#%d test results has been saved to: %s" % (len(test_text), save_name))
fold += 1
output_dir = "../../model/trained_model" + str(fold) + ".h5"
if args.sentisead:
output_dir = "../../model/sentisead/"
output_dir = os.path.join(output_dir, dataset)
if(not os.path.exists(output_dir)):
print("creating model file %s" % output_dir)
os.makedirs(output_dir)
output_dir = os.path.join(output_dir, "trained_model" + str(fold) + ".h5" )
# model.save_weights(output_dir)
# print("Trained Models output has been saved to " + output_dir)
# if(fold == 2):
# break # break
| python |
# SPDX-License-Identifier: MIT
# Copyright (C) 2021 Max Bachmann
from rapidfuzz.cpp_process import extract, extractOne, extract_iter
try:
from rapidfuzz.cpp_process_cdist import cdist
except ImportError:
def cdist(*args, **kwargs):
raise NotImplementedError("implementation requires numpy to be installed")
| python |
from django.shortcuts import render, redirect
from hujan_ui import maas
from hujan_ui.maas.utils import MAAS
from .forms import VlanForm, VlanEditForm
from django.utils.translation import ugettext_lazy as _
import sweetify
from hujan_ui.maas.exceptions import MAASError
def index(request):
try:
vlans = maas.get_vlans()
except (MAASError, ConnectionError, TimeoutError) as e:
vlans = None
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
context = {
'title': 'Vlan List',
'vlans': vlans
}
return render(request, 'maas/vlans/index.html', context)
def add(request):
form = VlanForm(request.POST or None)
if form.is_valid():
try:
m = MAAS()
data = form.clean()
fabId = data['fabric_id']
resp = m.post(f'fabrics/{fabId}/vlans/', data=data)
if resp.status_code in m.ok:
                sweetify.success(request, _('Vlan Added Successfully'), timer=3000)
return redirect('maas:subnets:index')
sweetify.warning(request, _(resp.text), timer=5000)
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
context = {
'title': _('Add Vlan'),
'form': form
}
return render(request, 'maas/vlans/add.html', context)
def edit(request, vlan_id):
try:
vlan = maas.get_vlans(vlan_id)
form = VlanEditForm(request.POST or None, initial=vlan)
if form.is_valid():
m = MAAS()
data = form.clean()
fabId = data['fabric_id']
vid = data['vid']
resp = m.put(f'fabrics/{fabId}/vlans/{vid}/',data=data)
if resp.status_code in m.ok:
                sweetify.success(request, _('Vlan Updated Successfully'), timer=3000)
return redirect('maas:subnets:index')
sweetify.warning(request, _(resp.text), timer=5000)
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
context = {
'title': 'Edit Vlan',
'form': form
}
return render(request, 'maas/vlans/add.html', context)
def detail(request, vlan_id):
try:
vlan = maas.get_vlans(vlan_id)
if vlan:
context = {
'title': _('Detail Vlan - {}'.format(vlan['fabric'])),
'vlan': vlan
}
return render(request, 'maas/vlans/detail.html', context)
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
return redirect('maas:vlans:index')
def delete(request, vlan_id):
try:
vlan = maas.get_vlans(vlan_id)
fid = vlan['fabric_id']
vid = vlan['vid']
m = MAAS()
resp = m.delete(f'fabrics/{fid}/vlans/{vid}/')
if resp.status_code in m.ok:
            sweetify.success(request, _('Vlan Deleted Successfully'), timer=5000)
return redirect('maas:subnets:index')
return redirect('maas:subnets:index')
except (MAASError, ConnectionError, TimeoutError) as e:
sweetify.sweetalert(request, 'Warning', icon='error', text=str(e), button='Ok', timer=5000)
| python |
# Generated by Django 2.1.7 on 2019-03-01 13:53
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('news', '0002_auto_20161125_0846'),
]
operations = [
migrations.AlterModelOptions(
name='news',
options={'ordering': ('pub_date',), 'verbose_name_plural': 'news'},
),
]
| python |
import datetime
import re
import socket
from jwt.exceptions import ExpiredSignatureError, InvalidSignatureError
from mongoengine.errors import (
DoesNotExist,
NotUniqueError,
ValidationError as MongoValidationError,
)
from pymongo.errors import DocumentTooLarge
from thriftpy2.thrift import TException
from tornado.web import HTTPError, RequestHandler
import bg_utils
import bg_utils.mongo.models
import brew_view
from brew_view.authorization import AuthMixin, coalesce_permissions
from brew_view.metrics import http_api_latency_total, request_latency
from brewtils.errors import (
ConflictError,
ModelError,
ModelValidationError,
RequestForbidden,
RequestPublishException,
WaitExceededError,
AuthorizationRequired,
)
from brewtils.models import Event
class BaseHandler(AuthMixin, RequestHandler):
"""Base handler from which all handlers inherit"""
MONGO_ID_PATTERN = r".*/([0-9a-f]{24}).*"
REFRESH_COOKIE_NAME = "refresh_id"
REFRESH_COOKIE_EXP = 14
charset_re = re.compile(r"charset=(.*)$")
error_map = {
MongoValidationError: {"status_code": 400},
ModelError: {"status_code": 400},
bg_utils.bg_thrift.InvalidSystem: {"status_code": 400},
ExpiredSignatureError: {"status_code": 401},
AuthorizationRequired: {"status_code": 401},
RequestForbidden: {"status_code": 403},
InvalidSignatureError: {"status_code": 403},
DoesNotExist: {"status_code": 404, "message": "Resource does not exist"},
WaitExceededError: {"status_code": 408, "message": "Max wait time exceeded"},
ConflictError: {"status_code": 409},
NotUniqueError: {"status_code": 409, "message": "Resource already exists"},
DocumentTooLarge: {"status_code": 413, "message": "Resource too large"},
RequestPublishException: {"status_code": 502},
bg_utils.bg_thrift.BaseException: {
"status_code": 502,
"message": "An error occurred " "on the backend",
},
TException: {"status_code": 503, "message": "Could not connect to Bartender"},
socket.timeout: {"status_code": 504, "message": "Backend request timed out"},
}
def get_refresh_id_from_cookie(self):
token_id = self.get_secure_cookie(self.REFRESH_COOKIE_NAME)
if token_id:
return token_id.decode()
return None
def _get_user_from_cookie(self):
refresh_id = self.get_refresh_id_from_cookie()
if not refresh_id:
return None
token = bg_utils.mongo.models.RefreshToken.objects.get(id=refresh_id)
now = datetime.datetime.utcnow()
if not token or token.expires < now:
return None
principal = token.get_principal()
if not principal:
return None
_, principal.permissions = coalesce_permissions(principal.roles)
token.expires = now + datetime.timedelta(days=self.REFRESH_COOKIE_EXP)
token.save()
return principal
def get_current_user(self):
user = AuthMixin.get_current_user(self)
if not user or user == brew_view.anonymous_principal:
cookie_user = self._get_user_from_cookie()
if cookie_user:
user = cookie_user
return user
def set_default_headers(self):
"""Headers set here will be applied to all responses"""
self.set_header("BG-Version", brew_view.__version__)
if brew_view.config.cors_enabled:
self.set_header("Access-Control-Allow-Origin", "*")
self.set_header("Access-Control-Allow-Headers", "Content-Type")
self.set_header(
"Access-Control-Allow-Methods", "GET, POST, PATCH, DELETE, OPTIONS"
)
@property
def prometheus_endpoint(self):
"""Removes Mongo ID from endpoint."""
to_return = self.request.path.rstrip("/")
for mongo_id in re.findall(self.MONGO_ID_PATTERN, self.request.path):
to_return = to_return.replace(mongo_id, "<ID>")
return to_return
def prepare(self):
"""Called before each verb handler"""
# Used for calculating request handling duration
self.request.created_time = datetime.datetime.utcnow()
# This is used for sending event notifications
self.request.event = Event()
self.request.event_extras = {}
content_type = self.request.headers.get("content-type", "")
if self.request.method.upper() in ["POST", "PATCH"] and content_type:
content_type = content_type.split(";")
self.request.mime_type = content_type[0]
if self.request.mime_type not in [
"application/json",
"application/x-www-form-urlencoded",
]:
raise ModelValidationError("Unsupported or missing content-type header")
# Attempt to parse out the charset and decode the body, default to utf-8
charset = "utf-8"
if len(content_type) > 1:
search_result = self.charset_re.search(content_type[1])
if search_result:
charset = search_result.group(1)
self.request.charset = charset
self.request.decoded_body = self.request.body.decode(charset)
def on_finish(self):
"""Called after a handler completes processing"""
# This is gross, but in some cases we have to do these in the handler
if getattr(self.request, "publish_metrics", True):
http_api_latency_total.labels(
method=self.request.method.upper(),
route=self.prometheus_endpoint,
status=self.get_status(),
).observe(request_latency(self.request.created_time))
if self.request.event.name and getattr(self.request, "publish_event", True):
brew_view.event_publishers.publish_event(
self.request.event, **self.request.event_extras
)
def options(self, *args, **kwargs):
if brew_view.config.cors_enabled:
self.set_status(204)
else:
raise HTTPError(403, reason="CORS is disabled")
def write_error(self, status_code, **kwargs):
"""Transform an exception into a response.
This protects controllers from having to write a lot of the same code over and
over and over. Controllers can, of course, overwrite error handlers and return
their own responses if necessary, but generally, this is where error handling
should occur.
When an exception is handled this function makes two passes through error_map.
The first pass is to see if the exception type can be matched exactly. If there
is no exact type match the second pass will attempt to match using isinstance.
If a message is provided in the error_map it takes precedence over the
exception message.
***NOTE*** Nontrivial inheritance trees will almost definitely break. This is a
BEST EFFORT using a simple isinstance check on an unordered data structure. So
if an exception class has both a parent and a grandparent in the error_map
there is no guarantee about which message / status code will be chosen. The
same applies to exceptions that use multiple inheritance.
***LOGGING***
An exception raised in a controller method will generate logging to the
tornado.application logger that includes a stacktrace. That logging occurs
before this method is invoked. The result of this method will generate logging
to the tornado.access logger as usual. So there is no need to do additional
logging here as the 'real' exception will already have been logged.
:param status_code: a status_code that will be used if no match is found in the
error map
:return: None
"""
code = 0
message = ""
if "exc_info" in kwargs:
typ3 = kwargs["exc_info"][0]
e = kwargs["exc_info"][1]
error_dict = None
if typ3 in self.error_map.keys():
error_dict = self.error_map[typ3]
else:
for error_type in self.error_map.keys():
if isinstance(e, error_type):
error_dict = self.error_map[error_type]
break
if error_dict:
code = error_dict.get("status_code", 500)
message = error_dict.get("message", str(e))
elif brew_view.config.debug_mode:
message = str(e)
code = code or status_code or 500
message = message or (
"Encountered unknown exception. Please check "
"with your System Administrator."
)
self.request.event.error = True
self.request.event.payload = {"message": message}
self.set_header("Content-Type", "application/json; charset=UTF-8")
self.set_status(code)
self.finish({"message": message})
| python |
from data_interface import Dataset, Data_Interface
from utils import functions as ufunc
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
import os
import rasterio as rio
import rasterio.mask as riom
import shapely
from IPython import embed
import sys
sys.path.append('/home/seba/Projects/swisssmartfarming')
rgb_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/rgb/'
'20200626_flight2_blackfly_rgb_transparent_mosaic_group1.tif')
ms_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/nir/'
'20200626_flight2_photonfocus_nir_transparent_reflectance_group1.tif')
masks_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/shapes/'
'trees.shp')
boundary_path = ('/media/seba/Samsung_2TB/forest-project/qgis/gubler/shapes/'
'boundary.shp')
dataset = rio.open(rgb_path)
shapefile = gpd.read_file(masks_path)
shapes = shapefile.geometry
# (img_mask, transf_mask) = riom.mask(dataset, shapes)
# img_mask = np.swapaxes(img_mask, 0, 2)
# plt.imshow(img_mask[:,:,0:3])
boundary = gpd.read_file(boundary_path)
tree_masks = gpd.read_file(masks_path)
dataset = Dataset(
name='gubler',
date='20200626',
rgb_path=rgb_path,
ms_path=ms_path,
mask_shapefile=tree_masks,
outer_shapefile=boundary,
rgb_bands_to_read=[0, 1, 2],
ms_bands_to_read=None,
)
dataset = [dataset]
di_train = Data_Interface(dataset, {'tree': 1, 'car': 2})
img, msk = di_train.get_pair()
# plt.imshow(msk)
save_path = '/media/seba/Samsung_2TB/forest-project/qgis/gubler/train'
di_train.save(save_path=save_path)
# x1003_path = '/media/seba/Samsung_2TB/forest-project/qgis/gubler/train/masks/x1003_y1009.png'
# x1003 = ufunc.read_img2array(x1003_path)
| python |
#!/usr/bin/env python3
# Copyright (c) 2022, the Dart project authors. Please see the AUTHORS file
# for details. All rights reserved. Use of this source code is governed by a
# BSD-style license that can be found in the LICENSE file.
# Invoke the `tools/generate_package_config.dart` script.
import os
import os.path
import platform
import subprocess
import sys
USE_PYTHON3 = True
def is_windows():
os_id = platform.system()
return os_id == 'Windows'
def checked_in_sdk_path():
tools_dir = os.path.dirname(os.path.realpath(__file__))
return os.path.join(tools_dir, 'sdks', 'dart-sdk')
def checked_in_sdk_executable():
name = 'dart'
if is_windows():
name = 'dart.exe'
return os.path.join(checked_in_sdk_path(), 'bin', name)
def generate_package_config():
tools_dir = os.path.dirname(os.path.realpath(__file__))
process = subprocess.run([
checked_in_sdk_executable(),
os.path.join(tools_dir, 'generate_package_config.dart')
])
return process.returncode
def Main():
sys.exit(generate_package_config())
if __name__ == '__main__':
Main()
| python |
# -*- coding: UTF-8 -*-
"""
cookie_parser.py
Copyright 2015 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import Cookie
import sys
# Cookie pickling bug is fixed in Python 2.7.9 and Python 3.4.3+
# http://bugs.python.org/issue22775
COOKIE_PICKLES_PROPERLY = (
(sys.version_info[:2] == (2, 7) and sys.version_info >= (2, 7, 9)) or
sys.version_info >= (3, 4, 3)
)
COOKIE_HEADERS = ('set-cookie', 'cookie', 'cookie2')
class SerializableSimpleCookie(Cookie.SimpleCookie):
"""
Had to sub-class in order to be able to correctly serialize cookies
https://code.djangoproject.com/ticket/15863
https://code.djangoproject.com/attachment/ticket/15863/ticket_15863.diff
"""
if not COOKIE_PICKLES_PROPERLY:
def __setitem__(self, key, value):
# Apply the fix from http://bugs.python.org/issue22775 where
# it's not fixed in Python itself
if isinstance(value, Cookie.Morsel):
# allow assignment of constructed Morsels (e.g. for pickling)
dict.__setitem__(self, key, value)
else:
super(SerializableSimpleCookie, self).__setitem__(key, value)
def parse_cookie(cookie_header_value):
"""
Parses the value of a "Set-Cookie" header into a Cookie.SimpleCookie object
:param cookie_header_value: The value of the "Set-Cookie" header
:return: A Cookie.SimpleCookie instance. Might raise exceptions if the
cookie value is not in valid format
"""
cookie_object = SerializableSimpleCookie()
# FIXME: Workaround for bug in Python's Cookie.py
#
# if type(rawdata) == type(""):
# self.__ParseString(rawdata)
#
# Should read "if isinstance(rawdata, basestring)"
cookie_header_value = cookie_header_value.encode('utf-8')
# Note to self: This line may print some chars to the console
cookie_object.load(cookie_header_value)
return cookie_object
| python |
""" The variables submodule.
This module contains symbolic representations of all ARTS workspace variables.
The variables are loaded dynamically when the module is imported, which ensures that they
are up to date with the current ARTS build.
TODO: The group names list is redundant w.r.t. group_ids.keys(). Should be removed.
Attributes:
group_names([str]): List of strings holding the groups of ARTS WSV variables.
group_ids(dict): Dictionary mapping group names to the group IDs which identify
groups in the ARTS C API.
"""
import ctypes as c
import os
import numpy as np
import re
import scipy as sp
import tempfile
from typhon.arts.workspace.api import arts_api
from typhon.arts.workspace.agendas import Agenda
from typhon.arts.xml.names import tensor_names
class WorkspaceVariable:
"""
The WorkspaceVariable represents ARTS workspace variables in a symbolic way. This
means that they are not associated with a single workspace and therefore do not have a
    unique value. Their value in a given workspace can be accessed, however, using the value()
method.
Attributes:
ws_id(int): The Index variable identifying the variable in the ARTS C API.
name(str): The name of the workspace variable.
group(str): The name of the group this variable belongs to.
description(str): The documentation of the variable as in methods.cc
"""
def __init__(self, ws_id, name, group, description, ws = None):
self.ws_id = ws_id
self.name = name
self.group = group
self.group_id = group_ids[group]
self.description = description
self.ws = ws
self.ndim = None
if self.group == "Vector":
self.ndim = 1
if self.group == "Matrix":
self.ndim = 2
m = re.match(r"^Tensor(\d)$", self.group)
if m:
self.ndim = int(m.group(1))
self.update()
def __getstate__(self):
return self.ws_id, self.name, self.group, \
self.group_id, self.description, self.ndim
def __setstate__(self, state):
self.ws_id, self.name, self.group, self.group_id, self.description,\
self.ndim = state
def __repr__(self):
s = "ARTS Workspace Variable\n\n"
s += "Name: " + self.name + "\n"
s += "Group: " + self.group + "\n\n"
s += self.description
return s
def __str__(self):
return self.__repr__()
def __setattr__(self, name, value):
if name == "value":
if self.ws is None:
raise Exception("Cannot set value of WSV without associated "
" workspace.")
else:
self.ws.__setattr__(self.name, value)
else:
super().__setattr__(name, value)
def print(self):
""" Print variable value using ARTS Print(...) WSM.
Raises:
Exception: If the variable has no associated workspace.
"""
if (self.ws):
self.ws.Print(self, 1)
else:
raise Exception("Can't print variable without associated ARTS workspace.")
@staticmethod
def get_variable_name(i):
"""
Lookup the name of a variable given its workspace index.
Args:
i(int): The index of the workspace variable.
Returns:
str: The name of the workspace variable.
"""
s = arts_api.get_variable(i)
name = s.name.decode("utf8")
return name
@staticmethod
def get_group_id(value):
""" This static method is used to determine how (and if) a given python variable can
        be mapped to an ARTS workspace variable group. The returned group id is required to
add the variable to a workspace.
Args:
value(any): The python variable to map to the ARTS group.
Returns:
int: The index of the group which can be used to represent the python variable
or None if the type is not supported.
"""
if isinstance(value, WorkspaceVariable):
return group_ids[value.group]
elif isinstance(value, Agenda):
return group_ids["Agenda"]
elif isinstance(value, np.long):
return group_ids["Index"]
elif isinstance(value, (float, np.double)):
return group_ids["Numeric"]
elif isinstance(value, str):
return group_ids["String"]
elif isinstance(value, np.ndarray):
if value.ndim == 1:
return group_ids["Vector"]
elif value.ndim == 2:
return group_ids["Matrix"]
elif value.ndim == 3:
return group_ids["Tensor3"]
elif value.ndim == 4:
return group_ids["Tensor4"]
elif value.ndim == 5:
return group_ids["Tensor5"]
elif value.ndim == 6:
return group_ids["Tensor6"]
elif value.ndim == 7:
return group_ids["Tensor7"]
else:
raise ValueError(
"Numpy arrays are only supported up to 7 dimensions."
)
elif sp.sparse.issparse(value):
return group_ids["Sparse"]
elif type(value) == list:
group_name = ""
nested_value = value
while type(nested_value) == list and len(nested_value) > 0:
nested_value = nested_value[0]
group_name += "ArrayOf"
if type(nested_value) == list and len(nested_value) == 0:
raise ValueError("Empty lists are currently not handled.")
else:
typename = type(nested_value).__name__
if isinstance(nested_value, str):
group_name += "String"
return group_ids[group_name]
elif isinstance(nested_value, np.long):
group_name += "Index"
return group_ids[group_name]
elif isinstance(nested_value, (float, np.double)):
raise ValueError("Vectors, Matrices or Tensors should be"
" passed as numpy.ndarray and not as"
" lists.")
elif hasattr(nested_value, 'write_xml') and typename in group_names:
return group_ids[group_name + typename]
elif isinstance(nested_value, np.ndarray):
group_name += tensor_names[len(nested_value.shape) - 1]
return group_ids[group_name]
else:
raise ValueError(
f"Nested array with internal type "
f"{type(nested_value)} not supported.")
elif hasattr(value, 'write_xml') and type(value).__name__ in group_names:
return group_ids[type(value).__name__]
else:
raise ValueError(f"Type {type(value)} currently not supported.")
@classmethod
def convert(cls, group, value):
""" Tries to convert a given python object to an object of the python class
representing the given ARTS WSV group.
Args:
group(string): The name of an ARTS WSV group.
value(any): The object to convert.
Returns:
(any): The converted object.
"""
if (group == "Index"):
return int(value)
if (group == "String"):
return value
if (group == "ArrayOfString"):
return [str(i) for i in value]
if (group == "Numeric"):
return np.float64(value)
if (group == "Vector"):
return np.array(value, dtype=np.float64, order='C', ndmin=1)
if (group == "Matrix"):
return np.array(value, dtype=np.float64, order='C', ndmin=2)
if (group == "Sparse"):
return sp.sparse.coo_matrix(value)
if (group[:6] == "Tensor"):
dim = int(group[6])
return np.array(value, dtype=np.float64, order='C', ndmin=dim)
if group.startswith("ArrayOf"):
subgroup = group[7:]
if hasattr(value, "__iter__"):
return [cls.convert(subgroup, v) for v in value]
else:
return [cls.convert(subgroup, value)]
return None
@staticmethod
def iter():
"""
Iterator returning a WorkspaceVariable object for each ARTS WSV available.
"""
for i in range(arts_api.get_number_of_variables()):
s = arts_api.get_variable(i)
name = s.name.decode("utf8")
description = s.description.decode("utf")
group = group_names[s.group]
yield WorkspaceVariable(i, name, group, description)
@property
def initialized(self):
ws = self.ws
if ws is None:
raise ValueError("WorkspaceVariable object needs associated"
" Workspace to determine value.")
v = arts_api.get_variable_value(ws.ptr, self.ws_id, self.group_id)
return v.initialized
@property
def value(self):
""" Return the value of the variable in a given workspace.
By default this function will check the value in the workspace associated
with the variable of in the workspace object provided as argument to the
function call. If the variable has an associated workspace the workspace
provided as argument will be ignored.
Returns:
The value of the workspace variable represented by an object of
the corresponding python types.
Raises:
Exception: If the type of the workspace variable is not supported
by the interface.
"""
from typhon.arts.types import classes as typhon_classes
ws = self.ws
if not ws:
raise ValueError("WorkspaceVariable object needs an associated Workspace to determine its value.")
v = arts_api.get_variable_value(ws.ptr, self.ws_id, self.group_id)
if not v.initialized:
raise Exception("WorkspaceVariable " + self.name + " is uninitialized.")
if self.group in typhon_classes:
cls = typhon_classes[self.group]
if hasattr(cls, "__from_variable_value_struct__"):
return cls.__from_variable_value_struct__(v)
if self.group == "Index":
return c.cast(v.ptr, c.POINTER(c.c_long))[0]
elif self.group == "Numeric":
return c.cast(v.ptr, c.POINTER(c.c_double))[0]
elif self.group == "String":
return (c.cast(v.ptr, c.c_char_p)).value.decode("utf8")
elif self.group == "ArrayOfIndex":
return [c.cast(v.ptr, c.POINTER(c.c_long))[i]
for i in range(v.dimensions[0])]
elif self.group == "Sparse":
m = v.dimensions[0]
n = v.dimensions[1]
nnz = v.dimensions[2]
if nnz == 0:
return sp.sparse.csr_matrix(0)
else:
print(m, n, nnz)
data = np.ctypeslib.as_array(c.cast(v.ptr,
c.POINTER(c.c_double)),
(nnz,))
row_indices = np.ctypeslib.as_array(v.inner_ptr, (nnz,))
col_starts = np.ctypeslib.as_array(v.outer_ptr, (m + 1,))
return sp.sparse.csr_matrix((data, row_indices, col_starts),
shape=(m,n))
elif self.group == "Agenda":
return Agenda(v.ptr)
elif self.ndim:
shape = []
size = 1
for i in range(self.ndim):
shape.append(v.dimensions[i])
size *= v.dimensions[i]
if size > 0:
self.__array_interface__ = {"shape" : tuple(shape),
"typestr" : "|f8",
"data" : (v.ptr, False),
"version" : 3}
return np.asarray(self)
else:
return np.zeros(shape)
else:
try:
return self.to_typhon()
except:
raise Exception("Type of workspace variable is not supported "
+ " by the interface.")
def update(self):
""" Update data references of the object.
References to vector, matrices and tensors may change and must therefore
be updated dynamically to ensure they are consistent with the state of
the associated workspace. This method takes care of that.
"""
if self.ws is not None and self.ndim:
v = arts_api.get_variable_value(self.ws.ptr, self.ws_id, self.group_id)
shape = []
for i in range(self.ndim):
shape.append(v.dimensions[i])
self.__array_interface__ = {"shape" : tuple(shape),
"typestr" : "|f8",
"data" : (v.ptr, False),
"version" : 3}
def erase(self):
"""
Erase workspace variable from its associated workspace.
"""
if self.ws:
arts_api.erase_variable(self.ws.ptr, self.ws_id, self.group_id)
self.ws = None
def describe(self):
"""
Print the description of the variable as given in ARTS methods.cc
"""
print(self.description.format())
def to_typhon(self):
"""
Return the value of this variable as a typhon type. This function
writes the value of the variable to a temporary file and reads it
into Python using typhon load function. The purpose of this function
is to access WSV whose groups are not natively supported by the
C API.
Returns:
A typhon object with the same value as the WSV in the associated
workspace.
"""
from typhon.arts.xml import load
if not self.ws:
raise Exception("Cannot retrieve the value of a variable without "
+ " associated Workspace.")
with tempfile.TemporaryDirectory() as tmpdir:
tfile = os.path.join(tmpdir, 'wsv.xml')
self.ws.WriteXML("binary", self, tfile)
v = load(tfile)
return v
def from_typhon(self, var):
"""
Set the value of this WSV in the associated workspace to the given
typhon type. This function writes the value in ASCII format to a
temporary file and reads it into the workspace
Args:
var: The value to which this WSV should be set in the associated
workspace.
"""
from typhon.arts.xml import save
if not self.ws:
raise Exception("Cannot set the value of a variable without "
+ " associated Workspace.")
with tempfile.TemporaryDirectory() as tmpdir:
tfile = os.path.join(tmpdir, 'wsv.xml')
save(var, tfile, format='binary')
self.ws.ReadXML(self, tfile)
# Get ARTS WSV groups
group_names = [arts_api.get_group_name(i).decode("utf8")
for i in range(arts_api.get_number_of_groups())]
group_ids = dict((name, index) for (index, name) in enumerate(group_names))
workspace_variables = dict()
for v in WorkspaceVariable.iter():
globals()[v.name] = v
workspace_variables[v.name] = v
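# Illustrative usage sketch (not part of the original module): exercise the
# helpers defined above, assuming the ARTS C API and group tables loaded
# successfully. Guarded so nothing runs on import.
if __name__ == "__main__":
    # Map plain Python values to ARTS group indices via get_group_id().
    print("Numeric group id:", WorkspaceVariable.get_group_id(1.0))
    print("String group id:", WorkspaceVariable.get_group_id("abc"))
    # List a handful of workspace variables discovered from the C API.
    for wsv_name in list(workspace_variables)[:5]:
        print(wsv_name, workspace_variables[wsv_name].group)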
| python |
from collections import OrderedDict
from time import time
import unittest
try:
from django.test.runner import DiscoverRunner
except ImportError:
raise("Django 1.8 or 1.9 needs to be installed to use this test runner.")
from .tabulate import tabulate
class Bcolors:
MAGENTA = '\033[95m'
BLUE = '\033[1;94m'
TURQ = '\033[96m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
END = '\033[0m'
def disable(self):
self.MAGENTA = ''
self.BLUE = ''
self.TURQ = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.END = ''
def get_color(runtime, longest_test):
"""
Returns color based on test time.
Tests under .5s get GREEN
Tests higher than .5 are divided into three segments
slow, painful, agonizing
Yellow, Magenta, Red
"""
if runtime < .5:
return Bcolors.GREEN
segment = ((longest_test - .5) / 3)
runtime -= .5
if runtime <= segment:
return Bcolors.YELLOW
elif runtime <= segment * 2:
return Bcolors.MAGENTA
return Bcolors.RED
class BenchTextTestResult(unittest.TextTestResult):
"""Overrides TextTestRunner to add benchmartk tool"""
def __init__(self, *args, **kwargs):
self.benchmark = kwargs.pop('benchmark')
super(BenchTextTestResult, self).__init__(*args, **kwargs)
self.bench_dict = OrderedDict()
def startTestRun(self):
pass
def write_totals(self, table, class_name, totals):
table.append({
"Test": "---------------------------",
"Runtime": "-------",
"Percent": "-------",
})
table.append({
"Test": "{}{}{}".format(Bcolors.TURQ, class_name, Bcolors.END),
"Runtime": "{0}{1:.5f}{2}".format(
Bcolors.TURQ, totals['runtime'], Bcolors.END
),
"Percent": "{}{:>7.2f}%{}".format(
Bcolors.TURQ, totals['percent'], Bcolors.END)
})
def stopTestRun(self):
if not self.benchmark:
return
total_run_time = 0
longest_test = 0
# Loop through tests to get total run time
for class_name, runtimes in self.bench_dict.items():
runtimes['runtime'] = runtimes['stop'] - runtimes['start']
total_run_time += runtimes['runtime']
longest_test = max(longest_test, runtimes['runtime'])
table = list()
totals = {'runtime': 0, 'percent': 0}
class_name = ''
for full_path, runtimes in self.bench_dict.items():
runtime = runtimes['runtime']
color = get_color(runtime, longest_test)
# Write header/divider for new class
if class_name != runtimes['class_name']:
if totals['runtime'] > 0:
self.write_totals(table, class_name, totals)
totals = {'runtime': 0, 'percent': 0}
class_name = runtimes['class_name']
module = runtimes['module']
table.append({})
table.append({"Test": "{}{}.{}{}".format(
Bcolors.BLUE, module, class_name, Bcolors.END
)})
percent = runtime / total_run_time * 100
totals['runtime'] += runtime
totals['percent'] += percent
table.append({
"Test": ": " + runtimes['test_name'],
"Runtime": "{0}{1:.5f}{2}".format(
color, runtime, Bcolors.END
),
"Percent": "{:>7.2f}%".format(percent)
})
self.write_totals(table, class_name, totals)
self.stream.writeln()
self.stream.writeln()
self.stream.writeln(tabulate(
table,
headers="keys",
aligns=('left', 'right', 'right')
))
def parseTest(self, test):
module = test.__module__
class_name = test.__class__.__name__
test_name = test._testMethodName
uniq = "{}.{}.{}".format(module, class_name, test_name)
return uniq, module, class_name, test_name
def startTest(self, test):
# Run at start of each test method
uniq, module, class_name, test_name = self.parseTest(test)
self.bench_dict[uniq] = {
'start': time(),
'test_name': test_name,
'class_name': class_name,
'module': module,
}
super(BenchTextTestResult, self).startTest(test)
def stopTest(self, test):
uniq, module, class_name, test_name = self.parseTest(test)
super(BenchTextTestResult, self).stopTest(test)
self.bench_dict[uniq]['stop'] = time()
class BenchTextTestRunner(unittest.TextTestRunner):
"""Overrides TextTestRunner to add benchmartk tool"""
resultclass = BenchTextTestResult
def __init__(self, *args, **kwargs):
self.benchmark = kwargs.pop('benchmark')
super(BenchTextTestRunner, self).__init__(*args, **kwargs)
def _makeResult(self):
return self.resultclass(
self.stream, self.descriptions, self.verbosity,
benchmark=self.benchmark
)
class BenchRunner(DiscoverRunner):
test_runner = BenchTextTestRunner
def __init__(self, *args, **kwargs):
super(BenchRunner, self).__init__(*args, **kwargs)
self.benchmark = kwargs.get('benchmark', False)
@classmethod
def add_arguments(cls, parser):
super(BenchRunner, cls).add_arguments(parser)
parser.add_argument('-b', '--benchmark',
action='store_true', dest='benchmark', default=False,
help='Record and display a benchmark of the tests that were run.')
def run_suite(self, suite, **kwargs):
resultclass = self.get_resultclass()
return self.test_runner(
verbosity=self.verbosity,
failfast=self.failfast,
resultclass=resultclass,
benchmark=self.benchmark,
).run(suite)
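# Hypothetical usage sketch (module path and project name are placeholders,
# not taken from this file): point Django at this runner in settings.py and
# pass --benchmark when running the test suite, e.g.
#
#   TEST_RUNNER = "myproject.benchrunner.BenchRunner"   # settings.py
#   $ python manage.py test --benchmark
#
# With the flag set, BenchTextTestResult records per-test wall-clock times in
# startTest/stopTest and prints the colorized summary table in stopTestRun.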
| python |
#!/usr/bin/env python
# Copyright (C) 2013 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import re
def cssminify(css):
rules = (
(r"\/\*.*?\*\/", ""), # delete comments
(r"\n", ""), # delete new lines
(r"\s+", " "), # change multiple spaces to one space
(r"\s?([;{},~>!])\s?", r"\1"), # delete space where it is not needed
(r":\s", ":"), # delete spaces after colons, but not before. E.g. do not break selectors "a :focus", "b :matches(...)", "c :not(...)" where the leading space is significant
(r"\s?([-+])(?:\s(?![0-9(])(?!var))", r"\1"), # delete whitespace around + and - when not followed by a number, paren, or var(). E.g. strip for selector "a + b" but not "calc(a + b)" which requires spaces.
(r";}", "}") # change ';}' to '}' because the semicolon is not needed
)
css = css.replace("\r\n", "\n")
for rule in rules:
css = re.compile(rule[0], re.MULTILINE | re.UNICODE | re.DOTALL).sub(rule[1], css)
return css
if __name__ == "__main__":
import sys
if sys.version_info[0] == 3 and sys.stdin.encoding != 'UTF-8':
import io
sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='UTF-8')
sys.stdout.write(cssminify(sys.stdin.read()))
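# Worked example (illustrative input, traced through the rules above):
#   cssminify("a { color: red; }\n/* note */ b { top: 0 ; }")
# returns
#   "a{color:red}b{top:0}"
# i.e. comments and newlines are removed, spaces around ";{}," are stripped,
# spaces after ":" are dropped, and the trailing ";}" collapses to "}".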
| python |
import click
import requests
from bs4 import BeautifulSoup
from ....utils.logging import logger
url = "https://www.codechef.com"
def login_web(self):
global codechef_session
codechef_session = self.session
username = click.prompt('username')
password = click.prompt('password', hide_input=True)
login(username, password)
session_data = {
'cookies': codechef_session.cookies
}
logger.debug('returning session data\n %s' % session_data)
return session_data
def login(username, password):
login_url = url+"/"
login_page = codechef_session.get(login_url)
form_feilds = BeautifulSoup(login_page.text, "html.parser").findAll("input")
form_data = {"pass": password,
"name": username}
for i in form_feilds:
attrs = i.attrs
if "name" in attrs:
if "value" in attrs and attrs["value"]:
form_data[attrs["name"]] = attrs["value"]
try:
logged_page = codechef_session.post(login_url, form_data)
except BaseException:
raise
else:
# logout all other sessions as codechef doesn't allow multiple sessions
if("session/limit" in logged_page.url):
click.confirm("Session limit exceeded\n" +
"Do you want to logout of other sessions",
default=True, abort=True)
logger.info("logging you out of all other sessions\n" +
"this may take some time...")
if "session/limit" in logged_page.url:
logout_other_session()
# codechef doesn't check cookies and simply reports the latest session
# as the current one. A cleaner fix would be to modify
# logout_other_session to log out only after matching session cookies
# against the form data; for now, simply re-posting the login form works.
logged_page = codechef_session.post(url, form_data)
if len(
BeautifulSoup(
logged_page.text,
"html.parser").findAll("input")) > 0 and is_logged_in():
click.confirm(
"You are/have tried to login to codechef while" +
"the script was running\nDo you want to try login again?",
default=True,
abort=True)
login(username, password)
else:
if(is_logged_in()):
return
else:
raise Exception("credential_error")
def logout_other_session():
global codechef_session
sess_url = url+"/session/limit"
try:
session_page = codechef_session.get(sess_url)
except BaseException:
raise
form_feilds = BeautifulSoup(
session_page.text,
"html.parser").findAll("input")
form_data = {}
logger.debug(form_feilds)
for j in range(len(form_feilds)-5):
i = form_feilds[j]
attrs = i.attrs
if "name" in attrs:
if "value" in attrs and attrs["value"]:
form_data[attrs["name"]] = attrs["value"]
for j in [-1, -2, -3, -4]:
i = form_feilds[j]
attrs = i.attrs
if "name" in attrs:
if "value" in attrs and attrs["value"]:
form_data[attrs["name"]] = attrs["value"]
try:
# no need to assign to a variable
logger.debug(form_data)
codechef_session.post(sess_url, data=form_data)
except BaseException:
raise
def is_logged_in():
global codechef_session
user_url = "https://www.codechef.com/api/user/me"
try:
page = codechef_session.get(user_url).json()
except BaseException:
return None
if(not page["user"]["username"]):
return False
else:
return True
| python |
import socket, argparse, termcolor, threading
open_ports = []
def get_open_ports(host, ports):
global open_ports
for port in ports:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(0.5)
s.connect((host, port))
open_ports.append(port)
print(f"{termcolor.colored('[+] Open:', 'green')} {port}")
s.close()
except:
pass
return open_ports
def divide_chunks(l, n):
for i in range(0, len(l), n):
yield l[i:i + n]
parser = argparse.ArgumentParser()
parser.add_argument("-t", "--threads", help="Number of threads", type=int, default=10)
parser.add_argument("-p", "--ports", help="Ports to scan", type=list, default=range(1, 65536))
parser.add_argument("-i", "--ip", help="IP to scan", type=str, default="", required=True)
args = parser.parse_args()
host = args.ip
ports = args.ports
threads = args.threads
print(f"[+] Scanning {host}")
chunks = list(divide_chunks(ports, len(ports)//threads+1))
for i in range(threads):
t = threading.Thread(target=get_open_ports, args=(host, chunks[i]))
t.start()
t.join(0.1)
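# Example invocation (hypothetical script name, host and port values):
#   python portscan.py -i 192.168.1.10 -t 50 -p 22,80,443
# Without -p, the default range 1-65535 is scanned.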
| python |
from urllib.parse import ParseResult, urlencode, urlparse
class URLUtility(object):
"""Contains different helper methods simplifying URL construction."""
@staticmethod
def build_url(base_url, query_parameters):
"""Construct a URL with specified query parameters.
:param base_url: Base URL
:type base_url: str
:param query_parameters: Dictionary containing query parameters
:type query_parameters: Dict
:return: Constructed URL
:rtype: str
"""
result = urlparse(base_url)
result = ParseResult(
result.scheme,
result.netloc,
result.path,
result.params,
urlencode(query_parameters),
result.fragment,
)
return result.geturl()
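if __name__ == "__main__":
    # Minimal usage sketch with made-up values (not part of the original class).
    url = URLUtility.build_url(
        "https://api.example.com/search", {"q": "flash flow", "page": 2}
    )
    print(url)  # https://api.example.com/search?q=flash+flow&page=2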
| python |
import sys
from queue import PriorityQueue
from utils import Point
import numpy as np
class Astar(object):
def __init__(self):
self.N = 0
self.V = []
self.E = []
self.closed = set([])
def goalTest(self, u):
return u == self.N - 1
def moveGen(self, u):
return zip(range(self.N), self.E[u])
def f(self, u):
return self.g(u) + self.h2(u)
def g(self, u):
return self.V[u].value
def h1(self, u):
return 10*self.V[u].distance(self.V[-1])
def h2(self, u):
return np.exp(- self.V[u].distance(self.V[-1]) )
def h3(self, u):
return self.V[u].distance(self.V[-1])
def takeInput(self, filename):
with open(filename, "r") as file:
self.N = int( next(file).rstrip() )
readLine = lambda x: list(map(int, x.rstrip().split()))
self.V = [ Point( *readLine(next(file)) ) for i in range(self.N) ]
self.E = [ readLine(next(file)) for i in range(self.N) ]
def reconstructPath(self):
v = self.N - 1
path = []
while v is not None:
path.append(v)
v = self.V[v].parent
path.reverse()
cost = sum(self.E[path[i-1]][path[i]] for i in range(1, len(path)))
return cost, path
def propagateImprovement(self, u):
for v, w in self.moveGen(u):
if w != 0:
newVal = self.g(u) + w
if newVal < self.g(v):
self.V[v].parent = u
self.V[v].value = newVal
if(v in self.closed):
self.propagateImprovement(v)
def getShortestPath(self):
"""
calculate the shortest path from vertex 0 and N-1
returns cost, path
g(u): path length from 0 to u
h1(u), h2(u), h3(u): heuristic estimates of the distance from u to the goal
f(u) = g(u) + h2(u), used as the priority in the queue
"""
Q = PriorityQueue() # implemented with lazy update
self.V[0].value = 0
Q.put( (self.f(0), 0) )
self.closed = set([0])
while not Q.empty():
f, u = Q.get()
if self.goalTest(u):
return self.reconstructPath()
self.closed.add(u)
for v, w in self.moveGen(u):
if w != 0 and v not in self.closed:
# add to queue only if this reduces the path length
newValue = self.g(u) + w
if newValue < self.g(v):
self.V[v].value = newValue
self.V[v].parent = u
Q.put( (self.f(v), v) )
if w != 0 and v in self.closed:
newValue = self.g(u) + w
if newValue < self.g(v):
self.V[v].parent = u
self.V[v].value = newValue
self.propagateImprovement(v)
def testPrint(self, filename):
self.takeInput(filename)
cost, path = self.getShortestPath()
print(cost)
print(*path, sep=" ")
A = Astar()
A.testPrint(sys.argv[1]) | python |
import os
import re
import tempfile
import subprocess
import typing
from typing import Any
import logging
from rever.tools import replace_in_file
from conda_forge_tick.xonsh_utils import indir
from conda_forge_tick.utils import eval_cmd
from conda_forge_tick.recipe_parser import CondaMetaYAML
from conda_forge_tick.migrators.core import (
MiniMigrator,
_get_source_code,
)
LOGGER = logging.getLogger("conda_forge_tick.migrators.cross_compile")
class UpdateConfigSubGuessMigrator(MiniMigrator):
post_migration = True
def filter(self, attrs: "AttrsTypedDict", not_bad_str_start: str = "") -> bool:
build_reqs = attrs.get("requirements", {}).get("build", set())
needed = False
for compiler in [
"fortran_compiler_stub",
"c_compiler_stub",
"cxx_compiler_stub",
]:
if compiler in build_reqs:
needed = True
break
return not needed
def migrate(self, recipe_dir: str, attrs: "AttrsTypedDict", **kwargs: Any) -> None:
cb_work_dir = _get_source_code(recipe_dir)
if cb_work_dir is None:
return
directories = set()
with indir(cb_work_dir):
for dp, dn, fn in os.walk("."):
for f in fn:
if f != "config.sub":
continue
if os.path.exists(os.path.join(dp, "config.guess")):
directories.add(dp)
if not directories:
return
with indir(recipe_dir):
if not os.path.exists("build.sh"):
return
with open("build.sh", "r") as f:
lines = list(f.readlines())
insert_at = 0
if lines[0].startswith("#"):
insert_at = 1
for d in directories:
lines.insert(
insert_at,
f"cp $BUILD_PREFIX/share/libtool/build-aux/config.* {d}\n",
)
lines.insert(insert_at, "# Get an updated config.sub and config.guess\n")
with open("build.sh", "w") as f:
f.write("".join(lines))
with open("meta.yaml") as f:
lines = f.readlines()
for i, line in enumerate(lines):
if line.strip().startswith("- {{ compiler"):
new_line = " " * (len(line) - len(line.lstrip()))
new_line += "- libtool # [unix]\n"
lines.insert(i, new_line)
break
with open("meta.yaml", "w") as f:
f.write("".join(lines))
| python |
from argparse import ArgumentParser
from functools import partial
from traceback import StackSummary
import asyncio
import enum
import logging
import ssl
import time
import os
from stem import CircStatus # type: ignore
from stem.control import Controller, EventType # type: ignore
from stem.response.events import CircuitEvent, FFMeasEvent # type: ignore
from transitions import Machine # type: ignore
from typing import Tuple, Union, Set, Dict
from .. import tor_client
from .. import msg
from ..tor_ctrl_msg import MeasrStartMeas
class CoordProtocol(asyncio.Protocol):
transport = None
def connection_made(self, transport):
log.debug('Connected to coord')
self.transport = transport
def connection_lost(self, exc):
machine.change_state_nonfatal_error('Lost connection with coord')
pass
def data_received(self, data: bytes):
''' Receive data from the coordinator. Parse it into a FFMsg and tell
other code about the message.
It's possible that this is called before the entire message is
received. In that case, we'll need to edit this function to buffer
bytes until the entire message has arrived. '''
log.info('Received %d bytes: %s', len(data), data)
m = msg.FFMsg.deserialize(data)
machine.notif_coord_msg(m)
class Measurement:
''' State related to a single measurement. '''
#: keep a copy of :class:`flashflow.msg.ConnectToRelay` command so we can
#: send it back to the coord when we're ready to go (or have failed)
connect_msg: msg.ConnectToRelay
#: Our circuit ids with the relay. Filled in once we know what they are
#: (they're launched) but not yet built
circs: Set[int]
#: Our built circuit ids with the relay. Filled in as we learn of launched
#: circuits becoming built.
ready_circs: Set[int]
#: Our circuit ids that we've been told have CLOSED or FAILED at any point
bad_circs: Set[int]
def __init__(self, connect_msg: msg.ConnectToRelay):
self.connect_msg = connect_msg
self.circs = set()
self.ready_circs = set()
self.bad_circs = set()
@property
def meas_id(self) -> int:
''' The measurement ID '''
return self.connect_msg.meas_id
@property
def relay_fp(self) -> str:
''' The fingerprint of the relay to measure '''
return self.connect_msg.fp
@property
def meas_duration(self) -> int:
''' The duration, in seconds, that active measurement should last. '''
return self.connect_msg.dur
@property
def waiting_circs(self) -> Set[int]:
''' Circs that we have LAUNCHED but have not yet added to ready_circs
because we haven't seen BUILT yet.
Note that as far as this function is concerned, there's no such thing
as a circuit becoming un-BUILT. This function doesn't know anything
about circuits closing. Other code needs to manipulate circs and
ready_circs as it deems fit.
'''
return self.circs - self.ready_circs
class States(enum.Enum):
''' States that we, as a FlashFlow measurer, can be in. '''
#: State in which we are created and to which we return when there's a
#: non-fatal error
START = enum.auto()
#: First "real" state. Launch a tor client and connect to it.
ENSURE_CONN_W_TOR = enum.auto()
#: Second real state. Connect to the coordinator.
ENSURE_CONN_W_COORD = enum.auto()
#: Normal state. We're doing measurements or waiting to be told to do them.
#: We are usually here.
READY = enum.auto()
#: There was some sort of error that calls for cleaning everything up and
#: essentially relaunching, but we shouldn't outright die.
NONFATAL_ERROR = enum.auto()
#: There is a serious error that isn't recoverable. Just cleanup and die.
FATAL_ERROR = enum.auto()
class StateMachine(Machine):
''' State machine and main control flow hub for FlashFlow measurer.
change_state_*:
State transitions are named change_state_* and don't exist here in the
code. The Machine class takes care of making them based on the triggers
in the list of possible transitions. For example: change_state_starting
is named as the trigger for transitions from either START or
NONFATAL_ERROR into ENSURE_CONN_W_TOR.
on_enter_*:
This is how the Machine class finds functions to call upon entering the
given state. For example, on_enter_NONFATAL_ERROR() is called when we
are transitioning to the NONFATAL_ERROR state. These functions should
be kept short. Significant work/logic should be done in other functions
that these call or schedule for calling later.
_*:
Other internal functions. See their documentation for more information
on them.
'''
# conf # This is set in __init__
tor_client: Controller
# how we communicate with the coord
coord_trans: asyncio.WriteTransport
coord_proto: CoordProtocol
measurements: Dict[int, Measurement]
def __init__(self, conf):
self.conf = conf
self.measurements = {}
super().__init__(
model=self,
states=States,
transitions=[
{
'trigger': 'change_state_starting',
'source': [States.START, States.NONFATAL_ERROR],
'dest': States.ENSURE_CONN_W_TOR,
},
{
'trigger': 'change_state_connected_to_tor',
'source': States.ENSURE_CONN_W_TOR,
'dest': States.ENSURE_CONN_W_COORD,
},
{
'trigger': 'change_state_connected_to_coord',
'source': States.ENSURE_CONN_W_COORD,
'dest': States.READY,
},
{
'trigger': 'change_state_nonfatal_error',
'source': '*',
'dest': States.NONFATAL_ERROR,
},
{
'trigger': 'change_state_fatal_error',
'source': '*',
'dest': States.FATAL_ERROR,
},
],
initial=States.START,
# Do not create .to_<state>() methods, which allow transition to
# <state> regardless of current state
auto_transitions=False,
)
def _ensure_conn_w_tor(self):
''' Main function in the ENSURE_CONN_W_TOR state. Launch a tor client
and connect to it. Save the Controller object. '''
assert self.state == States.ENSURE_CONN_W_TOR
# TODO: what happens if tor client disappears? Exception thrown? What??
# And what should we do about it? Try to relaunch? Just die? Choose
# **something**
c = tor_client.launch(
self.conf.getpath('tor', 'tor_bin'),
self.conf.getpath('measurer', 'tor_datadir'),
self.conf.get('tor', 'torrc_extra_lines')
)
if not c:
log.error('Unable to launch and connect to tor client')
self.change_state_fatal_error()
return
c.add_event_listener(self.notif_circ_event, EventType.CIRC)
c.add_event_listener(self.notif_ffmeas_event, EventType.FF_MEAS)
self.tor_client = c
self.change_state_connected_to_tor()
def _ensure_conn_w_coord(self, delay: float):
''' Main function in the ENSURE_CONN_W_COORD state. Repeatedly try
connecting to the coordinator until we are successful or have a fatal
error warranting completely giving up on life.
This function uses asynchronous python: the connection is represented
by a transport and protocol, and we try connecting asynchronously and
use a callback to find out the result. That said, the work done here
should probably be the only thing going on.
'''
assert self.state == States.ENSURE_CONN_W_COORD
# TODO: what if connection goes away?
# Get the (host, port) from "host:port"
coord_addr_port = self.conf.getaddr('measurer', 'coord_addr')
if coord_addr_port is None:
log.error('Don\'t know where coord is')
self.change_state_fatal_error()
return
# Callback to get the result of one connection attempt. If it didn't
# work and it wasn't fatal, schedule calling this function again some
# time in the future. If fatal, die. If successful, save the transport
# and protocol and move on!
def cb(fut):
nonlocal delay
# It's possible that the programmer didn't catch all exceptions.
# If the result is an exception, this *should* bubble up to the
# default exception handler, _exception_handler(...).
success_code, stuff_or_error = fut.result()
# Now check if we were successful, fatally unable to connect, or if
# we should retry.
if success_code == CoordConnRes.FATAL_ERROR:
log.error(
'Fatal error connecting to coordinator: %s',
stuff_or_error)
self.change_state_fatal_error()
return
elif success_code == CoordConnRes.RETRY_ERROR:
delay = min(2 * delay, 60)
log.warn(
'Unable to connect to coordinator: %s. Retrying in %.2fs.',
stuff_or_error, delay)
loop.call_later(
delay, partial(self._ensure_conn_w_coord, delay))
return
assert success_code == CoordConnRes.SUCCESS
assert not isinstance(stuff_or_error, str)
self.coord_trans, self.coord_proto = stuff_or_error
self.change_state_connected_to_coord()
# Kick off the asyncronous attempt to connect and attach the above
# callback so we can get the result.
task = asyncio.Task(_try_connect_to_coord(
coord_addr_port,
self.conf.getpath('measurer', 'key'),
self.conf.getpath('measurer', 'coord_cert'),
))
task.add_done_callback(cb)
# This is asynchronous python. We end immediately and the callback will
# eventually be called with the connection results. Nothing left to do
# for now.
def _complete_cleanup(self):
''' Cleanup all of our state while being very careful to not allow any
exceptions to bubble up. Use this when in an error state and you want
to cleanup before starting over or just dying. '''
if hasattr(self, 'tor_client') and self.tor_client:
log.info('cleanup: closing tor')
try:
self.tor_client.close()
except Exception as e:
log.error('Error closing tor: %s', e)
if hasattr(self, 'coord_trans') and self.coord_trans:
log.info('cleanup: closing coord transport')
try:
self.coord_trans.close()
except Exception as e:
log.error('Error closing transport with coord: %s', e)
if hasattr(self, 'coord_proto') and self.coord_proto:
# nothing to do
pass
if hasattr(self, 'measurements') and self.measurements:
log.info(
'cleanup: forgetting about %d measurements',
len(self.measurements))
self.measurements = {}
def _die(self):
''' End execution of the program. '''
loop.stop()
# ########################################################################
# STATE CHANGE EVENTS. These are called when entering the specified state.
# ########################################################################
def on_enter_READY(self):
pass
def on_enter_ENSURE_CONN_W_TOR(self):
loop.call_soon(self._ensure_conn_w_tor)
def on_enter_ENSURE_CONN_W_COORD(self):
loop.call_soon(partial(self._ensure_conn_w_coord, 0.5))
def on_enter_NONFATAL_ERROR(self, err_msg: str):
log.error('nonfatal error: %s', err_msg)
loop.call_soon(self._complete_cleanup)
loop.call_soon(self.change_state_starting)
def on_enter_FATAL_ERROR(self):
# log.error('We encountered a fatal error :(')
self._complete_cleanup()
self._die()
# ########################################################################
# MESSAGES FROM COORD. These are called when the coordinator tells us
# something.
# ########################################################################
def notif_coord_msg(self, message: msg.FFMsg):
msg_type = type(message)
if self.state != States.READY:
log.warn(
'Coord sent us message but we are not ready. Dropping. %s',
message)
return
# The asserts below are for shutting up mypy
if msg_type == msg.ConnectToRelay:
assert isinstance(message, msg.ConnectToRelay)
return self._notif_coord_msg_ConnectToRelay(message)
elif msg_type == msg.Failure:
assert isinstance(message, msg.Failure)
return self._notif_coord_msg_Failure(message)
elif msg_type == msg.Go:
assert isinstance(message, msg.Go)
return self._notif_coord_msg_Go(message)
log.warn(
'Unexpected/unhandled %s message. Dropping. %s',
msg_type, message)
def _notif_coord_msg_ConnectToRelay(self, message: msg.ConnectToRelay):
# caller should have verified and logged about this already
assert self.state == States.READY
meas_id = message.meas_id
if meas_id in self.measurements:
fail_msg = msg.Failure(msg.FailCode.M_DUPE_MEAS_ID, meas_id)
log.error(fail_msg)
self.coord_trans.write(fail_msg.serialize())
return
meas = Measurement(message)
ret = tor_client.send_msg(
self.tor_client,
MeasrStartMeas(
meas.meas_id, meas.relay_fp, message.n_circs,
meas.meas_duration))
# Make sure the circuit launches went well. Note they aren't built yet.
# It's just that tor found nothing obviously wrong with trying to build
# these circuits.
if not ret.is_ok():
fail_msg = msg.Failure(
msg.FailCode.LAUNCH_CIRCS, meas_id,
extra_info=str(ret))
log.error(fail_msg)
self.coord_trans.write(fail_msg.serialize())
return
# We expect to see "250 FF_MEAS 0 LAUNCHED CIRCS=1,2,3,4,5", where the
# 0 is the measurement ID we told the tor client, and CIRCS= carries the
# comma-separated list of launched circuit ids
code, _, content = ret.content()[0]
# Already checked this above with ret.is_ok()
assert code == '250'
parts = content.split()
if len(parts) != 4 or \
not parts[0] == 'FF_MEAS' or \
not parts[2] == 'LAUNCHED' or \
not parts[3].startswith('CIRCS='):
fail_msg = msg.Failure(
msg.FailCode.MALFORMED_TOR_RESP, meas_id,
extra_info=str(ret))
log.error(fail_msg)
self.coord_trans.write(fail_msg.serialize())
return
meas.circs.update({
int(circ_id_str) for circ_id_str in
parts[3].split('=')[1].split(',')
})
log.info(
'Launched %d circuits with relay %s: %s', len(meas.circs),
meas.relay_fp, meas.circs)
self.measurements[meas_id] = meas
# That's all for now. We stay in this state until Tor tells us it has
# finished building all circuits
def _notif_coord_msg_Go(self, go_msg: msg.Go):
# caller should have verified and logged about this already
assert self.state == States.READY
meas_id = go_msg.meas_id
if meas_id not in self.measurements:
fail_msg = msg.Failure(msg.FailCode.M_UNKNOWN_MEAS_ID, meas_id)
log.error(fail_msg)
self.coord_trans.write(fail_msg.serialize())
# TODO: cleanup Measurement
return
meas = self.measurements[meas_id]
start_msg = MeasrStartMeas(
meas.meas_id, meas.relay_fp, len(meas.ready_circs),
meas.meas_duration)
ret = tor_client.send_msg(self.tor_client, start_msg)
if not ret.is_ok():
fail_msg = msg.Failure(msg.FailCode.M_START_ACTIVE_MEAS, meas_id)
log.error(fail_msg)
self.coord_trans.write(fail_msg.serialize())
# TODO: cleanup Measurement
return
# ########################################################################
# MISC EVENTS. These are called from other parts of the measr code.
# ########################################################################
def notif_ffmeas_event(self, event: FFMeasEvent):
''' Called from stem to tell us about FF_MEAS events.
These events come from a different thread. We tell the main thread's
loop (in a threadsafe manner) to handle this event in the similarly
named function with a leading underscore.
'''
loop.call_soon_threadsafe(partial(self._notif_ffmeas_event, event))
def _notif_ffmeas_event(self, event: FFMeasEvent):
''' Actually handle the FF_MEAS event.
We look for:
- per-second BW_REPORTs of the amount of measurement traffic sent and
received, and we will fowarded those on to the coordinator.
- a END message at the end signally success.
'''
if event.ffmeas_type == 'BW_REPORT':
log.debug(
'Forwarding report of %d/%d sent/recv meas bytes',
event.sent, event.recv)
report = msg.BwReport(
event.meas_id, time.time(), event.sent, event.recv)
self.coord_trans.write(report.serialize())
return
elif event.ffmeas_type == 'END':
log.info(
'Tor client tells us meas %d finished %ssuccessfully%s',
event.meas_id, '' if event.success else 'un',
'. Cleaning up.' if event.meas_id in self.measurements else
', but we don\'t know about it. Dropping.')
if event.meas_id not in self.measurements:
return
del self.measurements[event.meas_id]
return
log.warn(
'Unexpected FF_MEAS event type %s. Dropping.', event.ffmeas_type)
return
def notif_circ_event(self, event: CircuitEvent):
''' Called from stem to tell us about circuit events.
These events come from a different thread. We tell the main thread's
loop (in a threadsafe manner) to handle this event in the similarly
named function with a leading underscore.
'''
loop.call_soon_threadsafe(partial(self._notif_circ_event, event))
def _notif_circ_event(self, event: CircuitEvent):
''' Actually handle the circuit event. We usually don't care, but
sometimes we are waiting on circuits to be built with a relay.
This runs in the main thread's loop unlike the similarly named function
(without a leading underscore) that tells the loop to call us.
'''
circ_id = int(event.id)
# We don't care about anything unless we're in the main state where we
# do measurements
if self.state != States.READY:
return
# Make sure it's a circuit we care about
all_circs: Set[int] = set.union(
# in case there's no measurements, add empty set to avoid errors
set(),
*[meas.circs for meas in self.measurements.values()])
waiting_circs: Set[int] = set.union(
# in case there's no measurements, add empty set to avoid errors
set(),
*[meas.waiting_circs for meas in self.measurements.values()])
if circ_id not in all_circs:
# log.warn(
# 'Ignoring CIRC event not for us. %d not in any '
# 'measurement\'s set of all circuits',
# circ_id)
return
# Act based on the type of CIRC event
if event.status == CircStatus.BUILT:
if circ_id not in waiting_circs:
log.warn(
'CIRC BUILT event for circ %d we do care about but that '
'isn\'t waiting. Shouldn\'t be possible. %s. Ignoring.',
circ_id, event)
return
# Tell all interested Measurements (should just be one, but do all
# that claim to care about this circuit, just in case) that the
# circuit is built
for meas in self.measurements.values():
if circ_id not in meas.circs:
continue
meas.ready_circs.add(circ_id)
log.debug(
'Circ %d added to meas %d\'s built circs. Now '
'have %d/%d', circ_id, meas.meas_id,
len(meas.ready_circs), len(meas.circs))
# If all are built, then tell coord this measurement is ready
if len(meas.ready_circs) < len(meas.circs):
continue
log.info('Meas %d built all circs', meas.meas_id)
self.coord_trans.write(msg.ConnectedToRelay(
meas.connect_msg).serialize())
return
elif event.status in [CircStatus.LAUNCHED, CircStatus.EXTENDED]:
# ignore these
return
elif event.status in [CircStatus.CLOSED, CircStatus.FAILED]:
# Tell all interested Measurements (should just be one, but do all
# that claim to care about this circuit, just in case) that the
# circuit has closed or failed
for meas in self.measurements.values():
if circ_id not in meas.circs:
continue
meas.bad_circs.add(circ_id)
log.info(
'Meas %d\'s circ %d is now closed/failed: %s',
meas.meas_id, circ_id, event)
return
# It's for us, but don't know how to handle it yet
log.warn('Not handling CIRC event for us: %s', event)
class CoordConnRes(enum.Enum):
''' Part of the return value of :meth:`_try_connect_to_coord`. '''
#: We successfully connected to the coord, shook our TLS hands, and all is
#: well.
SUCCESS = enum.auto()
#: We were not successful, but whatever happened may be temporary and it's
#: logical to try connecting again in the future.
RETRY_ERROR = enum.auto()
#: We were not successful, and trying again in the future is extremely
#: unlikely to be successful. We should give up.
FATAL_ERROR = enum.auto()
async def _try_connect_to_coord(
addr_port: Tuple[str, int],
our_key: str,
coord_cert: str,
) -> Tuple[
CoordConnRes, Union[
str, Tuple[asyncio.BaseTransport, asyncio.BaseProtocol]]]:
''' Try to connect to the coordinator at the given (host, port) tuple.
Perform the TLS handshake using our client TLS key in the file `our_key`
and only trusting the coord server cert in the file `coord_cert`.
Returns a tuple in all cases. The first item indicates success with
CoordConnRes. If it is an *_ERROR, then the second item is a string with
more details. If it is SUCCESS, then the second item is the transport and
protocol with the coordinator.
This function is a coroutine and all exceptions **should** be handled
within this function's body. If they aren't, that's a programming error.
To handle the case of unhandled exceptions, wrap this function in a
Task/Future, then catch and handle the generic Exception.
def cb(fut):
# handle the completion of the Task, whether successful or not
pass
task = asyncio.Task(_try_connect_to_coord(...))
task.add_done_callback(cb)
try:
result = task.result()
except Exception as e:
log.error(
'An unhandled exception occurred. Tell your programmer: %s', e)
# Additional code to handle the error, as necessary
'''
if not os.path.isfile(our_key):
return CoordConnRes.FATAL_ERROR, our_key + ' does not exist'
if not os.path.isfile(coord_cert):
return CoordConnRes.FATAL_ERROR, coord_cert + ' does not exist'
ssl_context = ssl.SSLContext()
# Load our TLS private key and certificate
ssl_context.load_cert_chain(our_key)
# Load the certificate of the coord
ssl_context.load_verify_locations(coord_cert)
ssl_context.verify_mode = ssl.CERT_REQUIRED
try:
res = await loop.create_connection(
CoordProtocol,
addr_port[0],
addr_port[1],
ssl=ssl_context,
)
except OSError as e:
return CoordConnRes.RETRY_ERROR, str(e)
return CoordConnRes.SUCCESS, res
def _exception_handler(loop, context):
log.error('%s', context['message'])
if 'exception' in context:
log.error(context['exception'])
if 'handle' in context:
log.error(context['handle'])
if 'source_traceback' in context:
log.error('Traceback:')
summary = StackSummary.from_list(context['source_traceback'])
for line_super in summary.format():
# The above line has multiple lines in it
for line in line_super.split('\n'):
if len(line):
log.error(' %s', line)
else:
log.error('Traceback not available. Run with PYTHONASYNCIODEBUG=1')
machine.change_state_fatal_error()
# # Not sure if this would actually work here. Maybe add to the logging config
# # file?
# # https://docs.python.org/3.6/library/asyncio-dev.html#logging
# logging.getLogger('asyncio').setLevel(logging.WARNING)
log = logging.getLogger(__name__)
loop = asyncio.get_event_loop()
machine: StateMachine
def gen_parser(sub) -> ArgumentParser:
''' Add the cmd line options for this FlashFlow command '''
d = 'Run as a FlashFlow measurer.'
p = sub.add_parser('measurer', description=d)
return p
# This function needs **some sort** of type annotation so that mypy will check
# the things it does. Adding the return value (e.g. '-> None') is enough
def main(args, conf) -> None:
global machine
os.makedirs(conf.getpath('measurer', 'datadir'), mode=0o700, exist_ok=True)
os.makedirs(conf.getpath('measurer', 'keydir'), mode=0o700, exist_ok=True)
machine = StateMachine(conf)
loop.set_exception_handler(_exception_handler)
loop.call_soon(machine.change_state_starting)
try:
loop.run_forever()
finally:
loop.run_until_complete(loop.shutdown_asyncgens())
loop.close()
return
| python |
from collections import Counter
def read_sequence(datapath):
protein_sequence = []
cleavage_site = []
# Loop condition conveniently discards the description lines
with open(datapath, 'r') as f:
while f.readline() != '':
# Slicing with :-1 to discard "\n" character
protein_sequence.append(f.readline()[:-1])
cleavage_site.append(f.readline()[:-1])
return protein_sequence, cleavage_site
def return_alphabet(sequence_list):
# Returns the alphabet present in sequence_list. Useful for dimension minimality.
alphabet = Counter()
for seq in sequence_list:
for letter in seq:
alphabet[letter] += 1
alphabet = sorted(list(alphabet))
return alphabet
def return_cleavpos(cleavage_list):
# Returns a list with the position of the cleavage point for each sequence in cleavage_list.
position_list = [0] * len(cleavage_list)
cont = 0
for seq in cleavage_list:
# Index is found using binary search.
start = 0
end = len(seq)
index = int((end + start) / 2)
while seq[index] != 'C':
if seq[index] == 'S':
start = index
else:
end = index
index = int((end + start) / 2)
position_list[cont] = index
cont += 1
return position_list
def all_subsequences(sequence, p, q):
n = len(sequence)
subseq_list = []
i = 0
while i < n - p - q:
subseq_list.append(sequence[i:i + p + q])
i += 1
return subseq_list
if __name__ == "__main__":
# Functionality testing
data_path = "/Users/bernardoveronese/Documents/INF442/INF442_Project2/Datasets/"
data_file = "EUKSIG_13.red.txt"
seq, cleav = read_sequence(data_path + data_file)
arr = return_cleavpos(cleav)
print(arr)
alphabet = return_alphabet(seq)
print(alphabet)
print(len(alphabet))
| python |
리스트 = [100,200,300]
for i in 리스트:
print(i+10)
menu = ["김밥","라면","튀김"]
for i in menu:
print("오늘의 메뉴:", i)
리스트 = ["하이닉스","삼성전자","LG전자"]
for i in 리스트:
print(len(i))
리스트 = ['dog','cat', 'parrot']
for i in 리스트:
print(i[0])
리스트 = [1,2,3]
for i in 리스트:
print("3 x ", i)
리스트 = [1,2,3]
for i in 리스트:
print("3 x ", i, "=", i*3)
리스트 = ['가','나','다','라']
for i in 리스트[1:]:
print(i)
리스트 = ['가','나','다','라']
for i in 리스트[::2]:
print(i)
리스트 = ['가','나','다','라']
for i in 리스트[::-1]:
print(i) | python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 17 09:17:38 2021
@author: maxmhuggins
"""
import matplotlib.pyplot as plt
PV_i = 640
k = .25
time = range(0, 24)
PV = [PV_i]
for i in range(0, len(time)-1):
PV.append(PV[i]*k + PV[i])
plt.plot(time, PV)
print('Final value: %.2f' % PV[-1])
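# Cross-check against the closed form of the growth recurrence above (sketch
# using the same PV_i and k): after len(time) - 1 compounding steps the last
# value should equal PV_i * (1 + k) ** (len(time) - 1).
print('Closed form: %.2f' % (PV_i * (1 + k) ** (len(time) - 1)))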
| python |
#! python3
# imageSiteDownloader.py
'''
Write a program that goes to a photo-sharing site like Flickr or Imgur,
searches for a category of photos, and then downloads all the resulting
images. You could write a program that works with any photo site that has
a search feature.
'''
import requests, bs4, os, pprint, re
os.chdir('C:\\Users\\Mack W\\Documents\\Python\\automateTheBoringStuffWithPython\\Chapter 11 Web Scraping\\Practice Projects')
url = re.compile(r'c1.*\.jpg')
# Which image site
print('Website: ', end='')
website = input().lower()
# Which category
print('Search: ', end='')
search = input().lower()
path = os.getcwd() + '\\' + search + '.txt'
# Request site
if not os.path.isfile(path):
if website == 'flickr':
res = requests.get('https://www.flickr.com/search/?text=%s' % search)
elif website == 'imgur':
res = requests.get('https://imgur.com/search?q=%s' % search)
elif website == 'instagram':
print('Instagram')
else:
print('It has to be either: flickr, imgur, or instagram.')
print('Please run the program again.')
res.raise_for_status()
# Write html to file
file = open('%s.txt' % search, 'wb')
for chunk in res.iter_content(100000):
file.write(chunk)
file.close()
# Create beautiful soup object
file = open('%s.txt' % search)
soup = bs4.BeautifulSoup(file, features="lxml")
if not os.path.isdir(search):
os.makedirs(search)
os.chdir(search)
if website == 'flickr':
elems = soup.select('div[class="view photo-list-photo-view requiredToShowOnServer awake"]')
for i in range(len(elems)):
# find image url
mo = url.search(elems[i].get('style'))
res = requests.get('http://'+ mo.group())
res.raise_for_status()
# Save image
file = open('%d.jpg' % i, 'wb')
for chunk in res.iter_content(100000):
file.write(chunk)
file.close()
elif website == 'imgur':
elems = soup.select('')  # TODO: CSS selector for imgur search results not implemented yet
elif website == 'instagram':
elems = soup.select('')  # TODO: CSS selector for instagram search results not implemented yet
| python |
# Python RegExp Syntax to Javascript RegExp Syntax Translator
# This code was pulled from the repository at:
# https://github.com/GULPF/rescrypt
# Original license was MIT but was converted to Apache v2 for
# ease of integrating with the Transcrypt project
#
# XXX: don't redefine those here
T = (1<<0)
TEMPLATE = T
I = (1<<1)
IGNORECASE = I
# Deprecated
L = (1<<2)
LOCALE = L
M = (1<<3)
MULTILINE = M
S = (1 << 4)
DOTALL = S
# Legacy - Unicode by default in Python 3
U = (1 << 5)
UNICODE = U
X = (1 << 6)
VERBOSE = X
DEBUG = (1<<7)
A = (1<<8)
ASCII = A
# This is a javascript specific flag
Y = (1 << 16)
STICKY = Y
G = (1 << 17)
GLOBAL = G
# This flag is used to indicate that re module should use
# the javascript regex engine directly and not attempt to
# translate the regex string into a python regex
J = (1<<19)
JSSTRICT = J
__pragma__ ('tconv')
def _read_escape(rgx, idx, append_to):
# XXX: This must handle the case in `if c == '\\'`?:
if rgx[idx] == '\\':
if idx + 1 >= len(rgx):
pass # XXX: exception
# These three are regex escape codes
# that doesn't exist in JS.
if rgx[idx + 1] == 'A':
append_to.append('^')
elif rgx[idx + 1] == 'a':
append_to.append('\\07')
elif rgx[idx + 1] == 'Z':
append_to.append('$')
# Otherwise just leave as is.
else:
append_to.append('\\' + rgx[idx + 1])
idx += 2
else:
append_to.append(rgx[idx])
idx += 1
return idx
def _read_until(rgx, start, char, append_to):
idx = start
while idx < len(rgx) and rgx[idx] != char:
idx = _read_escape(rgx, idx, append_to)
if idx >= len(rgx):
raise Exception("Unexpected end of input")
# append_to.append(rgx[idx])
# idx += 1
return idx
def _safe_char_at(s, idx):
if idx >= len(s):
return ''
return s[idx]
def translate(rgx, flags=0):
# import re
idx = 0
flagdict = {
'i': IGNORECASE,
'L': LOCALE,
'm': MULTILINE,
's': DOTALL,
'u': UNICODE,
'x': VERBOSE,
'a': ASCII
}
tokens = []
groupindex = {}
n_capturings = 1 # Capturing indices start at 1
while idx < len(rgx):
# The current and next character
c = rgx[idx]
n = _safe_char_at(rgx, idx + 1)
# TODO: use read_escape instead
if c == '\\':
# These three are regex escape codes
# that doesn't exist in JS.
if n == 'A':
value = '^'
elif n == 'a':
value = '\\07'
elif n == 'Z':
value = '$'
# Otherwise just leave as is.
else:
value = '\\' + n
tokens.append(value)
idx += 2
elif c == '$':
# '$' has slightly different semantics in Python and JS.
# Note that /\Z/ in Python is equal to /$/ in JS.
tokens.append('(?=\\n?$)')
idx += 1
elif c == '{':
# Rewrite `{,b}` to `{0,b}`.
# Leave others as is.
if n == ',':
itr = idx + 2
b_nbrs = []
while itr < len(rgx) and rgx[itr] in '0123456789':
b_nbrs.append(rgx[itr])
itr += 1
if rgx[itr] == '}':
tokens.extend(["{", "0", ","] + b_nbrs)
idx += 2 + len(b_nbrs)
else:
tokens.extend(["{", ","])
idx += 2
else:
tokens.append(rgx[idx])
idx += 1
elif c == '[':
# This requires no rewriting, but we need
# to consume everything until the next
# unescaped ']' to make sure that what's
# inside of the set isn't interpreted as something
# special (e.g /[(]/ is valid but /(/ isn't)
tokens.append(rgx[idx])
idx += 1
if rgx[idx] == '^':
tokens.append(rgx[idx])
idx += 1
# As a special rule, Python allows a literal ']' as the first
# member of a set. E.g /[]]/ is a set containing ']',
# and /[^]]/ is an inverted set containing ']'.
if _safe_char_at(rgx, idx) == ']':
tokens.append('\\]')
idx += 1
idx = _read_until(rgx, idx, ']', tokens)
tokens.append(']')
idx += 1
elif c == '(' and n == '?':
# Extension notation.
n2 = _safe_char_at(rgx, idx + 2)
n3 = _safe_char_at(rgx, idx + 3)
# Named group def.
# XXX: For simplicity this allows any chars in group name
# but Python only allows valid identifiers.
if n2 == 'P' and n3 == '<':
namearr = []
idx += 4 # skip (?P<
idx = _read_until(rgx, idx, '>', namearr)
idx += 1 # skip '>'
groupindex["".join(namearr)] = n_capturings
n_capturings += 1
tokens.append('(')
# Named group ref.
elif n2 == 'P' and n3 == '=':
namearr = []
idx += 4 # skip (?P=
idx = _read_until(rgx, idx, ')', namearr)
idx += 1 # skip ')'
name = "".join(namearr)
if name not in groupindex:
raise error("Unknown named capturing group: " + name)
tokens.append('\\' + groupindex[name])
# Comment
elif n2 == '#':
idx = _read_until(rgx, idx, ')', [])
idx += 1 # Skip )
# Flag
elif n2 in flagdict:
idx += 2
# NOTE: No reason to care about escape
# sequences here since the only
# valid letters are 'iLmsux'.
while idx < len(rgx) and rgx[idx] != ')':
if rgx[idx] not in flagdict:
break
flags |= flagdict[rgx[idx]]
idx += 1
if idx == len(rgx):
raise error("Expected '(' but found " + rgx[idx])
if rgx[idx] != ')':
if rgx[idx] in ["-", ":"]:
raise error("The '(?imsx-imsx:...)' regex syntax " +
"is not supported by Transcrypt.")
raise error("Unknown regex flag '" + rgx[idx] + "'")
idx += 1
elif (n2 == '<' and n3 == '=') or (n2 == '<' and n3 == '!'):
raise Exception("Regex lookbehinds are not supported by Transcrypt")
elif n2 == ':':
tokens.append(rgx[idx])
idx += 1
# XXX: implement
# One of:
# - lookahead
# - neg lookahead
# - if then else
# Note that we are probably not able to implement
# lookbehinds.
else:
raise Exception("Unknown regex extension '" + n2 + "'")
else:
if c == '(':
n_capturings += 1
tokens.append(rgx[idx])
idx += 1
if flags & DOTALL:
for idx, token in enumerate(tokens):
if token == '.':
tokens[idx] = r'[\s\S]'
return "".join(tokens), flags, groupindex, n_capturings - 1
print("input", r"(?P<prefix>[a-zA-Z]+)://(?P<suffix>[^/]*)")
print("output", translate(r"(?P<prefix>[a-zA-Z]+)://(?P<suffix>[^/]*)"))
| python |
from deepmath.deephol import predictions
def _proof_state_from_search(predictor, node):
return predictions.ProofState(goal='goal')
| python |
from matplotlib import pyplot as plt
import numpy as np
from math import ceil
def comp_dist(sample: list):
y = list()
for i in range(len(sample)):
y.append(i)
y = np.array(y)
sample_ = np.array(sample)
plot = plt.plot(y, sample_, 'r.', markersize=1)
plt.ylabel('Complexity')
axis_x_max = int(ceil(len(sample) / 100.0)) * 100
plt.axis([0, axis_x_max, 0, 1])
plt.savefig("plot", dpi=250)
plt.show()
def triple(sample1, sample2, sample3, ws1, ws2, ws3):
y1 = list()
y2 = list()
y3 = list()
for i in range(len(sample1)):
y1.append(i)
y = np.array(y1)
y1 = np.array(y1)
for i in range(len(sample2)):
y2.append(i)
y = np.array(y2)
y2 = np.array(y2)
for i in range(len(sample3)):
y3.append(i)
y = np.array(y3)
y3 = np.array(y3)
sample_1 = np.array(sample1)
sample_2 = np.array(sample2)
sample_3 = np.array(sample3)
marker_size_ = 0.75
plot = plt.plot(y1, sample_1, 'g.', label=str('Window ' + str(ws1)), markersize=marker_size_)
plot = plt.plot(y2, sample_2, 'b.', label=str('Window ' + str(ws2)), markersize=marker_size_)
plot = plt.plot(y3, sample_3, 'r.', label=str('Window ' + str(ws3)), markersize=marker_size_)
plt.ylabel('Complexity')
ax_x1 = int(ceil(len(sample1) / 100.0)) * 100
ax_x2 = int(ceil(len(sample2) / 100.0)) * 100
ax_x3 = int(ceil(len(sample3) / 100.0)) * 100
axis_x_max = max(ax_x1, ax_x2, ax_x3)
plt.axis([0, axis_x_max, 0, 1])
plt.legend()
plt.savefig("tplot", dpi=250)
plt.show()
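if __name__ == "__main__":
    # Synthetic-data sketch (illustrative only, not part of the original
    # script): exercise comp_dist with a made-up complexity profile in [0, 1].
    import random
    comp_dist([random.random() for _ in range(500)])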
| python |
# Copied from the uvloop project. If you add a new unittest here,
# please consider contributing it to the uvloop project.
#
# Portions copyright (c) 2015-present MagicStack Inc. http://magic.io
import asyncio
import logging
import os
import threading
import time
import weakref
from unittest import mock
import pytest
import uvloop
def test_close(loop):
assert not loop.is_closed()
loop.close()
assert loop.is_closed()
# it should be possible to call close() more than once
loop.close()
loop.close()
# operation blocked when the loop is closed
f = asyncio.Future(loop=loop)
with pytest.raises(RuntimeError):
loop.run_forever()
with pytest.raises(RuntimeError):
loop.run_until_complete(f)
def test_handle_weakref(loop):
wd = weakref.WeakValueDictionary()
h = loop.call_soon(lambda: None)
wd['h'] = h # Would fail without __weakref__ slot.
def test_call_soon(loop):
calls = []
def cb(inc):
calls.append(inc)
loop.stop()
loop.call_soon(cb, 10)
h = loop.call_soon(cb, 100)
# self.assertIn('.cb', repr(h))
h.cancel()
# self.assertIn('cancelled', repr(h))
loop.call_soon(cb, 1)
loop.run_forever()
assert calls == [10, 1]
def test_call_soon_base_exc(loop):
def cb():
raise KeyboardInterrupt()
loop.call_soon(cb)
with pytest.raises(KeyboardInterrupt):
loop.run_forever()
assert not loop.is_closed()
@pytest.mark.parametrize('debug', [True, False])
@pytest.mark.parametrize(
'name, meth',
[('call_soon', lambda loop, *args: loop.call_soon(*args)),
('call_later', lambda loop, *args: loop.call_later(0.01, *args))])
def test_calls_debug_reporting(loop, debug, name, meth):
context = None
def handler(loop, ctx):
nonlocal context
context = ctx
loop.set_debug(debug)
loop.set_exception_handler(handler)
def cb():
1 / 0
meth(loop, cb)
assert context is None
loop.run_until_complete(asyncio.sleep(0.05, loop=loop))
assert type(context['exception']) is ZeroDivisionError
assert context['message'].startswith('Exception in callback')
if debug:
tb = context['source_traceback']
assert tb[-2].name == 'test_calls_debug_reporting'
else:
assert 'source_traceback' not in context
del context
def test_now_update(loop):
async def run():
st = loop.time()
time.sleep(0.05)
return loop.time() - st
delta = loop.run_until_complete(run())
assert delta > 0.049 and delta < 0.6
def test_call_later_1(loop):
calls = []
def cb(inc=10, stop=False):
calls.append(inc)
assert loop.is_running()
if stop:
loop.call_soon(loop.stop)
loop.call_later(0.05, cb)
# canceled right away
h = loop.call_later(0.05, cb, 100, True)
# assert '.cb' in repr(h)
h.cancel()
# assert 'cancelled' in repr(h)
loop.call_later(0.05, cb, 1, True)
loop.call_later(1000, cb, 1000) # shouldn't be called
started = time.monotonic()
loop.run_forever()
finished = time.monotonic()
assert calls == [10, 1]
assert not loop.is_running()
assert finished - started < 0.1
assert finished - started > 0.04
def test_call_later_2(loop):
# Test that loop.call_later triggers an update of
# libuv cached time.
async def main():
await asyncio.sleep(0.001, loop=loop)
time.sleep(0.01)
await asyncio.sleep(0.01, loop=loop)
started = time.monotonic()
loop.run_until_complete(main())
delta = time.monotonic() - started
assert delta > 0.019
def test_call_later_negative(loop):
calls = []
def cb(arg):
calls.append(arg)
loop.stop()
loop.call_later(-1, cb, 'a')
loop.run_forever()
assert calls == ['a']
@pytest.mark.skipif(os.environ.get('TRAVIS_OS_NAME') is not None,
reason='time is not monotonic on Travis')
def test_call_at(loop):
i = 0
def cb(inc):
nonlocal i
i += inc
loop.stop()
at = loop.time() + 0.05
loop.call_at(at, cb, 100).cancel()
loop.call_at(at, cb, 10)
started = time.monotonic()
loop.run_forever()
finished = time.monotonic()
assert i == 10
assert finished - started < 0.07
assert finished - started > 0.045
def test_check_thread(loop, other_loop):
def check_thread(loop, debug):
def cb():
pass
loop.set_debug(debug)
if debug:
msg = ("Non-thread-safe operation invoked on an "
"event loop other than the current one")
with pytest.raises(RuntimeError) as exc:
loop.call_soon(cb)
exc.match(msg)
with pytest.raises(RuntimeError) as exc:
loop.call_later(60, cb)
exc.match(msg)
with pytest.raises(RuntimeError) as exc:
loop.call_at(loop.time() + 60, cb)
exc.match(msg)
else:
loop.call_soon(cb)
loop.call_later(60, cb)
loop.call_at(loop.time() + 60, cb)
def check_in_thread(loop, event, debug, create_loop, fut):
# wait until the event loop is running
event.wait()
try:
if create_loop:
try:
asyncio.set_event_loop(other_loop)
check_thread(loop, debug)
finally:
asyncio.set_event_loop(None)
else:
check_thread(loop, debug)
except Exception as exc:
loop.call_soon_threadsafe(fut.set_exception, exc)
else:
loop.call_soon_threadsafe(fut.set_result, None)
def test_thread(loop, debug, create_loop=False):
event = threading.Event()
fut = asyncio.Future(loop=loop)
loop.call_soon(event.set)
args = (loop, event, debug, create_loop, fut)
thread = threading.Thread(target=check_in_thread, args=args)
thread.start()
loop.run_until_complete(fut)
thread.join()
# raise RuntimeError if the thread has no event loop
# test_thread(loop, True)
# check disabled if debug mode is disabled
# test_thread(loop, False)
# raise RuntimeError if the event loop of the thread is not the called
# event loop
# test_thread(loop, True, create_loop=True)
# check disabled if debug mode is disabled
# test_thread(loop, False, create_loop=True)
def test_run_once_in_executor_plain(loop):
called = []
def cb(arg):
called.append(arg)
async def runner():
await loop.run_in_executor(None, cb, 'a')
loop.run_until_complete(runner())
assert called == ['a']
def test_set_debug(loop):
loop.set_debug(True)
assert loop.get_debug()
loop.set_debug(False)
assert not loop.get_debug()
def test_run_until_complete_type_error(loop):
with pytest.raises(TypeError):
loop.run_until_complete('blah')
def test_run_until_complete_loop(loop, other_loop):
task = asyncio.Future(loop=loop)
with pytest.raises(ValueError):
other_loop.run_until_complete(task)
def test_run_until_complete_error(loop):
async def foo():
raise ValueError('aaa')
with pytest.raises(ValueError, message='aaa'):
loop.run_until_complete(foo())
@pytest.mark.skip(reason='tokio does not support this')
def test_debug_slow_callbacks(loop):
logger = logging.getLogger('asyncio')
loop.set_debug(True)
loop.slow_callback_duration = 0.2
loop.call_soon(lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
loop.run_until_complete(asyncio.sleep(0, loop=loop))
assert log.call_count == 1
# format message
msg = log.call_args[0][0] % log.call_args[0][1:]
assert 'Executing <Handle' in msg
assert 'test_debug_slow_callbacks' in msg
@pytest.mark.skip(reason='tokio does not support this')
def test_debug_slow_timer_callbacks(loop):
logger = logging.getLogger('asyncio')
loop.set_debug(True)
loop.slow_callback_duration = 0.2
loop.call_later(0.01, lambda: time.sleep(0.3))
with mock.patch.object(logger, 'warning') as log:
loop.run_until_complete(asyncio.sleep(0.02, loop=loop))
assert log.call_count == 1
# format message
# msg = log.call_args[0][0] % log.call_args[0][1:]
# self.assertIn('Executing <Handle', msg)
# self.assertIn('test_debug_slow_callbacks', msg)
@pytest.mark.skip(reason='tokio does not support this')
def test_default_exc_handler_callback(loop, mock_pattern):
loop._process_events = mock.Mock()
def zero_error(fut):
fut.set_result(True)
1 / 0
logger = logging.getLogger('asyncio')
# Test call_soon (events.Handle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=loop)
loop.call_soon(zero_error, fut)
fut.add_done_callback(lambda fut: loop.stop())
loop.run_forever()
log.assert_called_with(
mock_pattern('Exception in callback.*zero'), exc_info=mock.ANY)
# Test call_later (events.TimerHandle)
with mock.patch.object(logger, 'error') as log:
fut = asyncio.Future(loop=loop)
loop.call_later(0.01, zero_error, fut)
fut.add_done_callback(lambda fut: loop.stop())
loop.run_forever()
log.assert_called_with(
mock_pattern('Exception in callback.*zero'), exc_info=mock.ANY)
@pytest.mark.skip(reason='need tokio logging decision')
def test_set_exc_handler_custom(loop, mock_pattern, match):
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
loop.stop()
1 / 0
loop.call_soon(zero_error)
loop.run_forever()
errors = []
def handler(loop, exc):
errors.append(exc)
loop.set_debug(True)
if hasattr(loop, 'get_exception_handler'):
# Available since Python 3.5.2
assert loop.get_exception_handler() is None
loop.set_exception_handler(handler)
if hasattr(loop, 'get_exception_handler'):
assert loop.get_exception_handler() is handler
run_loop()
assert len(errors) == 1
assert match(errors[-1]['message'], 'Exception in callback.*zero_error')
loop.set_exception_handler(None)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
mock_pattern('Exception in callback.*zero'), exc_info=mock.ANY)
assert len(errors) == 1
@pytest.mark.skip(reason='need tokio logging decision')
def test_set_exc_handler_broken(loop, mock_pattern):
logger = logging.getLogger('asyncio')
def run_loop():
def zero_error():
loop.stop()
1 / 0
loop.call_soon(zero_error)
loop.run_forever()
def handler(loop, context):
raise AttributeError('spam')
loop._process_events = mock.Mock()
loop.set_exception_handler(handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
mock_pattern('Unhandled error in exception handler'),
exc_info=mock.ANY)
def test_default_exc_handler_broken(loop, mock_pattern):
logger = logging.getLogger('asyncio')
_context = None
class Loop(uvloop.Loop):
_selector = mock.Mock()
_process_events = mock.Mock()
def default_exception_handler(self, context):
nonlocal _context
_context = context
# Simulates custom buggy "default_exception_handler"
raise ValueError('spam')
loop = Loop()
# self.addCleanup(loop.close)
asyncio.set_event_loop(loop)
def run_loop():
def zero_error():
loop.stop()
1 / 0
loop.call_soon(zero_error)
loop.run_forever()
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
'Exception in default exception handler',
exc_info=True)
def custom_handler(loop, context):
raise ValueError('ham')
_context = None
loop.set_exception_handler(custom_handler)
with mock.patch.object(logger, 'error') as log:
run_loop()
log.assert_called_with(
mock_pattern('Exception in default exception.*'
'while handling.*in custom'),
exc_info=True)
# Check that original context was passed to default
# exception handler.
assert 'context' in _context
assert (type(_context['context']['exception']) is
ZeroDivisionError)
@pytest.mark.skip(reason='need impl')
def test_set_task_factory_invalid(loop):
with pytest.raises(
TypeError, message='task factory must be a callable or None'):
loop.set_task_factory(1)
assert loop.get_task_factory() is None
@pytest.mark.skip(reason='need impl')
def test_set_task_factory(loop):
# loop._process_events = mock.Mock()
class MyTask(asyncio.Task):
pass
@asyncio.coroutine
def coro():
pass
def factory(loop, coro):
return MyTask(coro, loop=loop)
assert loop.get_task_factory() is None
loop.set_task_factory(factory)
assert loop.get_task_factory() is factory
task = loop.create_task(coro())
assert isinstance(task, MyTask)
loop.run_until_complete(task)
loop.set_task_factory(None)
assert loop.get_task_factory() is None
task = loop.create_task(coro())
assert isinstance(task, asyncio.Task)
assert not isinstance(task, MyTask)
loop.run_until_complete(task)
| python |
"""Numpy to Javascript (JSON) conversion
Assumes numpy matrices are nx8 where first 3 columns contain x, y, z
respectively. Checks for `data/*.npy` by default, below. Uses the filename,
stripped, for the data dictionary key.
Remember that classes 1, 2, 3 are colored red, green, blue respectively.
All other classes are colored grey.
Usage:
to_json.py
to_json.py <folder> <start> <end>
"""
import glob
import os
import json
import numpy as np
import sys
folder, start, end = '0005_pred', 0, 50
arguments = sys.argv
if len(arguments) == 4:
folder, (start, end) = arguments[1], map(int, arguments[2:])
def convert(format):
data = {}
for path in list(sorted(glob.iglob(format)))[start:end]:
key = os.path.basename(path).replace('.npy', '')
datum = np.load(path)
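        # Center-crop the second axis symmetrically so roughly 512 columns remain before flattening.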
delta_w = (datum.shape[1] - 512) // 2
datum = datum[:, delta_w: datum.shape[1] - delta_w:, :]
datum = datum.reshape((-1, datum.shape[-1])).astype(float)
data[key] = {'vertices': [{'x': r[0], 'y': r[1], 'z': r[2], 'class': int(r[5])} for r in datum]}
with open('js/output.js', 'w') as f:
f.write('var data = %s' % json.dumps(data).replace('"', "'"))
print('wrote to js/output.js')
def main():
print('Read from', folder)
convert('data/%s/*.npy' % folder)
if __name__ == '__main__':
main()
| python |
from setuptools import setup
setup(
name = 'objectDetectionD3MWrapper',
version = '0.1.0',
description = 'Keras implementation of RetinaNet as a D3M primitive.',
author = 'Sanjeev Namjoshi',
author_email = '[email protected]',
packages = ['objectDetectionD3MWrapper'],
install_requires = ['numpy>=1.15.4,<=1.17.3',
'object_detection_retinanet @ git+https://github.com/NewKnowledge/object-detection-retinanet@beca7ff86faa2295408e46fe221a3c7437cfdc81#egg=object_detection_retinanet'],
entry_points = {
'd3m.primitives': [
'object_detection.retinanet = objectDetectionD3MWrapper:ObjectDetectionRNPrimitive'
],
},
)
| python |
import sys
import interpreter
from interpreter.main import Interpreter
# main
def main():
    # check passed parameter length
    if len(sys.argv) != 2:
        print('Usage: ' + sys.argv[0] + ' <source file>')
        return
code = ''
with open(sys.argv[1], "r") as file:
code = file.read()
i = Interpreter(code)
msg, code, _, _ = i.run()
print('\nReturned with code ' + str(code) + ' : ' + msg)
return
if __name__ == "__main__":
main() | python |
from mmdet.apis import init_detector, inference_detector, show_result
config_file = 'configs/faster_rcnn_r50_fpn_1x.py'
checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth'
# build the model from a config file and a checkpoint file
# model = init_detector(config_file, checkpoint_file, device='cuda:0')
model = init_detector(config_file, checkpoint_file, device='cuda:0')
# test a single image and show the results
img = 'test.jpg' # or img = mmcv.imread(img), which will only load it once
result = inference_detector(model, img)
show_result(img, result, model.CLASSES)
# test a list of images and write the results to image files
imgs = ['test1.jpg', 'test2.jpg']
for i, result in enumerate(inference_detector(model, imgs)):
show_result(imgs[i], result, model.CLASSES, out_file='result_{}.jpg'.format(i)) | python |
import setuptools
setuptools.setup(
name = 'django-livereload-notifier',
keywords = 'django, development, server, runserver, livereload',
description = 'LiveReload with the Django development server',
long_description = open('README.md').read(),
author = 'n4bz0r',
author_email = '[email protected]',
version = '0.1',
license = 'MIT License',
url = 'https://github.com/n4bz0r/django-livereload-notifier',
include_package_data = True,
packages = setuptools.find_packages(),
classifiers = [
'Framework :: Django',
'Environment :: Web Environment',
'Programming Language :: Python :: 3',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires = [
'beautifulsoup4>=4.3.2',
'watchdog>=0.10.3',
],
) | python |
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from data_refinery_api.test.test_api_general import API_VERSION
from data_refinery_common.models import (
ComputationalResult,
Organism,
OrganismIndex,
Processor,
Sample,
SampleResultAssociation,
)
class ProcessorTestCases(APITestCase):
def setUp(self):
salmon_quant_env = {
"os_distribution": "Ubuntu 16.04.4 LTS",
"os_pkg": {"python3": "3.5.1-3", "python3-pip": "8.1.1-2ubuntu0.4"},
"cmd_line": {"salmon --version": "salmon 0.9.1"},
"python": {"Django": "2.0.6", "data-refinery-common": "0.5.0"},
}
self.salmon_quant_proc = Processor.objects.create(
name="Salmon Quant",
version="0.45",
docker_image="ccdl/salmon_img:v1.23",
environment=salmon_quant_env,
)
salmontools_env = {
"os_distribution": "Ubuntu 16.04.4 LTS",
"os_pkg": {
"python3": "3.5.1-3",
"python3-pip": "8.1.1-2ubuntu0.4",
"g++": "4:5.3.1-1ubuntu1",
"cmake": "3.5.1-1ubuntu3",
},
"cmd_line": {"salmontools --version": "Salmon Tools 0.1.0"},
"python": {"Django": "2.0.6", "data-refinery-common": "0.5.0"},
}
Processor.objects.create(
name="Salmontools",
version="1.83",
docker_image="ccdl/salmontools_img:v0.45",
environment=salmontools_env,
)
def tearDown(self):
ComputationalResult.objects.all().delete()
Organism.objects.all().delete()
OrganismIndex.objects.all().delete()
Processor.objects.all().delete()
Sample.objects.all().delete()
SampleResultAssociation.objects.all().delete()
def test_endpoint(self):
response = self.client.get(reverse("processors", kwargs={"version": API_VERSION}))
self.assertEqual(response.status_code, status.HTTP_200_OK)
processors = response.json()["results"]
self.assertEqual(processors[0]["name"], "Salmon Quant")
self.assertEqual(processors[0]["environment"]["os_pkg"]["python3"], "3.5.1-3")
self.assertEqual(processors[1]["name"], "Salmontools")
self.assertEqual(
processors[1]["environment"]["cmd_line"]["salmontools --version"], "Salmon Tools 0.1.0"
)
def test_processor_and_organism_in_sample(self):
sample = Sample.objects.create(accession_code="ACCESSION", title="fake sample")
homo_sapiens = Organism(name="HOMO_SAPIENS", taxonomy_id=9606, is_scientific_name=True)
homo_sapiens.save()
transcriptome_result = ComputationalResult.objects.create()
organism_index = OrganismIndex.objects.create(
organism=homo_sapiens, result=transcriptome_result, index_type="TRANSCRIPTOME_LONG"
)
result = ComputationalResult.objects.create(
processor=self.salmon_quant_proc, organism_index=organism_index
)
SampleResultAssociation.objects.create(sample=sample, result=result)
response = self.client.get(
reverse(
"samples_detail",
kwargs={"accession_code": sample.accession_code, "version": API_VERSION},
)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
processor = response.json()["results"][0]["processor"]
self.assertEqual(processor["name"], self.salmon_quant_proc.name)
self.assertEqual(
processor["environment"]["os_pkg"]["python3"],
self.salmon_quant_proc.environment["os_pkg"]["python3"],
)
organism_index = response.json()["results"][0]["organism_index"]
self.assertEqual(organism_index["result_id"], transcriptome_result.id)
self.assertEqual(organism_index["index_type"], "TRANSCRIPTOME_LONG")
| python |
"""
Zeroing out gradients in PyTorch
================================
It is beneficial to zero out gradients when building a neural network.
This is because by default, gradients are accumulated in buffers (i.e,
not overwritten) whenever ``.backward()`` is called.
Introduction
------------
When training your neural network, models are able to increase their
accuracy through gradient descent. In short, gradient descent is the
process of minimizing our loss (or error) by tweaking the weights and
biases in our model.
``torch.Tensor`` is the central class of PyTorch. When you create a
tensor, if you set its attribute ``.requires_grad`` as ``True``, the
package tracks all operations on it. This happens on subsequent backward
passes. The gradient for this tensor will be accumulated into ``.grad``
attribute. The accumulation (or sum) of all the gradients is calculated
when .backward() is called on the loss tensor.
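For example, calling ``.backward()`` a second time adds the new gradients to the
values already stored in ``.grad`` (a minimal illustration)::
   import torch
   x = torch.ones(3, requires_grad=True)
   (2 * x).sum().backward()
   print(x.grad)  # tensor([2., 2., 2.])
   (2 * x).sum().backward()
   print(x.grad)  # tensor([4., 4., 4.]) -- accumulated, not overwritten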
There are cases where it may be necessary to zero-out the gradients of a
tensor. For example: when you start your training loop, you should zero
out the gradients so that you can perform this tracking correctly.
In this recipe, we will learn how to zero out gradients using the
PyTorch library. We will demonstrate how to do this by training a neural
network on the ``CIFAR10`` dataset built into PyTorch.
Setup
-----
Since we will be training data in this recipe, if you are in a runnable
notebook, it is best to switch the runtime to GPU or TPU.
Before we begin, we need to install ``torch`` and ``torchvision`` if
they aren’t already available.
::
pip install torchvision
"""
######################################################################
# Steps
# -----
#
# Steps 1 through 4 set up our data and neural network for training. The
# process of zeroing out the gradients happens in step 5. If you already
# have your data and neural network built, skip to 5.
#
# 1. Import all necessary libraries for loading our data
# 2. Load and normalize the dataset
# 3. Build the neural network
# 4. Define the loss function
# 5. Zero the gradients while training the network
#
# 1. Import necessary libraries for loading our data
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# For this recipe, we will just be using ``torch`` and ``torchvision`` to
# access the dataset.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
######################################################################
# 2. Load and normalize the dataset
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# PyTorch features various built-in datasets (see the Loading Data recipe
# for more information).
#
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=2)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
######################################################################
# 3. Build the neural network
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# We will use a convolutional neural network. To learn more see the
# Defining a Neural Network recipe.
#
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
######################################################################
# 4. Define a Loss function and optimizer
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Let’s use a Classification Cross-Entropy loss and SGD with momentum.
#
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
######################################################################
# 5. Zero the gradients while training the network
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# This is when things start to get interesting. We simply have to loop
# over our data iterator, and feed the inputs to the network and optimize.
#
# Notice that for each entity of data, we zero out the gradients. This is
# to ensure that we aren’t tracking any unnecessary information when we
# train our neural network.
#
for epoch in range(2): # loop over the dataset multiple times
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
running_loss = 0.0
print('Finished Training')
######################################################################
# You can also use ``model.zero_grad()``. This is the same as using
# ``optimizer.zero_grad()`` as long as all your model parameters are in
# that optimizer. Use your best judgement to decide which one to use.
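# For example, the training step above could equivalently be written as::
#
#    net.zero_grad()
#    outputs = net(inputs)
#    loss = criterion(outputs, labels)
#    loss.backward()
#    optimizer.step()
#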
#
# Congratulations! You have successfully zeroed out gradients in PyTorch.
#
# Learn More
# ----------
#
# Take a look at these other recipes to continue your learning:
#
# - `Loading data in PyTorch <https://pytorch.org/tutorials/recipes/recipes/loading_data_recipe.html>`__
# - `Saving and loading models across devices in PyTorch <https://pytorch.org/tutorials/recipes/recipes/save_load_across_devices.html>`__
| python |
#
# Copyright (c) 2016, deepsense.io
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from future.builtins import object, str
from neptune.generated.swagger_client.path_constants import REST_PATH, WS_PATH
class Address(object):
def __init__(self, host, port):
self.host = host
self.port = port
def __str__(self):
_port_part = ':' + str(self.port) if self.port else ''
return self.host + _port_part
def to_url(self):
return str(self)
def http_url_from_address(address, secure):
protocol = "https://" if secure else "http://"
return protocol + address.to_url()
def rest_url_from_address(address, secure):
return http_url_from_address(address, secure) + REST_PATH
def ws_url_from_address(address, secure):
protocol = "wss://" if secure else "ws://"
return protocol + address.to_url() + WS_PATH
| python |
import sys
sys.path.append('../pycaruna')
import json
import os
from datetime import date, datetime, timedelta
from pycaruna import Caruna, Resolution
def make_min_hour_datetime(date):
return datetime.combine(date, datetime.min.time())
def make_max_hour_datetime(date):
return datetime.combine(date, datetime.max.time()).replace(microsecond=0)
if __name__ == '__main__':
username = os.getenv('CARUNA_USERNAME')
password = os.getenv('CARUNA_PASSWORD')
if username is None or password is None:
raise Exception('CARUNA_USERNAME and CARUNA_PASSWORD must be defined')
client = Caruna(username, password)
client.login()
# Get customer details and metering points so we can get the required identifiers
customer = client.get_user_profile()
metering_points = client.get_metering_points(customer['username'])
# Fetch data from midnight 00:00 7 days ago to 23:59 today
start_time = make_min_hour_datetime(date.today() - timedelta(days=7)).astimezone().isoformat()
end_time = make_max_hour_datetime(date.today()).astimezone().isoformat()
metering_point = metering_points[0]['meteringPoint']['meteringPointNumber']
    consumption = client.get_consumption(customer['username'],
                                         metering_point,
                                         Resolution.DAYS, True,
                                         start_time, end_time)
# Extract the relevant data, filter out days without values (usually the most recent datapoint)
filtered_consumption = [item for item in consumption if item['values']]
mapped_consumption = list(map(lambda item: {
'date': make_max_hour_datetime(
date.today().replace(year=item['year'], month=item['month'], day=item['day'])).isoformat(),
'kwh_total': item['values']['EL_ENERGY_CONSUMPTION#0']['value'],
'kwh_night': item['values']['EL_ENERGY_CONSUMPTION#2']['value'],
'kwh_day': item['values']['EL_ENERGY_CONSUMPTION#3']['value'],
}, filtered_consumption))
print(json.dumps(mapped_consumption))
| python |
import FWCore.ParameterSet.Config as cms
from RecoBTag.Skimming.btagDijet_SkimPaths_cff import *
from RecoBTag.Skimming.btagElecInJet_SkimPaths_cff import *
from RecoBTag.Skimming.btagMuonInJet_SkimPaths_cff import *
from RecoBTag.Skimming.btagGenBb_SkimPaths_cff import *
| python |
import math
def get_divisors(n):
    divisors = 0
    limit = math.sqrt(n)
    i = 1
    while i <= limit:
        if n % i == 0:
            # i and n // i form a divisor pair; a perfect-square root counts only once
            divisors += 1 if i * i == n else 2
        i += 1
    return divisors
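# Walk the triangle numbers (1, 3, 6, 10, ...) until one has at least 500 divisors
# (Project Euler problem 12).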
triangle = 1
counter = 2
testing = True
while testing:
if get_divisors(triangle) >= 500:
print(triangle)
testing = False
triangle += counter
counter += 1 | python |
#
# ECE 5725 final project
# RPi Robot Mover
# Fall 2021
# Authors: Xu Hai (xh357), Yaqun Niu (yn232)
#
import cv2
import colorList
import picamera
import io
import os
import time
import threading
import numpy as np
from piecamera import PieCamera
import pygame.mixer
# Capture the main color in front of the camera for one frame
def get_color(frame):
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
maxsum = -100
color = None
color_dict = colorList.getColorList()
# Image process to get
for d in color_dict:
mask = cv2.inRange(hsv, color_dict[d][0], color_dict[d][1])
cv2.imwrite(d + '.jpg', mask)
binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
binary = cv2.dilate(binary, None, iterations=2)
cnts, h = cv2.findContours(binary.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]
sum = 0
for c in cnts:
sum += cv2.contourArea(c)
if sum > maxsum:
maxsum = sum
color = d
return color
# Get the hsv of the main color in front of the camera during the period
def get_hsv():
# Load color hsv from a pre-built color list
color_dict = colorList.getColorList()
camera = PieCamera()
key = -1
result_1 = "None"
i = 0
same_color = True
# Play the sound to inform the user
# that the robot starts to capture the color
pygame.mixer.init()
pygame.mixer.music.load(os.getcwd() + "/sound/test.wav")
pygame.mixer.music.play(-1)
time.sleep(1)
pygame.mixer.music.stop()
# Make sure the robot get the main color during the period
while key == -1:
ret, frame = camera.read()
if ret is True and same_color:
result = get_color(frame)
if result == result_1:
i += 1
if i >= 50:
same_color = False
print(result)
# Play the sound to inform the user
# that the robot has captured the color
pygame.mixer.music.load(os.getcwd() + "/sound/success.wav")
pygame.mixer.music.play(-1)
time.sleep(2)
pygame.mixer.music.stop()
break
else:
i = 0
result_1 = result
# Close the camera to release the resource
camera.close()
return result
| python |
#!/usr/bin/env python
#
# PyUSBtmc
# get_data.py
#
# Copyright (c) 2011 Mike Hadmack
# This code is distributed under the MIT license
import numpy
import sys
from matplotlib import pyplot
from pyusbtmc import RigolScope
""" Capture data from Rigol oscilloscope and write to a file
usage: python save_channel.py <filename>
if filename is not given STDOUT will be used"""
try:
filename = sys.argv[1]
except:
filename = ""
if filename == "--help":
    print """Usage: %s [filename]\n  Reads both traces from oscilloscope and writes as ASCII tabular data to filename. If no filename is given the program outputs to STDOUT. STDOUT can be directed into a file or piped into another application. For example:\n %s myfile\n %s > myfile\n %s | ./plot_data.py""" % ((sys.argv[0],) * 4)
sys.exit(1)
print filename
scope = RigolScope("/dev/usbtmc0")
scope.grabData()
scope.writeWaveformToFile(filename)
scope.close()
| python |
"""Script to load model from file"""
import pickle
from sympy.utilities.lambdify import lambdify
from mihifepe.simulation import model
# pylint: disable = invalid-name
config_filename = "GEN_MODEL_CONFIG_FILENAME_PLACEHOLDER" # This string gets replaced by name of config file during simulation
with open(config_filename, "rb") as config_file:
model_filename = pickle.load(config_file)
noise_multiplier = pickle.load(config_file)
noise_type = pickle.load(config_file)
with open(model_filename, "rb") as model_file:
sym_vars = pickle.load(model_file)
sym_features, sym_noise, sym_model_fn = sym_vars
model_fn = lambdify([sym_features, sym_noise], sym_model_fn, "numpy")
model = model.Model(model_fn, noise_multiplier, noise_type)
| python |
from django.urls import path
from . import views
urlpatterns=[
path('', views.index,name='index'),
path('login/',views.login, name='login'),
path('register/', views.register, name='register'),
path('profile/', views.profile, name='profile'),
path('logout/', views.logout, name='logout'),
path('notifications/', views.notifications, name='notifications'),
path('homepage/<int:auth_id>/', views.homepage, name='homepage'),
]
| python |
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date
lower_limit = 25
date = str(date.today())
df = pd.read_excel(date + ".xlsx")
lower_limit_list = []
for i in df['Sr No.']:
lower_limit_list.append(lower_limit)
plt.figure()
plt.subplot(3, 1, (1, 2))
plt.plot(df['Sr No.'], df['Ready To Buy Price'], color='r', label='Sold')
plt.plot(df['Sr No.'], df['Ready To Sell Price'], color='g', label='Bought')
plt.grid(b=True, which='both', axis='both')
plt.legend()
plt.ylabel('Price')
plt.title('Summary of ' + date)
plt.subplot(3, 1, 3)
plt.plot(df['Sr No.'], df['RSI'], color='blue', label='RSI')
plt.plot(df['Sr No.'], lower_limit_list, color='yellow')
plt.grid(b=True, which='both', axis='both')
plt.legend()
plt.xlabel('minute')
plt.ylabel('RSI')
plt.savefig('graph.png')
| python |
import cv2
import urllib.request as req
url = 'http://uta.pw/shodou/img/28/214.png'
req.urlretrieve(url, '../datasets/opencv/downimage.png')
img = cv2.imread('../datasets/opencv/downimage.png')
print(img)
import matplotlib.pyplot as plt
img = cv2.imread('../datasets/opencv/test.jpg')
plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
plt.show()
cv2.imwrite('../datasets/opencv/result/test.png',img)
# img_resize
img2 = cv2.resize(img, (600,300))
cv2.imwrite('../datasets/opencv/result/test_resize.png', img2)
plt.imshow(cv2.cvtColor(img2, cv2.COLOR_BGR2RGB))
plt.show()
# img_crop
img3 = img[150:450, 150:450]
cv2.imwrite('../datasets/opencv/result/test_crop.png', img3)
plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
plt.show()
| python |
def menu():
print("")
print("")
print(" Welcome to Hotel Database Management Software")
print("")
print("")
print("1-Add new customer details")
print("2-Modify already existing customer details")
print("3-Search customer details")
print("4-View all customer details")
print("5-Delete customer details")
print("6-Exit the program")
print("")
user_input=int(input("Enter your choice(1-6): "))
if user_input==1:
add()
elif user_input==2:
modify()
elif user_input==3:
search()
elif user_input==4:
view()
elif user_input==5:
remove()
elif user_input==6:
exit()
def add():
print("")
Name1=input("Enter your first name: ")
print("")
Name2=input("Enter your last name: ")
print("")
Phone_Num=input("Enter your phone number(without +91): ")
print("")
print("These are the rooms that are currently available")
print("1-Normal (500/Day)")
print("2-Deluxe (1000/Day)")
print("3-Super Deluxe (1500/Day)")
print("4-Premium Deluxe (2000/Day)")
print("")
Room_Type=int(input("Which type you want(1-4): "))
print("")
if Room_Type==1:
x=500
Room_Type="Normal"
elif Room_Type==2:
x=1000
Room_Type='Deluxe'
elif Room_Type==3:
x=1500
Room_Type='Super Deluxe'
elif Room_Type==4:
x=2000
Room_Type='Premium'
Days=int(input("How many days you will stay: "))
Money=x*Days
Money=str(Money)
print("")
print("You have to pay ",(Money))
print("")
Payment=input("Mode of payment(Card/Cash/Online): ").capitalize()
if Payment == "Card":
print("Payment with card")
elif Payment == "Cash":
print("Payment with cash")
elif Payment == "Online":
print("Online payment")
print("")
File=open('Management.txt','r')
string=File.read()
string = string.replace("\'", "\"")
dictionary=json.loads(string)
File.close()
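    # Room numbers start at 501; each new booking is assigned the next number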
if len(dictionary.get('Room'))==0:
Room_num='501'
else:
listt=dictionary.get('Room')
tempp=len(listt)-1
temppp=int(listt[tempp])
Room_num=(1+temppp)
Room_num=str(Room_num)
print('You have been assigned Room Number',Room_num)
print(f"name : {Name1} {Name2}")
print(f"phone number : +91{Phone_Num}")
print(f"Room type : {Room_Type}")
print(f"Stay (day) : {Days}")
dictionary['First_Name'].append(Name1)
dictionary['Last_Name'].append(Name2)
dictionary['Phone_num'].append(Phone_Num)
dictionary['Room_Type'].append(Room_Type)
dictionary['Days'].append(Days)
dictionary['Price'].append(Money)
dictionary['Room'].append(Room_num)
File=open("Management.txt",'w',encoding="utf-8")
File.write(str(dictionary))
File.close()
print("")
print("Your data has been successfully added to our database.")
exit_menu()
import os
import json
filecheck = os.path.isfile('Management.txt')
if filecheck == False :
File = open("Management.txt", 'a', encoding="utf-8")
temp1 = {'First_Name': [], 'Last_Name': [], 'Phone_num': [], 'Room_Type': [], 'Days': [], 'Price': [], 'Room':[]}
File.write(str(temp1))
File.close()
def modify():
File=open('Management.txt','r')
string=File.read()
string = string.replace("\'", "\"")
dictionary=json.loads(string)
File.close()
dict_num=dictionary.get("Room")
dict_len=len(dict_num)
if dict_len==0:
print("")
print("There is no data in our database")
print("")
menu()
else:
print("")
Room=(input("Enter your Room Number: "))
listt=dictionary['Room']
index=int(listt.index(Room))
print("")
print("1-Change your first name")
print("2-Change your last name")
print("3-Change your phone number")
print("")
choice=(input("Enter your choice: "))
print("")
File=open("Management.txt",'w',encoding="utf-8")
if choice == str(1):
user_input=input('Enter New First Name: ')
listt1=dictionary['First_Name']
listt1[index]=user_input
dictionary['First_Name']=None
dictionary['First_Name']=listt1
File.write(str(dictionary))
File.close()
elif choice == str(2):
user_input = input('Enter New Last Name: ')
listt1 = dictionary['Last_Name']
listt1[index] = user_input
dictionary['Last_Name'] = None
dictionary['Last_Name'] = listt1
File.write(str(dictionary))
File.close()
elif choice == str(3):
user_input = input('Enter New Phone Number: ')
listt1 = dictionary['Phone_num']
listt1[index] = user_input
dictionary['Phone_num'] = None
dictionary['Phone_num'] = listt1
File.write(str(dictionary))
File.close()
print("")
print("Your data has been successfully updated")
exit_menu()
def search():
File=open('Management.txt','r')
string=File.read()
string = string.replace("\'", "\"")
dictionary=json.loads(string)
File.close()
dict_num=dictionary.get("Room")
dict_len=len(dict_num)
if dict_len==0:
print("")
print("There is no data in our database")
print("")
menu()
else:
print("")
Room = (input("Enter your Room Number: "))
print("")
listt = dictionary['Room']
index = int(listt.index(Room))
listt_fname=dictionary.get('First_Name')
listt_lname=dictionary.get('Last_Name')
listt_phone=dictionary.get('Phone_num')
listt_type=dictionary.get('Room_Type')
listt_days=dictionary.get('Days')
listt_price=dictionary.get('Price')
listt_num=dictionary.get('Room')
print("")
print("First Name:",listt_fname[index])
print("Last Name:",listt_lname[index])
print("Phone number:",listt_phone[index])
print("Room Type:",listt_type[index])
print('Days staying:',listt_days[index])
print('Money paid:',listt_price[index])
print('Room Number:',listt_num[index])
exit_menu()
def remove():
File=open('Management.txt','r')
string=File.read()
string = string.replace("\'", "\"")
dictionary=json.loads(string)
File.close()
dict_num=dictionary.get("Room")
dict_len=len(dict_num)
if dict_len==0:
print("")
print("There is no data in our database")
print("")
menu()
else:
print("")
Room = (input("Enter your Room Number: "))
print("")
listt = dictionary['Room']
index = int(listt.index(Room))
listt_fname = dictionary.get('First_Name')
listt_lname = dictionary.get('Last_Name')
listt_phone = dictionary.get('Phone_num')
listt_type = dictionary.get('Room_Type')
listt_days = dictionary.get('Days')
listt_price = dictionary.get('Price')
listt_num = dictionary.get('Room')
del listt_fname[index]
del listt_lname[index]
del listt_phone[index]
del listt_type[index]
del listt_days[index]
del listt_price[index]
del listt_num[index]
dictionary['First_Name'] = None
dictionary['First_Name'] = listt_fname
dictionary['Last_Name']= None
dictionary['Last_Name']= listt_lname
dictionary['Phone_num']= None
dictionary['Phone_num']=listt_phone
dictionary['Room_Type']=None
dictionary['Room_Type']=listt_type
dictionary['Days']=None
dictionary['Days']=listt_days
dictionary['Price']=None
dictionary['Price']=listt_price
dictionary['Room']=None
dictionary['Room']=listt_num
file1=open('Management.txt','w',encoding="utf-8")
file1.write(str(dictionary))
file1.close()
print("Details has been removed successfully")
exit_menu()
def view():
File=open('Management.txt','r')
string=File.read()
string = string.replace("\'", "\"")
dictionary=json.loads(string)
File.close()
dict_num=dictionary.get("Room")
dict_len=len(dict_num)
if dict_len==0:
print("")
print("There is no data in our database")
print("")
menu()
else:
listt = dictionary['Room']
a = len(listt)
index=0
while index!=a:
listt_fname = dictionary.get('First_Name')
listt_lname = dictionary.get('Last_Name')
listt_phone = dictionary.get('Phone_num')
listt_type = dictionary.get('Room_Type')
listt_days = dictionary.get('Days')
listt_price = dictionary.get('Price')
listt_num = dictionary.get('Room')
print("")
print("First Name:", listt_fname[index])
print("Last Name:", listt_lname[index])
print("Phone number:", listt_phone[index])
print("Room Type:", listt_type[index])
print('Days staying:', listt_days[index])
print('Money paid:', listt_price[index])
print('Room Number:', listt_num[index])
print("")
index=index+1
exit_menu()
def exit():
print("")
print(' Thanks for visiting')
print(" Goodbye")
def exit_menu():
print("")
print("Do you want to exit the program or return to main menu")
print("1-Main Menu")
print("2-Exit")
print("")
user_input=int(input("Enter your choice: "))
if user_input==2:
exit()
elif user_input==1:
menu()
try:
menu()
except KeyboardInterrupt as exit:
print("\nexiting...!")
# menu() | python |
#!/usr/bin/env python
# coding: utf-8
# pipenv install grpcio==1.42.0 flask gunicorn keras-image-helper
# USE:
# (base) ➜ ~ curl -X POST -d "{\"url\":\"http://bit.ly/mlbookcamp-pants\"}" -H 'Content-Type: application/json' localhost:9696/predict
# {
# "dress": -1.8682903051376343,
# "hat": -4.761245250701904,
# "longsleeve": -2.316983461380005,
# "outwear": -1.0625708103179932,
# "pants": 9.887161254882812,
# "shirt": -2.8124334812164307,
# "shoes": -3.6662826538085938,
# "shorts": 3.200361728668213,
# "skirt": -2.6023378372192383,
# "t-shirt": -4.835046291351318
# }
# Call server:
# curl -X POST -d "{\"url\":\"http://bit.ly/mlbookcamp-pants\"}" -H 'Content-Type: application/json' localhost:9696/predict
#create grpc client, load predict image and return prediction
import grpc
import os
from tensorflow_serving.apis import predict_pb2
from tensorflow_serving.apis import prediction_service_pb2_grpc
from keras_image_helper import create_preprocessor
from proto import np_to_protobuf
model_classes = ['dress', 'hat', 'longsleeve', 'outwear', 'pants', 'shirt', 'shoes', 'shorts', 'skirt', 't-shirt']
tf_host = os.getenv("TF_SERVING_HOST", "localhost:8500")
print("TF host on " + str(tf_host))
channel = grpc.insecure_channel(tf_host)
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
preprocessor = create_preprocessor('xception', target_size=(299,299))
def prepare_request(inputX):
pb_request = predict_pb2.PredictRequest()
pb_request.model_spec.name="clothing-model"
pb_request.model_spec.signature_name = "serving_default"
pb_request.inputs['input_8'].CopyFrom(np_to_protobuf(inputX))
return pb_request
def prepare_response(pb_response):
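    # Pair the raw output values from TF Serving with the clothing class labels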
preds = pb_response.outputs['dense_7'].float_val
return dict(zip(model_classes, preds))
def predict(url):
# url = 'http://bit.ly/mlbookcamp-pants'
X = preprocessor.from_url(url)
request = prepare_request(X)
pb_response = stub.Predict(request, timeout=20.0)
return prepare_response(pb_response)
from flask import Flask
from flask import request
from flask import jsonify
app = Flask('script')
@app.route('/predict', methods=['POST'])
def predict_endpoint():
print("Request: "+str(request))
data = request.get_json()
print("Request json: "+str(data))
url = data['url']
result = predict(url)
json_result = jsonify(result)
print("Response data: "+str(result))
print("Response: "+str(json_result))
return json_result
if __name__=='__main__':
# result = predict('http://bit.ly/mlbookcamp-pants')
# print(result)
app.run(debug=True, host='0.0.0.0', port=9696) | python |
#!/usr/bin/python3
"""Manage the image disk."""
import os
import argparse
from azure.mgmt.compute import ComputeManagementClient
from azure.common.credentials import ServicePrincipalCredentials
def connect():
"""Set up Azure Login Credentials from Environmental Variables."""
credentials = ServicePrincipalCredentials(
client_id=os.environ.get('ARM_CLIENT_ID'),
secret=os.environ.get('ARM_CLIENT_SECRET'),
tenant=os.environ.get('ARM_TENANT_ID')
)
compute_client = ComputeManagementClient(credentials, os.environ.get('ARM_SUBSCRIPTION_ID'))
imageName = 'nf-' + os.environ.get('IMAGE_TYPE') + '-' + os.environ.get('IMAGE_VERSION')
return compute_client, imageName
def image_create():
"""Try to create an image from a blob storage disk."""
imageId = os.environ.get('IMAGE_ID')
compute_client, imageName = connect()
async_image_creation = compute_client.images.create_or_update(
os.environ.get('GROUP_NAME'),
imageName,
{
'location': os.environ.get('DISK_LOC'),
'hyper_vgeneration': 'v1',
'storage_profile': {
'os_disk': {
'os_type': 'Linux',
'os_state': "Generalized",
'blob_uri': "https://clouddevimages.blob.core.windows.net/system/Microsoft.Compute/Images/builds/%s-osDisk.%s.vhd" % (imageName, imageId),
'caching': "ReadWrite"
}
}
}
)
async_image_creation.wait()
print(async_image_creation.result())
def image_delete():
"""Try to delete create image from blob storage disk."""
compute_client, imageName = connect()
async_image_deletion = compute_client.images.delete(
os.environ.get('GROUP_NAME'),
imageName,
custom_headers=None,
raw=False,
polling=True
)
async_image_deletion.wait()
print(async_image_deletion.result())
def image_get():
"""Try to show details of the created image from blob storage disk."""
compute_client, imageName = connect()
async_image_get = compute_client.images.get(
os.environ.get('GROUP_NAME'),
imageName,
custom_headers=None,
raw=False,
polling=True
)
print(async_image_get)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='New Image Operations')
parser.add_argument("--action", choices=['create', 'delete', 'get'],
help="Action you want to do on the new image create, delete",
required=True)
args = parser.parse_args()
if args.action == 'create':
image_create()
if args.action == 'delete':
image_delete()
if args.action == 'get':
image_get()
| python |
from django.urls import path
from blog.views import *
from blog.feeds import LatestEntriesFeed
app_name = 'blog'
urlpatterns = [
path('' , blog_view , name="index"),
path('<int:pid>' , blog_single , name="single"),
path('category/<str:cat_name>' , blog_view , name="category"),
path('tag/<str:tag_name>' , blog_view , name="tag"),
path('author/<str:author_username>' , blog_view , name='author'),
path('search/',blog_search , name='search'),
path('rss/feed/', LatestEntriesFeed()),
path('test' , test , name='test')
] | python |
from django.conf.urls import patterns, url
from django.contrib import admin
from django.views.generic import TemplateView
admin.autodiscover()
urlpatterns = patterns('brainstorming.views',
url(r'^$', 'index', name='home'),
url(r'^(?P<brainstorming_id>\w{12})/notification$', 'notification', name='notification'),
url(r'^(?P<brainstorming_id>\w{12})/edit$', 'edit', name='edit'),
url(r'^(?P<brainstorming_id>\w{12})/export$', 'export', name='export'),
url(r'^(?P<brainstorming_id>\w{12})/?', 'brainstorming', name='brainstorming'),
url(r'^.*$', TemplateView.as_view(template_name="index.html")),
) | python |
import torch
from torch import nn
from .mobilenet_v2 import MobileNetV2
class Block(nn.Module):
def __init__(self, num_residual_layers, in_channels, out_channels,
kernel_size=3, stride=2, padding=1, remove_last_relu=False):
super(Block, self).__init__()
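        # remove_last_relu drops the trailing PReLU; SpereFaceNet enables it only
        # for the final block, whose output feeds the embedding layer directly.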
if remove_last_relu and num_residual_layers == 0:
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_channels)
)
else:
self.conv = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=padding),
nn.BatchNorm2d(out_channels),
nn.PReLU()
)
layers = []
for i in range(num_residual_layers):
if remove_last_relu and i + 1 == num_residual_layers:
layer = nn.Sequential(
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.PReLU(),
nn.BatchNorm2d(out_channels),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(out_channels)
)
else:
layer = nn.Sequential(
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(out_channels),
nn.PReLU(),
nn.Conv2d(out_channels, out_channels, kernel_size=kernel_size, padding=padding),
nn.BatchNorm2d(out_channels),
nn.PReLU()
)
layers.append(layer)
self.layers = nn.ModuleList(layers)
def forward(self, x):
x = self.conv(x)
for layer in self.layers:
residual = layer(x)
x = x + residual
return x
class AngularLinear(nn.Module):
def __init__(self, in_channels, out_channels):
super(AngularLinear, self).__init__()
self.fc = nn.Linear(in_channels, out_channels, bias=False)
def forward(self, x):
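        # Divide by each class weight's L2 norm so every logit equals
        # ||x|| * cos(theta) between the feature and that class weight.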
logits = self.fc(x)
weight_norm = (self.fc.weight ** 2).sum(dim=1, keepdim=True).sqrt()
logits = logits / weight_norm.t()
return logits
class SpereFaceNet(nn.Module):
def __init__(self, input_size, dim: int, num_residual_layers_per_block, out_channels_per_block):
super(SpereFaceNet, self).__init__()
blocks = []
in_channels = 3
for i, (num, out_channels) in enumerate(zip(num_residual_layers_per_block, out_channels_per_block)):
remove_last_relu = (i + 1 == len(num_residual_layers_per_block))
block = Block(num, in_channels, out_channels, remove_last_relu=remove_last_relu)
in_channels = out_channels
blocks.append(block)
self.blocks = nn.ModuleList(blocks)
if isinstance(input_size, int):
input_size = (input_size, input_size)
assert len(input_size) == 2
assert input_size[0] % 16 == 0
assert input_size[1] % 16 == 0
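        # Four stride-2 blocks downsample the input by a factor of 16.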
feature_map_size = (int(input_size[0]/16), int(input_size[1]/16))
self.fc = nn.Linear(feature_map_size[0] * feature_map_size[1] * out_channels_per_block[-1], dim)
def forward(self, x):
for block in self.blocks:
x = block(x)
x = x.view(x.size(0), -1)
features = self.fc(x)
return features
class SphereFace(nn.Module):
def __init__(self, base_net, dim: int, num_classes: int=None):
super(SphereFace, self).__init__()
self.base_net = base_net
if num_classes is not None:
self.fc = AngularLinear(dim, num_classes)
def forward(self, x):
x = self.base_net(x)
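        # During training also return the class logits; at inference only the embedding is returned.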
if self.training:
# normalize weight per class
logits = self.fc(x)
return x, logits
else:
return x
def save(self, model_path: str):
torch.save(self.state_dict(), model_path)
def load(self, model):
state_dict = torch.load(model, map_location=lambda storage, loc: storage)
if not hasattr(self, 'fc'):
state_dict = {k: v for k, v in state_dict.items() if k not in set(["fc.fc.weight"])}
self.load_state_dict(state_dict)
def mobilenet_sphereface(dim=512, input_size=160, num_classes: int=None):
base_net = MobileNetV2(n_class=dim, input_size=input_size, width_mult=1.,
use_batch_norm=True, onnx_compatible=True)
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface4(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [0, 0, 0, 0], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface10(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [0, 1, 2, 0], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface20(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [1, 2, 4, 1], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface36(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [1, 4, 8, 2], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
def sphereface64(dim=512, input_size=(112, 96), num_classes: int=None):
base_net = SpereFaceNet(input_size, dim, [3, 8, 16, 3], [64, 128, 256, 512])
net = SphereFace(base_net, dim, num_classes)
return net
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains tpDcc-tools-scripteditor client implementation
"""
from __future__ import print_function, division, absolute_import
from tpDcc.core import client
class ScriptEditorClient(client.DccClient, object):
PORT = 43451
def get_dcc_completion_directory(self):
"""
Returns directory where DCC API completion stubs files are located
:return: str
"""
cmd = {
'cmd': 'get_dcc_completion_directory'
}
reply_dict = self.send(cmd)
if not self.is_valid_reply(reply_dict):
return False
return reply_dict['result']
def get_auto_import(self):
cmd = {
'cmd': 'get_auto_import'
}
reply_dict = self.send(cmd)
if not self.is_valid_reply(reply_dict):
return None
return reply_dict['result']
def wrap_dropped_text(self, namespace, text, alt_modifier=False):
cmd = {
'cmd': 'wrap_dropped_text',
'namespace': namespace,
'text': text,
'alt_modifier': alt_modifier,
}
reply_dict = self.send(cmd)
if not self.is_valid_reply(reply_dict):
return None
return reply_dict['result']
def completer(self, namespace, line):
cmd = {
'cmd': 'completer',
'namespace': namespace,
'line': line
}
reply_dict = self.send(cmd)
if not self.is_valid_reply(reply_dict):
return None, None
return reply_dict['result']
| python |
import re, sys
import base64
import json
def start_item(line):
regex = r"<item><type>(([A-Fa-f0-9]{2}){4})</type><code>(([A-Fa-f0-9]{2}){4})</code><length>(\d*)</length>"
matches = re.findall(regex, line)
typ = matches[0][0].decode('hex')
code = matches[0][2].decode('hex')
length = int(matches[0][4])
return (typ, code, length)
def start_data(line):
try:
assert line == '<data encoding="base64">\n'
except AssertionError:
if line.startswith("<data"):
return 0
return -1
return 0
def read_data(line, length):
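    # base64 encodes every 3 raw bytes as 4 characters, so only that many
    # characters of the line belong to this item's payload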
b64size = 4*((length+2)/3);
try:
data = base64.b64decode(line[:b64size])
except TypeError:
data = ""
pass
return data
def guessImageMime(magic):
if magic.startswith('\xff\xd8'):
return 'image/jpeg'
    elif magic.startswith('\x89PNG\r\n\x1a\n'):
return 'image/png'
else:
return "image/jpg"
if __name__ == "__main__":
metadata = {}
fi = sys.stdin
while True:
line = sys.stdin.readline()
if not line: #EOF
break
sys.stdout.flush()
if not line.startswith("<item>"):
continue
typ, code, length = start_item(line)
data = ""
if (length > 0):
r = start_data(sys.stdin.readline())
if (r == -1):
continue
data = read_data(sys.stdin.readline(), length)
# Everything read
if (typ == "core"):
if (code == "asal"):
metadata['Album Name'] = data
elif (code == "asar"):
metadata['Artist'] = data
#elif (code == "ascm"):
# metadata['Comment'] = data
#elif (code == "asgn"):
# metadata['Genre'] = data
elif (code == "minm"):
metadata['Title'] = data
#elif (code == "ascp"):
# metadata['Composer'] = data
#elif (code == "asdt"):
# metadata['File Kind'] = data
#elif (code == "assn"):
# metadata['Sort as'] = data
#elif (code == "clip"):
# metadata['IP'] = data
if (typ == "ssnc" and code == "snam"):
metadata['snam'] = data
if (typ == "ssnc" and code == "prgr"):
metadata['prgr'] = data
if (typ == "ssnc" and code == "pfls"):
metadata = {}
print json.dumps({})
sys.stdout.flush()
if (typ == "ssnc" and code == "pend"):
metadata = {}
print json.dumps({})
sys.stdout.flush()
if (typ == "ssnc" and code == "prsm"):
metadata['pause'] = False
if (typ == "ssnc" and code == "pbeg"):
metadata['pause'] = False
if (typ == "ssnc" and code == "PICT"):
if (len(data) == 0):
print json.dumps({"image": ""})
else:
mime = guessImageMime(data)
print json.dumps({"image": "data:" + mime + ";base64," + base64.b64encode(data)})
sys.stdout.flush()
if (typ == "ssnc" and code == "mden"):
print json.dumps(metadata)
sys.stdout.flush()
metadata = {}
| python |
# The Hirst Paining Project
# Create a painting with 10 by 10 rows of spots
# Each dot should be 20 in size and 50 spacing between them
from turtle import Turtle, Screen
import random
def main():
# Color palette
color_list = [
(203, 164, 109),
(154, 75, 48),
(223, 201, 135),
(53, 94, 125),
(173, 153, 39),
(137, 31, 20),
(133, 163, 185),
(199, 92, 72),
(46, 123, 87),
(72, 44, 36),
(13, 98, 72),
(145, 179, 147),
(93, 73, 75),
(233, 176, 165),
(161, 143, 159),
(54, 46, 51),
(184, 205, 172),
(35, 61, 75),
(21, 85, 90),
(153, 17, 19),
(84, 147, 130),
(39, 66, 90),
(184, 89, 93),
(11, 73, 67),
(105, 127, 155),
(218, 177, 182)
]
# Define turtle and screen
turtle = Turtle()
screen = Screen()
# Turtle speed
turtle.speed(0)
# Hide turtle
turtle.hideturtle()
# Setup screen mode to 255
screen.colormode(255)
# Make the turtle start from left bottom corner
turtle.penup()
turtle.sety(-300)
for j in range(10):
turtle.penup()
turtle.sety(turtle.ycor() + 50)
turtle.setx(-250)
for i in range(10):
turtle.color(random.choice(color_list))
turtle.dot(20)
turtle.penup()
turtle.forward(50)
turtle.pendown()
screen.exitonclick()
if __name__ == "__main__":
main()
| python |
import os
import db
from datetime import datetime
import logging
from config import Environment
from fuzzywuzzy import process, fuzz
import nltk
import multiprocessing
ev = Environment()
logger = logging.getLogger(ev.app_name)
# nltk punkt sentence trainer.
nltk.download('punkt')
detector = nltk.data.load('tokenizers/punkt/english.pickle')
def create_diff(data_dict):
current_report_file = data_dict['current_file']
last_report_file = data_dict['old_file']
record_id = data_dict['id']
with open(os.path.join(ev.output_cleaned_files, current_report_file)) as current_report:
current_report_list = current_report.read().splitlines()
with open(os.path.join(ev.output_cleaned_files, last_report_file)) as current_report:
last_report_list = current_report.read().splitlines()
# remove exact lines from each other
current_report_dedup_list = [line for line in current_report_list if line not in last_report_list]
last_report_dedup_list = [line for line in last_report_list if line not in current_report_list]
# list of sentences in each file
current_report_sentences = list(detector.tokenize(' '.join(current_report_dedup_list).strip()))
last_report_sentences = list(detector.tokenize(' '.join(last_report_dedup_list).strip()))
# for each new sentence in the report look to see if we have a fuzzy match of 85% of better against any
# sentence in the older report. If not consider it a new sentence.
new_sentences = list()
for sentence in current_report_sentences:
match = process.extractOne(sentence, last_report_sentences, score_cutoff=85, scorer=fuzz.QRatio)
if match is None:
new_sentences.append(sentence)
if new_sentences:
new_sentence = '\n'.join(new_sentences)
# Google Natural Language will not accept an input greater than 60K characters
if len(new_sentence) > 60000:
new_sentence = new_sentence[:59999]
conn = db.connect_to_db()
cursor = conn.cursor()
sql = 'UPDATE marko_finance SET difference_from_last_report=? WHERE id=?'
cursor.execute(sql, (new_sentence, record_id))
conn.commit()
conn.close()
logger.info(f'Difference logged between {current_report_file} and {last_report_file}')
return
def get_differences():
logger.info(f'Started processing differences.')
conn = db.connect_to_db()
sql = '''SELECT
id,
cik,
file_name,
date_accepted,
difference_from_last_report,
prc_change2
FROM
marko_finance
ORDER BY
cik,
date_accepted'''
cursor = conn.cursor()
cursor.execute(sql)
results = cursor.fetchall()
old_cik = None
old_date = None
old_filename = None
find_differences_list = list()
for record in results:
(record_id, cik, filename, date_accepted, difference, prc_change) = record
converted_date = datetime.strptime(date_accepted, '%Y-%m-%d %H:%M:%S')
if prc_change and difference is None and cik == old_cik:
week_difference = (converted_date - old_date).days / 7
if 9 <= week_difference <= 17:
find_differences_list.append({
'id': record_id,
'cik': cik,
'current_file': filename,
'old_file': old_filename
})
old_cik = cik
old_date = converted_date
old_filename = filename
conn.close()
with multiprocessing.Pool(processes=ev.number_of_cores) as pool:
pool.map(create_diff, find_differences_list)
pool.close()
pool.join()
logger.info(f'Finished processing differences.')
| python |
"""
Manual script for merging csvs into one large CSV per state with plan info.
FIXME: Incorporate this into a script with arguments.
"""
import gc
import logging
import pandas as pd
logging.basicConfig(level=logging.INFO)
HEALTHCARE_GOV_PATH = '/home/jovyan/work/data/healthcare_gov'
state = 'FL'
# Hard coded due to lack of Company info in Machine Readable PUF.
# TODO: Automate this dictionary creation.
issuer_dict = {
'16842': 'BCBS',
'21663': 'Celtic/Ambetter',
'30252': '30252',
'36194': '36194',
'43274': '43274',
'48129': '48129',
'54172': 'Molina',
'56503': '56503',
'93299': '93299',
'98869': '98869',
}
csvs = [HEALTHCARE_GOV_PATH + '/{}/{}.csv'.format(state, issuer) for issuer in issuer_dict.keys()]
logging.info('CSVs being read in: {}'.format(csvs))
dfs = [pd.read_csv(csv) for csv in csvs]
for issuer_id, df in zip(issuer_dict.keys(), dfs):
df['IssuerId'] = int(issuer_id)
df['CompanyName'] = issuer_dict[issuer_id]
logging.info('{} provider dataframes loaded in'.format(len(dfs)))
plans = pd.read_csv(HEALTHCARE_GOV_PATH + '/Plan_Attributes_PUF.csv')
plans = plans[plans.StateCode == state]
# Reduce the number of columns in the plans data.
plans = plans[[
'BusinessYear',
'StateCode',
'IssuerId',
'SourceName',
'ImportDate',
'MarketCoverage',
'DentalOnlyPlan',
'TIN',
'StandardComponentId',
'PlanMarketingName',
'HIOSProductId',
'HPID',
'NetworkId',
'ServiceAreaId',
'FormularyId',
'IsNewPlan',
'PlanType',
'MetalLevel',
'DesignType',
'UniquePlanDesign',
'QHPNonQHPTypeId',
'PlanEffectiveDate',
'PlanExpirationDate',
'NationalNetwork',
'FormularyURL',
'PlanId',
'PlanVariantMarketingName',
'CSRVariationType'
]]
# Reduce to 1 line per Standard Component Id (a.k.a plan_id in provider file).
plans.drop_duplicates(subset=['StandardComponentId'], inplace=True)
plans = plans[plans.DentalOnlyPlan == 'No']
logging.info('Number of rows in plans df: {}'.format(plans.shape[0]))
in_state_plan_ids = set(plans.StandardComponentId)
all_the_plans = pd.concat(dfs)
logging.info('Lines in concatenated provider dataframes: {}'.format(all_the_plans.shape[0]))
all_the_plans = all_the_plans[all_the_plans.Plan_Id.isin(in_state_plan_ids)]
logging.info('Lines in concatenated provider dataframes (in-state): {}'.format(
all_the_plans.shape[0]))
# Reduce memory consumption.
del dfs
gc.collect()
# Join Plan and Provider dataframes.
logging.info('Joining plan and provider dataframes...')
merged = pd.merge(
all_the_plans, plans, how='left', left_on='Plan_Id', right_on='StandardComponentId')
logging.info('Joining complete!')
logging.info('Number of lines in the final merged dataframe: {}'.format(merged.shape[0]))
del all_the_plans
gc.collect()
target_path = '{}/all_of_{}.csv'.format(HEALTHCARE_GOV_PATH, state)
merged.to_csv(target_path, index=False)
logging.info('{} lines of data for {} written to csv'.format(merged.shape[0], state))
| python |
import cairo
import vector
import rectangle
from .widget import Widget
class CheckBox(Widget):
_on_image = None
_off_image = None
_clicked_image = None
_disabled_image = None
_clicked = False
_moused = False
clickable = True
mousable = True
text = None
toggled_responder = None
is_toggled = None
is_disabled = None
def __init__(self, resource_manager, text, toggled):
Widget.__init__(self, resource_manager)
self._on_image = resource_manager.load_image("res/bzcoptionbuttonon.png")
self.image_resolution = vector.Resolution(self._on_image.get_width(), self._on_image.get_height())
self._off_image = resource_manager.load_image("res/bzcoptionbuttonoff.png")
self._clicked_image = resource_manager.load_image("res/bzcoptionbuttonclk.png")
self._disabled_image = resource_manager.load_image("res/bzcoptionbuttondis.png")
self.text = text
self.toggled_responder = toggled
self.is_toggled = False
self.is_disabled = False
def get_dimensions(self):
return vector.Vector(self._off_image.get_width(), self._off_image.get_height())
def get_rectangle(self):
return rectangle.Rectangle(self.position, self.get_dimensions())
def draw(self, cr, window, resource_manager):
image = self._off_image
if (not self.is_disabled and self._moused is True):
image = self._on_image
if (self.is_toggled is True):
image = self._clicked_image
cr.save()
cr.set_source_surface(self._disabled_image, self.position.x, self.position.y)
cr.paint()
# Draw the image first
if (not self.is_disabled):
cr.set_source_surface(image, self.position.x + 12, self.position.y)
cr.paint()
cr.set_source_rgb(0, 1, 0)
# Draw the text
cr.select_font_face("Arial", cairo.FONT_SLANT_NORMAL, cairo.FONT_WEIGHT_NORMAL)
cr.set_font_size(13)
(x, y, width, height, dx, dy) = cr.text_extents(self.text)
cr.move_to(self.position.x + self.image_resolution.x + 25, self.position.y + (self.image_resolution.y / 2) + (height/ 2))
cr.show_text(self.text)
cr.restore()
def on_mouse_click(self, window, resource_manager, location):
rect = self.get_rectangle()
if (not self.is_disabled and rect.contains_point(location)):
self._clicked = True
def on_mouse_release(self, window, resource_manager, location):
rect = self.get_rectangle()
if (not self.is_disabled and rect.contains_point(location)):
self._clicked = False
self.is_toggled = not self.is_toggled
if (self.toggled_responder is not None):
self.toggled_responder(self, window, resource_manager, self.is_toggled)
def on_mouse_move(self, window, resource_manager, location):
rect = self.get_rectangle()
if (rect.contains_point(location)):
self._moused = True
return
self._moused = False
| python |
from django.shortcuts import render
from django.views.generic.base import View
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import login, logout
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from .forms import LoginForm, SettingsForm
from django.utils.translation import ugettext_lazy as _
class LoginView(View):
""" Backend for the login template in login.html """
template_login = "login.html"
def get(self, *args, **kwargs):
if self.request.user.is_authenticated:
return forward_if_authenticated(self.request)
next = None
if "next" in self.request.GET:
next = self.request.GET.get("next")
form = LoginForm()
content = {
"form": form,
"next": next
}
return render(self.request, self.template_login, content)
def post(self, *args, **kwargs):
# create a form instance and populate it with data from the request:
form = LoginForm(self.request.POST)
next = None
if "next" in self.request.GET:
next = self.request.GET.get("next")
if form.is_valid():
user = form.login(self.request)
if user is not None:
login(self.request, user)
return forward_if_authenticated(self.request)
content = {
"form": form,
"next": next
}
return render(self.request, self.template_login, content)
class LogoutView(View):
""" Backend for the logout template in logout.html """
def get(self, *args, **kwargs):
logout(self.request)
return HttpResponseRedirect(reverse('dashboard'))
def post(self, *args, **kwargs):
pass
class SettingsView(LoginRequiredMixin, View):
""" Backend for the settings template in settings.html """
template_settings = "settings.html"
def get(self, *args, **kwargs):
user = self.request.user
form = SettingsForm(
{'sending_email_once_a_day': user.sending_email_once_a_day})
content = {
"form": form
}
return render(self.request, self.template_settings, content)
def post(self, *args, **kwargs):
user = self.request.user
form = SettingsForm(self.request.POST)
if form.is_valid():
# Enables daily summary email
user.sending_email_once_a_day = form.cleaned_data[
"sending_email_once_a_day"]
user.save()
messages.success(self.request,
_('Einstellungen wurden erfolgreich übernommen!'))
else:
messages.error(self.request,
_('Die Einstellung konnte nicht übernommen werden!'))
content = {
"form": form
}
return render(self.request, self.template_settings, content)
def forward_if_authenticated(request):
"""
If the user is logged in successfully he will be forwarded to the page he
tried to access. If no page exists he will be forwarded to dashboard
:param request: Contains metadata about the request
:return: redirect to the corresponding page
"""
if "next" in request.POST:
return HttpResponseRedirect(request.POST.get('next'))
elif "next" in request.GET:
return HttpResponseRedirect(request.GET.get('next'))
else:
return HttpResponseRedirect(reverse('dashboard'))
| python |
#!/usr/bin/env python3
""" Lightmon Data Read Command
This script reads the data from the light sensor.
"""
import lm
import argparse
import time
import numpy
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Read the data from the light sensor')
parser.add_argument('-p',
dest='port',
help='Serial port device where sensor is connected, example: /dev/ttyACM0',
required=True)
parser.add_argument('-o',
dest='write_dir',
help='directory to store the data from the sensor',
required=True)
serial_number = input("Enter the serial number of the sensor: 00")
args = parser.parse_args()
sensor = lm.LightMon(args.port)
uid = sensor.get_uid()
fd = open(f"{args.write_dir.strip()}/Sensor_00{serial_number}_{uid.strip()}.csv","w")
fd.write(sensor.get_data())
fd.close()
sensor.close_port()
| python |
from . import transformer
from . import bert
| python |
#! /usr/bin/jython
# -*- coding: utf-8 -*-
#
# sqlite3_read.py
# Jan/12/2011
#
# ----------------------------------------------------------------
#
import sys
import string
from java.lang import System
#
import java
from java.sql import DriverManager
#
# ----------------------------------------------------------------
sys.path.append ('/var/www/data_base/common/jython_common')
from jython_rdb_manipulate import display_proc
# ----------------------------------------------------------------
System.out.println ("*** start ***")
print ("*** 開始 ***")
java.lang.Class.forName("org.sqlite.JDBC")
#
sqlite3_file = "/var/tmp/sqlite3/cities.db"
url="jdbc:sqlite:" + sqlite3_file
user = ""
password = ""
conn = DriverManager.getConnection (url,user, password)
display_proc (conn)
#
conn.close()
print ("*** 終了 ***")
#
# ----------------------------------------------------------------
| python |
import contextlib
import logging
import os
from django import test
from django.test import Client
from djangae.environment import get_application_root
from google.appengine.api import apiproxy_stub_map, appinfo
from google.appengine.datastore import datastore_stub_util
from google.appengine.tools.devappserver2.application_configuration import ModuleConfiguration
from google.appengine.tools.devappserver2.module import _ScriptHandler
@contextlib.contextmanager
def inconsistent_db(probability=0, connection='default'):
"""
A context manager that allows you to make the datastore inconsistent during testing.
This is vital for writing applications that deal with the Datastore's eventual consistency
"""
from django.db import connections
conn = connections[connection]
if not hasattr(conn.creation, "testbed") or "datastore_v3" not in conn.creation.testbed._enabled_stubs:
raise RuntimeError("Tried to use the inconsistent_db stub when not testing")
stub = apiproxy_stub_map.apiproxy.GetStub('datastore_v3')
# Set the probability of the datastore stub
original_policy = stub._consistency_policy
stub.SetConsistencyPolicy(datastore_stub_util.PseudoRandomHRConsistencyPolicy(probability=probability))
try:
yield
finally:
# Restore to consistent mode
stub.SetConsistencyPolicy(original_policy)
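# A hedged usage sketch (not part of the original module; "MyModel" is a hypothetical
# datastore-backed Django model): wrap writes and queries in inconsistent_db to simulate
# eventual consistency. With probability=0 no write is applied immediately, so a freshly
# created entity stays invisible to global (non-ancestor) queries inside the block.
#
#     def test_handles_eventual_consistency(self):
#         with inconsistent_db(probability=0):
#             MyModel.objects.create(name="thing")
#             self.assertEqual(0, MyModel.objects.filter(name="thing").count())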
def _get_queued_tasks(stub, queue_name=None, flush=True):
tasks = []
queues = stub.GetQueues()
if queue_name is not None:
queues = filter(lambda q: queue_name == q['name'], queues)
for queue in queues:
for task in stub.GetTasks(queue['name']):
tasks.append(task)
if flush:
stub.FlushQueue(queue["name"])
return tasks
def _flush_tasks(stub, queue_name=None):
if queue_name:
stub.FlushQueue(queue_name)
else:
for queue in stub.GetQueues():
stub.FlushQueue(queue["name"])
def process_task_queues(queue_name=None):
"""
Processes any queued tasks inline without a server.
This is useful for end-to-end testing background tasks.
"""
stub = apiproxy_stub_map.apiproxy.GetStub("taskqueue")
tasks = _get_queued_tasks(stub, queue_name)
client = Client() # Instantiate a test client for processing the tasks
while tasks:
task = tasks.pop(0) # Get the first task
decoded_body = task['body'].decode('base64')
post_data = decoded_body
headers = { "HTTP_{}".format(x.replace("-", "_").upper()): y for x, y in task['headers'] }
#FIXME: set headers like the queue name etc.
method = task['method']
if method.upper() == "POST":
#Fixme: post data?
response = client.post(task['url'], data=post_data, content_type=headers['HTTP_CONTENT_TYPE'], **headers)
else:
response = client.get(task['url'], **headers)
if response.status_code != 200:
logging.info("Unexpected status (%r) while simulating task with url: %r", response.status_code, task['url'])
if not tasks:
#The map reduce may have added more tasks, so refresh the list
tasks = _get_queued_tasks(stub, queue_name)
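# Illustrative only (the deferred call below is an assumption, not part of this module):
# a test enqueues background work, checks the queue depth, then drains the queue inline
# through the Django test client.
#
#     class MyBackgroundTaskTest(TestCase):
#         def test_task_runs(self):
#             deferred.defer(do_some_work)      # hypothetical google.appengine.ext.deferred call
#             self.assertNumTasksEquals(1)
#             self.process_task_queues()        # executes the queued task via the test client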
class TestCaseMixin(object):
def setUp(self):
super(TestCaseMixin, self).setUp()
self.taskqueue_stub = apiproxy_stub_map.apiproxy.GetStub("taskqueue")
if self.taskqueue_stub:
_flush_tasks(self.taskqueue_stub) # Make sure we clear the queue before every test
def assertNumTasksEquals(self, num, queue_name='default'):
self.assertEqual(num, len(_get_queued_tasks(self.taskqueue_stub, queue_name, flush=False)))
def process_task_queues(self, queue_name=None):
process_task_queues(queue_name)
class HandlerAssertionsMixin(object):
"""
    Custom assert methods which verify a range of handler configuration
    settings specified in app.yaml.
"""
msg_prefix = 'Handler configuration for {url} is not protected by {perm}.'
def assert_login_admin(self, url):
"""
Test that the handler defined in app.yaml which matches the url provided
has `login: admin` in the configuration.
"""
handler = self._match_handler(url)
self.assertEqual(
handler.url_map.login, appinfo.LOGIN_ADMIN, self.msg_prefix.format(
url=url, perm='`login: admin`'
)
)
def assert_login_required(self, url):
"""
Test that the handler defined in app.yaml which matches the url provided
        has `login: required` or `login: admin` in the configuration.
"""
handler = self._match_handler(url)
login_admin = handler.url_map.login == appinfo.LOGIN_ADMIN
login_required = handler.url_map.login == appinfo.LOGIN_REQUIRED or login_admin
self.assertTrue(login_required, self.msg_prefix.format(
url=url, perm='`login: admin` or `login: required`'
)
)
def _match_handler(self, url):
"""
Load script handler configurations from app.yaml and try to match
the provided url path to a url_maps regex.
"""
app_yaml_path = os.path.join(get_application_root(), "app.yaml")
config = ModuleConfiguration(app_yaml_path)
url_maps = config.handlers
script_handlers = [
_ScriptHandler(maps) for
maps in url_maps if
maps.GetHandlerType() == appinfo.HANDLER_SCRIPT
]
for handler in script_handlers:
if handler.match(url):
return handler
raise AssertionError('No handler found for {url}'.format(url=url))
class TestCase(HandlerAssertionsMixin, TestCaseMixin, test.TestCase):
pass
class TransactionTestCase(HandlerAssertionsMixin, TestCaseMixin, test.TransactionTestCase):
pass
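# Example usage (the URLs are placeholders, shown only to illustrate the assertions above):
#
#     class HandlerProtectionTest(TestCase):
#         def test_handlers_are_protected(self):
#             self.assert_login_admin('/_internal/cleanup/')
#             self.assert_login_required('/dashboard/')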
| python |
import uuid
from yggdrasil.tests import assert_raises, assert_equal
import yggdrasil.drivers.tests.test_ConnectionDriver as parent
from yggdrasil import runner, tools
class TestServerParam(parent.TestConnectionParam):
r"""Test parameters for ServerDriver class."""
def __init__(self, *args, **kwargs):
super(TestServerParam, self).__init__(*args, **kwargs)
self.driver = 'ServerDriver'
self.args = None
self.attr_list += ['comm', 'response_drivers', 'nclients',
'request_name']
# Increased to allow forwarding between IPC comms on MacOS
self.timeout = 5.0
self.route_timeout = 2 * self.timeout
# if tools.get_default_comm() == "IPCComm":
# self.route_timeout = 120.0
# self.debug_flag = True
# self.sleeptime = 0.5
# self.timeout = 10.0
self.comm_name = tools.get_default_comm()
self.client_comm = tools.get_default_comm()
self.icomm_name = self.client_comm
self.ocomm_name = self.comm_name
@property
def send_comm_kwargs(self):
r"""dict: Keyword arguments for send comm."""
out = self.cli_drv.icomm.opp_comm_kwargs()
out['comm'] = 'ClientComm'
return out
@property
def recv_comm_kwargs(self):
r"""dict: Keyword arguments for recv comm."""
out = self.instance.ocomm.opp_comm_kwargs()
out['comm'] = 'ServerComm'
return out
@property
def inst_kwargs(self):
r"""dict: Keyword arguments for tested class."""
out = super(TestServerParam, self).inst_kwargs
# out['request_name'] = self.cli_drv.request_name
out['comm'] = self.cli_drv.comm
out['comm_address'] = self.cli_drv.ocomm.opp_address
out['ocomm_kws']['comm'] = self.comm_name
return out
def setup(self, *args, **kwargs):
r"""Recover new server message on start-up."""
kwargs.setdefault('nprev_comm', self.comm_count)
self.cli_drv = self.create_client()
if not self.skip_start:
self.cli_drv.start()
super(TestServerParam, self).setup(*args, **kwargs)
def teardown(self):
r"""Recover end server message on teardown."""
if hasattr(self, 'cli_drv'):
self.remove_instance(self.cli_drv)
delattr(self, 'cli_drv')
super(TestServerParam, self).teardown()
def create_client(self, comm_address=None):
r"""Create a new ClientDriver instance."""
inst = runner.create_driver(
'ClientDriver', 'test_model_request.' + str(uuid.uuid4()),
comm=self.client_comm,
comm_address=comm_address,
namespace=self.namespace, working_dir=self.working_dir,
timeout=self.timeout)
return inst
class TestServerDriverNoStart(TestServerParam,
parent.TestConnectionDriverNoStart):
r"""Test class for ServerDriver class without start."""
def test_error_attributes(self):
r"""Test error raised when trying to access attributes set on recv."""
err_attr = ['request_id', 'response_address']
for k in err_attr:
assert_raises(AttributeError, getattr, self.instance, k)
class TestServerDriverNoInit(TestServerParam,
parent.TestConnectionDriverNoInit):
r"""Test class for ServerDriver class without init."""
pass
class TestServerDriver(TestServerParam, parent.TestConnectionDriver):
r"""Test class for ServerDriver class."""
def setup(self, *args, **kwargs):
r"""Wait for drivers to start."""
super(TestServerDriver, self).setup(*args, **kwargs)
T = self.instance.start_timeout()
while ((not T.is_out) and ((not self.instance.is_valid)
or (not self.cli_drv.is_valid))):
self.instance.sleep() # pragma: debug
self.instance.stop_timeout()
# # Disabled so that test message is not read by mistake
# def test_purge(self):
# r"""Test purge of queue."""
# pass
def test_client_count(self):
r"""Test to ensure client count is correct."""
T = self.instance.start_timeout()
while ((not T.is_out) and (self.instance.nclients != 1)): # pragma: debug
self.instance.sleep()
self.instance.stop_timeout()
assert_equal(self.instance.nclients, 1)
# Create new client
cli_drv2 = self.create_client(comm_address=self.cli_drv.comm_address)
cli_drv2.start()
T = self.instance.start_timeout()
while ((not T.is_out) and (self.instance.nclients != 2)):
self.instance.sleep()
self.instance.stop_timeout()
assert_equal(self.instance.nclients, 2)
# Send sign off
cli_drv2.icomm.close()
T = self.instance.start_timeout()
while ((not T.is_out) and (self.instance.nclients != 1)):
self.instance.sleep()
self.instance.stop_timeout()
assert_equal(self.instance.nclients, 1)
# Close client and wait for sign off
self.cli_drv.icomm.close()
T = self.instance.start_timeout()
while ((not T.is_out) and (self.instance.nclients != 0)):
self.instance.sleep()
self.instance.stop_timeout()
assert_equal(self.instance.nclients, 0)
# Clean up
cli_drv2.terminate()
def test_send_recv(self, msg_send=None):
r"""Test routing of a short message between client and server."""
if msg_send is None:
msg_send = self.test_msg
T = self.instance.start_timeout()
while ((not T.is_out) and ((not self.instance.is_valid)
or (not self.cli_drv.is_valid))):
self.instance.sleep() # pragma: debug
self.instance.stop_timeout()
# Send a message to local output
flag = self.send_comm.send(msg_send)
assert(flag)
# Receive on server side, then send back
flag, srv_msg = self.recv_comm.recv(timeout=self.route_timeout)
assert(flag)
assert_equal(srv_msg, msg_send)
flag = self.recv_comm.send(srv_msg)
assert(flag)
        # Receive response on client side
flag, cli_msg = self.send_comm.recv(timeout=self.route_timeout)
assert(flag)
assert_equal(cli_msg, msg_send)
def test_send_recv_nolimit(self):
r"""Test routing of a large message between client and server."""
self.test_send_recv(msg_send=self.msg_long)
| python |
from performance.ConfusionMatrix import ConfusionMatrix
from performance.ConfusionMatrixToConfusionTable import ConfusionMatrixToConfusionTable
import numpy as np
class ModelPerformance:
BETA = 1
def __init__(self, model, test_set):
self.confusion_matrix = ConfusionMatrix(model, test_set)
self.matrix_to_table_parser = ConfusionMatrixToConfusionTable(self.confusion_matrix)
def f1_measure(self):
f1s = []
for klass in self.__matrix_classes():
f1s.append(self.__confusion_table_for(klass).f_score())
return np.mean(f1s)
def __confusion_table_for(self, klass):
return self.matrix_to_table_parser.confusion_table_for(klass)
def __matrix_classes(self):
return self.confusion_matrix.possible_classes()
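# A minimal usage sketch (the model and test-set names are assumptions): f1_measure()
# builds a confusion matrix from the model's predictions, derives a one-vs-rest confusion
# table per class, and returns the unweighted (macro) mean of the per-class F-scores.
#
#     performance = ModelPerformance(trained_model, held_out_test_set)
#     macro_f1 = performance.f1_measure()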
| python |
"""Support for Nest devices."""
from datetime import datetime, timedelta
import logging
import threading
from nest import Nest
from nest.nest import APIError, AuthorizationError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_FILENAME,
CONF_STRUCTURE,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.entity import Entity
from . import local_auth
from .const import DATA_NEST, DATA_NEST_CONFIG, DOMAIN, SIGNAL_NEST_UPDATE
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["climate", "camera", "sensor", "binary_sensor"]
# Configuration for the legacy nest API
SERVICE_CANCEL_ETA = "cancel_eta"
SERVICE_SET_ETA = "set_eta"
NEST_CONFIG_FILE = "nest.conf"
ATTR_ETA = "eta"
ATTR_ETA_WINDOW = "eta_window"
ATTR_STRUCTURE = "structure"
ATTR_TRIP_ID = "trip_id"
AWAY_MODE_AWAY = "away"
AWAY_MODE_HOME = "home"
ATTR_AWAY_MODE = "away_mode"
SERVICE_SET_AWAY_MODE = "set_away_mode"
# Services for the legacy API
SET_AWAY_MODE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_AWAY_MODE): vol.In([AWAY_MODE_AWAY, AWAY_MODE_HOME]),
vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
}
)
SET_ETA_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ETA): cv.time_period,
vol.Optional(ATTR_TRIP_ID): cv.string,
vol.Optional(ATTR_ETA_WINDOW): cv.time_period,
vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
}
)
CANCEL_ETA_SCHEMA = vol.Schema(
{
vol.Required(ATTR_TRIP_ID): cv.string,
vol.Optional(ATTR_STRUCTURE): vol.All(cv.ensure_list, [cv.string]),
}
)
def nest_update_event_broker(hass, nest):
"""
Dispatch SIGNAL_NEST_UPDATE to devices when nest stream API received data.
Used for the legacy nest API.
Runs in its own thread.
"""
_LOGGER.debug("Listening for nest.update_event")
while hass.is_running:
nest.update_event.wait()
if not hass.is_running:
break
nest.update_event.clear()
_LOGGER.debug("Dispatching nest data update")
dispatcher_send(hass, SIGNAL_NEST_UPDATE)
_LOGGER.debug("Stop listening for nest.update_event")
async def async_setup_legacy(hass: HomeAssistant, config: dict) -> bool:
"""Set up Nest components using the legacy nest API."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
local_auth.initialize(hass, conf[CONF_CLIENT_ID], conf[CONF_CLIENT_SECRET])
filename = config.get(CONF_FILENAME, NEST_CONFIG_FILE)
access_token_cache_file = hass.config.path(filename)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={"nest_conf_path": access_token_cache_file},
)
)
# Store config to be used during entry setup
hass.data[DATA_NEST_CONFIG] = conf
return True
async def async_setup_legacy_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Nest from legacy config entry."""
nest = Nest(access_token=entry.data["tokens"]["access_token"])
_LOGGER.debug("proceeding with setup")
conf = hass.data.get(DATA_NEST_CONFIG, {})
hass.data[DATA_NEST] = NestLegacyDevice(hass, conf, nest)
if not await hass.async_add_executor_job(hass.data[DATA_NEST].initialize):
return False
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
def validate_structures(target_structures):
all_structures = [structure.name for structure in nest.structures]
for target in target_structures:
if target not in all_structures:
_LOGGER.info("Invalid structure: %s", target)
def set_away_mode(service):
"""Set the away mode for a Nest structure."""
if ATTR_STRUCTURE in service.data:
target_structures = service.data[ATTR_STRUCTURE]
validate_structures(target_structures)
else:
target_structures = hass.data[DATA_NEST].local_structure
for structure in nest.structures:
if structure.name in target_structures:
_LOGGER.info(
"Setting away mode for: %s to: %s",
structure.name,
service.data[ATTR_AWAY_MODE],
)
structure.away = service.data[ATTR_AWAY_MODE]
def set_eta(service):
"""Set away mode to away and include ETA for a Nest structure."""
if ATTR_STRUCTURE in service.data:
target_structures = service.data[ATTR_STRUCTURE]
validate_structures(target_structures)
else:
target_structures = hass.data[DATA_NEST].local_structure
for structure in nest.structures:
if structure.name in target_structures:
if structure.thermostats:
_LOGGER.info(
"Setting away mode for: %s to: %s",
structure.name,
AWAY_MODE_AWAY,
)
structure.away = AWAY_MODE_AWAY
now = datetime.utcnow()
trip_id = service.data.get(
ATTR_TRIP_ID, f"trip_{int(now.timestamp())}"
)
eta_begin = now + service.data[ATTR_ETA]
eta_window = service.data.get(ATTR_ETA_WINDOW, timedelta(minutes=1))
eta_end = eta_begin + eta_window
_LOGGER.info(
"Setting ETA for trip: %s, "
"ETA window starts at: %s and ends at: %s",
trip_id,
eta_begin,
eta_end,
)
structure.set_eta(trip_id, eta_begin, eta_end)
else:
_LOGGER.info(
"No thermostats found in structure: %s, unable to set ETA",
structure.name,
)
def cancel_eta(service):
"""Cancel ETA for a Nest structure."""
if ATTR_STRUCTURE in service.data:
target_structures = service.data[ATTR_STRUCTURE]
validate_structures(target_structures)
else:
target_structures = hass.data[DATA_NEST].local_structure
for structure in nest.structures:
if structure.name in target_structures:
if structure.thermostats:
trip_id = service.data[ATTR_TRIP_ID]
_LOGGER.info("Cancelling ETA for trip: %s", trip_id)
structure.cancel_eta(trip_id)
else:
_LOGGER.info(
"No thermostats found in structure: %s, "
"unable to cancel ETA",
structure.name,
)
hass.services.async_register(
DOMAIN, SERVICE_SET_AWAY_MODE, set_away_mode, schema=SET_AWAY_MODE_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_SET_ETA, set_eta, schema=SET_ETA_SCHEMA
)
hass.services.async_register(
DOMAIN, SERVICE_CANCEL_ETA, cancel_eta, schema=CANCEL_ETA_SCHEMA
)
@callback
def start_up(event):
"""Start Nest update event listener."""
threading.Thread(
name="Nest update listener",
target=nest_update_event_broker,
args=(hass, nest),
).start()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_up)
@callback
def shut_down(event):
"""Stop Nest update event listener."""
nest.update_event.set()
entry.async_on_unload(
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shut_down)
)
_LOGGER.debug("async_setup_nest is done")
return True
class NestLegacyDevice:
"""Structure Nest functions for hass for legacy API."""
def __init__(self, hass, conf, nest):
"""Init Nest Devices."""
self.hass = hass
self.nest = nest
self.local_structure = conf.get(CONF_STRUCTURE)
def initialize(self):
"""Initialize Nest."""
try:
            # Do not optimize the next statement; it is here to initialize
            # a persistent Nest API connection.
structure_names = [s.name for s in self.nest.structures]
if self.local_structure is None:
self.local_structure = structure_names
except (AuthorizationError, APIError, OSError) as err:
_LOGGER.error("Connection error while access Nest web service: %s", err)
return False
return True
def structures(self):
"""Generate a list of structures."""
try:
for structure in self.nest.structures:
if structure.name not in self.local_structure:
_LOGGER.debug(
"Ignoring structure %s, not in %s",
structure.name,
self.local_structure,
)
continue
yield structure
except (AuthorizationError, APIError, OSError) as err:
_LOGGER.error("Connection error while access Nest web service: %s", err)
def thermostats(self):
"""Generate a list of thermostats."""
return self._devices("thermostats")
def smoke_co_alarms(self):
"""Generate a list of smoke co alarms."""
return self._devices("smoke_co_alarms")
def cameras(self):
"""Generate a list of cameras."""
return self._devices("cameras")
def _devices(self, device_type):
"""Generate a list of Nest devices."""
try:
for structure in self.nest.structures:
if structure.name not in self.local_structure:
_LOGGER.debug(
"Ignoring structure %s, not in %s",
structure.name,
self.local_structure,
)
continue
for device in getattr(structure, device_type, []):
try:
                        # Do not optimize the next statement;
                        # it is here to verify Nest API permissions.
device.name_long
except KeyError:
_LOGGER.warning(
"Cannot retrieve device name for [%s]"
", please check your Nest developer "
"account permission settings",
device.serial,
)
continue
yield (structure, device)
except (AuthorizationError, APIError, OSError) as err:
_LOGGER.error("Connection error while access Nest web service: %s", err)
class NestSensorDevice(Entity):
"""Representation of a Nest sensor."""
def __init__(self, structure, device, variable):
"""Initialize the sensor."""
self.structure = structure
self.variable = variable
if device is not None:
# device specific
self.device = device
self._name = f"{self.device.name_long} {self.variable.replace('_', ' ')}"
else:
# structure only
self.device = structure
self._name = f"{self.structure.name} {self.variable.replace('_', ' ')}"
self._state = None
self._unit = None
@property
def name(self):
"""Return the name of the nest, if any."""
return self._name
@property
def should_poll(self):
"""Do not need poll thanks using Nest streaming API."""
return False
@property
def unique_id(self):
"""Return unique id based on device serial and variable."""
return f"{self.device.serial}-{self.variable}"
@property
def device_info(self):
"""Return information about the device."""
if not hasattr(self.device, "name_long"):
name = self.structure.name
model = "Structure"
else:
name = self.device.name_long
if self.device.is_thermostat:
model = "Thermostat"
elif self.device.is_camera:
model = "Camera"
elif self.device.is_smoke_co_alarm:
model = "Nest Protect"
else:
model = None
return {
"identifiers": {(DOMAIN, self.device.serial)},
"name": name,
"manufacturer": "Nest Labs",
"model": model,
}
def update(self):
"""Do not use NestSensorDevice directly."""
raise NotImplementedError
async def async_added_to_hass(self):
"""Register update signal handler."""
async def async_update_state():
"""Update sensor state."""
await self.async_update_ha_state(True)
self.async_on_remove(
async_dispatcher_connect(self.hass, SIGNAL_NEST_UPDATE, async_update_state)
)
| python |
>>> print(*map(''.join, zip('abc', 'ABC', '123')), sep='\n')
aA1
bB2
cC3
>>>
| python |
from BS.utils import get_string_list_from_file, save_list_to_file
def fix_adjusted_participles():
socket_group_28_01 = list(get_string_list_from_file(
'src_dict/БГ 28.01.21 изм.txt', encoding='cp1251'))
socket_group_23_01 = list(get_string_list_from_file(
'src_dict/БГ 23.01.21.txt', encoding='cp1251'))
adjusted_participles_list = []
for count, socket_string in enumerate(socket_group_28_01[:]):
if socket_string.startswith('*'):
for replace_string in socket_group_23_01[:]:
if replace_string.startswith('*'):
if replace_string.split()[0].endswith(
socket_string.split()[1]
):
print(replace_string)
socket_group_28_01[count] = replace_string
adjusted_participles_list.append(replace_string)
save_list_to_file(sorted(adjusted_participles_list,
key=lambda x: x.replace('*', '').lower()),
'out/Адъектированные причастия.txt'
)
save_list_to_file(socket_group_28_01, 'out/БГ 28.01.21.txt',
encoding='cp1251')
if __name__ == '__main__':
fix_adjusted_participles()
| python |