content (stringlengths 0–894k) | type (stringclasses 2 values) |
---|---|
import time
import re
import argparse
import os
import yaml
def get_arg_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--log_dir',
help='Full path of log directory',
required=False,
default='./')
return parser
def read_config():
bs_dic = {}
cur_path = os.path.dirname(os.path.realpath(__file__))
config_path = os.path.join(cur_path, "config.yaml")
models=[]
with open(config_path, 'r', encoding='utf-8') as f:
config = yaml.safe_load(f.read())
models = config["test_model"]
stock_tf = config["stocktf"]
for model in models:
bs_dic[model]=config['model_batchsize'][model]
print("=" * 15 * (len(bs_dic)+1))
print('%-10s'%'model', end="\t")
for k in bs_dic.keys():
print('%-10s'%k, end='\t')
print("")
print('%-10s'%'batchsize' ,end='\t')
for k in bs_dic.keys():
print("%-10s" %bs_dic[k], end="\t")
print("")
print("=" * 15 * (len(bs_dic)+1))
return stock_tf, bs_dic, models
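# The config.yaml read above is expected to provide "stocktf", "test_model" and
# "model_batchsize" keys. A minimal sketch (the model names and batch sizes here
# are hypothetical, not from the original repo):
#
#   stocktf: true
#   test_model:
#     - WDL
#     - DLRM
#   model_batchsize:
#     WDL: 512
#     DLRM: 1024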
if __name__ == "__main__":
stock_tf, bs_dic, models = read_config()
parser = get_arg_parser()
args = parser.parse_args()
log_dir = args.log_dir
log_list = []
result={}
for root, dirs, files in os.walk(log_dir, topdown=False):
for name in files:
if os.path.splitext(name)[1] == '.log':
log_list.append(os.path.join(root, name))
acc_dic = {}
auc_dic = {}
gstep_dic = {}
for file in log_list:
output = []
file_name = os.path.split(file)[1]
model_name = file_name.split('_')[0]
file_name_nosurf = os.path.splitext(file_name)[0]
with open(file, 'r') as f:
for line in f:
matchObj = re.search(r'global_step/sec: \d+(\.\d+)?', line)
if matchObj:
output.append(matchObj.group()[17:])
if "ACC" in line:
value = float(line.split()[2])
acc_dic[file_name_nosurf] = value
if "AUC" in line:
value = float(line.split()[2])
auc_dic[file_name_nosurf] = value
gstep = [float(i) for i in output[20:30]]
# average global_step/sec over the 21st-30th samples; guard against short logs
avg = sum(gstep) / len(gstep) if gstep else 0.0
gstep_dic[file_name_nosurf] = avg
total_dic = {}
for model in models:
total_dic[model]= {}
total_dic[model]["acc"]={}
total_dic[model]["auc"]={}
total_dic[model]["gstep"]={}
for acc_key in acc_dic.keys():
if model.lower() in acc_key:
if "tf_fp32" in acc_key:
total_dic[model]["acc"]["tf_fp32"]=acc_dic[acc_key]
elif "deeprec_fp32" in acc_key:
total_dic[model]["acc"]["deeprec_fp32"]=acc_dic[acc_key]
elif "deeprec_bf16" in acc_key:
total_dic[model]["acc"]["deeprec_bf16"]=acc_dic[acc_key]
for auc_key in auc_dic.keys():
if model.lower() in auc_key:
if "tf_fp32" in auc_key:
total_dic[model]["auc"]["tf_fp32"]=auc_dic[auc_key]
elif "deeprec_fp32" in auc_key:
total_dic[model]["auc"]["deeprec_fp32"]=auc_dic[auc_key]
elif "deeprec_bf16" in auc_key:
total_dic[model]["auc"]["deeprec_bf16"]=auc_dic[auc_key]
for gstep_key in gstep_dic.keys():
if model.lower() in gstep_key:
if "tf_fp32" in gstep_key:
total_dic[model]["gstep"]["tf_fp32"]=gstep_dic[gstep_key]
elif "deeprec_fp32" in gstep_key:
total_dic[model]["gstep"]["deeprec_fp32"]=gstep_dic[gstep_key]
elif "deeprec_bf16" in gstep_key:
total_dic[model]["gstep"]["deeprec_bf16"]=gstep_dic[gstep_key]
upgrade_dic = {}
for model in models:
upgrade_dic[model] = {}
upgrade_dic[model]['tf_fp32'] = 'baseline'
if stock_tf:
upgrade_dic[model]['deeprec_fp32'] = total_dic[model]['gstep']['deeprec_fp32'] / total_dic[model]['gstep']['tf_fp32']
upgrade_dic[model]['deeprec_bf16'] = total_dic[model]['gstep']['deeprec_bf16'] / total_dic[model]['gstep']['tf_fp32']
if stock_tf:
print("%-5s\t %10s\t %-10s\t %-10s\t %-11s\t %10s\t %10s\t %11s" %('Model', 'FrameWork', 'Datatype', 'ACC', 'AUC', 'Gstep', 'Throughput', 'Speedup'))
for model in total_dic.keys():
print(model+':')
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f\t %11s" %('', 'StockTF', 'FP32', total_dic[model]['acc']['tf_fp32'], total_dic[model]['auc']['tf_fp32'], total_dic[model]['gstep']['tf_fp32'], total_dic[model]['gstep']['tf_fp32']*bs_dic[model], upgrade_dic[model]['tf_fp32']))
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f\t %10.2f%%" %('', 'DeepRec', 'FP32', total_dic[model]['acc']['deeprec_fp32'], total_dic[model]['auc']['deeprec_fp32'], total_dic[model]['gstep']['deeprec_fp32'], total_dic[model]['gstep']['deeprec_fp32']*bs_dic[model], upgrade_dic[model]['deeprec_fp32']*100))
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f\t %10.2f%%" %('', 'DeepRec', 'BF16', total_dic[model]['acc']['deeprec_bf16'], total_dic[model]['auc']['deeprec_bf16'], total_dic[model]['gstep']['deeprec_bf16'], total_dic[model]['gstep']['deeprec_bf16']*bs_dic[model], upgrade_dic[model]['deeprec_bf16']*100))
else:
print("%-5s\t %10s\t %-10s\t %-10s\t %-11s\t %10s\t %10s\t" %('Model', 'FrameWork', 'Datatype', 'ACC', 'AUC', 'Gstep', 'Throughput'))
for model in total_dic.keys():
print(model+':')
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f" %('', 'DeepRec', 'FP32', total_dic[model]['acc']['deeprec_fp32'], total_dic[model]['auc']['deeprec_fp32'], total_dic[model]['gstep']['deeprec_fp32'], total_dic[model]['gstep']['deeprec_fp32']*bs_dic[model]))
print("%-5s\t %10s\t %-10s\t %-10.6f\t %-5.6f\t %10.2f\t %10.2f" %('', 'DeepRec', 'BF16', total_dic[model]['acc']['deeprec_bf16'], total_dic[model]['auc']['deeprec_bf16'], total_dic[model]['gstep']['deeprec_bf16'], total_dic[model]['gstep']['deeprec_bf16']*bs_dic[model]))
| python |
# Run these tests from ipython in the main package directory:
# `run tests\python_example_package_tests.py`
import unittest
import python_example_package
class TestAdd(unittest.TestCase):
def test_basic(self):
print "I RAN!"
def test_add(self):
self.assertEqual( python_example_package.add(1,2), 3)
self.assertEqual( python_example_package.add(0,0), 0)
self.assertEqual( python_example_package.add(-1,-1), -2)
if __name__=='__main__':
print(python_example_package.add(1, 2))
unittest.main()
| python |
from time import localtime
activities = {8: 'Sleeping',
9: 'Commuting',
17: 'Working',
18: 'Commuting',
20: 'Eating',
22: 'Resting' }
time_now = localtime()
hour = time_now.tm_hour
for activity_time in sorted(activities.keys()):
if hour < activity_time:
print(activities[activity_time])
break
else:
print('Unknown, AFK or sleeping!')
| python |
# Copyright 2019 Atalaya Tech, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import importlib
from bentoml.utils import cloudpickle
from bentoml.artifact import BentoServiceArtifact, BentoServiceArtifactWrapper
from bentoml.exceptions import (
ArtifactLoadingException,
MissingDependencyException,
InvalidArgument,
)
class KerasModelArtifact(BentoServiceArtifact):
"""
Abstraction for saving/loading Keras model
Args:
name (string): name of the artifact
custom_objects (dict): dictionary of Keras custom objects for model
store_as_json_and_weights (bool): flag allowing storage of the Keras
model as JSON and weights
Raises:
MissingDependencyException: keras or tensorflow.keras package is required for
KerasModelArtifact
InvalidArgument: invalid argument type, model being packed must be instance of
keras.engine.network.Network, tf.keras.models.Model, or their aliases
Example usage:
>>> from tensorflow import keras
>>> from tensorflow.keras.models import Sequential
>>> from tensorflow.keras.preprocessing import sequence, text
>>>
>>> model_to_save = Sequential()
>>> # train the model
>>> model_to_save.compile(...)
>>> model_to_save.fit(...)
>>>
>>> import bentoml
>>>
>>> @bentoml.env(pip_dependencies=['tensorflow==1.14.0', 'numpy', 'pandas'])
>>> @bentoml.artifacts([KerasModelArtifact('model')])
>>> class KerasModelService(bentoml.BentoService):
>>> @bentoml.api(input=JsonInput())
>>> def predict(self, parsed_json):
>>> input_data = text.text_to_word_sequence(parsed_json['text'])
>>> return self.artifacts.model.predict_classes(input_data)
>>>
>>> svc = KerasModelService()
>>> svc.pack('model', model_to_save)
"""
def __init__(
self,
name,
custom_objects=None,
model_extension=".h5",
store_as_json_and_weights=False,
):
super(KerasModelArtifact, self).__init__(name)
try:
import tensorflow as tf
except ImportError:
raise MissingDependencyException(
"Tensorflow package is required to use KerasModelArtifact. BentoML "
"currently only support using Keras with Tensorflow backend."
)
self._model_extension = model_extension
self._store_as_json_and_weights = store_as_json_and_weights
# By default assume using tf.keras module
self._keras_module_name = tf.keras.__name__
self.custom_objects = custom_objects
self.graph = None
self.sess = None
@property
def pip_dependencies(self):
# Note that the keras package itself is not required; users can rely on
# tf.keras as a replacement. The tensorflow package is required either way,
# since it provides the default Keras backend.
deps = ['tensorflow']
if self._keras_module_name == 'keras':
deps.append('keras')
return deps
def _keras_module_name_path(self, base_path):
# The name of the keras module used, can be 'keras' or 'tensorflow.keras'
return os.path.join(base_path, self.name + '_keras_module_name.txt')
def _custom_objects_path(self, base_path):
return os.path.join(base_path, self.name + '_custom_objects.pkl')
def _model_file_path(self, base_path):
return os.path.join(base_path, self.name + self._model_extension)
def _model_weights_path(self, base_path):
return os.path.join(base_path, self.name + '_weights.hdf5')
def _model_json_path(self, base_path):
return os.path.join(base_path, self.name + '_json.json')
def bind_keras_backend_session(self):
try:
import tensorflow as tf
except ImportError:
raise MissingDependencyException(
"Tensorflow package is required to use KerasModelArtifact. BentoML "
"currently only support using Keras with Tensorflow backend."
)
self.sess = tf.compat.v1.keras.backend.get_session()
self.graph = self.sess.graph
def create_session(self):
try:
import tensorflow as tf
except ImportError:
raise MissingDependencyException(
"Tensorflow package is required to use KerasModelArtifact. BentoML "
"currently only support using Keras with Tensorflow backend."
)
self.graph = tf.compat.v1.get_default_graph()
self.sess = tf.compat.v1.Session(graph=self.graph)
tf.compat.v1.keras.backend.set_session(self.sess)
def pack(self, data): # pylint:disable=arguments-differ
try:
import tensorflow as tf
except ImportError:
raise MissingDependencyException(
"Tensorflow package is required to use KerasModelArtifact. BentoML "
"currently only support using Keras with Tensorflow backend."
)
if isinstance(data, dict):
model = data['model']
custom_objects = (
data['custom_objects']
if 'custom_objects' in data
else self.custom_objects
)
else:
model = data
custom_objects = self.custom_objects
if not isinstance(model, tf.keras.models.Model):
error_msg = (
"KerasModelArtifact#pack expects model argument to be type: "
"keras.engine.network.Network, tf.keras.models.Model, or their "
"aliases, instead got type: {}".format(type(model))
)
try:
import keras
if not isinstance(model, keras.engine.network.Network):
raise InvalidArgument(error_msg)
else:
self._keras_module_name = keras.__name__
except ImportError:
raise InvalidArgument(error_msg)
self.bind_keras_backend_session()
model._make_predict_function()
return _KerasModelArtifactWrapper(self, model, custom_objects)
def load(self, path):
if os.path.isfile(self._keras_module_name_path(path)):
with open(self._keras_module_name_path(path), "rb") as text_file:
keras_module_name = text_file.read().decode("utf-8")
try:
keras_module = importlib.import_module(keras_module_name)
except ImportError:
raise ArtifactLoadingException(
"Failed to import '{}' module when loading saved "
"KerasModelArtifact".format(keras_module_name)
)
self.create_session()
if self.custom_objects is None and os.path.isfile(
self._custom_objects_path(path)
):
self.custom_objects = cloudpickle.load(
open(self._custom_objects_path(path), 'rb')
)
with self.graph.as_default():
with self.sess.as_default():
# load keras model via json and weights if requested
if self._store_as_json_and_weights:
with open(self._model_json_path(path), 'r') as json_file:
model_json = json_file.read()
model = keras_module.models.model_from_json(
model_json, custom_objects=self.custom_objects
)
model.load_weights(self._model_weights_path(path))
# otherwise, load keras model via standard load_model
else:
model = keras_module.models.load_model(
self._model_file_path(path), custom_objects=self.custom_objects
)
return self.pack(model)
class _KerasModelArtifactWrapper(BentoServiceArtifactWrapper):
def __init__(self, spec, model, custom_objects):
super(_KerasModelArtifactWrapper, self).__init__(spec)
self.graph = spec.graph
self.sess = spec.sess
self._model = model
self._custom_objects = custom_objects
self._store_as_json_and_weights = spec._store_as_json_and_weights
self._model_wrapper = _KerasModelWrapper(self._model, self.graph, self.sess)
def save(self, dst):
# save the keras module name to be used when loading
with open(self.spec._keras_module_name_path(dst), "wb") as text_file:
text_file.write(self.spec._keras_module_name.encode("utf-8"))
# save custom_objects for model
cloudpickle.dump(
self._custom_objects, open(self.spec._custom_objects_path(dst), "wb")
)
# save keras model using json and weights if requested
if self.spec._store_as_json_and_weights:
with open(self.spec._model_json_path(dst), "w") as json_file:
json_file.write(self._model.to_json())
self._model.save_weights(self.spec._model_weights_path(dst))
# otherwise, save standard keras model
else:
self._model.save(self.spec._model_file_path(dst))
def get(self):
return self._model_wrapper
class _KerasModelWrapper:
def __init__(self, keras_model, graph, sess):
self.keras_model = keras_model
self.graph = graph
self.sess = sess
def predict(self, *args, **kwargs):
with self.graph.as_default():
with self.sess.as_default():
return self.keras_model.predict(*args, **kwargs)
def predict_classes(self, *args, **kwargs):
with self.graph.as_default():
with self.sess.as_default():
return self.keras_model.predict_classes(*args, **kwargs)
def __call__(self, *args, **kwargs):
with self.graph.as_default():
with self.sess.as_default():
return self.keras_model(*args, **kwargs)  # delegate calls on the wrapper to the wrapped Keras model
| python |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import xmltodict
from wechatpy.enterprise.events import EVENT_TYPES
from wechatpy.enterprise.messages import MESSAGE_TYPES
from wechatpy.messages import UnknownMessage
from wechatpy.utils import to_text
def parse_message(xml):
if not xml:
return
message = xmltodict.parse(to_text(xml))['xml']
message_type = message['MsgType'].lower()
if message_type == 'event':
event_type = message['Event'].lower()
message_class = EVENT_TYPES.get(event_type, UnknownMessage)
else:
message_class = MESSAGE_TYPES.get(message_type, UnknownMessage)
return message_class(message)
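# A minimal usage sketch: feed a raw enterprise-WeChat XML payload to
# parse_message() and inspect the resulting message object. The XML below is a
# hypothetical text-message payload, not taken from the wechatpy test suite.
if __name__ == '__main__':
    sample_xml = """
    <xml>
        <ToUserName><![CDATA[CorpID]]></ToUserName>
        <FromUserName><![CDATA[UserID]]></FromUserName>
        <CreateTime>1348831860</CreateTime>
        <MsgType><![CDATA[text]]></MsgType>
        <Content><![CDATA[hello]]></Content>
        <MsgId>1234567890123456</MsgId>
        <AgentID>1</AgentID>
    </xml>
    """
    msg = parse_message(sample_xml)
    print(msg.type, getattr(msg, 'content', None))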
| python |
"""Predict a flower name from an image using a trained model.
Returns the flower name and class probability.
"""
import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from workspace_utils import active_session
import logging
import json
import argparse
import consts
from image import process_image
from model_utils import set_device, select_pretrained_model, freeze_params, map_category_names, print_predictions, print_args
from network import Network
def init_argparse(*args):
"""Instantiate argparse object"""
parser = argparse.ArgumentParser(
description='Train a network on dataset and save the model as a checkpoint'
)
parser.add_argument('-i', '--input_img',
help='Path to image')
parser.add_argument('-c', '--checkpoint',
help='Path to checkpoint',
default='checkpoints')
parser.add_argument('-k', '--top_k',
help='Return n most likely classes',
type=int,
default=3)
parser.add_argument('-n', '--category_names',
help='Use a mapping of categories to real names')
parser.add_argument('--gpu',
help='Use GPU for predictions; Default is True',
action='store_true',
default=True)
# Initialize with constants if passed in as an argument
if args:
return parser.parse_args(args[0])
return parser.parse_args()
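# Note: consts.PREDICT_ARGS (used in __main__ below) is assumed to be a list of
# argv-style tokens forwarded to parse_args(), e.g.
# ['--input_img', 'flowers/test/1/image.jpg', '--top_k', '5'] (paths hypothetical).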
def load_checkpoint(path, cuda):
"""Load a checkpoint and rebuild the model
Args:
path: Path to checkpoint file
Returns:
model: Recreation of the saved model
"""
device = set_device(cuda)
checkpoint = torch.load(path, map_location=device)
# Load pretrained model
model = select_pretrained_model(checkpoint['pretrained_model'])
# Freeze parameters to prevent backpropagation
freeze_params(model)
# Load classifier
classifier = Network(checkpoint['input_size'],
checkpoint['output_size'],
checkpoint['hidden_layers'],
checkpoint['drop_p'])
classifier.load_state_dict(checkpoint['state_dict'])
# Merge classifier to end of pretrained model
model.fc = classifier
# Add class to index mapping
model.class_to_idx = checkpoint['class_to_idx']
# Invert class_to_idx dictionary
# Ref: https://therenegadecoder.com/code/how-to-invert-a-dictionary-in-python/#invert-a-dictionary-with-a-comprehension
model.idx_to_class = {v: k for k, v in checkpoint['class_to_idx'].items()}
return model
def predict(image_path, model, k, cuda):
''' Predict the class (or classes) of an image using a
trained deep learning model.
Args:
image_path: Path of image to be classified
model: Model to classify the image
k: Number of predictions to return
cuda: Run prediction with cuda
Returns:
probs: Probabilities for each class prediction
classes: Class predictions
'''
# Use CUDA if available
device = set_device(cuda)
model.to(device)
# Disable dropout
model.eval()
# Disable autograd
with torch.no_grad():
# Process image to PyTorch tensor
img = process_image(image_path).to(device)
# Need to unsqueeze for a single image
# Ref: https://discuss.pytorch.org/t/expected-stride-to-be-a-single-integer-value-or-a-list/17612/4
img.unsqueeze_(0)
# Get probability distribution
output = model(img)
ps = torch.exp(output)
# Get top k probabilities and classes
top_p, top_classes = ps.topk(k, dim=1)
# Convert top_p, top_classes tensors to plain lists for easier
# ingestion downstream.
# Ref: https://stackoverflow.com/a/53903817
probs = top_p.squeeze().tolist()
classes = [model.idx_to_class[i] for i in top_classes.squeeze().tolist()]
logging.info(f'Probability distribution: {ps}')
logging.info(probs)
logging.info(classes)
return probs, classes
if __name__ == '__main__':
logging.basicConfig(filename='predict_log.txt', level=logging.INFO)
args = init_argparse(consts.PREDICT_ARGS)
print_args(args)
model = load_checkpoint(args.checkpoint, args.gpu)
probs, classes = predict(image_path=args.input_img, model=model, k=args.top_k, cuda=args.gpu)
pred_labels = map_category_names(cat_to_name=args.category_names,
classes=classes)
print_predictions(pred_labels, probs)
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2017 theloop, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test Radiostation Admin Manager"""
import unittest
import json
import os
import loopchain.utils as util
import testcase.unittest.test_util as test_util
from loopchain.radiostation import AdminManager
from loopchain import configure as conf
util.set_log_level_debug()
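# The fixture testcase/unittest/channel_manage_data_for_test.json referenced below
# is assumed to map channel names ("kofia_certificate", "kofia_fine") to objects
# that include a "peers" list, following loopchain's channel_manage_data schema;
# the tests rely on the channel names and their peer targets.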
class TestRSAdminManager(unittest.TestCase):
def setUp(self):
test_util.print_testname(self._testMethodName)
def tearDown(self):
pass
def test_get_channel_info_by_peer_target(self):
# GIVEN
default_CHANNEL_MANAGE_DATA_PATH = conf.CHANNEL_MANAGE_DATA_PATH
default_ENABLE_CHANNEL_AUTH = conf.ENABLE_CHANNEL_AUTH
conf.CHANNEL_MANAGE_DATA_PATH = os.path.join(conf.LOOPCHAIN_ROOT_PATH,
"testcase/unittest/channel_manage_data_for_test.json")
conf.ENABLE_CHANNEL_AUTH = True
peer_target1 = '111.123.123.123:7100'
peer_target2 = '222.123.123.123:7200'
peer_target3 = '333.123.123.123:7300'
peer_target4 = '444.123.123.123:7400'
channel1 = 'kofia_certificate'
channel2 = 'kofia_fine'
# WHEN
channel_infos1 = json.loads(AdminManager("station").get_channel_infos_by_peer_target(peer_target1))
channel_infos2 = json.loads(AdminManager("station").get_channel_infos_by_peer_target(peer_target2))
channel_infos3 = json.loads(AdminManager("station").get_channel_infos_by_peer_target(peer_target3))
channel_infos4 = json.loads(AdminManager("station").get_channel_infos_by_peer_target(peer_target4))
# THEN
self.assertEqual(list(channel_infos1.keys()), [channel1, channel2])
self.assertEqual(list(channel_infos2.keys()), [channel1])
self.assertEqual(list(channel_infos3.keys()), [channel2])
self.assertEqual(list(channel_infos4.keys()), [])
# CLEAR
conf.CHANNEL_MANAGE_DATA_PATH = default_CHANNEL_MANAGE_DATA_PATH
conf.ENABLE_CHANNEL_AUTH = default_ENABLE_CHANNEL_AUTH
def test_get_all_channel_info(self):
# GIVEN
default_CHANNEL_MANAGE_DATA_PATH = conf.CHANNEL_MANAGE_DATA_PATH
conf.CHANNEL_MANAGE_DATA_PATH = os.path.join(conf.LOOPCHAIN_ROOT_PATH,
"testcase/unittest/channel_manage_data_for_test.json")
# WHEN
all_channel_info = AdminManager("station").get_all_channel_info()
# THEN
self.assertTrue(isinstance(all_channel_info, str))
# CLEAR
conf.CHANNEL_MANAGE_DATA_PATH = default_CHANNEL_MANAGE_DATA_PATH
def test_add_peer_target(self):
# GIVEN
default_CHANNEL_MANAGE_DATA_PATH = conf.CHANNEL_MANAGE_DATA_PATH
conf.CHANNEL_MANAGE_DATA_PATH = os.path.join(conf.LOOPCHAIN_ROOT_PATH,
"testcase/unittest/channel_manage_data_for_test.json")
choice = 'Y'
i = 0
new_peer_target = '9.9.9.9:9999'
default_data = AdminManager("station").json_data
channel_list = AdminManager("station").get_channel_list()
peer_target_list = default_data[channel_list[0]]["peers"]
# WHEN
modified_data = AdminManager("station").add_peer_target(choice, new_peer_target, peer_target_list, i)
# THEN
self.assertNotEqual(default_data, modified_data)
# CLEAR
conf.CHANNEL_MANAGE_DATA_PATH = default_CHANNEL_MANAGE_DATA_PATH
if __name__ == '__main__':
unittest.main()
| python |
"""
The viewer is just a frameset that loads a menu and a folder.
"""
def generateHtml(pathUrl):
html = f"""<html>
<head><title>ABF Browser</title></head>
<frameset cols='300px,100%' border='5'>
<frame name='menu' src='/ABFmenu/{pathUrl}' frameborder='0' />
<frame name='content' src='/ABFexperiment/{pathUrl}' frameborder='0' />
</frameset>
</html>"""
return html
| python |
# Copyright (c) 2020 Attila Gobi
# SPDX-License-Identifier: BSD-3-Clause
"""
Solution for https://adventofcode.com/2020/day/4
>>> passports = parse("day04/test.txt")
>>> solve1(passports)
2
>>> solve2(passports)
2
"""
import sys
import re
def parse(fn):
ret = []
current = {}
with open(fn, "rt") as f:
for line in f:
line = line.strip()
if line == "":
ret.append(current)
current = {}
else:
for k, v in [x.split(":") for x in line.split(" ")]:
current[k] = v
ret.append(current)
return ret
def solve1(data):
fields = set(["byr", "iyr", "eyr", "hgt", "hcl", "ecl", "pid"])
count = 0
for passport in data:
if not fields - set(passport.keys()):
count += 1
return count
def solve2(data):
pid_re = re.compile(r'\d{9}')
hcl_re = re.compile(r'#[0-9a-f]{6}')
ecl_set = set(["amb", "blu", "brn", "gry", "grn", "hzl", "oth"])
def valid_hgt(x):
try:
int_x = int(x[:-2])
if x.endswith("in"):
return int_x >= 59 and int_x <= 76
elif x.endswith("cm"):
return int_x >= 150 and int_x <= 193
except ValueError:
pass
return False
fields = {
"byr": lambda x: int(x) >= 1920 and int(x) <= 2002,
"iyr": lambda x: int(x) >= 2010 and int(x) <= 2020,
"eyr": lambda x: int(x) >= 2020 and int(x) <= 2030,
"hgt": valid_hgt,
"hcl": lambda x: hcl_re.fullmatch(x),
"ecl": lambda x: x in ecl_set,
"pid": lambda x: pid_re.fullmatch(x)
}
def validate(passport):
for k, v in fields.items():
if k not in passport or not v(passport[k]):
# print("ERROR:", k, passport)
return False
return True
count = 0
for passport in data:
if validate(passport):
count += 1
return count
if __name__ == '__main__':
data = parse(sys.argv[1])
print(solve1(data))
print(solve2(data))
| python |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from enum import Enum
__all__ = [
'ConnectionAliasAssociationAssociationStatus',
'ConnectionAliasState',
]
class ConnectionAliasAssociationAssociationStatus(str, Enum):
NOT_ASSOCIATED = "NOT_ASSOCIATED"
PENDING_ASSOCIATION = "PENDING_ASSOCIATION"
ASSOCIATED_WITH_OWNER_ACCOUNT = "ASSOCIATED_WITH_OWNER_ACCOUNT"
ASSOCIATED_WITH_SHARED_ACCOUNT = "ASSOCIATED_WITH_SHARED_ACCOUNT"
PENDING_DISASSOCIATION = "PENDING_DISASSOCIATION"
class ConnectionAliasState(str, Enum):
CREATING = "CREATING"
CREATED = "CREATED"
DELETING = "DELETING"
| python |
import itertools
from aoc_cqkh42 import BaseSolution
class Solution(BaseSolution):
def part_a(self):
self.sequence(40)
return len(self.data)
def part_b(self):
self.sequence(10)
return len(self.data)
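# iteration() below performs one "look-and-say" step: itertools.groupby
# collapses runs of equal characters, and each run is re-emitted as
# "<run length><character>" (e.g. '111221' -> '312211').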
def iteration(self):
g = itertools.groupby(self.data)
d = (f'{len(list(b))}{a}' for a, b in g)
self.data = ''.join(d)
def sequence(self, iters):
for _ in range(iters):
self.iteration()
| python |
from os import path
from setuptools import setup
# read the contents of your README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='football-data-connector',
version='0.9.1',
url='https://github.com/tony-joseph/football-data-connector',
license='BSD',
author='Tony Joseph',
author_email='[email protected]',
description='Python package to connect to football-data.org API',
long_description=long_description,
long_description_content_type='text/markdown',
packages=['footballdata'],
include_package_data=True,
install_requires=[
'python-dateutil>=2.7.5',
'requests>=2.20.0',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
]
)
| python |
'''File contains the trainer class
Complete the functions train() which will train the network given the dataset and hyperparams, and the function __init__ to set your network topology for each dataset
'''
import numpy as np
import sys
import pickle
import nn
from util import *
from layers import *
class Trainer:
def __init__(self,dataset_name):
self.save_model = False
if dataset_name == 'MNIST':
self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readMNIST()
# Add your network topology along with other hyperparameters here
self.batch_size = 100
self.epochs = 10
self.lr = 0.03
self.nn = nn.NeuralNetwork(10, self.lr)
self.nn.addLayer(FullyConnectedLayer(784, 28, 'relu'))
self.nn.addLayer(FullyConnectedLayer(28, 10, 'softmax'))
if dataset_name == 'CIFAR10':
self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readCIFAR10()
self.XTrain = self.XTrain[0:5000,:,:,:]
self.XVal = self.XVal[0:1000,:,:,:]
self.XTest = self.XTest[0:1000,:,:,:]
self.YVal = self.YVal[0:1000,:]
self.YTest = self.YTest[0:1000,:]
self.YTrain = self.YTrain[0:5000,:]
self.save_model = True
self.model_name = "model.p"
# Add your network topology along with other hyperparameters here
self.batch_size = 100
self.epochs = 40
self.lr = 0.03
self.nn = nn.NeuralNetwork(10, self.lr)
self.nn.addLayer(ConvolutionLayer([3, 32, 32], [5, 5], 16, 1, 'relu'))
self.nn.addLayer(MaxPoolingLayer([16, 28, 28], [2, 2], 2))
self.nn.addLayer(ConvolutionLayer([16, 14, 14], [5, 5], 20, 1, 'relu'))
self.nn.addLayer(MaxPoolingLayer([20, 10, 10], [2, 2], 2))
self.nn.addLayer(FlattenLayer())
self.nn.addLayer(FullyConnectedLayer(500, 10, 'softmax'))
if dataset_name == 'XOR':
self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readXOR()
# Add your network topology along with other hyperparameters here
self.batch_size = 10
self.epochs = 30
self.lr = 0.03
self.nn = nn.NeuralNetwork(2,self.lr)
self.nn.addLayer(FullyConnectedLayer(2,4,'softmax'))
self.nn.addLayer(FullyConnectedLayer(4,2,'softmax'))
if dataset_name == 'circle':
self.XTrain, self.YTrain, self.XVal, self.YVal, self.XTest, self.YTest = readCircle()
# Add your network topology along with other hyperparameters here
self.batch_size = 10
self.epochs = 30
self.lr = 0.03
self.nn = nn.NeuralNetwork(2,self.lr)
self.nn.addLayer(FullyConnectedLayer(2,2,'relu'))
self.nn.addLayer(FullyConnectedLayer(2,2,'softmax'))
def train(self, verbose=True):
# Method for training the Neural Network
# Input
# verbose - If True, prints training loss/accuracy for each epoch and the
#           validation set accuracy after each epoch
# The method trains the weights and biases using the training data
# (self.XTrain, self.YTrain) in mini-batches of size self.batch_size; if
# self.save_model is True, the layer weights and biases are pickled to
# self.model_name after each epoch, and validation accuracy is then
# evaluated on (self.XVal, self.YVal).
for epoch in range(self.epochs):
# A Training Epoch
if verbose:
print("Epoch: ", epoch)
# TODO
# Shuffle the training data for the current epoch
shuffled_indices = np.arange(self.XTrain.shape[0])
np.random.shuffle(shuffled_indices)
self.XTrain = self.XTrain[shuffled_indices]
self.YTrain = self.YTrain[shuffled_indices]
# Initializing training loss and accuracy
trainLoss = 0
trainAcc = 0
# Divide the training data into mini-batches
numBatches = 0
for i in range(0,self.XTrain.shape[0]-self.batch_size+1,self.batch_size):
numBatches+=1
# Calculate the activations after the feedforward pass
activations = self.nn.feedforward(self.XTrain[i:i+self.batch_size])
# Compute the loss
trainLoss += self.nn.computeLoss(self.YTrain[i:i+self.batch_size], activations)
# Calculate the training accuracy for the current batch
predlabels = oneHotEncodeY(np.argmax(activations[-1],-1),self.nn.out_nodes)
trainAcc += self.nn.computeAccuracy(self.YTrain[i:i+self.batch_size], predlabels)
# Backpropagation Pass to adjust weights and biases of the neural network
self.nn.backpropagate(activations, self.YTrain[i:i+self.batch_size])
# END TODO
# Print Training loss and accuracy statistics
trainAcc /= numBatches
if verbose:
print("Epoch ", epoch, " Training Loss=", trainLoss, " Training Accuracy=", trainAcc)
if self.save_model:
model = []
for l in self.nn.layers:
# print(type(l).__name__)
if type(l).__name__ != "AvgPoolingLayer" and type(l).__name__ != "FlattenLayer" and type(l).__name__ != "MaxPoolingLayer":
model.append(l.weights)
model.append(l.biases)
pickle.dump(model,open(self.model_name,"wb"))
print("Model Saved... ")
# Estimate the prediction accuracy over validation data set
if self.XVal is not None and self.YVal is not None and verbose:
_, validAcc = self.nn.validate(self.XVal, self.YVal)
print("Validation Set Accuracy: ", validAcc, "%")
pred, acc = self.nn.validate(self.XTest, self.YTest)
print('Test Accuracy ',acc)
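# A minimal usage sketch, assuming the accompanying nn/util/layers modules are on
# the path; 'XOR' is one of the dataset names handled in __init__ above.
# trainer = Trainer('XOR')
# trainer.train(verbose=True)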
| python |
#!/usr/bin/python
class FilterModule(object):
def filters(self):
return {
'amend_list_items': self.amend_list_items
}
def amend_list_items(self, orig_list, prefix="", postfix=""):
return list(map(lambda listelement: prefix +
str(listelement) + postfix, orig_list))
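# Quick sanity check of the filter logic when this file is run directly
# (outside Ansible); the sample values are hypothetical.
if __name__ == "__main__":
    amend = FilterModule().filters()['amend_list_items']
    # prefixes/postfixes each element after str() conversion
    print(amend([1, 2, 3], prefix="eth", postfix="/0"))  # ['eth1/0', 'eth2/0', 'eth3/0']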
| python |
from django import template
from django.utils.translation import gettext as _
register = template.Library()
@register.simple_tag
def pagetitle(title, **kwargs):
if "page" in kwargs and kwargs["page"] > 1:
title += " (%s)" % (_("page: %(page)s") % {"page": kwargs["page"]})
if "parent" in kwargs:
title += " | %s" % kwargs["parent"]
return title
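# Example (hypothetical values): pagetitle("General talk", page=2, parent="Forum")
# returns "General talk (page: 2) | Forum".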
| python |
from pydantic.class_validators import root_validator
from pydantic.main import BaseModel
from .types import TopicID
class InputTopic(BaseModel):
default: str
@root_validator
def check_lang(cls, obj):
default_lang = obj["default"]
if default_lang == "default" or default_lang not in obj:
raise ValueError(f"Default language can't be '{default_lang}'.")
return obj
class Topic(InputTopic):
_id: TopicID
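# Note: the check_lang validator above assumes concrete topic models declare one
# field per language code (e.g. an "en" field) so that "default" can name one of
# them; the bare InputTopic/Topic classes shown here would reject any input on
# their own, since "default" is their only declared field.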
| python |
import os, datetime
import pandas as pd
from download.box import LifespanBox
import sys
verbose = True
#verbose = False
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
#Two types of files to curate...the so called raw data from which scores are generated and the scores themeselves.
#connect to Box (to get latest greatest curated stuff)
box_temp='/home/petra/UbWinSharedSpace1/boxtemp' #location of local copy of curated data
box = LifespanBox(cache=box_temp)
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
#removelist=pd.read_csv(os.path.join(box_temp,'RemoveFromCurated_perTrello19May2020.csv'))
removelist=pd.read_csv(os.path.join(box_temp,'RemoveFromCurated_perTrello27May2020.csv'))
#validpair(pin='HCD0007014_V1')
#get list of filenames
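#NOTE: besides findpairs/findwierdos defined near the bottom of this script, the
#helpers used below (foldercontents, folderlistcontents, catcontents, box2dataframe,
#validpair) are assumed to be defined later in the file or in the interactive
#session before these blocks are run.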
##################################################################################################
WashuD=84801037257
curated=82804015457
wudfiles, wudfolders=foldercontents(WashuD)
#wudfiles2, wudfolders2=folderlistcontents(wudfolders.foldername,wudfolders.folder_id)
#wudfiles=pd.concat([wudfiles,wudfiles2],axis=0,sort=True)
data4process=wudfiles.loc[(wudfiles.filename.str.contains('aw_')==True) | (wudfiles.filename.str.contains('Raw')==True)]
scores4process=wudfiles.loc[wudfiles.filename.str.contains('cored')==True]
data4process['PIN']=data4process.filename.str[:13]
scores4process['PIN']=scores4process.filename.str[:13]
data4process['Fail']=1
scores4process['Fail']=1
#run the validator for each pair of files in the Corrected data - write log to a file
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'WashUDevelopment_QC_Corrected_'+snapshotdate+'.txt'),'w')
sys.stdout = f
for p in data4process.PIN:
checkd, checks=validpair(p)
sys.stdout = orig_stdout
f.close()
data4process.groupby('Fail').count()
#box.download_files(data4process.file_id)
#box.download_files(scores4process.file_id)
#subset to files that passed basic QC for next round
wdatainit=catcontents(data4process.loc[data4process.Fail==0],box_temp)
wscoreinit=catcontents(scores4process.loc[scores4process.Fail==0],box_temp)
wdatainit.PIN=wdatainit.PIN.str.strip()
wscoreinit.PIN=wscoreinit.PIN.str.strip()
#dlist and slist should be empty
dlist,slist=findwierdos(wdatainit,wscoreinit)
if dlist.empty and slist.empty:
wdatainit=wdatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
wscoreinit=wscoreinit.drop_duplicates(subset={'PIN','Inst'})
wdatainit = wdatainit.loc[wdatainit.PIN.isnull() == False]
wscoreinit = wscoreinit.loc[wscoreinit.PIN.isnull() == False]
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
w=findpairs(wdatainit,wscoreinit) #this is the list of ids in both scored and raw data
len(wdatainit.PIN.unique())
len(wscoreinit.PIN.unique())
#of those that passed, check to see if they are already in curated. If so, confirm that the RA wanted to replace
#the data; otherwise, send back to site for confirmation
curdata,curscores=box2dataframe(fileid=curated) #301 in each now
droplist=[]
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'WashUDevelopment_QC_Corrected_'+snapshotdate+'.txt'),'a')
sys.stdout = f
for i in curscores.loc[curscores.PIN.isin(w)].PIN.unique():
print('Checking files in CURATED folder for PIN='+i + ' against Trello list membership')
if i in list(removelist.PIN):
print('Status : PASS')
else:
print(i + ' Is not in List to Remove from Curated: please review Trello card and move to approp list')
droplist=droplist+[i]
data4process.loc[data4process.PIN==i,'Fail']=1
scores4process.loc[scores4process.PIN==i,'Fail']=1
print('Status : FAIL')
print('##################################################')
print('Summary of Failures')
print(data4process.loc[data4process.Fail==1])
print(scores4process.loc[scores4process.Fail==1])
print('##################################################')
sys.stdout = orig_stdout
f.close()
droplist
data4process.groupby('Fail').count()
scores4process.groupby('Fail').count()
wdatainit.loc[~(wdatainit.PIN.isin(droplist))].to_csv(box_temp+'/wudPASSED_corrected_data'+snapshotdate+'.csv')
wscoreinit.loc[~(wscoreinit.PIN.isin(droplist))].to_csv(box_temp+'/wudPASSED_corrected_scores'+snapshotdate+'.csv')
box.upload_file(box_temp+'/wudPASSED_corrected_data'+snapshotdate+'.csv',WashuD)
box.upload_file(box_temp+'/wudPASSED_corrected_scores'+snapshotdate+'.csv',WashuD)
##################################################################################################
WashuA=84799623206
curated=82804729845
wuafiles, wuafolders=foldercontents(WashuA)
data4process=wuafiles.loc[(wuafiles.filename.str.contains('aw_')==True) | (wuafiles.filename.str.contains('Raw')==True)]
scores4process=wuafiles.loc[wuafiles.filename.str.contains('core')==True]
data4process['PIN']=data4process.filename.str[:13]
scores4process['PIN']=scores4process.filename.str[:13]
data4process['Fail']=1
scores4process['Fail']=1
#run the validator for each pair of files in the Corrected data - write log to a file
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'WashUAging_QC_Corrected_'+snapshotdate+'.txt'),'w')
sys.stdout = f
for p in data4process.PIN:
checkd, checks=validpair(p)
sys.stdout = orig_stdout
f.close()
data4process.groupby('Fail').count()
wadatainit=catcontents(data4process.loc[data4process.Fail==0],box_temp)
wascoreinit=catcontents(scores4process.loc[scores4process.Fail==0],box_temp)
wadatainit.PIN=wadatainit.PIN.str.strip()
wascoreinit.PIN=wascoreinit.PIN.str.strip()
dlist,slist=findwierdos(wadatainit,wascoreinit)
if dlist.empty and slist.empty:
wadatainit=wadatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
wascoreinit=wascoreinit.drop_duplicates(subset={'PIN','Inst'})
wadatainit = wadatainit.loc[wadatainit.PIN.isnull() == False]
wascoreinit = wascoreinit.loc[wascoreinit.PIN.isnull() == False]
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
w=findpairs(wadatainit,wascoreinit) #this is the list of ids in both scored and raw data
len(wascoreinit.PIN.unique())==len(wadatainit.PIN.unique())
len(wascoreinit.PIN.unique())
#of those that passed, check to see if they are already in curated. If so, confirm that the RA wanted to replace
#the data; otherwise, send back to site for confirmation
curdata,curscores=box2dataframe(fileid=curated)
droplist=[]
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'WashUAging_QC_Corrected_'+snapshotdate+'.txt'),'a')
sys.stdout = f
for i in curscores.loc[curscores.PIN.isin(w)].PIN.unique():
print('Checking files in CURATED folder for PIN='+i + ' against Trello list membership')
if i in list(removelist.PIN):
print('Status : PASS')
else:
print(i + ' Is not in List to Remove from Curated: please review Trello card and move to approp list')
droplist=droplist+[i]
data4process.loc[data4process.PIN==i,'Fail']=1
scores4process.loc[scores4process.PIN==i,'Fail']=1
print('Status : FAIL')
print('##################################################')
print('Summary of Failures')
print(data4process.loc[data4process.Fail==1])
print(scores4process.loc[scores4process.Fail==1])
print('##################################################')
sys.stdout = orig_stdout
f.close()
droplist
data4process.groupby('Fail').count()
scores4process.groupby('Fail').count()
wadatainit.loc[~(wadatainit.PIN.isin(droplist))].to_csv(box_temp+'/wuaPASSED_corrected_data'+snapshotdate+'.csv')
wascoreinit.loc[~(wascoreinit.PIN.isin(droplist))].to_csv(box_temp+'/wuaPASSED_corrected_scores'+snapshotdate+'.csv')
box.upload_file(box_temp+'/wuaPASSED_corrected_data'+snapshotdate+'.csv',WashuA)
box.upload_file(box_temp+'/wuaPASSED_corrected_scores'+snapshotdate+'.csv',WashuA)
##########################
Harvard=84800505740
harvardfiles, harvardfolders=foldercontents(Harvard)
harvardfoldersnew=harvardfolders.loc[~(harvardfolders.foldername=='incorporated')]
harvardfiles2, harvardfolders2=folderlistcontents(harvardfoldersnew.foldername,harvardfoldersnew.folder_id)
harvardfiles=harvardfiles2.copy()
data4process=harvardfiles.loc[(harvardfiles.filename.str.contains('aw_')==True) | (harvardfiles.filename.str.contains('Raw')==True)]
scores4process=harvardfiles.loc[harvardfiles.filename.str.contains('core')==True]
data4process['PIN']=data4process.filename.str[:13]
scores4process['PIN']=scores4process.filename.str[:13]
data4process['Fail']=1
scores4process['Fail']=1
#run the validator for each pair of files in the Corrected data - write log to a file
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'Harvard_QC_Corrected_'+snapshotdate+'.txt'),'w')
sys.stdout = f
for p in data4process.PIN:
checkd, checks=validpair(p)
sys.stdout = orig_stdout
f.close()
data4process.groupby('Fail').count()
#stil nothing new to process at this time
####upload the concatenated files to site directory in box and move other files to incorporated
####hdatainit.to_csv(box_temp+'/harvard_corrected_data'+snapshotdate+'.csv')
####hscoreinit.to_csv(box_temp+'/harvard_corrected_scores'+snapshotdate+'.csv')
####box.upload_file(box_temp+'/harvard_corrected_data'+snapshotdate+'.csv',Harvard)
####box.upload_file(box_temp+'/harvard_corrected_scores'+snapshotdate+'.csv',Harvard)
####all files associated with this snapshotdate moved to incorporated_snapshotdate folder under this
####corrected folder
#########################################
###CANT ADD NEW DATA FROM MGH BECAUSE UPLOADED AS XLS
###again can't upload because uploaded as gsheet.
MGH=84799213727
mghfiles, mghfolders=foldercontents(MGH)
#petra to send request to update file format for HCA6826989_V1 trello card
####data4process=mghfiles.loc[(mghfiles.filename.str.contains('Data')==True) | (mghfiles.filename.str.contains('Raw')==True)]
####scores4process=mghfiles.loc[mghfiles.filename.str.contains('Score')==True]
####box.download_files(data4process.file_id)
####box.download_files(scores4process.file_id)
####mdatainit=catcontents(data4process,box_temp)
####mscoreinit=catcontents(scores4process,box_temp)
####dlist,slist=findwierdos(mdatainit,mscoreinit)
####if dlist.empty and slist.empty:
#### mdatainit=mdatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
#### mscoreinit=mscoreinit.drop_duplicates(subset={'PIN','Inst'})
####else:
#### print('Found Non-Identical Duplications')
#### print(dlist)
#### print(slist)
####m=findpairs(mdatainit,mscoreinit) #this is the list of ids in both scored and raw data
#####upload the concatenated files to site directory in box and move other files to incorporated
####mdatainit.to_csv(box_temp+'/mgh_corrected_data'+snapshotdate+'.csv')
####mscoreinit.to_csv(box_temp+'/mgh_corrected_scores'+snapshotdate+'.csv')
####box.upload_file(box_temp+'/mgh_corrected_data'+snapshotdate+'.csv',MGH)
#box.upload_file(box_temp+'/mgh_corrected_scores'+snapshotdate+'.csv',MGH)
####
#all files associated with this snapshotdate moved to incorporated_snapshotdate folder under this
#corrected folder
##########################################################################################################
#ANY? OF THE UMN FILES UPLOADED TO CORRECTED HAVE HEADERS...SIGH
#no new data this round...all still missing headers
umnD=84799525828
curated=82805151056
umnDfiles, umnDfolders=foldercontents(umnD)
data4process=umnDfiles.loc[(umnDfiles.filename.str.contains('Data')==True) | (umnDfiles.filename.str.contains('Raw')==True)]
scores4process=umnDfiles.loc[umnDfiles.filename.str.contains('core')==True]
data4process['PIN']=data4process.filename.str[:13]
scores4process['PIN']=scores4process.filename.str[:13]
data4process['Fail']=1
scores4process['Fail']=1
#run the validator for each pair of files in the Corrected data - write log to a file
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'UMN_Development_QC_Corrected_'+snapshotdate+'.txt'),'w')
sys.stdout = f
for p in data4process.PIN:
checkd, checks=validpair(p)
sys.stdout = orig_stdout
f.close()
data4process.groupby('Fail').count()
#subset to files that passed basic QC for next round
udatainit=catcontents(data4process.loc[data4process.Fail==0],box_temp)
uscoreinit=catcontents(scores4process.loc[scores4process.Fail==0],box_temp)
udatainit.PIN=udatainit.PIN.str.strip()
uscoreinit.PIN=uscoreinit.PIN.str.strip()
#dlist and slist should be empty
dlist,slist=findwierdos(udatainit,uscoreinit)
if dlist.empty and slist.empty:
udatainit=udatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
uscoreinit=uscoreinit.drop_duplicates(subset={'PIN','Inst'})
udatainit = udatainit.loc[udatainit.PIN.isnull() == False]
uscoreinit = uscoreinit.loc[uscoreinit.PIN.isnull() == False]
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
w=findpairs(udatainit,uscoreinit) #this is the list of ids in both scored and raw data
len(uscoreinit.PIN.unique())
len(udatainit.PIN.unique())
#of those that passed, check to see if they are already in curated. If so, confirm that the RA wanted to replace
#the data; otherwise, send back to site for confirmation
curdata,curscores=box2dataframe(fileid=curated) #301 in each now
droplist=[]
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'UMN_Development_QC_Corrected_'+snapshotdate+'.txt'),'a')
sys.stdout = f
for i in curscores.loc[curscores.PIN.isin(w)].PIN.unique():
print('Checking files in CURATED folder for PIN='+i + ' against Trello list membership')
if i in list(removelist.PIN):
print('Status : PASS')
else:
print(i + ' Is not in List to Remove from Curated: please review Trello card and move to approp list')
droplist=droplist+[i]
data4process.loc[data4process.PIN==i,'Fail']=1
scores4process.loc[scores4process.PIN==i,'Fail']=1
print('Status : FAIL')
print('##################################################')
print('Summary of Failures')
print(data4process.loc[data4process.Fail==1])
print(scores4process.loc[scores4process.Fail==1])
print('##################################################')
sys.stdout = orig_stdout
f.close()
droplist
data4process.groupby('Fail').count()
scores4process.groupby('Fail').count()
udatainit.loc[~(udatainit.PIN.isin(droplist))].to_csv(box_temp+'/umndPASSED_corrected_data'+snapshotdate+'.csv')
uscoreinit.loc[~(uscoreinit.PIN.isin(droplist))].to_csv(box_temp+'/umndPASSED_corrected_scores'+snapshotdate+'.csv')
box.upload_file(box_temp+'/umndPASSED_corrected_data'+snapshotdate+'.csv',umnD)
box.upload_file(box_temp+'/umndPASSED_corrected_scores'+snapshotdate+'.csv',umnD)
######################################################
umnA=84799599800
curated=82803665867
umnafiles, umnafolders=foldercontents(umnA)
umnafiles2, umnafolders2=folderlistcontents(umnafolders.loc[~(umnafolders.foldername=='incorporated')].foldername,umnafolders.loc[~(umnafolders.foldername=='incorporated')].folder_id)
umnafiles=pd.concat([umnafiles,umnafiles2],axis=0,sort=True)
data4process=umnafiles.loc[umnafiles.filename.str.contains('Raw')==True]
scores4process=umnafiles.loc[umnafiles.filename.str.contains('Score')==True]
data4process['PIN']=data4process.filename.str[:13]
scores4process['PIN']=scores4process.filename.str[:13]
data4process['Fail']=1
scores4process['Fail']=1
#run the validator for each pair of files in the Corrected data - write log to a file
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'UMN_Aging_QC_Corrected_'+snapshotdate+'.txt'),'w')
sys.stdout = f
for p in data4process.PIN:
checkd, checks=validpair(p)
sys.stdout = orig_stdout
f.close()
data4process.groupby('Fail').count()
umadatainit=catcontents(data4process.loc[data4process.Fail==0],box_temp)
umascoreinit=catcontents(scores4process.loc[scores4process.Fail==0],box_temp)
umadatainit.PIN=umadatainit.PIN.str.strip()
umascoreinit.PIN=umascoreinit.PIN.str.strip()
#dlist and slist should be empty
dlist,slist=findwierdos(umadatainit,umascoreinit)
if dlist.empty and slist.empty:
umadatainit=umadatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
umascoreinit=umascoreinit.drop_duplicates(subset={'PIN','Inst'})
umadatainit = umadatainit.loc[umadatainit.PIN.isnull() == False]
umascoreinit = umascoreinit.loc[umascoreinit.PIN.isnull() == False]
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
w=findpairs(umadatainit,umascoreinit) #this is the list of ids in both scored and raw data
len(umadatainit.PIN.unique())
len(umascoreinit.PIN.unique())
#of those that passed, check to see if they are already in curated. If so, confirm that the RA wanted to replace
#the data; otherwise, send back to site for confirmation
curdata,curscores=box2dataframe(fileid=curated)
droplist=[]
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'UMN_Aging_QC_Corrected_'+snapshotdate+'.txt'),'a')
sys.stdout = f
for i in curscores.loc[curscores.PIN.isin(w)].PIN.unique():
print('Checking files in CURATED folder for PIN='+i + ' against Trello list membership')
if i in list(removelist.PIN):
print('Status : PASS')
else:
print(i + ' Is not in List to Remove from Curated: please review Trello card and move to approp list')
droplist=droplist+[i]
data4process.loc[data4process.PIN==i,'Fail']=1
scores4process.loc[scores4process.PIN==i,'Fail']=1
print('Status : FAIL')
print('##################################################')
print('Summary of Failures')
print(data4process.loc[data4process.Fail==1])
print(scores4process.loc[scores4process.Fail==1])
print('##################################################')
sys.stdout = orig_stdout
f.close()
droplist
data4process.groupby('Fail').count()
scores4process.groupby('Fail').count()
umadatainit.loc[~(umadatainit.PIN.isin(droplist))].to_csv(box_temp+'/umnaPASSED_corrected_data'+snapshotdate+'.csv')
umascoreinit.loc[~(umascoreinit.PIN.isin(droplist))].to_csv(box_temp+'/umnaPASSED_corrected_scores'+snapshotdate+'.csv')
box.upload_file(box_temp+'/umnaPASSED_corrected_data'+snapshotdate+'.csv',umnA)
box.upload_file(box_temp+'/umnaPASSED_corrected_scores'+snapshotdate+'.csv',umnA)
######################################################
uclaA=84799075673
curated=82807223120
uclaAfiles, uclaAfolders=foldercontents(uclaA)
data4process=uclaAfiles.loc[uclaAfiles.filename.str.contains('Raw')==True]
scores4process=uclaAfiles.loc[uclaAfiles.filename.str.contains('Score')==True]
data4process['PIN']=data4process.filename.str[:13]
scores4process['PIN']=scores4process.filename.str[:13]
data4process['Fail']=1
scores4process['Fail']=1
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'UCLA_Aging_QC_Corrected_'+snapshotdate+'.txt'),'w')
sys.stdout = f
print('Checking that both Scores and Raw data uploaded for given PIN=')
droplist=[]
for p in data4process.PIN.unique():
if p in scores4process.PIN.unique():
pass
else:
print(p+' Missing scores file')
print('Status: FAIL')
droplist=droplist+[p]
droplist=[]
for p in scores4process.PIN.unique():
if p in data4process.PIN.unique():
pass
else:
print(p+' Missing Raw/Data file')
print('Status: FAIL')
droplist=droplist+[p]
print('##################################################')
data4process=data4process.loc[~(data4process.PIN.isin(droplist))]
scores4process=scores4process.loc[~(scores4process.PIN.isin(droplist))]
#run the validator for each pair of files in the Corrected data - write log to a file
for p in data4process.PIN:
checkd, checks=validpair(p)
sys.stdout = orig_stdout
f.close()
data4process.groupby('Fail').count()
#subset to files that passed basic QC for next round
uadatainit=catcontents(data4process.loc[data4process.Fail==0],box_temp)
uascoreinit=catcontents(scores4process.loc[scores4process.Fail==0],box_temp)
uadatainit['PIN']=uadatainit.PIN.str.strip()
uascoreinit['PIN']=uascoreinit.PIN.str.strip()
#dlist and slist should be empty
dlist,slist=findwierdos(uadatainit,uascoreinit)
if dlist.empty and slist.empty:
uadatainit=uadatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
uascoreinit=uascoreinit.drop_duplicates(subset={'PIN','Inst'})
uadatainit = uadatainit.loc[uadatainit.PIN.isnull() == False]
uascoreinit = uascoreinit.loc[uascoreinit.PIN.isnull() == False]
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
w=findpairs(uadatainit,uascoreinit) #this is the list of ids in both scored and raw data
#keep the ones that have no nan pins
len(uadatainit.PIN.unique())
len(uascoreinit.PIN.unique())
#of those that passed, check to see if they are already in curated. If so, confirm that the RA wanted to replace
#the data; otherwise, send back to site for confirmation
curdata,curscores=box2dataframe(fileid=curated) #301 in each now
droplist=[]
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'UCLA_Aging_QC_Corrected_'+snapshotdate+'.txt'),'a')
sys.stdout = f
for i in curscores.loc[curscores.PIN.isin(w)].PIN.unique():
print('Checking files in CURATED folder for PIN='+i + ' against Trello list membership')
if i in list(removelist.PIN):
print('Status : PASS')
else:
print(i + ' Is not in List to Remove from Curated: please review Trello card and move to approp list')
droplist=droplist+[i]
data4process.loc[data4process.PIN==i,'Fail']=1
scores4process.loc[scores4process.PIN==i,'Fail']=1
print('Status : FAIL')
print('##################################################')
print('Summary of Failures')
print(data4process.loc[data4process.Fail==1])
print(scores4process.loc[scores4process.Fail==1])
print('##################################################')
sys.stdout = orig_stdout
f.close()
droplist
data4process.groupby('Fail').count()
scores4process.groupby('Fail').count()
uadatainit.loc[~(uadatainit.PIN.isin(droplist))].to_csv(box_temp+'/uclaaPASSED_corrected_data'+snapshotdate+'.csv')
uascoreinit.loc[~(uascoreinit.PIN.isin(droplist))].to_csv(box_temp+'/uclaaPASSED_corrected_scores'+snapshotdate+'.csv')
box.upload_file(box_temp+'/uclaaPASSED_corrected_data'+snapshotdate+'.csv',uclaA)
box.upload_file(box_temp+'/uclaaPASSED_corrected_scores'+snapshotdate+'.csv',uclaA)
######################################################
uclaD=84800272537
curated=82805124019
uclaDfiles, uclaDfolders=foldercontents(uclaD)
data4process=uclaDfiles.loc[uclaDfiles.filename.str.contains('Raw')==True]
scores4process=uclaDfiles.loc[uclaDfiles.filename.str.contains('Score')==True]
data4process['PIN']=data4process.filename.str[:13]
scores4process['PIN']=scores4process.filename.str[:13]
data4process['Fail']=1
scores4process['Fail']=1
#run the validator for each pair of files in the Corrected data - write log to a file
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'UCLA_Development_QC_Corrected_'+snapshotdate+'.txt'),'w')
sys.stdout = f
for p in data4process.PIN:
checkd, checks=validpair(p)
sys.stdout = orig_stdout
f.close()
data4process.groupby('Fail').count()
#subset to files that passed basic QC for next round
uddatainit=catcontents(data4process.loc[data4process.Fail==0],box_temp)
udscoreinit=catcontents(scores4process.loc[scores4process.Fail==0],box_temp)
uddatainit['PIN']=uddatainit.PIN.str.strip()
udscoreinit['PIN']=udscoreinit.PIN.str.strip()
#dlist and slist should be empty
dlist,slist=findwierdos(uddatainit,udscoreinit)
if dlist.empty and slist.empty:
uddatainit=uddatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
udscoreinit=udscoreinit.drop_duplicates(subset={'PIN','Inst'})
uddatainit = uddatainit.loc[uddatainit.PIN.isnull() == False]
udscoreinit = udscoreinit.loc[udscoreinit.PIN.isnull() == False]
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
w=findpairs(uddatainit,udscoreinit) #this is the list of ids in both scored and raw data
len(uddatainit.PIN.unique())
len(udscoreinit.PIN.unique())
#of those that passed, check to see if they are already in curated. If so, confirm that the RA wanted to replace
#the data; otherwise, send back to site for confirmation
curdata,curscores=box2dataframe(fileid=curated)
droplist=[]
orig_stdout = sys.stdout
f = open(os.path.join(box_temp,'UCLA_Development_QC_Corrected_'+snapshotdate+'.txt'),'a')
sys.stdout = f
for i in curscores.loc[curscores.PIN.isin(w)].PIN.unique():
print('Checking files in CURATED folder for PIN='+i + ' against Trello list membership')
if i in list(removelist.PIN):
print('Status : PASS')
else:
        print(i + ' Is not in List to Remove from Curated: please review Trello card and move to appropriate list')
droplist=droplist+[i]
data4process.loc[data4process.PIN==i,'Fail']=1
scores4process.loc[scores4process.PIN==i,'Fail']=1
print('Status : FAIL')
print('##################################################')
print('Summary of Failures')
print(data4process.loc[data4process.Fail==1])
print(scores4process.loc[scores4process.Fail==1])
print('##################################################')
sys.stdout = orig_stdout
f.close()
droplist
data4process.groupby('Fail').count()
scores4process.groupby('Fail').count()
uddatainit.loc[~(uddatainit.PIN.isin(droplist))].to_csv(box_temp+'/ucladPASSED_corrected_data'+snapshotdate+'.csv')
udscoreinit.loc[~(udscoreinit.PIN.isin(droplist))].to_csv(box_temp+'/ucladPASSED_corrected_scores'+snapshotdate+'.csv')
box.upload_file(box_temp+'/ucladPASSED_corrected_data'+snapshotdate+'.csv',uclaD)
box.upload_file(box_temp+'/ucladPASSED_corrected_scores'+snapshotdate+'.csv',uclaD)
###########################
#altogether
#Harvard l hdatainit hscoreinit
#MGH m mdatainit mscoreinit
#WashuD wd wdatainit wscoreinit
#WashUA wa wadatainit wascoreinit
#UMND ud udatainit uscoreinit
#UMNA uma umadatainit umascoreinit
#UCLAA uca uadatainit uascoreinit
#UCLAD ucd uddatainit udscoreinit
#raw
correctedraw=pd.concat([hdatainit, mdatainit, wdatainit, wadatainit, udatainit, umadatainit, uadatainit, uddatainit],axis=0,sort=True)
correctedraw=correctedraw.loc[correctedraw.PIN.isnull()==False]
#scores
correctedscores=pd.concat([hscoreinit, mscoreinit, wscoreinit, wascoreinit, uscoreinit, umascoreinit, uascoreinit, udscoreinit],axis=0,sort=True)
correctedscores=correctedscores.loc[correctedscores.PIN.isnull()==False]
#check tallies - all 168
len(ucd)+len(uca)+len(wa)+len(wd)+len(ud)+len(uma)+len(l)+len(m)
len(correctedraw.PIN.unique())
len(correctedscores.PIN.unique())
#lightson
dlist,slist=findwierdos(correctedraw,correctedscores)
if dlist.empty and slist.empty:
correctedraw=correctedraw.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
correctedscores=correctedscores.drop_duplicates(subset={'PIN','Inst'})
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
correctedraw['subject']=correctedraw.PIN.str.split("_",expand=True)[0]
correctedraw['visit']=correctedraw.PIN.str.split("_",expand=True)[1]
correctedscores['subject']=correctedscores.PIN.str.split("_",expand=True)[0]
correctedscores['visit']=correctedscores.PIN.str.split("_",expand=True)[1]
correctedraw.to_csv(box_temp+'/allsites_corrected_data.csv')
correctedscores.to_csv(box_temp+'/allsites_corrected_scores.csv')
#hdatainit mdatainit wdatainit wadatainit udatainit uadatainit uddatainit
#hscoreinit mscoreinit wscoreinit wascoreinit uscoreinit uascoreinit udscoreinit
#pull id visit combos that aren't in both scores and data files
def findpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.PIN.unique():
if i in hdatainit.PIN.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following PINs in scores but not data:')
print(i)
for i in hdatainit.PIN.unique():
if i in hscoreinit.PIN.unique():
pass
else:
print('the following PINs in data but not scores:')
print(i)
return pinsinboth
def findwierdos(hdatainit,hscoreinit):
#compare the two types of sort to identify which files have non-identical duplications
sort1data=hdatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
sort1score=hscoreinit.drop_duplicates(subset={'PIN','Inst'})
sort2data=hdatainit.drop_duplicates(subset=set(hdatainit.columns).difference({'filename','file_id'}))
sort2score=hscoreinit.drop_duplicates(subset=set(hscoreinit.columns).difference({'filename','file_id'}))
s1d=sort1data.groupby('PIN').count()
s2d=sort2data.groupby('PIN').count()
databoth=pd.merge(s1d.reset_index()[['PIN','DeviceID']], s2d.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_data=databoth.loc[databoth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
s1s=sort1score.groupby('PIN').count()
s2s=sort2score.groupby('PIN').count()
scoreboth=pd.merge(s1s.reset_index()[['PIN','DeviceID']], s2s.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_score=scoreboth.loc[scoreboth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
return wierd_data,wierd_score
def catcontents(files,cache_space): #dataframe that has filename and file_id as columns
scoresfiles=files.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.filename:
filepath=os.path.join(cache_space,i)
filenum=scoresfiles.loc[scoresfiles.filename==i,'file_id']
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']=i
temp['file_id']=pd.Series(int(filenum.values[0]),index=temp.index)
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
except:
            print(filepath+' would not import')
temp=pd.DataFrame()
temp['filename']=pd.Series(i,index=[0])
temp['file_id']=pd.Series(int(filenum.values[0]),index=[0])
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
return scoresinit
def catfromlocal(endpoint_temp,scores2cat): #dataframe that has filenames
scoresfiles=scores2cat.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.fname:
filepath=os.path.join(endpoint_temp,i)
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']="endpointmachine/"+i
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
except:
            print(filepath+' would not import')
temp=pd.DataFrame()
temp['filename']=pd.Series("endpointmachine/"+i,index=[0])
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
return scoresinit
def folderlistcontents(folderslabels,folderslist):
bdasfilelist=pd.DataFrame()
bdasfolderlist=pd.DataFrame()
for i in range(len(folderslist)):
print('getting file and folder contents of box folder ' +folderslabels[i])
subfiles,subfolders=foldercontents(folderslist[i]) #foldercontents generates two dfs: a df with names and ids of files and a df with names and ids of folders
bdasfilelist=bdasfilelist.append(subfiles)
bdasfolderlist=bdasfolderlist.append(subfolders)
return bdasfilelist,bdasfolderlist
def foldercontents(folder_id):
filelist=[]
fileidlist=[]
folderlist=[]
folderidlist=[]
WUlist=box.client.folder(folder_id=folder_id).get_items(limit=None, offset=0, marker=None, use_marker=False, sort=None, direction=None, fields=None)
for item in WUlist:
if item.type == 'file':
filelist.append(item.name)
fileidlist.append(item.id)
if item.type == 'folder':
folderlist.append(item.name)
folderidlist.append(item.id)
files=pd.DataFrame({'filename':filelist, 'file_id':fileidlist})
folders=pd.DataFrame({'foldername':folderlist, 'folder_id':folderidlist})
return files,folders
def box2dataframe(fileid):
harvardfiles, harvardfolders = foldercontents(fileid)
data4process = harvardfiles.loc[~(harvardfiles.filename.str.upper().str.contains('SCORE') == True)]
scores4process = harvardfiles.loc[harvardfiles.filename.str.upper().str.contains('SCORE') == True]
data4process=data4process.reset_index()
scores4process = scores4process.reset_index()
box.download_files(data4process.file_id)
box.download_files(scores4process.file_id)
harvcleandata = pd.read_csv(box_temp+'/'+ data4process.filename[0], header=0, low_memory=False)
harvcleanscores = pd.read_csv(box_temp+'/'+ scores4process.filename[0], header=0, low_memory=False)
return harvcleandata,harvcleanscores
def validpair(pin='HCD0007014_V1'):
print('Checking files in CORRECTED folder having title with PIN='+pin)
PINcheckd=data4process.loc[data4process.PIN==pin]
PINchecks=scores4process.loc[scores4process.PIN==pin]
box.download_files(PINcheckd.file_id)
box.download_files(PINchecks.file_id)
d=catcontents(PINcheckd,box_temp)
s=catcontents(PINchecks,box_temp)
if 'PIN' in d.columns:
if 'PIN' in s.columns:
d = d.loc[d.PIN.isnull() == False]
s = s.loc[s.PIN.isnull() == False]
print('PINS in Data: ')
print(d.PIN.unique())
print('PINS in Scores: ')
print(s.PIN.unique())
try:
if d.PIN.unique()==s.PIN.unique():
print('Passed Unique PIN test')
dlist,slist=findwierdos(d,s)
if dlist.empty and slist.empty:
d=d.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
s=s.drop_duplicates(subset={'PIN','Inst'})
print('Passed duplicate Instruments Test')
data4process.loc[data4process.PIN == pin,'Fail'] = 0
scores4process.loc[scores4process.PIN==pin,'Fail'] = 0
else:
print('Found Non-Identical Duplications')
print(dlist+': in Data')
print(slist+': in Scores')
except:
print('Status : FAIL')
else:
print('variable named PIN not found. Check for missing header')
print('Status : FAIL')
print('##################################################')
return d,s
| python |
# !/usr/bin/env python3
# coding=utf-8
import sys
import argparse
import os
import struct
parser = argparse.ArgumentParser(description='Cisco VxWorks firmware extractor')
parser.add_argument('-i',
'--input-firmware-path',
metavar='input_firmware_path',
help='Firmware path')
parser.add_argument('-o',
'--output-path',
metavar='output_path',
help='Extracted files store path')
def extract_firmware(source_file_data, output_path):
"""Cisco VxWorks firmware extract function
:param source_file_data:
:param output_path:
:return:
"""
file_count = struct.unpack("<I", source_file_data[0x20:0x24])[0]
print("Found {} files in firmware".format(file_count))
print("Star extract files")
for i in range(file_count):
file_name = source_file_data[0x50 + (i * 0x20):0x60 + (i * 0x20)]
file_name = file_name.replace(b'\x00', b'')
print("file_name: {}".format(file_name))
file_offset = struct.unpack("<I", source_file_data[0x60 + (i * 0x20):0x60 + 4 + (i * 0x20)])[0]
file_length = struct.unpack("<I", source_file_data[0x60 + 4 + (i * 0x20):0x60 + 8 + (i * 0x20)])[0]
print("file_offset: {:#010x}".format(file_offset))
print("file_length: {}".format(file_length))
        with open("{}/{:#08x}_{}".format(output_path, file_offset, file_name.decode('utf-8')), 'wb') as output_file:
            output_file.write(source_file_data[file_offset: file_offset + file_length])
if __name__ == '__main__':
args = parser.parse_args()
if len(sys.argv) == 1: #
parser.print_help()
sys.exit(1)
print("args.input_firmware_path: {}".format(args.input_firmware_path))
if args.input_firmware_path:
if os.path.isfile(args.input_firmware_path):
try:
firmware_file_data = open(args.input_firmware_path, "rb").read()
except Exception as err:
print("Can't read input file: {} because of {}".format(args.input_firmware_path, err))
sys.exit(1)
else:
print("Can't read input file: {}".format(args.input_firmware_path))
sys.exit(1)
else:
parser.print_help()
sys.exit(1)
print("args.output_path: {}".format(args.output_path))
if args.output_path:
if os.path.exists(args.output_path):
if os.path.isdir(args.output_path):
output_path = args.output_path
else:
print("output_path {} is not directory".format(args.output_path))
sys.exit(1)
else:
try:
os.makedirs(args.output_path, exist_ok=True)
output_path = args.output_path
except Exception as err:
print("Can't create output folder : {} because of {}".format(args.output_path, err))
sys.exit(1)
else:
input_file_name = os.path.basename(args.input_firmware_path)
output_path = "./{}.extracted".format(input_file_name)
temp_out_path = output_path
index = 1
while os.path.exists(output_path):
output_path = "{}_{}".format(temp_out_path, index)
index += 1
try:
os.makedirs(output_path)
except Exception as err:
print("Can't create output folder : {} because of {}".format(output_path, err))
if firmware_file_data and output_path:
extract_firmware(firmware_file_data, output_path)
| python |
from discord import File
from discord.ext import commands
from shavatar import generate
from src.internal.bot import Bot
from src.internal.context import Context
class Avatar(commands.Cog):
"""Generate an avatar with SHAvatar."""
def __init__(self, bot: Bot):
self.bot = bot
@commands.command(name="shavatar")
async def shavatar(self, ctx: Context, *, text: str = None) -> None:
"""Generate an avatar with SHAvatar."""
avatar = generate(text or str(ctx.author.id), size=512)
avatar.save("./avatar.png")
await ctx.reply(file=File("./avatar.png"))
def setup(bot: Bot):
bot.add_cog(Avatar(bot))
| python |
import pathlib
from os import listdir
from __utils import *
import pandas as pd
import numpy as np
from math import floor
from time import time
# This is a wrapper script for analysis of predictions produced in stage 2-model
#
# Arguments:
# REGION name of region
# PRED_DIR path to folder with region predictions
# ORIG_DIR path to folder of training data
# VALIDATE 1 <= x <2 for r^2 / RMSE; 2 for deltas
# R2_FILE path of file in which to drop r^2 values
# RMSE_FILE path of file in which to drop RMSE values
def analysis(REGION, PRED_DIR, ORIG_DIR, VALIDATE, R2_FILE, RMSE_FILE):
RESID_COMP = pathlib.Path("analysis_visualization/3c-obs_vs_pred.R").resolve()
PREDS = PRED_DIR.joinpath(REGION, SUB_PRED)
RESIDS = PRED_DIR.joinpath(REGION, SUB_RESI)
#RES_FIGS = PRED_DIR.joinpath(REGION, SUB_FIGS, SUB_RESI)
if not RESIDS.is_dir():
RESIDS.mkdir(parents=True)
#if not RES_FIGS.is_dir():
# RES_FIGS.mkdir(parents=True)
ORIG = append_to_folder(ORIG_DIR.joinpath(REGION), ".csv")
for pred in (file for file in listdir(PREDS) if file.endswith(".csv")):
PRED = PREDS.joinpath(pred)
RESID = RESIDS.joinpath(pred)
LOG = RESID.with_suffix(".log")
with open(LOG, "w") as log:
t0 = time()
log.write(f"t0={t0}\n")
if floor(VALIDATE)==1:
# ToDo: Save the differences to RESID.
log.write(f"floor(VALIDATE)==1: Computing residuals between prediction and the portion of original satellite data removed for testing.\n")
#RES_COMP = RES_FIGS.joinpath(pred).with_suffix(".png")
#resid_args = [RESID_COMP, ORIG, PRED, RES_COMP, SCORE_FILE]
resid_args = [RESID_COMP, ORIG, PRED, R2_FILE, RMSE_FILE]
log.write(f"resid_args: {resid_args}\n")
bash(resid_args)
elif VALIDATE==2:
log.write(f"VALIDATE==2: Computing differences between prediction and supplied validation data.\n")
# Load in known sm values.
old = pd.read_csv(ORIG)
old.columns = ["x", "y", "old"]
old.set_index(["x", "y"], inplace=True)
# Load in predicted sm values.
new = pd.read_csv(PRED, header=None)
new = new[new.columns[:3]]
new.columns = ["x", "y", "new"]
new.set_index(["x", "y"], inplace=True)
# Join old and new.
# Will only keep data points for which the same x/y exists in both.
compare = old.join(new)#[new.columns[2]])#"new"])
#compare.columns = ["x", "y", "old", "new"]
compare.dropna(inplace=True)
# Compute stats and save to files.
corr = (compare["new"].corr(compare["old"]))**2
log.write(f"The correlation between the original and predicted data is {corr}.\n")
                with open(R2_FILE, 'a') as r2_out:
                    r2_out.write(f"{corr},{PRED}\n")
                rmse = np.sqrt(np.mean((compare["new"] - compare["old"])**2))
                log.write(f"The RMSE between the original and predicted data is {rmse}.\n")
                with open(RMSE_FILE, 'a') as rmse_out:
                    rmse_out.write(f"{rmse},{PRED}\n")
# Find differences and save to file.
compare["deltas"] = compare["new"] - compare["old"]
compare["reltas"] = compare["deltas"]/compare["old"]
log.write(f"The first few rows of differences and relative differences:\n{compare.head()}\n")
resid = compare[["deltas"]]#"x","y","reltas"]]
resid.to_csv(path_or_buf=RESID, header=False)#, index=False)
t1 = time()
log.write(f"t1={t1}\n")
log.write(f"t={t1 - t0}\n")
| python |
from gna.configurator import NestedDict
from gna.expression.preparse import open_fcn
from gna.expression.operation import *
from gna.env import env
import re
import inspect
class VTContainer_v01(OrderedDict):
_order=None
def __init__(self, *args, **kwargs):
super(VTContainer_v01, self).__init__(*args, **kwargs)
def set_indices(self, indices):
self._order=indices.order
def __missing__(self, key):
newvar = Variable(key, order=self._order)
self.__setitem__(key, newvar)
return newvar
def __setitem__(self, key, value):
if isinstance(value, Indexed):
if value.name is undefinedname and key!='__tree__':
value.name = key
value.nindex.arrange(self._order)
# value.expandable=False
elif inspect.isclass(value) and issubclass(value, Operation):
value.order=self._order
OrderedDict.__setitem__(self, key, value)
return value
class Expression_v01(object):
operations = dict(sum=OSum, prod=OProd, concat=OConcat, accumulate=Accumulate, Accumulate=AccumulateTransformation, bracket=bracket, expand=expand, inverse=OInverse, select1=OSelect1 )
tree = None
def __init__(self, expression, indices=[], **kwargs):
if isinstance(expression, str):
self.expressions_raw = [expression]
elif isinstance(expression, (tuple, list)):
self.expressions_raw = list(expression)
else:
raise Exception('Unsupported expression: {!r}'.format(expression))
        cexpr = re.compile(r'\s*#.*')
        rexpr = re.compile(r'\n\s+')
self.expressions_raw = [ rexpr.sub('', cexpr.sub('', e)) for e in self.expressions_raw ]
self.expressions = [open_fcn(expr) for expr in self.expressions_raw]
self.globals=VTContainer_v01()
self.defindices(indices, **kwargs)
self.set_operations()
def set_operations(self):
for name, op in self.operations.items():
self.globals[name]=op
def parse(self):
if self.tree:
raise Exception('Expression is already parsed')
self.trees = []
for expr in self.expressions:
if not expr:
continue
texpr = '__tree__ = '+expr
try:
exec(texpr, self.globals, self.globals)
tree = self.globals.pop('__tree__')
except:
print('Failed to evaluate expression:')
print(expr)
raise
self.trees.append(tree)
self.tree=self.trees[-1]
def guessname(self, ilib, *args, **kwargs):
if isinstance(ilib, str):
import yaml
try:
ilib = yaml.load(ilib, yaml.Loader)
except:
raise Exception('Unable to parse name library (yaml)')
lib = dict()
for k, v in ilib.items():
v['name'] = k
exprs = v['expr']
if isinstance(exprs, str):
exprs=[exprs]
for expr in exprs:
lib[expr] = v
for tree in self.trees:
tree.guessname(lib, *args, **kwargs)
def dump_all(self, yieldself):
for tree in self.trees:
tree.dump(yieldself)
def __str__(self):
return self.expressions_raw
def __repr__(self):
return 'Expression("{}")'.format(self.expressions_raw)
def defindices(self, defs):
if isinstance(defs, NIndex):
self.nindex=defs
else:
self.nindex = NIndex(fromlist=defs)
for short, idx in self.nindex.indices.items():
self.globals[short] = idx
slave=idx.slave
if slave:
self.globals[slave.short]=slave
self.globals.set_indices(self.nindex)
def build(self, context):
if not self.tree:
raise Exception('Expression is not initialized, call parse() method first')
context.set_indices(self.nindex)
for tree in self.trees:
creq = tree.require(context)
context.build_bundles()
with context:
for tree in self.trees:
tree.bind(context)
class ItemProvider(object):
"""Container for the bundle class, bundle configuration and provided items"""
bundle=None
def __init__(self, cfg, name=''):
self.cfg = cfg
self.name=name
from gna.bundle.bundle import get_bundle
self.bundleclass = get_bundle((cfg.bundle.name, cfg.bundle.get('version', None)))
variables, objects = self.bundleclass.provides(self.cfg)
self.items = variables+objects
def register_in(self):
if self.cfg.bundle.get('inactive', False):
return dict()
return {key: self for key in self.items}
def build(self, **kwargs):
if self.bundle:
return self.bundle
self.bundle = self.bundleclass(self.cfg, **kwargs)
self.bundle.execute()
def set_nidx(self, nidx):
if nidx is None:
printl_debug( 'indices: %s'%(self.name) )
return
bundlecfg = self.cfg.bundle
predefined_nidx = bundlecfg.get('nidx', None)
if predefined_nidx is None:
printl_debug( 'indices: %s[%s]'%(self.name, str(predefined_nidx)) )
bundlecfg.nidx = nidx
else:
if isinstance(predefined_nidx, list):
predefined_nidx = NIndex(fromlist=predefined_nidx)
elif not isinstance(predefined_nidx, NIndex):
raise Exception('Unsupported nidx field')
printl_debug('indices: %s[%s + %s]'%(self.name, str(predefined_nidx), str(nidx)))
bundlecfg.nidx=predefined_nidx+nidx
class ExpressionContext_v01(object):
indices = None
def __init__(self, bundles, ns=None, inputs=None, outputs=None):
self.bundles = bundles
self.outputs = NestedDict() if outputs is None else outputs
self.inputs = NestedDict() if inputs is None else inputs
self.ns = ns or env.globalns
self.providers = dict()
for name, cfg in self.bundles.items():
if not 'bundle' in cfg:
continue
provider = ItemProvider(cfg, name)
self.providers.update(provider.register_in())
self.required_bundles = OrderedDict()
def __enter__(self):
self.ns.__enter__()
def __exit__(self, *args, **kwargs):
self.ns.__exit__(*args, **kwargs)
def namespace(self):
return self.ns
def set_indices(self, indices):
self.nindex = indices
@methodname
def require(self, name, nidx):
provider = self.required_bundles.get(name, None)
if provider is None:
provider = self.providers.get(name, None)
if provider is None:
if nidx:
for it in nidx.iterate():
self.require(it.current_format(name=name), None)
return self.required_bundles
print('List of available (provided) variables:', list(self.required_bundles.keys()))
raise Exception('Do not know how to build '+name)
self.required_bundles[name] = provider
provider.set_nidx(nidx)
return self.required_bundles
def build_bundles(self):
with self.ns:
for provider in self.required_bundles.values():
provider.build(inputs=self.inputs, outputs=self.outputs, namespace=self.ns)
def get_variable(self, name, *idx):
pass
def get_key(self, name, nidx, fmt=None, clone=None):
if nidx is None:
nidx = NIndex()
if clone is not None:
clone = '%02d'%clone
if fmt:
            ret = nidx.current_format(fmt)
if clone:
ret += '.'+clone
return ret
nidx = nidx.current_values(name=name)
if clone:
nidx = nidx + (clone,)
return nidx
def get_output(self, name, nidx=None, clone=None):
return self.get( self.outputs, name, nidx, 'output', clone=clone )
def set_output(self, output, name, nidx=None, fmt=None, **kwargs):
import ROOT as R
if isinstance(output, R.TransformationTypes.OutputHandle):
output = R.OutputDescriptor(output)
self.set( self.outputs, output, name, nidx, 'output', fmt, **kwargs )
return output
def get_input(self, name, nidx=None, clone=None):
return self.get( self.inputs, name, nidx, 'input', clone=clone )
def set_input(self, input, name, nidx=None, fmt=None, clone=None):
self.set( self.inputs, input, name, nidx, 'input', fmt, clone)
return input
def get(self, source, name, nidx, type, clone=None):
key = self.get_key(name, nidx, clone=clone)
printl_debug('get {}'.format(type), name, key)
ret = source.get(key, None)
if not ret:
raise Exception('Failed to get {} {}[{}]'.format(type, name, nidx, clone))
if isinstance(ret, NestedDict):
            raise Exception('Incomplete index ({!s}) provided (probably). Need at least resolve {!s}'.format(nidx, list(ret.keys())))
return ret
def set(self, target, io, name, nidx, type, fmt=None, clone=None):
key = self.get_key( name, nidx, fmt, clone )
printl_debug('set {}'.format(type), name, key)
target[key]=io
def set_variable(self, name, nidx, var, **kwargs):
key = '.'.join(self.get_key( name, nidx ))
printl_debug('set variable', name, key)
self.ns.reqparameter(key, cfg=var, **kwargs)
# def connect(self, source, sink, nidx, fmtsource=None, fmtsink=None):
# printl_debug( 'connect: {}->{} ({:s})'.format( source, sink, nidx ) )
# with nextlevel():
# output = self.get_output( source, nidx )
# input = self.get_input( sink, nidx )
# input( output )
| python |
import time
from print_running_function import print_running_function
# Hackish method to import from another directory
# Useful while xendit-python isn't released yet to the public
import importlib.machinery
loader = importlib.machinery.SourceFileLoader("xendit", "../xendit/__init__.py")
xendit = loader.load_module("xendit")
class CreateCardlessCreditPayment:
@staticmethod
def run(xendit_instance, **kwargs):
try:
cardless_credit_payment = xendit_instance.CardlessCredit.create_payment(
**kwargs
)
print(cardless_credit_payment)
except xendit.XenditError as e:
print("Error status code:", e.status_code)
print("Error message:", e)
@staticmethod
def example(xendit_instance):
cardless_credit_items = []
cardless_credit_items.append(
{
"id": "item-123",
"name": "Phone Case",
"price": 200000,
"type": "Smartphone",
"url": "http://example.com/phone/phone_case",
"quantity": 2,
}
)
customer_details = {
"first_name": "customer first name",
"last_name": "customer last name",
"email": "[email protected]",
"phone": "0812332145",
}
shipping_address = {
"first_name": "first name",
"last_name": "last name",
"address": "Jl Teknologi No. 12",
"city": "Jakarta",
"postal_code": "12345",
"phone": "081513114262",
"country_code": "IDN",
}
args = {
"cardless_credit_type": xendit.CardlessCreditType.KREDIVO,
"external_id": f"id-{int(time.time())}",
"amount": 10000,
"payment_type": "3_months",
"items": cardless_credit_items,
"customer_details": customer_details,
"shipping_address": shipping_address,
"redirect_url": "https://my-shop.com/home",
"callback_url": "https://my-shop.com/callback",
}
print_running_function("xendit.CardlessCredit.create_payment", args)
CreateCardlessCreditPayment.run(xendit_instance, **args)
class CalculatePaymentType:
@staticmethod
def run(xendit_instance, **kwargs):
try:
cardless_credit_payment_types = xendit_instance.CardlessCredit.calculate_payment_type(
**kwargs
)
print(cardless_credit_payment_types)
except xendit.XenditError as e:
print("Error status code:", e.status_code)
print("Error message:", e)
@staticmethod
def example(xendit_instance):
cardless_credit_items = []
cardless_credit_items.append(
{
"id": "item-123",
"name": "Phone Case",
"price": 200000,
"type": "Smartphone",
"url": "http://example.com/phone/phone_case",
"quantity": 2,
}
)
args = {
"cardless_credit_type": xendit.CardlessCreditType.KREDIVO,
"amount": 10000,
"items": cardless_credit_items,
}
print_running_function("xendit.CardlessCredit.calculate_payment_type", args)
CalculatePaymentType.run(xendit_instance, **args)
def ask_cardless_credit_input():
print("Input the action that you want to use")
print("0. Exit")
print("1. Create Payment / Generate Checkout URL")
print("2. Calculate Payment Types")
try:
return int(input())
except ValueError:
print("Invalid input. Please type a number")
return ask_cardless_credit_input()
def cardless_credit_example(xendit_instance):
cardless_credit_input = ask_cardless_credit_input()
while cardless_credit_input != 0:
if cardless_credit_input == 1:
print("Running example of Create Payment / Generate Checkout URL")
CreateCardlessCreditPayment.example(xendit_instance)
elif cardless_credit_input == 2:
print("Running example of Calculate Payment Types")
CalculatePaymentType.example(xendit_instance)
cardless_credit_input = ask_cardless_credit_input()
| python |
command = input()
all_students = {}
while command[0].isupper():
command = command.split(":")
key = command[2]
value = command[0] + " - " + command[1]
all_students.setdefault(key, []).append(value)
command = input()
searched_course = command.replace("_", " ")
print("\n".join(all_students[searched_course]))
| python |
import sys
import random
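# Descriptive note (inferred from the print statements below, not from any spec):
# the script emits a header line "n k", then n assignment operations "A i value"
# with random values, then query operations "Q i j" over index pairs until the
# operation budget k is nearly exhausted, and finally one full-range query "Q 1 n".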
n = int(sys.argv[1])
k = n+n*(n+1)//2 # 10**5 #
print('%d %d'%(n, k))
for i in range(n):
print ('A %d %d'%(i+1, random.randint(10**8,10**9)))
k -= 1
for i in range(n):
for j in range(i, n):
print('Q %d %d'%(i+1, j+1))
k -= 1
if k <= 1: break
if k <= 1: break
print('Q 1 %d'%n)
| python |
import subprocess
from flask import Flask, redirect, url_for, request, render_template
app = Flask(__name__)
@app.route('/')
def hello_world(): # put application's code here
return render_template("index.html")
@app.route('/success/<command>')
def success(command):
return subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read()
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'POST':
togglapi = request.form['togglapi']
since = request.form['since']
until = request.form['until']
project = request.form['project']
journal = request.form['journal']
command = "python toggljournal.py " + togglapi + " " + since + " " + until + " " + " " + project + " " + journal
return redirect(url_for('success', command=command))
    else:
        togglapi = request.args.get('togglapi')
        since = request.args.get('since')
        until = request.args.get('until')
        project = request.args.get('project')
        journal = request.args.get('journal')
        command = "python toggljournal.py " + togglapi + " " + since + " " + until + " " + project + " " + journal
        return redirect(url_for('success', command=command))
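
# Illustrative request only (field names are taken from the form handling above;
# host and port assume Flask's defaults from app.run below):
#   curl -X POST http://127.0.0.1:5000/login \
#        -d 'togglapi=API_TOKEN' -d 'since=2021-01-01' -d 'until=2021-01-31' \
#        -d 'project=MyProject' -d 'journal=journal.md'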
if __name__ == '__main__':
app.run(debug=True)
| python |
# coding: utf-8
from models.models import Group
from models.models import Person
from random import randrange
def test_edit_group_name(app):
if app.object.count_group() == 0:
app.object.create_group_form(Group(name="test"))
old_groups = app.object.get_group_list()
index = randrange(len(old_groups))
group = Group(name="new test progon")
group.id = old_groups[index].id
app.object.edit_group_by_index(index, group)
new_groups = app.object.get_group_list()
assert len(old_groups) == app.object.count_group()
old_groups[index] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
def test_edit_group_header(app):
if app.object.count_group() == 0:
app.object.create_group_form(Group(name="test"))
old_groups = app.object.get_group_list()
app.object.edit_first_group(Group(header="new header",
)
)
new_groups = app.object.get_group_list()
assert len(old_groups) == app.object.count_group()
def test_edit_person(app):
if app.object.count_person() == 0:
app.object.create_person_form(Person(name="test",
lastname="test",
address="test",
email="test",
mobile="test",
)
)
old_persons = app.object.get_person_list()
index = randrange(len(old_persons))
person = Person(name="new 1",
lastname="new 2",
address="new 3",
mobile="new 4",
email="new 5",
)
person.id = old_persons[index].id
app.object.edit_person_form_by_index(index, person)
new_persons = app.object.get_person_list()
assert len(old_persons) == app.object.count_person()
old_persons[index] = person
assert sorted(old_persons, key=Person.id_or_max) == sorted(new_persons, key=Person.id_or_max)
| python |
import numpy as np
import matplotlib.pyplot as plt
import spams
import cv2
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
from pathlib import Path
import os
import sys
import random
import warnings
import pandas as pd
from tqdm import tqdm
from itertools import chain
import math
from vahadane import vahadane
IMG_WIDTH = 256
IMG_HEIGHT = 256
IMG_CHANNELS = 3
M_CHANNEL=1
Res_HEIGHT = 1000 # actual image height
Res_WIDTH = 1000 # actual image width
# number of patches = (input image size / crop size)^2 per image
pat = 16
warnings.filterwarnings('ignore', category=UserWarning, module='skimage')
seed = 42
np.random.seed(seed)
# path where you want to store the stain normalized images
#----- # Test # ------#
Path("/content/TestData").mkdir(parents=True, exist_ok=True)
Path("/content/TestData/Bin").mkdir(parents=True, exist_ok=True) # for masks
Path("/content/TestData/tis").mkdir(parents=True, exist_ok=True) # for tissues
bin_p_ts = '/content/TestData/Bin'
tis_p_ts = '/content/TestData/tis'
#----- # Train # ------#
Path("/content/TrainData").mkdir(parents=True, exist_ok=True)
Path("/content/TrainData/Bin").mkdir(parents=True, exist_ok=True) # for masks
Path("/content/TrainData/tis").mkdir(parents=True, exist_ok=True) # for tissues
bin_p_tr = '/content/TrainData/Bin/'
tis_p_tr = '/content/TrainData/tis/'
#----- # Valid # ------#
Path("/content/ValidData").mkdir(parents=True, exist_ok=True)
Path("/content/ValidData/Bin").mkdir(parents=True, exist_ok=True) # for masks
Path("/content/ValidData/tis").mkdir(parents=True, exist_ok=True) # for tissues
bin_p_vl = '/content/ValidData/Bin/'
tis_p_vl = '/content/ValidData/tis/'
# Give path to your dataset
Train_image_path = '/content/drive/MyDrive/intern_pyth/monuseg/TrainData/original_images/'
Train_mask_path = '/content/drive/MyDrive/intern_pyth/monuseg/TrainData/Bin/'
val_image_path = '/content/drive/MyDrive/intern_pyth/monuseg/ValidData/original_images/'
val_mask_path = '/content/drive/MyDrive/intern_pyth/monuseg/ValidData/Bin/'
Test_image_path = '/content/drive/MyDrive/intern_pyth/monuseg/TestData/tis/'
test_mask_path = '/content/drive/MyDrive/intern_pyth/monuseg/TestData/Bin/'
# Give a reference image path for stain normalization
reference_image = '/content/drive/MyDrive/intern_pyth/monuseg/TestData/tis/TCGA-21-5784-01Z-00-DX1.tif'
# getting the train and test ids
train_ids1 = next(os.walk(Train_image_path))[2]
train_mask_ids1 = next(os.walk(Train_mask_path))[2]
val_ids1 = next(os.walk(val_image_path))[2]
val_mask_ids1 = next(os.walk(val_mask_path))[2]
test_ids1 = next(os.walk(Test_image_path))[2]
test_mask_ids1 = next(os.walk(test_mask_path))[2]
# sorting the train and test ids
train_ids = sorted(train_ids1,key=lambda x: (os.path.splitext(x)[0]))
train_mask_ids = sorted(train_mask_ids1,key=lambda x: (os.path.splitext(x)[0]))
test_ids = sorted(test_ids1,key=lambda x: (os.path.splitext(x)[0]))
test_mask_ids = sorted(test_mask_ids1,key=lambda x: (os.path.splitext(x)[0]))
val_ids = sorted(val_ids1,key=lambda x: (os.path.splitext(x)[0]))
val_mask_ids = sorted(val_mask_ids1,key=lambda x: (os.path.splitext(x)[0]))
def stain_norm_patch():
def read_image(path):
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # opencv default color space is BGR, change it to RGB
p = np.percentile(img, 90)
img = np.clip(img * 255.0 / p, 0, 255).astype(np.uint8)
return img
def vaha(SOURCE_PATH,TARGET_PATH):
source_image = read_image(SOURCE_PATH)
target_image = read_image(TARGET_PATH)
vhd = vahadane(LAMBDA1=0.01, LAMBDA2=0.01, fast_mode=1, getH_mode=0, ITER=50)
# vhd.show_config()
Ws, Hs = vhd.stain_separate(source_image)
vhd.fast_mode=0;vhd.getH_mode=0;
Wt, Ht = vhd.stain_separate(target_image)
img = vhd.SPCN(source_image, Ws, Hs, Wt, Ht)
return img
def rein(src):
# stain_normalizer 'Vahadane'
target_img = reference_image
im_nmzd = vaha(src,target_img)
return im_nmzd
# Get and resize train images and masks
def train():
X_train = np.zeros((len(train_ids)*pat, IMG_HEIGHT, IMG_WIDTH, 3), dtype=np.float32)
Y_train = np.zeros((len(train_ids)*pat, IMG_HEIGHT, IMG_WIDTH,1), dtype=np.bool)
print('stain normalizing and cropping patches of train images and masks ... ')
sys.stdout.flush()
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
img = rein(Train_image_path + id_)
mask_ = cv2.imread(Train_mask_path + (os.path.splitext(id_)[0])+'.png',0)
mask_ = np.expand_dims(mask_, -1)
temp_list = []
temp_list_mask = []
for i in range (int(math.pow(pat,0.5))):
for j in range(int(math.pow(pat,0.5))):
if i<(int(math.pow(pat,0.5))-1):
if j<(int(math.pow(pat,0.5))-1):
crop_img1 = img[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
crop_mask1 = mask_[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
temp_list.append(crop_img1)
temp_list_mask.append(crop_mask1)
elif j==(int(math.pow(pat,0.5))-1):
crop_img2 = img[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
crop_mask2 = mask_[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
temp_list.append(crop_img2)
temp_list_mask.append(crop_mask2)
elif i==(int(math.pow(pat,0.5))-1):
if j<(int(math.pow(pat,0.5))-1):
crop_img3 = img[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
crop_mask3 = mask_[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
temp_list.append(crop_img3)
temp_list_mask.append(crop_mask3)
elif j==(int(math.pow(pat,0.5))-1):
crop_img4 = img[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
crop_mask4 = mask_[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
temp_list.append(crop_img4)
temp_list_mask.append(crop_mask4)
for t in range(0,pat):
X_train[n*pat+t] = temp_list[t]
Y_train[n*pat+t] = temp_list_mask[t]
# mask = np.maximum(mask, mask_)
return X_train, Y_train
def val():
X_val = np.zeros((len(val_ids)*pat, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
Y_val = np.zeros((len(val_ids)*pat, IMG_HEIGHT, IMG_WIDTH,1), dtype=np.bool)
print('stain normalizing and cropping patches of validation images and masks ... ')
sys.stdout.flush()
for m, id_ in tqdm(enumerate(val_ids), total=len(val_ids)):
val_img = rein(val_image_path + id_)
val_mask_ = cv2.imread(val_mask_path + (os.path.splitext(id_)[0])+'.png',0)
val_mask_ = np.expand_dims(val_mask_, -1)
temp_list = []
temp_list_mask = []
for i in range (int(math.pow(pat,0.5))):
for j in range(int(math.pow(pat,0.5))):
if i<(int(math.pow(pat,0.5))-1):
if j<(int(math.pow(pat,0.5))-1):
crop_val_img1 = val_img[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
crop_mask1 = val_mask_[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
temp_list.append(crop_val_img1)
temp_list_mask.append(crop_mask1)
elif j==(int(math.pow(pat,0.5))-1):
crop_val_img2 = val_img[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
crop_mask2 = val_mask_[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
temp_list.append(crop_val_img2)
temp_list_mask.append(crop_mask2)
elif i==(int(math.pow(pat,0.5))-1):
if j<(int(math.pow(pat,0.5))-1):
crop_val_img3 = val_img[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
crop_mask3 = val_mask_[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
temp_list.append(crop_val_img3)
temp_list_mask.append(crop_mask3)
elif j==(int(math.pow(pat,0.5))-1):
crop_val_img4 = val_img[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
crop_mask4 = val_mask_[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
temp_list.append(crop_val_img4)
temp_list_mask.append(crop_mask4)
for t in range(0,pat):
X_val[m*pat+t] = temp_list[t]
Y_val[m*pat+t] = temp_list_mask[t]
# mask = np.maximum(mask, mask_)
return X_val, Y_val
def test():
X_test = np.zeros((len(test_ids)*pat, IMG_HEIGHT, IMG_WIDTH, IMG_CHANNELS), dtype=np.float32)
Y_test = np.zeros((len(test_ids)*pat, IMG_HEIGHT, IMG_WIDTH,1), dtype=np.bool)
print('stain normalizing and cropping patches of test images ... ')
sys.stdout.flush()
for s, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
img = rein(Test_image_path + id_)
test_mask_ = cv2.imread(test_mask_path + (os.path.splitext(id_)[0])+'.png',0)
test_mask_ = np.expand_dims(test_mask_, -1)
temp_list = []
temp_list_mask = []
for i in range (int(math.pow(pat,0.5))):
for j in range(int(math.pow(pat,0.5))):
if i<(int(math.pow(pat,0.5))-1):
if j<(int(math.pow(pat,0.5))-1):
crop_img1 = img[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
crop_mask1 = test_mask_[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
temp_list.append(crop_img1)
temp_list_mask.append(crop_mask1)
elif j==(int(math.pow(pat,0.5))-1):
crop_img2 = img[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
crop_mask2 = test_mask_[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
temp_list.append(crop_img2)
temp_list_mask.append(crop_mask2)
elif i==(int(math.pow(pat,0.5))-1):
if j<(int(math.pow(pat,0.5))-1):
crop_img3 = img[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
crop_mask3 = test_mask_[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH]
temp_list.append(crop_img3)
temp_list_mask.append(crop_mask3)
elif j==(int(math.pow(pat,0.5))-1):
crop_img4 = img[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
crop_mask4 = test_mask_[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24]
temp_list.append(crop_img4)
temp_list_mask.append(crop_mask4)
for t in range(0,pat):
X_test[s*pat+t] = temp_list[t]
Y_test[s*pat+t] = temp_list_mask[t]
# mask = np.maximum(mask, mask_)
return X_test, Y_test
train1 = train()
X_train = train1[0]
Y_train = train1[1]
val1 = val()
X_val = val1[0]
Y_val = val1[1]
test1 = test()
X_test = test1[0]
Y_test = test1[1]
# this will save the stain normalized patches into the created paths above
#------------------------#TEST#---------------------------------#
for n, id_ in tqdm(enumerate(test_ids), total=len(test_ids)):
id_1 = os.path.splitext(id_)[0]
for j in range(16):
j1 = "{0:0=2d}".format(j)
img_t = X_test[n*16+j]
imgs_b = Y_test[n*16+j]*255
# img_t = X_test[n]
# imgs_b = np.reshape(Y_test[n]*255,(IMG_WIDTH,IMG_HEIGHT))
filename1 = '{}/{}_{}.png'.format(tis_p_ts,id_1,j1)
cv2.imwrite(filename1, cv2.cvtColor(img_t, cv2.COLOR_BGR2RGB))
filename2 = '{}/{}_{}.png'.format(bin_p_ts,id_1,j1)
cv2.imwrite(filename2, imgs_b)
#------------------------#VAL#-------------------------------#
for n, id_ in tqdm(enumerate(val_ids), total=len(val_ids)):
id_1 = os.path.splitext(id_)[0]
for j in range(16):
j1 = "{0:0=2d}".format(j)
img_t = X_val[n*16+j]
imgs_b = Y_val[n*16+j]*255
filename1 = '{}/{}_{}.png'.format(tis_p_vl,id_1,j1)
cv2.imwrite(filename1,cv2.cvtColor(img_t, cv2.COLOR_BGR2RGB)) #cv2.cvtColor(img_t, cv2.COLOR_BGR2RGB)
filename2 = '{}/{}_{}.png'.format(bin_p_vl,id_1,j1)
cv2.imwrite(filename2, imgs_b)
#------------------------#TRAIN#-------------------------------#
for n, id_ in tqdm(enumerate(train_ids), total=len(train_ids)):
id_1 = os.path.splitext(id_)[0]
for j in range(16):
j1 = "{0:0=2d}".format(j)
img_t = X_train[n*16+j]
imgs_b = Y_train[n*16+j]*255
filename1 = '{}/{}_{}.png'.format(tis_p_tr,id_1,j1)
cv2.imwrite(filename1, cv2.cvtColor(img_t, cv2.COLOR_BGR2RGB)) #cv2.cvtColor(img_t, cv2.COLOR_BGR2RGB)
filename2 = '{}/{}_{}.png'.format(bin_p_tr,id_1,j1)
cv2.imwrite(filename2, imgs_b)
def patch_join(out_im):
num_im = len(out_im)//pat
num_pat = int(pat**0.5)
out_concat = np.zeros((Res_HEIGHT, Res_WIDTH, 1), dtype=np.uint8)
# Y_concat = np.zeros((Res_HEIGHT, Res_WIDTH, 1), dtype=np.bool)
out_full = np.zeros((num_im,Res_HEIGHT, Res_WIDTH, 1), dtype=np.uint8)
# Y_full = np.zeros((num_im,Res_HEIGHT, Res_WIDTH, 1), dtype=np.bool)
for k in range(num_im):
sec1 = []
y_sec1 = []
for l in range(pat):
sec = out_im[k*pat+l]
sec1.append(sec)
for i in range(int(num_pat)):
for j in range(int(num_pat)):
if i<num_pat-1:
if j<num_pat-1:
out_concat[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH] = sec1[i*num_pat+j]
elif j==num_pat-1:
out_concat[i*IMG_HEIGHT:i*IMG_HEIGHT+IMG_HEIGHT, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24] = sec1[i*num_pat+j]
elif i==num_pat-1:
if j<num_pat-1:
out_concat[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH:j*IMG_WIDTH+IMG_WIDTH] = sec1[i*num_pat+j]
elif j==num_pat-1:
out_concat[i*IMG_HEIGHT-24:i*IMG_HEIGHT+IMG_HEIGHT-24, j*IMG_WIDTH-24:j*IMG_WIDTH+IMG_WIDTH-24] = sec1[i*num_pat+j]
out_full[k] = out_concat
return out_full,test_ids
if __name__ == '__main__':
stain_norm_patch() | python |
from .models import ThingDescription, DirectoryNameToURL, TargetToChildName, TypeToChildrenNames, DynamicAttributes
from flask_pymongo import PyMongo
mongo = PyMongo()
def clear_database() -> None:
"""Drop collections in the mongodb database in order to initialize it.
"""
ThingDescription.drop_collection()
DirectoryNameToURL.drop_collection()
TypeToChildrenNames.drop_collection()
TargetToChildName.drop_collection()
DynamicAttributes.drop_collection()
def init_dir_to_url(level: str) -> None:
"""Initialize name-to-URL mappings for the current directory using contents specified by 'level'
Args:
level(str): it specifies the level of current directory
"""
DirectoryNameToURL.drop_collection()
if level == "level1":
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level2a',
url='http://localhost:5002', relationship='child').save()
DirectoryNameToURL(directory_name='level2b',
url='http://localhost:5003', relationship='child').save()
elif level == 'level2a':
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level1',
url=f'http://localhost:5001', relationship='parent').save()
DirectoryNameToURL(directory_name='level3aa',
url=f'http://localhost:5004', relationship='child').save()
DirectoryNameToURL(directory_name='level3ab',
url=f'http://localhost:5005', relationship='child').save()
elif level == 'level2b':
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level1',
url=f'http://localhost:5001', relationship='parent').save()
elif level == 'level3aa':
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level2a',
url=f'http://localhost:5002', relationship='parent').save()
elif level == 'level3ab':
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level2a',
url=f'http://localhost:5002', relationship='parent').save()
DirectoryNameToURL(directory_name='level4aba',
url=f'http://localhost:5006', relationship='child').save()
DirectoryNameToURL(directory_name='level4abb',
url=f'http://localhost:5007', relationship='child').save()
elif level == 'level4aba':
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level3ab',
url=f'http://localhost:5005', relationship='parent').save()
elif level == 'level4abb':
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level3ab',
url=f'http://localhost:5005', relationship='parent').save()
DirectoryNameToURL(directory_name='level5abba',
url=f'http://localhost:5008', relationship='child').save()
DirectoryNameToURL(directory_name='level5abbb',
url=f'http://localhost:5009', relationship='child').save()
elif level == 'level5abba':
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level4abb',
url=f'http://localhost:5007', relationship='parent').save()
elif level == 'level5abbb':
DirectoryNameToURL(directory_name='master',
url=f'http://localhost:5001', relationship='master').save()
DirectoryNameToURL(directory_name='level4abb',
url=f'http://localhost:5007', relationship='parent').save()
def init_target_to_child_name(level: str) -> None:
"""Initialize the target-to-child mappings for the current directory
Args:
level(str): it specifies the level of current directory
"""
if level == 'level1':
TargetToChildName(target_name='level3aa', child_name='level2a').save()
TargetToChildName(target_name='level3ab', child_name='level2a').save()
TargetToChildName(target_name='level4aba', child_name='level2a').save()
TargetToChildName(target_name='level4abb', child_name='level2a').save()
TargetToChildName(target_name='level5abba', child_name='level2a').save()
TargetToChildName(target_name='level5abbb', child_name='level2a').save()
elif level == 'level2a':
TargetToChildName(target_name='level4aba', child_name='level3ab').save()
TargetToChildName(target_name='level4abb', child_name='level3ab').save()
TargetToChildName(target_name='level5abba', child_name='level3ab').save()
TargetToChildName(target_name='level5abbb', child_name='level3ab').save()
elif level == 'level3ab':
TargetToChildName(target_name='level5abba', child_name='level4abb').save()
TargetToChildName(target_name='level5abbb', child_name='level4abb').save()
else:
pass
| python |
### channel configuration
CHANNEL_NAME = 'ThreatWire'
CHANNEL_PLAYLIST_ID = 'PLW5y1tjAOzI0Sx4UU2fncEwQ9BQLr5Vlu'
ITEMS_TO_SCAN = 5
FG_YOUTUBE = 'https://www.youtube.com/channel/UC3s0BtrBJpwNDaflRSoiieQ' # channel link
FG_AUTHOR = {'name':'Shannon Morse','email':'[email protected]'}
### data storage and history
ITEMS_TO_KEEP = 25
HISTORY_JSON = 'history.json'
PODCAST_FILE = 'podcast.rss'
### web hosting
WEB_HOST_DIRECTORY = '/var/www/html/ytp'
WEB_BASE_URL = 'http://10.0.1.25/ytp/'
### api stuff
API_KEY = 'insert your api key here so you won’t get rate-limited'
API_PLAYLIST_URL = 'https://www.googleapis.com/youtube/v3/playlistItems?key={}&part=snippet&contentDetails&status&maxResults={}&playlistId={}'
### other config items
REFRESH_TIME = 7200 # in seconds, this is 2 hours
FFMPEG_CMD = 'ffmpeg -i {} -b:a 192K -vn {}'
TEMP_DIRECTORY = '/tmp/yt-podcast/'
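### usage sketch (illustrative only; the downloader module that expands these
### templates is not part of this file, and the literal arguments are placeholders)
#   API_PLAYLIST_URL.format(API_KEY, ITEMS_TO_SCAN, CHANNEL_PLAYLIST_ID)
#   FFMPEG_CMD.format(TEMP_DIRECTORY + 'video.mp4', TEMP_DIRECTORY + 'audio.mp3')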
| python |
import cv2
import numpy as np
import core.image as im
import core.hc_extender as hc_ext
import matplotlib.pyplot as plt
from drawnow import drawnow
def trans(img, hcc):
'''
trans(img, hcc):
2D to 1D Transformed by Hilbert Curve
img <-- nxn matrix
hcc <-- Hibert curve coordinate with order k
k <-- 4^log2(n) or nxn, length of hcc
'''
result = []
k = len(hcc)
for i in np.arange(k):
(x, y) = hcc[i]
try:
val_img = img[x][y]
result.append(val_img)
except IndexError:
continue
return result
def _change_scale(value):
global d_scale
d_scale = value
def _change_sigma(value):
global d_sigma
d_sigma = value
def _change_lambda(value):
global d_lambda
d_lambda = value
def _change_theta(value):
global d_theta
d_theta = value
def _change_gamma(value):
global d_gamma
d_gamma = value
def plot_feature():
plt.plot(vhc)
# Initialization
img_path = 'data/kaggle_3m/TCGA_CS_4941_19960909/TCGA_CS_4941_19960909_12.tif'
imgs = im.getImage(img_path, display=False)
d_scale = 9
d_sigma = 3
d_lambda = 8
d_gamma = 1
d_theta = 180
vhc = []
global_img = imgs[1]
(w, h) = imgs[1].shape
if __name__ == "__main__":
# Setting GUI
cv2.namedWindow('Original Image')
cv2.imshow('Original Image', imgs[1])
# Window A
cv2.namedWindow('A')
cv2.createTrackbar('Scale', 'A', d_scale, 128, _change_scale)
cv2.createTrackbar('Sigma', 'A', d_sigma, 20, _change_sigma)
cv2.createTrackbar('Lambda', 'A', d_lambda, 100, _change_lambda)
cv2.createTrackbar('Theta', 'A', d_theta, 360, _change_theta)
cv2.createTrackbar('Gamma', 'A', d_gamma, 100, _change_gamma)
# Pyramids Image
pyr_imgs = im.multiPyrDown(global_img, debug=True)
# Get Hilbert Curve Coordinate with Order k
k = int(np.log2(global_img.shape[0]))
hcc = hc_ext.get_hc_index(order=k)
print('Hilbert Curve Order', k)
print('Current Mod Img shape:', global_img.shape)
while True:
# Get Kernal
gabor_k = cv2.getGaborKernel((d_scale, d_scale), d_sigma, d_theta, d_lambda, d_gamma, 0, ktype=cv2.CV_32F)
# Filtering
global_img = cv2.filter2D(pyr_imgs[3], -1, gabor_k)
# VHC <-- Vector of Hilbert Curve
vhc = trans(global_img, hcc)
# Display an image and Plotting graph
cv2.imshow('A', global_img)
drawnow(plot_feature)
# Key controller
key = cv2.waitKey(1) & 0xff
if key == 27:
print("End Application")
break
| python |
"""Base OAuthBackend with token and session validators."""
from typing import List, Optional
from fastapi.security import OAuth2
from starlette.authentication import AuthCredentials, AuthenticationBackend, UnauthenticatedUser
from starlette.requests import Request
from fastapi_aad_auth._base.state import AuthenticationState
from fastapi_aad_auth._base.validators import SessionValidator, TokenValidator, Validator
from fastapi_aad_auth.mixins import LoggingMixin, NotAuthenticatedMixin
from fastapi_aad_auth.utilities import deprecate
class BaseOAuthBackend(NotAuthenticatedMixin, LoggingMixin, AuthenticationBackend):
"""Base OAuthBackend with token and session validators."""
def __init__(self, validators: List[Validator], enabled: bool = True):
"""Initialise the validators."""
super().__init__()
self.enabled = enabled
self.validators = validators[:]
async def authenticate(self, request):
"""Authenticate a request.
Required by starlette authentication middleware
"""
state = self.check(request, allow_session=True)
if state is None:
return AuthCredentials([]), UnauthenticatedUser()
return state.credentials, state.authenticated_user
def is_authenticated(self, request: Request):
"""Check if a request is authenticated."""
state = self.check(request, allow_session=True)
return state is not None and state.is_authenticated()
async def __call__(self, request: Request) -> Optional[AuthenticationState]:
"""Check/validate a request."""
return self.check(request)
def check(self, request: Request, allow_session=True) -> Optional[AuthenticationState]:
"""Check/validate a request."""
state = None
for validator in self.validators:
if not allow_session and isinstance(validator, SessionValidator):
self.logger.info('Skipping Session Validator as allow_session is False')
continue
state = validator.check(request)
self.logger.debug(f'Authentication state {state} from validator {validator}')
if state is not None and state.is_authenticated():
break
self.logger.info(f'Identified state {state}')
return state
def _iter_validators(self):
"""Iterate over authentication validators."""
for validator in self.validators:
yield validator
def requires_auth(self, allow_session: bool = False):
"""Require authentication, use with fastapi Depends."""
# This is a bit horrible, but is needed for fastapi to get this into OpenAPI (or similar) - it needs to be an OAuth2 object
# We create this here "dynamically" for each endpoint, as we allow customisation on whether a session is permissible
if self.enabled:
class OAuthValidator(OAuth2):
"""OAuthValidator for API Auth."""
def __init__(self_):
"""Initialise the validator."""
token_validators = [u for u in self.validators if isinstance(u, TokenValidator)]
super().__init__(flows=token_validators[0].model.flows)
async def __call__(self_, request: Request):
"""Validate a request."""
state = self.check(request, allow_session)
if state is None or not state.is_authenticated():
raise self.not_authenticated
return state
return OAuthValidator()
else:
def noauth(request: Request):
return AuthenticationState()
return noauth
@property # type: ignore
@deprecate('0.2.0', replaced_by=f'{__name__}:BaseOAuthBackend.requires_auth')
def api_auth_scheme(self):
"""Get the API Authentication Schema."""
return self.requires_auth()
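

# Usage sketch (illustrative, not part of the library): assuming a configured
# ``backend`` instance of BaseOAuthBackend, the ``requires_auth`` factory plugs
# into a route through fastapi's ``Depends``:
#
#     from fastapi import Depends, FastAPI
#     app = FastAPI()
#
#     @app.get('/api/me')
#     async def me(state: AuthenticationState = Depends(backend.requires_auth(allow_session=True))):
#         return {'authenticated': state.is_authenticated()}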
| python |
"""
Hello World
"""
from .agents import *
from .app import *
from .core import *
from .renderers import *
from .sims import *
from .simulation import *
from .styles import *
from .sys import * | python |
from asmpatch.batchbuilder import BatchBuilder
from asmpatch.util import TemporyFolderBuilder
import os
os.makedirs("./build", exist_ok=True)
import subprocess
#TODO: cache, autofind, config file
batch = BatchBuilder()
batch.set_end_offset(int("805954bc", 16)) #TODO: auto find end offset via elf file. Also auto add the linker file
gcc_path = subprocess.check_output(["nix-build", "<nixpkgs>", "-A", "pkgs.pkgsCross.ppc-embedded.buildPackages.gcc", "--no-out-link"], encoding="ascii").split("\n")[0]
# something like /nix/store/ps6pvl36wzsdcibxkyxm8wiy5qxkx87p-powerpc-none-eabi-stage-final-gcc-debug-wrapper-9.3.0, contain bin/powerpc-none-eabi-* files
batch.with_gcc_path(gcc_path)
batch.with_linker_file("patches/spyro06_ntsc.ld")
tmp_folder = TemporyFolderBuilder()
tmp_folder.set_keep_folder(True)
batch.with_tmp_builder(tmp_folder)
#for name in [ "remove_optimization_for_freecam_ntsc", "change_rot_charge_speed", "include_cpp"]:
for name in [ "rainbow_flame" ]:
batch.with_patch("./patches/{}.asm".format(name), "./build/{}_diff.txt".format(name))
print("generating ...")
batch.execute()
print("done !")
| python |
import unittest
from jpake.tests import test_jpake
from jpake.tests import test_parameters
loader = unittest.TestLoader()
suite = unittest.TestSuite((
loader.loadTestsFromModule(test_jpake),
loader.loadTestsFromModule(test_parameters),
))
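
# Optional convenience so the combined suite can also be executed directly with the
# standard-library runner (the suite above is otherwise consumed unchanged by an
# external runner):
if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite)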
| python |
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
tf.logging.set_verbosity(tf.logging.ERROR)
mnist = input_data.read_data_sets("./MNIST_data/", one_hot=True)
learning_rate = 0.001
training_epochs = 20
batch_size = 100
# MODEL CONFIGURATION
X = tf.placeholder(tf.float32, [None, 28, 28, 1], name='data')
Y = tf.placeholder(tf.float32, [None, 10])
# tf.placeholder
# shape [None, 28, 28, 1]: None means the batch size is not fixed and is allocated dynamically according to the input size
conv1 = tf.layers.conv2d(X, 10, [3, 3], padding='same', activation=tf.nn.relu)
# tf.layers.conv2d(X, 10, [3,3], padding='same', activation=tf.nn.relu)
# takes image X as input; a 3x3 kernel slides across it horizontally and vertically, and the 10 output channels pass through a ReLU activation
pool1 = tf.layers.max_pooling2d(conv1, [2, 2], strides=2, padding='same')
# takes conv1's feature map; a [2,2] pooling kernel moves 2 cells at a time horizontally and vertically
# padding='same' pads the borders with zeros so the output feature map keeps the same size
# 'valid' padding builds the map from the raw input array only, without any padding
# SAME padding is commonly used so that important information at the edges is not lost
conv2 = tf.layers.conv2d(pool1, 20, [3, 3], padding='same', activation=tf.nn.relu)
pool2 = tf.layers.max_pooling2d(conv2, [2, 2], strides=2, padding='same')
fc1 = tf.contrib.layers.flatten(pool2)
fc2 = tf.layers.dense(fc1, 200, activation=tf.nn.relu)
logits = tf.layers.dense(fc2, 10, activation=None)
output = tf.nn.softmax(logits, name='prob')
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=logits))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
#
# Training
#
sess = tf.Session()
sess.run(tf.global_variables_initializer())
total_batch = int(mnist.train.num_examples / batch_size)
print('Start learning!')
for epoch in range(training_epochs):
total_cost = 0
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size)
batch_xs = batch_xs.reshape(-1, 28, 28, 1)
_, cost_val = sess.run([optimizer, cost], feed_dict={X: batch_xs, Y: batch_ys})
total_cost += cost_val
print('Epoch: {0}, Avg. Cost = {1:.4f}'.format(epoch + 1, total_cost/total_batch))
print('Learning finished!')
# Test the results
is_correct = tf.equal(tf.argmax(logits, 1), tf.argmax(Y, 1))
acc = tf.reduce_mean(tf.cast(is_correct, tf.float32))
accuracy = sess.run(acc, feed_dict={
X: mnist.test.images.reshape(-1, 28, 28, 1), Y: mnist.test.labels})
print('Test Accuracy:', accuracy)
# Freeze variables and save pb file
output_graph_def = graph_util.convert_variables_to_constants(sess, sess.graph_def, ['prob'])
with gfile.FastGFile('./mnist_cnn.pb', 'wb') as f:
f.write(output_graph_def.SerializeToString())
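# Optional sanity check -- a minimal sketch, not part of the original script:
# reload the frozen graph and push one test image through the tensors named
# 'data' (input placeholder) and 'prob' (softmax output) defined above.
def check_frozen_graph(pb_path='./mnist_cnn.pb'):
    graph_def = tf.GraphDef()
    with gfile.FastGFile(pb_path, 'rb') as pb_file:
        graph_def.ParseFromString(pb_file.read())
    with tf.Graph().as_default() as graph:
        tf.import_graph_def(graph_def, name='')
        with tf.Session(graph=graph) as check_sess:
            probs = check_sess.run('prob:0', feed_dict={
                'data:0': mnist.test.images[:1].reshape(-1, 28, 28, 1)})
    print('Frozen graph check - predicted digit:', probs.argmax())
# check_frozen_graph()  # uncomment to verify the exported model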
print('mnist_cnn.pb file is created successfully!!')
| python
from pathlib import Path
from mmvae_hub.utils.setup.flags_utils import BaseFlagsSetup
from mmvae_hub.base.BaseFlags import parser as parser
# DATASET NAME
parser.add_argument('--exp_str_prefix', type=str, default='mnistsvhntext', help="prefix of the experiment directory.")
# DATA DEPENDENT
# to be set by experiments themselves
parser.add_argument('--style_m1_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--style_m2_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--style_m3_dim', type=int, default=0, help="dimension of varying factor latent space")
parser.add_argument('--len_sequence', type=int, default=8, help="length of sequence")
parser.add_argument('--num_classes', type=int, default=10, help="number of classes on which the data set trained")
parser.add_argument('--dim', type=int, default=64, help="dimensionality parameter of the model")
parser.add_argument('--data_multiplications', type=int, default=20, help="number of pairs per sample")
parser.add_argument('--num_hidden_layers', type=int, default=1, help="number of hidden layers")
parser.add_argument('--likelihood_m1', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m2', type=str, default='laplace', help="output distribution")
parser.add_argument('--likelihood_m3', type=str, default='categorical', help="output distribution")
# SAVE and LOAD
# to be set by experiments themselves
parser.add_argument('--encoder_save_m1', type=str, default='encoderM1', help="model save for encoder")
parser.add_argument('--encoder_save_m2', type=str, default='encoderM2', help="model save for encoder")
parser.add_argument('--encoder_save_m3', type=str, default='encoderM3', help="model save for encoder")
parser.add_argument('--decoder_save_m1', type=str, default='decoderM1', help="model save for decoder")
parser.add_argument('--decoder_save_m2', type=str, default='decoderM2', help="model save for decoder")
parser.add_argument('--decoder_save_m3', type=str, default='decoderM3', help="model save for decoder")
parser.add_argument('--clf_save_m1', type=str, default='clf_m1', help="model save for clf")
parser.add_argument('--clf_save_m2', type=str, default='clf_m2', help="model save for clf")
parser.add_argument('--clf_save_m3', type=str, default='clf_m3', help="model save for clf")
# LOSS TERM WEIGHTS
parser.add_argument('--beta_m1_style', type=float, default=1.0, help="default weight divergence term style modality 1")
parser.add_argument('--beta_m2_style', type=float, default=1.0, help="default weight divergence term style modality 2")
parser.add_argument('--beta_m3_style', type=float, default=1.0, help="default weight divergence term style modality 3")
parser.add_argument('--div_weight_m1_content', type=float, default=0.25,
help="default weight divergence term content modality 1")
parser.add_argument('--div_weight_m2_content', type=float, default=0.25,
help="default weight divergence term content modality 2")
parser.add_argument('--div_weight_m3_content', type=float, default=0.25,
                    help="default weight divergence term content modality 3")
parser.add_argument('--div_weight_uniform_content', type=float, default=0.25,
help="default weight divergence term prior")
class mnistsvhntextFlagsSetup(BaseFlagsSetup):
def __init__(self, config_path: Path):
super().__init__(config_path)
self.parser = parser
def flags_set_alpha_modalities(self, flags):
flags.alpha_modalities = [flags.div_weight_uniform_content, flags.div_weight_m1_content,
flags.div_weight_m2_content, flags.div_weight_m3_content]
return flags
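# Example usage (a minimal sketch; the config path below is hypothetical):
# setup = mnistsvhntextFlagsSetup(Path('configs/mnistsvhntext.json'))
# flags = parser.parse_args()
# flags = setup.flags_set_alpha_modalities(flags)
# # the four content divergence weights above default to 0.25 each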
| python |
from .. import db, flask_bcrypt
class Company(db.Model):
"""User Model for storing user related details"""
__tablename__ = "companies"
id = db.Column(db.Integer, primary_key=True, autoincrement=True)
name = db.Column(db.String(255), unique=False, nullable=False)
address = db.Column(db.String(255), nullable=False)
city = db.Column(db.String(255), nullable=False)
state = db.Column(db.String(255), nullable=False)
zip = db.Column(db.String(255), nullable=False)
registration_number = db.Column(db.String(255), nullable=False)
registration_court = db.Column(db.String(255), nullable=False)
vat_number = db.Column(db.String(255), nullable=False)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
def __repr__(self):
return "<Company '{}'>".format(self.name)
| python |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import functools
import logging
import os
import shutil
import signal
import tempfile
import threading
import time
import psutil
from helpers import (unittest, with_config, skipOnTravis, LuigiTestCase,
temporary_unloaded_module)
import luigi.notifications
import luigi.task_register
import luigi.worker
import mock
from luigi import ExternalTask, RemoteScheduler, Task, Event
from luigi.mock import MockTarget, MockFileSystem
from luigi.scheduler import Scheduler
from luigi.worker import Worker
from luigi.rpc import RPCError
from luigi import six
from luigi.cmdline import luigi_run
luigi.notifications.DEBUG = True
class DummyTask(Task):
def __init__(self, *args, **kwargs):
super(DummyTask, self).__init__(*args, **kwargs)
self.has_run = False
def complete(self):
return self.has_run
def run(self):
logging.debug("%s - setting has_run", self)
self.has_run = True
class DynamicDummyTask(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(self.p)
def run(self):
with self.output().open('w') as f:
f.write('Done!')
time.sleep(0.5) # so we can benchmark & see if parallelization works
class DynamicDummyTaskWithNamespace(DynamicDummyTask):
task_namespace = 'banana'
class DynamicRequires(Task):
p = luigi.Parameter()
use_banana_task = luigi.BoolParameter(default=False)
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'parent'))
def run(self):
if self.use_banana_task:
task_cls = DynamicDummyTaskWithNamespace
else:
task_cls = DynamicDummyTask
dummy_targets = yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5)]
dummy_targets += yield [task_cls(os.path.join(self.p, str(i)))
for i in range(5, 7)]
with self.output().open('w') as f:
for i, d in enumerate(dummy_targets):
for line in d.open('r'):
print('%d: %s' % (i, line.strip()), file=f)
class DynamicRequiresOtherModule(Task):
p = luigi.Parameter()
def output(self):
return luigi.LocalTarget(os.path.join(self.p, 'baz'))
def run(self):
import other_module
other_target_foo = yield other_module.OtherModuleTask(os.path.join(self.p, 'foo')) # NOQA
other_target_bar = yield other_module.OtherModuleTask(os.path.join(self.p, 'bar')) # NOQA
with self.output().open('w') as f:
f.write('Done!')
class DummyErrorTask(Task):
retry_index = 0
def run(self):
self.retry_index += 1
raise Exception("Retry index is %s for %s" % (self.retry_index, self.task_family))
class WorkerTest(LuigiTestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.time = time.time
with Worker(scheduler=self.sch, worker_id='X') as w, Worker(scheduler=self.sch, worker_id='Y') as w2:
self.w = w
self.w2 = w2
super(WorkerTest, self).run(result)
if time.time != self.time:
time.time = self.time
def setTime(self, t):
time.time = lambda: t
def test_dep(self):
class A(Task):
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertTrue(a.has_run)
self.assertTrue(b.has_run)
def test_external_dep(self):
class A(ExternalTask):
def complete(self):
return False
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
class B(A):
def requires(self):
return luigi.task.externalize(a)
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_legacy_externalized_dep(self):
class A(Task):
has_run = False
def run(self):
self.has_run = True
def complete(self):
return self.has_run
a = A()
a.run = NotImplemented
class B(A):
def requires(self):
return a
b = B()
self.assertTrue(self.w.add(b))
self.assertTrue(self.w.run())
self.assertFalse(a.has_run)
self.assertFalse(b.has_run)
def test_type_error_in_tracking_run_deprecated(self):
class A(Task):
num_runs = 0
def complete(self):
return False
def run(self, tracking_url_callback=None):
self.num_runs += 1
raise TypeError('bad type')
a = A()
self.assertTrue(self.w.add(a))
self.assertFalse(self.w.run())
# Should only run and fail once, not retry because of the type error
self.assertEqual(1, a.num_runs)
def test_tracking_url(self):
tracking_url = 'http://test_url.com/'
class A(Task):
has_run = False
def complete(self):
return self.has_run
def run(self):
self.set_tracking_url(tracking_url)
self.has_run = True
a = A()
self.assertTrue(self.w.add(a))
self.assertTrue(self.w.run())
tasks = self.sch.task_list('DONE', '')
self.assertEqual(1, len(tasks))
self.assertEqual(tracking_url, tasks[a.task_id]['tracking_url'])
def test_fail(self):
class CustomException(BaseException):
def __init__(self, msg):
self.msg = msg
class A(Task):
def run(self):
self.has_run = True
raise CustomException('bad things')
def complete(self):
return self.has_run
a = A()
class B(Task):
def requires(self):
return a
def run(self):
self.has_run = True
def complete(self):
return self.has_run
b = B()
a.has_run = False
b.has_run = False
self.assertTrue(self.w.add(b))
self.assertFalse(self.w.run())
self.assertTrue(a.has_run)
self.assertFalse(b.has_run)
def test_unknown_dep(self):
# see related test_remove_dep test (grep for it)
class A(ExternalTask):
def complete(self):
return False
class C(Task):
def complete(self):
return True
def get_b(dep):
class B(Task):
def requires(self):
return dep
def run(self):
self.has_run = True
def complete(self):
return False
b = B()
b.has_run = False
return b
b_a = get_b(A())
b_c = get_b(C())
self.assertTrue(self.w.add(b_a))
# So now another worker goes in and schedules C -> B
# This should remove the dep A -> B but will screw up the first worker
self.assertTrue(self.w2.add(b_c))
self.assertFalse(self.w.run()) # should not run anything - the worker should detect that A is broken
self.assertFalse(b_a.has_run)
# not sure what should happen??
# self.w2.run() # should run B since C is fulfilled
# self.assertTrue(b_c.has_run)
def test_unfulfilled_dep(self):
class A(Task):
def complete(self):
return self.done
def run(self):
self.done = True
def get_b(a):
class B(A):
def requires(self):
return a
b = B()
b.done = False
a.done = True
return b
a = A()
b = get_b(a)
self.assertTrue(self.w.add(b))
a.done = False
self.w.run()
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_gets_missed_work(self):
class A(Task):
done = False
def complete(self):
return self.done
def run(self):
self.done = True
a = A()
self.assertTrue(self.w.add(a))
# simulate a missed get_work response
self.assertEqual(a.task_id, self.sch.get_work(worker='X')['task_id'])
self.assertTrue(self.w.run())
self.assertTrue(a.complete())
def test_avoid_infinite_reschedule(self):
class A(Task):
def complete(self):
return False
class B(Task):
def complete(self):
return False
def requires(self):
return A()
self.assertTrue(self.w.add(B()))
self.assertFalse(self.w.run())
def test_fails_registering_signal(self):
with mock.patch('luigi.worker.signal', spec=['signal']):
# mock will raise an attribute error getting signal.SIGUSR1
Worker()
def test_allow_reschedule_with_many_missing_deps(self):
class A(Task):
""" Task that must run twice to succeed """
i = luigi.IntParameter()
runs = 0
def complete(self):
return self.runs >= 2
def run(self):
self.runs += 1
class B(Task):
done = False
def requires(self):
return map(A, range(20))
def complete(self):
return self.done
def run(self):
self.done = True
b = B()
w = Worker(scheduler=self.sch, worker_id='X', max_reschedules=1)
self.assertTrue(w.add(b))
self.assertFalse(w.run())
# For b to be done, we must have rescheduled its dependencies to run them twice
self.assertTrue(b.complete())
self.assertTrue(all(a.complete() for a in b.deps()))
def test_interleaved_workers(self):
class A(DummyTask):
pass
a = A()
class B(DummyTask):
def requires(self):
return a
ExternalB = luigi.task.externalize(B)
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(eb))
logging.debug("RUNNING BROKEN WORKER")
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
logging.debug("RUNNING FUNCTIONAL WORKER")
self.assertTrue(w.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_interleaved_workers2(self):
# two tasks without dependencies, one external, one not
class B(DummyTask):
pass
ExternalB = luigi.task.externalize(B)
b = B()
eb = ExternalB()
self.assertEqual(str(eb), "B()")
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X') as w, Worker(scheduler=sch, worker_id='Y') as w2:
self.assertTrue(w2.add(eb))
self.assertTrue(w.add(b))
self.assertTrue(w2.run())
self.assertFalse(b.complete())
self.assertTrue(w.run())
self.assertTrue(b.complete())
def test_interleaved_workers3(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w.add(a))
self.assertTrue(w2.add(b))
threading.Thread(target=w.run).start()
self.assertTrue(w2.run())
self.assertTrue(a.complete())
self.assertTrue(b.complete())
def test_die_for_non_unique_pending(self):
class A(DummyTask):
def run(self):
logging.debug('running A')
time.sleep(0.1)
super(A, self).run()
a = A()
class B(DummyTask):
def requires(self):
return a
def run(self):
logging.debug('running B')
super(B, self).run()
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id='X', keep_alive=True, count_uniques=True) as w:
with Worker(scheduler=sch, worker_id='Y', keep_alive=True, count_uniques=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w.add(b))
self.assertTrue(w2.add(b))
self.assertEqual(w._get_work()[0], a.task_id)
self.assertTrue(w2.run())
self.assertFalse(a.complete())
self.assertFalse(b.complete())
def test_complete_exception(self):
"Tests that a task is still scheduled if its sister task crashes in the complete() method"
class A(DummyTask):
def complete(self):
raise Exception("doh")
a = A()
class C(DummyTask):
pass
c = C()
class B(DummyTask):
def requires(self):
return a, c
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertFalse(a.has_run)
def test_requires_exception(self):
class A(DummyTask):
def requires(self):
raise Exception("doh")
a = A()
class D(DummyTask):
pass
d = D()
class C(DummyTask):
def requires(self):
return d
c = C()
class B(DummyTask):
def requires(self):
return c, a
b = B()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as w:
self.assertFalse(w.add(b))
self.assertTrue(w.run())
self.assertFalse(b.has_run)
self.assertTrue(c.has_run)
self.assertTrue(d.has_run)
self.assertFalse(a.has_run)
def test_run_csv_batch_job(self):
completed = set()
class CsvBatchJob(luigi.Task):
values = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
def run(self):
completed.update(self.values.split(','))
self.has_run = True
def complete(self):
return all(value in completed for value in self.values.split(','))
tasks = [CsvBatchJob(str(i)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertFalse(task.has_run)
def test_run_max_batch_job(self):
completed = set()
class MaxBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return any(self.value <= ran for ran in completed)
tasks = [MaxBatchJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
def test_run_batch_job_unbatched(self):
completed = set()
class MaxNonBatchJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
batchable = False
def run(self):
completed.add(self.value)
self.has_run = True
def complete(self):
return self.value in completed
tasks = [MaxNonBatchJob((i,)) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertTrue(task.has_run)
def test_run_batch_job_limit_batch_size(self):
completed = set()
runs = []
class CsvLimitedBatchJob(luigi.Task):
value = luigi.parameter.Parameter(batch_method=','.join)
has_run = False
max_batch_size = 4
def run(self):
completed.update(self.value.split(','))
runs.append(self)
def complete(self):
return all(value in completed for value in self.value.split(','))
tasks = [CsvLimitedBatchJob(str(i)) for i in range(11)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertTrue(self.w.run())
for task in tasks:
self.assertTrue(task.complete())
self.assertEqual(3, len(runs))
def test_fail_max_batch_job(self):
class MaxBatchFailJob(luigi.Task):
value = luigi.IntParameter(batch_method=max)
has_run = False
def run(self):
self.has_run = True
assert False
def complete(self):
return False
tasks = [MaxBatchFailJob(i) for i in range(10)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
for task in tasks:
# only task number 9 should run
self.assertFalse(task.has_run and task.value < 9)
self.assertEqual({task.task_id for task in tasks}, set(self.sch.task_list('FAILED', '')))
def test_gracefully_handle_batch_method_failure(self):
class BadBatchMethodTask(DummyTask):
priority = 10
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
bad_tasks = [BadBatchMethodTask(i) for i in range(5)]
good_tasks = [DummyTask()]
all_tasks = good_tasks + bad_tasks
self.assertFalse(any(task.complete() for task in all_tasks))
worker = Worker(scheduler=Scheduler(retry_count=1), keep_alive=True)
for task in all_tasks:
self.assertTrue(worker.add(task))
self.assertFalse(worker.run())
self.assertFalse(any(task.complete() for task in bad_tasks))
# we only get to run the good task if the bad task failures were handled gracefully
self.assertTrue(all(task.complete() for task in good_tasks))
def test_post_error_message_for_failed_batch_methods(self):
class BadBatchMethodTask(DummyTask):
batch_int_param = luigi.IntParameter(batch_method=int.__add__) # should be sum
tasks = [BadBatchMethodTask(1), BadBatchMethodTask(2)]
for task in tasks:
self.assertTrue(self.w.add(task))
self.assertFalse(self.w.run())
failed_ids = set(self.sch.task_list('FAILED', ''))
self.assertEqual({task.task_id for task in tasks}, failed_ids)
self.assertTrue(all(self.sch.fetch_error(task_id)['error'] for task_id in failed_ids))
class WorkerKeepAliveTests(LuigiTestCase):
def setUp(self):
self.sch = Scheduler()
super(WorkerKeepAliveTests, self).setUp()
def _worker_keep_alive_test(self, first_should_live, second_should_live, task_status=None, **worker_args):
worker_args.update({
'scheduler': self.sch,
'worker_processes': 0,
'wait_interval': 0.01,
'wait_jitter': 0.0,
})
w1 = Worker(worker_id='w1', **worker_args)
w2 = Worker(worker_id='w2', **worker_args)
with w1 as worker1, w2 as worker2:
worker1.add(DummyTask())
t1 = threading.Thread(target=worker1.run)
t1.start()
worker2.add(DummyTask())
t2 = threading.Thread(target=worker2.run)
t2.start()
if task_status:
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status=task_status)
# allow workers to run their get work loops a few times
time.sleep(0.1)
try:
self.assertEqual(first_should_live, t1.isAlive())
self.assertEqual(second_should_live, t2.isAlive())
finally:
# mark the task done so the worker threads will die
self.sch.add_task(worker='DummyWorker', task_id=DummyTask().task_id, status='DONE')
t1.join()
t2.join()
def test_no_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
)
def test_keep_alive(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
)
def test_keep_alive_count_uniques(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
count_uniques=True,
)
def test_keep_alive_count_last_scheduled(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=True,
keep_alive=True,
count_last_scheduled=True,
)
def test_keep_alive_through_failure(self):
self._worker_keep_alive_test(
first_should_live=True,
second_should_live=True,
keep_alive=True,
task_status='FAILED',
)
def test_do_not_keep_alive_through_disable(self):
self._worker_keep_alive_test(
first_should_live=False,
second_should_live=False,
keep_alive=True,
task_status='DISABLED',
)
class WorkerInterruptedTest(unittest.TestCase):
def setUp(self):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
requiring_sigusr = unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
'signal.SIGUSR1 not found on this system')
def _test_stop_getting_new_work(self, worker):
d = DummyTask()
with worker:
            worker.add(d)  # For an assistant it's ok that other tasks add it
self.assertFalse(d.complete())
worker.handle_interrupt(signal.SIGUSR1, None)
worker.run()
self.assertFalse(d.complete())
@requiring_sigusr
def test_stop_getting_new_work(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch))
@requiring_sigusr
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=False, assistant=True))
@requiring_sigusr
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(
Worker(scheduler=self.sch, keep_alive=True, assistant=True))
def test_existence_of_disabling_option(self):
# any code equivalent of `os.kill(os.getpid(), signal.SIGUSR1)`
# seem to give some sort of a "InvocationError"
Worker(no_install_shutdown_handler=True)
@with_config({"worker": {"no_install_shutdown_handler": "True"}})
def test_can_run_luigi_in_thread(self):
class A(DummyTask):
pass
task = A()
# Note that ``signal.signal(signal.SIGUSR1, fn)`` can only be called in the main thread.
# So if we do not disable the shutdown handler, this would fail.
t = threading.Thread(target=lambda: luigi.build([task], local_scheduler=True))
t.start()
t.join()
self.assertTrue(task.complete())
class WorkerDisabledTest(LuigiTestCase):
def make_sch(self):
return Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
def _test_stop_getting_new_work_build(self, sch, worker):
"""
I got motivated to create this test case when I saw that the
        execution_summary crashed after my first attempted solution.
"""
class KillWorkerTask(luigi.Task):
did_actually_run = False
def run(self):
sch.disable_worker('my_worker_id')
KillWorkerTask.did_actually_run = True
class Factory(object):
def create_local_scheduler(self, *args, **kwargs):
return sch
def create_worker(self, *args, **kwargs):
return worker
luigi.build([KillWorkerTask()], worker_scheduler_factory=Factory(), local_scheduler=True)
self.assertTrue(KillWorkerTask.did_actually_run)
def _test_stop_getting_new_work_manual(self, sch, worker):
d = DummyTask()
with worker:
            worker.add(d)  # For an assistant it's ok that other tasks add it
self.assertFalse(d.complete())
sch.disable_worker('my_worker_id')
worker.run() # Note: Test could fail by hanging on this line
self.assertFalse(d.complete())
def _test_stop_getting_new_work(self, **worker_kwargs):
worker_kwargs['worker_id'] = 'my_worker_id'
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_manual(sch, Worker(**worker_kwargs))
sch = self.make_sch()
worker_kwargs['scheduler'] = sch
self._test_stop_getting_new_work_build(sch, Worker(**worker_kwargs))
def test_stop_getting_new_work_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=False)
def test_stop_getting_new_work_assistant(self):
self._test_stop_getting_new_work(keep_alive=False, assistant=True)
def test_stop_getting_new_work_assistant_keep_alive(self):
self._test_stop_getting_new_work(keep_alive=True, assistant=True)
class DynamicDependenciesTest(unittest.TestCase):
n_workers = 1
timeout = float('inf')
def setUp(self):
self.p = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.p)
def test_dynamic_dependencies(self, use_banana_task=False):
t0 = time.time()
t = DynamicRequires(p=self.p, use_banana_task=use_banana_task)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
# loop through output and verify
with t.output().open('r') as f:
for i in range(7):
self.assertEqual(f.readline().strip(), '%d: Done!' % i)
self.assertTrue(time.time() - t0 < self.timeout)
def test_dynamic_dependencies_with_namespace(self):
self.test_dynamic_dependencies(use_banana_task=True)
def test_dynamic_dependencies_other_module(self):
t = DynamicRequiresOtherModule(p=self.p)
luigi.build([t], local_scheduler=True, workers=self.n_workers)
self.assertTrue(t.complete())
class DynamicDependenciesWithMultipleWorkersTest(DynamicDependenciesTest):
n_workers = 100
timeout = 3.0 # We run 7 tasks that take 0.5s each so it should take less than 3.5s
class WorkerPingThreadTests(unittest.TestCase):
def test_ping_retry(self):
""" Worker ping fails once. Ping continues to try to connect to scheduler
Kind of ugly since it uses actual timing with sleep to test the thread
"""
sch = Scheduler(
retry_delay=100,
remove_delay=1000,
worker_disconnect_delay=10,
)
self._total_pings = 0 # class var so it can be accessed from fail_ping
def fail_ping(worker):
# this will be called from within keep-alive thread...
self._total_pings += 1
raise Exception("Some random exception")
sch.ping = fail_ping
with Worker(
scheduler=sch,
worker_id="foo",
ping_interval=0.01 # very short between pings to make test fast
):
# let the keep-alive thread run for a bit...
time.sleep(0.1) # yes, this is ugly but it's exactly what we need to test
self.assertTrue(
self._total_pings > 1,
msg="Didn't retry pings (%d pings performed)" % (self._total_pings,)
)
def test_ping_thread_shutdown(self):
with Worker(ping_interval=0.01) as w:
self.assertTrue(w._keep_alive_thread.is_alive())
self.assertFalse(w._keep_alive_thread.is_alive())
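# Test helper: patches smtplib.SMTP and forces email sending so that every
# notification luigi tries to send is captured in the ``emails`` list that is
# passed to the decorated test, instead of actually being sent.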
def email_patch(test_func, email_config=None):
EMAIL_CONFIG = {"core": {"error-email": "not-a-real-email-address-for-test-only"}, "email": {"force-send": "true"}}
if email_config is not None:
EMAIL_CONFIG.update(email_config)
emails = []
def mock_send_email(sender, recipients, msg):
emails.append(msg)
@with_config(EMAIL_CONFIG)
@functools.wraps(test_func)
@mock.patch('smtplib.SMTP')
def run_test(self, smtp):
smtp().sendmail.side_effect = mock_send_email
test_func(self, emails)
return run_test
def custom_email_patch(config):
return functools.partial(email_patch, email_config=config)
class WorkerEmailTest(LuigiTestCase):
def run(self, result=None):
super(WorkerEmailTest, self).setUp()
sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
with Worker(scheduler=sch, worker_id="foo") as self.worker:
super(WorkerEmailTest, self).run(result)
@email_patch
def test_connection_error(self, emails):
sch = RemoteScheduler('http://tld.invalid:1337', connect_timeout=1)
self.waits = 0
def dummy_wait():
self.waits += 1
sch._wait = dummy_wait
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
with Worker(scheduler=sch) as worker:
try:
worker.add(a)
except RPCError:
self.assertEqual(self.waits, 2) # should attempt to add it 3 times
self.assertNotEqual(emails, [])
self.assertTrue(emails[0].find("Luigi: Framework error while scheduling %s" % (a,)) != -1)
else:
self.fail()
@email_patch
def test_complete_error(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_error_email_batch(self, emails):
class A(DummyTask):
def complete(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_error_email_batch_to_owner(self, emails):
class A(DummyTask):
owner_email = '[email protected]'
def complete(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue(any(
"1 scheduling failure" in email and '[email protected]' in email
for email in emails))
@email_patch
def test_requires_error(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_requires_error_email_batch(self, emails):
class A(DummyTask):
def requires(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
worker.run()
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_complete_return_value(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.worker.run()
self.assertTrue(emails[0].find("Luigi: %s failed scheduling" % (a,)) != -1)
self.assertFalse(a.has_run)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_complete_return_value_email_batch(self, emails):
class A(DummyTask):
def complete(self):
pass # no return value should be an error
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
a = A()
self.assertEqual(emails, [])
worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertFalse(a.has_run)
scheduler.prune()
self.assertTrue("1 scheduling failure" in emails[0])
@email_patch
def test_run_error(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
a = A()
luigi.build([a], workers=1, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_email_batch(self, emails):
class A(luigi.Task):
owner_email = ['[email protected]', '[email protected]']
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(3, len(emails))
self.assertTrue(any('[email protected]' in email for email in emails))
self.assertTrue(any('[email protected]' in email for email in emails))
@with_config({'batch_email': {'email_interval': '0'}, 'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_batch_email_string(self, emails):
class A(luigi.Task):
owner_email = '[email protected]'
def run(self):
raise Exception("b0rk")
scheduler = Scheduler(batch_emails=True)
worker = Worker(scheduler)
worker.add(A())
worker.run()
scheduler.prune()
self.assertEqual(2, len(emails))
self.assertTrue(any('[email protected]' in email for email in emails))
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_run_error_no_email(self, emails):
class A(luigi.Task):
def run(self):
raise Exception("b0rk")
luigi.build([A()], workers=1, local_scheduler=True)
self.assertFalse(emails)
@email_patch
def test_task_process_dies_with_email(self, emails):
a = SendSignalTask(signal.SIGKILL)
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("died unexpectedly with exit code -9") != -1)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_process_dies_no_email(self, emails):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@email_patch
def test_task_times_out(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(1, len(emails))
self.assertTrue(emails[0].find("Luigi: %s FAILED" % (a,)) != -1)
self.assertTrue(emails[0].find("timed out after 0.0001 seconds and was terminated.") != -1)
@with_config({'worker': {'send_failure_email': 'False'}})
@email_patch
def test_task_times_out_no_email(self, emails):
class A(luigi.Task):
worker_timeout = 0.0001
def run(self):
time.sleep(5)
luigi.build([A()], workers=2, local_scheduler=True)
self.assertEqual([], emails)
@with_config(dict(worker=dict(retry_external_tasks='true')))
@email_patch
def test_external_task_retries(self, emails):
"""
Test that we do not send error emails on the failures of external tasks
"""
class A(luigi.ExternalTask):
pass
a = A()
luigi.build([a], workers=2, local_scheduler=True)
self.assertEqual(emails, [])
@email_patch
def test_no_error(self, emails):
class A(DummyTask):
pass
a = A()
self.assertEqual(emails, [])
self.worker.add(a)
self.assertEqual(emails, [])
self.worker.run()
self.assertEqual(emails, [])
self.assertTrue(a.complete())
@custom_email_patch({"core": {"error-email": "not-a-real-email-address-for-test-only", 'email-type': 'none'}})
def test_disable_emails(self, emails):
class A(luigi.Task):
def complete(self):
raise Exception("b0rk")
self.worker.add(A())
self.assertEqual(emails, [])
class RaiseSystemExit(luigi.Task):
def run(self):
raise SystemExit("System exit!!")
class SendSignalTask(luigi.Task):
signal = luigi.IntParameter()
def run(self):
os.kill(os.getpid(), self.signal)
class HangTheWorkerTask(luigi.Task):
worker_timeout = luigi.IntParameter(default=None)
def run(self):
while True:
pass
def complete(self):
return False
class MultipleWorkersTest(unittest.TestCase):
@unittest.skip('Always skip. There are many intermittent failures')
    # This passes under python3 when run as `nosetests test/worker_test.py`
    # but not as `nosetests test`. Probably some side effect of previous tests
    @unittest.skipIf(six.PY3, 'This test fails on python3 when run with tox.')
def test_multiple_workers(self):
# Test using multiple workers
# Also test generating classes dynamically since this may reflect issues with
        # various platforms and how multiprocessing is implemented. If it's using os.fork
        # under the hood it should be fine, but dynamic classes can't be pickled, so
# other implementations of multiprocessing (using spawn etc) may fail
class MyDynamicTask(luigi.Task):
x = luigi.Parameter()
def run(self):
time.sleep(0.1)
t0 = time.time()
luigi.build([MyDynamicTask(i) for i in range(100)], workers=100, local_scheduler=True)
        self.assertTrue(time.time() < t0 + 5.0)  # should ideally take exactly 0.1s, but definitely less than 5.0
def test_zero_workers(self):
d = DummyTask()
luigi.build([d], workers=0, local_scheduler=True)
self.assertFalse(d.complete())
def test_system_exit(self):
# This would hang indefinitely before this fix:
# https://github.com/spotify/luigi/pull/439
luigi.build([RaiseSystemExit()], workers=2, local_scheduler=True)
def test_term_worker(self):
luigi.build([SendSignalTask(signal.SIGTERM)], workers=2, local_scheduler=True)
def test_kill_worker(self):
luigi.build([SendSignalTask(signal.SIGKILL)], workers=2, local_scheduler=True)
def test_purge_multiple_workers(self):
w = Worker(worker_processes=2, wait_interval=0.01)
t1 = SendSignalTask(signal.SIGTERM)
t2 = SendSignalTask(signal.SIGKILL)
w.add(t1)
w.add(t2)
w._run_task(t1.task_id)
w._run_task(t2.task_id)
time.sleep(1.0)
w._handle_next_task()
w._handle_next_task()
w._handle_next_task()
def test_stop_worker_kills_subprocesses(self):
with Worker(worker_processes=2) as w:
hung_task = HangTheWorkerTask()
w.add(hung_task)
w._run_task(hung_task.task_id)
pids = [p.pid for p in w._running_tasks.values()]
self.assertEqual(1, len(pids))
pid = pids[0]
def is_running():
return pid in {p.pid for p in psutil.Process().children()}
self.assertTrue(is_running())
self.assertFalse(is_running())
def test_time_out_hung_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=2, local_scheduler=True)
def test_time_out_hung_single_worker(self):
luigi.build([HangTheWorkerTask(0.1)], workers=1, local_scheduler=True)
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/72953986')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_default_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask()
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 5
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 6
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
@skipOnTravis('https://travis-ci.org/spotify/luigi/jobs/76645264')
@mock.patch('luigi.worker.time')
def test_purge_hung_worker_override_timeout_time(self, mock_time):
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=10)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 10
w._handle_next_task()
self.assertEqual(1, len(w._running_tasks))
mock_time.time.return_value = 11
w._handle_next_task()
self.assertEqual(0, len(w._running_tasks))
class Dummy2Task(Task):
p = luigi.Parameter()
def output(self):
return MockTarget(self.p)
def run(self):
f = self.output().open('w')
f.write('test')
f.close()
class AssistantTest(unittest.TestCase):
def run(self, result=None):
self.sch = Scheduler(retry_delay=100, remove_delay=1000, worker_disconnect_delay=10)
self.assistant = Worker(scheduler=self.sch, worker_id='Y', assistant=True)
with Worker(scheduler=self.sch, worker_id='X') as w:
self.w = w
super(AssistantTest, self).run(result)
def test_get_work(self):
d = Dummy2Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assistant.run()
self.assertTrue(d.complete())
def test_bad_job_type(self):
class Dummy3Task(Dummy2Task):
task_family = 'UnknownTaskFamily'
d = Dummy3Task('123')
self.w.add(d)
self.assertFalse(d.complete())
self.assertFalse(self.assistant.run())
self.assertFalse(d.complete())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [d.task_id])
def test_unimported_job_type(self):
MODULE_CONTENTS = b'''
import luigi
class UnimportedTask(luigi.Task):
def complete(self):
return False
'''
reg = luigi.task_register.Register._get_reg()
class UnimportedTask(luigi.Task):
task_module = None # Set it here, so it's generally settable
luigi.task_register.Register._set_reg(reg)
task = UnimportedTask()
# verify that it can't run the task without the module info necessary to import it
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
# check that it can import with the right module
with temporary_unloaded_module(MODULE_CONTENTS) as task.task_module:
self.w.add(task)
self.assertTrue(self.assistant.run())
self.assertEqual(list(self.sch.task_list('DONE', '').keys()), [task.task_id])
def test_unimported_job_sends_failure_message(self):
class NotInAssistantTask(luigi.Task):
task_family = 'Unknown'
task_module = None
task = NotInAssistantTask()
self.w.add(task)
self.assertFalse(self.assistant.run())
self.assertEqual(list(self.sch.task_list('FAILED', '').keys()), [task.task_id])
self.assertTrue(self.sch.fetch_error(task.task_id)['error'])
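# Helper for TaskLimitTest below: each ForkBombTask requires ``breadth`` child
# tasks until its parameter tuple ``p`` reaches ``depth`` elements, so adding a
# single root schedules a whole tree of tasks (7 tasks for ForkBombTask(3, 2)).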
class ForkBombTask(luigi.Task):
depth = luigi.IntParameter()
breadth = luigi.IntParameter()
p = luigi.Parameter(default=(0, )) # ehm for some weird reason [0] becomes a tuple...?
def output(self):
return MockTarget('.'.join(map(str, self.p)))
def run(self):
with self.output().open('w') as f:
f.write('Done!')
def requires(self):
if len(self.p) < self.depth:
for i in range(self.breadth):
yield ForkBombTask(self.depth, self.breadth, self.p + (i, ))
class TaskLimitTest(unittest.TestCase):
def tearDown(self):
MockFileSystem().remove('')
@with_config({'core': {'worker-task-limit': '6'}})
def test_task_limit_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertFalse(t.complete())
leaf_tasks = [ForkBombTask(3, 2, branch) for branch in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1)]]
self.assertEqual(3, sum(t.complete() for t in leaf_tasks),
"should have gracefully completed as much as possible even though the single last leaf didn't get scheduled")
@with_config({'core': {'worker-task-limit': '7'}})
def test_task_limit_not_exceeded(self):
w = Worker()
t = ForkBombTask(3, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
def test_no_task_limit(self):
w = Worker()
t = ForkBombTask(4, 2)
w.add(t)
w.run()
self.assertTrue(t.complete())
class WorkerConfigurationTest(unittest.TestCase):
def test_asserts_for_worker(self):
"""
Test that Worker() asserts that it's sanely configured
"""
Worker(wait_interval=1) # This shouldn't raise
self.assertRaises(AssertionError, Worker, wait_interval=0)
class WorkerWaitJitterTest(unittest.TestCase):
@with_config({'worker': {'wait_jitter': '10.0'}})
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter(self, mock_sleep, mock_random):
""" verify configured jitter amount """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 2.0
six.next(x)
mock_random.assert_called_with(0, 10.0)
mock_sleep.assert_called_with(3.0)
@mock.patch("random.uniform")
@mock.patch("time.sleep")
def test_wait_jitter_default(self, mock_sleep, mock_random):
""" verify default jitter is as expected """
mock_random.return_value = 1.0
w = Worker()
x = w._sleeper()
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(2.0)
mock_random.return_value = 3.3
six.next(x)
mock_random.assert_called_with(0, 5.0)
mock_sleep.assert_called_with(4.3)
class KeyboardInterruptBehaviorTest(LuigiTestCase):
def test_propagation_when_executing(self):
"""
        Ensure that keyboard interrupts cause luigi to quit when you are
executing tasks.
TODO: Add a test that tests the multiprocessing (--worker >1) case
"""
class KeyboardInterruptTask(luigi.Task):
def run(self):
raise KeyboardInterrupt()
cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
self.assertRaises(KeyboardInterrupt, luigi_run, cmd)
def test_propagation_when_scheduling(self):
"""
Test that KeyboardInterrupt causes luigi to quit while scheduling.
"""
class KeyboardInterruptTask(luigi.Task):
def complete(self):
raise KeyboardInterrupt()
class ExternalKeyboardInterruptTask(luigi.ExternalTask):
def complete(self):
raise KeyboardInterrupt()
self.assertRaises(KeyboardInterrupt, luigi_run,
['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
self.assertRaises(KeyboardInterrupt, luigi_run,
['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
class WorkerPurgeEventHandlerTest(unittest.TestCase):
@mock.patch('luigi.worker.TaskProcess')
def test_process_killed_handler(self, task_proc):
result = []
@HangTheWorkerTask.event_handler(Event.PROCESS_FAILURE)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker()
task = HangTheWorkerTask()
task_process = mock.MagicMock(is_alive=lambda: False, exitcode=-14, task=task)
task_proc.return_value = task_process
w.add(task)
w._run_task(task.task_id)
w._handle_next_task()
self.assertEqual(result, [task])
@mock.patch('luigi.worker.time')
def test_timeout_handler(self, mock_time):
result = []
@HangTheWorkerTask.event_handler(Event.TIMEOUT)
def store_task(t, error_msg):
self.assertTrue(error_msg)
result.append(t)
w = Worker(worker_processes=2, wait_interval=0.01, timeout=5)
mock_time.time.return_value = 0
task = HangTheWorkerTask(worker_timeout=1)
w.add(task)
w._run_task(task.task_id)
mock_time.time.return_value = 3
w._handle_next_task()
self.assertEqual(result, [task])
class PerTaskRetryPolicyBehaviorTest(LuigiTestCase):
def setUp(self):
super(PerTaskRetryPolicyBehaviorTest, self).setUp()
self.per_task_retry_count = 3
self.default_retry_count = 1
self.sch = Scheduler(retry_delay=0.1, retry_count=self.default_retry_count, prune_on_get_work=True)
def test_with_all_disabled_with_single_worker(self):
"""
        With this test, a case is tested in which a task (TestWrapperTask) requires two other tasks
        (TestErrorTask1, TestErrorTask2) that both fail.
        Task TestErrorTask1 uses the default retry_count of 1, while Task TestErrorTask2 sets a task-level retry_count of 3.
        This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_all_disabled_with_multiple_worker(self):
"""
        With this test, a case is tested in which a task (TestWrapperTask) requires two other tasks
        (TestErrorTask1, TestErrorTask2) that both fail.
        Task TestErrorTask1 uses the default retry_count of 1, while Task TestErrorTask2 sets a task-level retry_count of 3.
        This test runs with multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e2, e1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e2))
self.assertTrue(w3.add(e1))
self.assertFalse(w3.run())
self.assertFalse(w2.run())
self.assertTrue(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_includes_success_with_single_worker(self):
"""
        With this test, a case is tested in which a task (TestWrapperTask) requires one task (TestErrorTask1) that FAILED and one (TestSuccessTask1) that is a SUCCESS.
        Task TestSuccessTask1 will be DONE successfully, but Task TestErrorTask1 will fail and sets a task-level retry_count of 3.
        This test runs on a single worker.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_includes_success_with_multiple_worker(self):
"""
        With this test, a case is tested in which a task (TestWrapperTask) requires one task (TestErrorTask1) that FAILED and one (TestSuccessTask1) that is a SUCCESS.
        Task TestSuccessTask1 will be DONE successfully, but Task TestErrorTask1 will fail and sets a task-level retry_count of 3.
        This test runs with multiple workers.
"""
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestErrorTask1(DummyErrorTask):
retry_count = self.per_task_retry_count
e1 = TestErrorTask1()
class TestWrapperTask(luigi.WrapperTask):
def requires(self):
return [e1, s1]
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(e1))
self.assertTrue(w3.add(s1))
self.assertTrue(w3.run())
self.assertFalse(w2.run())
self.assertTrue(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_single_worker(self):
"""
        With this test, a case includes dynamic dependency tasks (TestErrorTask1, TestErrorTask2) which both fail.
        Task TestErrorTask1 uses the default retry_count of 1, while Task TestErrorTask2 sets a task-level retry_count of 3.
        This test runs on a single worker.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
self.assertTrue(w1.add(wt))
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
def test_with_dynamic_dependencies_with_multiple_workers(self):
"""
        With this test, a case includes dynamic dependency tasks (TestErrorTask1, TestErrorTask2) which both fail.
        Task TestErrorTask1 uses the default retry_count of 1, while Task TestErrorTask2 sets a task-level retry_count of 3.
        This test runs with multiple workers.
"""
class TestErrorTask1(DummyErrorTask):
pass
e1 = TestErrorTask1()
class TestErrorTask2(DummyErrorTask):
retry_count = self.per_task_retry_count
e2 = TestErrorTask2()
class TestSuccessTask1(DummyTask):
pass
s1 = TestSuccessTask1()
class TestWrapperTask(DummyTask):
def requires(self):
return [s1]
def run(self):
super(TestWrapperTask, self).run()
yield e2, e1
wt = TestWrapperTask()
with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
self.assertTrue(w1.add(wt))
self.assertTrue(w2.add(s1))
self.assertTrue(w2.run())
self.assertFalse(w1.run())
self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))
self.assertEqual(0, self.sch._state.get_task(wt.task_id).failures.num_failures())
self.assertEqual(0, self.sch._state.get_task(s1.task_id).failures.num_failures())
self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).failures.num_failures())
self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).failures.num_failures())
| python |
import pytest
pytestmark = [pytest.mark.django_db]
def test_item(stripe):
result = stripe.get_items()
assert result == [
{
'price_data': {
'currency': 'usd',
'product_data': {
'name': 'Cutting and Sewing',
},
'unit_amount': 143600,
},
'quantity': 1,
},
]
@pytest.mark.parametrize(('price', 'expected'), [
(70, 100),
(140, 200),
(95, 100),
(105, 200),
])
def test_price(stripe, price, expected):
stripe.order.setattr_and_save('price', price)
result = stripe.get_items()
assert result[0]['price_data']['unit_amount'] == expected
| python |
from .simple_spread.simple_spread import env, parallel_env, raw_env # noqa: F401
| python |
import csv
import numpy as np
from os.path import join
from os.path import dirname
def load_synthetic(data_file_name):
""" This is almost completely stolen from sklearn!
Loads data from data/data_file_name.
Parameters
----------
data_file_name : String. Name of csv file to be loaded from
module_path/data/data_file_name. For example 'wine_data.csv'.
Returns
-------
data : Numpy Array
A 2D array with each row representing one sample and each column
representing the features of a given sample.
target : Numpy Array
        A 1D array holding the target variable for every sample in `data`.
        For example target[0] is the target variable for data[0].
target_names : Numpy Array
A 1D array containing the names of the classifications. For example
target_names[0] is the name of the target[0] class.
feature_names : Numpy Array
A 1D array containing the names of the features. These are used
in plotting functions later.
"""
module_path = dirname(__file__)
with open(join(module_path, 'data', data_file_name)) as csv_file:
data_file = csv.reader(csv_file)
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
feature_names = ['BM%i' % (x+1) for x in range(n_features)]
feature_names = np.array(feature_names)
data = np.empty((n_samples, n_features))
        target = np.empty((n_samples,), dtype=int)  # np.int is deprecated; use the builtin int
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float64)
            target[i] = np.asarray(ir[-1], dtype=int)
return data, target, feature_names, target_names
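# Usage sketch (illustrative only): assuming a file named 'synthetic.csv' exists
# under module_path/data with the layout described in the docstring (first row
# holding n_samples, n_features and the class names), it could be loaded like:
#
#   data, target, feature_names, target_names = load_synthetic('synthetic.csv')
#   # data.shape == (n_samples, n_features); feature_names[0] == 'BM1'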
| python |
'''
@package: pyAudioLex
@author: Jim Schwoebel
@module: ls_freq
#ls = list item marker
'''
import nltk
from nltk.tokenize import word_tokenize
from nltk.tag import pos_tag, map_tag
from collections import Counter
def ls_freq(importtext):
text=word_tokenize(importtext)
tokens=nltk.pos_tag(text)
c=Counter(token for word, token in tokens)
return c['LS']/len(text)
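# Usage sketch (illustrative only): ls_freq returns the proportion of tokens
# that the NLTK tagger labels 'LS' (list item marker). For example,
#
#   ls_freq("1. buy milk 2. walk the dog")
#
# returns a value between 0.0 and 1.0; it is 0.0 when the tagger assigns no
# 'LS' tags, which depends on the installed tagger model.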
| python |
#!/usr/bin/python
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Looks for dynamic code loading patterns.
Patterns to identify include
* require(...) where ... is not a string literal.
* eval
* Function(...) where there is more than one argument or the sole
argument is not a function.
"""
import json
import os.path
import py_common.npm
import re
import shutil
import sys
dynamic_load_pattern = re.compile(
r'(?<![_$\w.])require\s*\(\s*[^\s)\"\']'
# r'(?<![_$\w.])require\s*(?:\(\s*[^\s)\"\']|[^\(])' # To also match indirect uses of require, like aliasing it to a variable.
)
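# Illustrative matches for the pattern above (checked against the regex as
# written, not against the full py_common pipeline):
#   require(moduleName)   -> matches (non-literal first argument)
#   require("./static")   -> no match (string literal argument)
#   foo.require(bar)      -> no match (preceded by '.', so not a bare require)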
def find_dynamic_load(node_modules, module_name):
return py_common.npm.js_srcs_matching(
node_modules, module_name, dynamic_load_pattern,
module_filter=py_common.npm.ignore_tools_that_can_run_early(module_name))
if __name__ == '__main__':
(node_modules, separate_modules, top100_txt) = sys.argv[1:]
    top100 = [x for x in open(top100_txt).read().split('\n') if x]
uses = 0
total_count = 0
has_dynamic_load = {}
for module_name in top100:
js_srcs = find_dynamic_load(node_modules, module_name)
has_dynamic_load[module_name] = js_srcs
if len(js_srcs):
uses += 1
total_count += 1
# for k, v in has_dynamic_load.iteritems():
# print "%s: %r" % (k, v)
print (
"""
## Dynamic loads {#dynamic_load}
Dynamic loading can complicate code bundling.
%d of %d = %1.02f%% call `require(...)` without a literal string argument.
""" % (uses, total_count, (100.0 * uses) / total_count))
| python |
#Genre Year
#Comparison of movie genres to year. By Bradley Brian
#imports
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#data = pd.read_csv('movies_initial.csv')
#Function
def genreselect():
print('------Please Select a genre------')
print("")
print("[1] Action")
print("[2] Adventure")
print("[3] Animation")
print("[4] Biography")
print("[5] Comdey")
print("[6] Crime")
print("[7] Documentary")
print("[8] Drama")
print("[9] Family")
print("[10] Fantasy")
print("[11] Film Noir")
print("[12] History")
print("[13] Horror")
print("[14] Musical")
print("[15] Mystery")
print("[16] Romance")
print("[17] Science fiction")
print("[18] Sport")
print("[19] Thriller")
print("[20] War")
print("[21] Western")
genreselect()
option = int(input("Enter the number of your genre choice: "))
print("")
if option == 1:
print("Action")
print('there are [5285] movies in this genre')
print('The year with the most action movies is')
#this is where code to calculate most popular year is going to be
elif option == 2:
print("Adventure")
print('there are [] movies in this genre')
print('The year with the most Adventure movies is')
elif option == 3:
print("Animation")
print('there are [] movies in this genre')
print('The year with the most Animation movies is')
elif option == 4:
print("Biography")
print('there are [] movies in this genre')
print('The year with the most Biography movies is')
elif option == 5:
print("Comdey")
print('there are [] movies in this genre')
print('The year with the most Comedy movies is')
elif option == 6:
print("Crime")
print('there are [] movies in this genre')
print('The year with the most Crime movies is')
elif option == 7:
print("Documentary")
print('there are [] movies in this genre')
print('The year with the most Documentary movies is')
elif option == 8:
print("Drama")
print('there are [] movies in this genre')
print('The year with the most Drama movies is')
elif option == 9:
print("Family")
print('there are [] movies in this genre')
print('The year with the most Family movies is')
elif option == 10:
print("Fantasy")
print('there are [] movies in this genre')
print('The year with the most Fantasy movies is')
elif option == 11:
print("Film Noir")
print('there are [] movies in this genre')
print('The year with the most Film Noir movies is')
elif option == 12:
print("History")
print('there are [] movies in this genre')
print('The year with the most History movies is')
elif option == 13:
print("Horror")
print('there are [] movies in this genre')
print('The year with the most Horror movies is')
elif option == 14:
print("Musical")
print('there are [] movies in this genre')
print('The year with the most Musical movies is')
elif option == 15:
print("Mystery")
print('there are [] movies in this genre')
print('The year with the most Mystery movies is')
elif option == 16:
print("Romance")
print('there are [] movies in this genre')
print('The year with the most Romance movies is')
elif option == 17:
print("Science fiction")
print('there are [] movies in this genre')
print('The year with the most Science Fiction movies is')
| python |
import logging
from optparse import make_option
from django.core.management.base import NoArgsCommand
from django.db import connection
from mailer.models import Message
class Command(NoArgsCommand):
help = "Attempt to resend any deferred mail."
base_options = (
make_option('-c', '--cron', default=0, type='int',
                    help='If 1 don\'t print messages, but only errors.'
),
)
option_list = NoArgsCommand.option_list + base_options
def handle_noargs(self, **options):
if options['cron'] == 0:
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
else:
logging.basicConfig(level=logging.ERROR, format="%(message)s")
count = Message.objects.retry_deferred() # @@@ new_priority not yet supported
logging.info("%s message(s) retried" % count)
connection.close()
| python |
"""Import all hardware interfaces"""
from .gpio_implementations import *
from .hardware_interfaces import *
from .hpwm_implementations import *
from .i2c_implementations import *
from .spi_implementations import *
| python |
class Solution(object):
def canJump(self, nums):
"""
:type nums: List[int]
:rtype: bool
"""
if len(nums) == 1:
return True
jump = 0
current_bound = next_bound = nums[0]
i = 1
while True:
jump += 1
if current_bound >= len(nums)-1:
return True
while i <= current_bound:
next_bound = max(next_bound, i+nums[i])
i += 1
if next_bound <= current_bound:
return False
current_bound = next_bound | python |
#!/usr/bin/env python3
import os
import requests, shutil, socket
from datetime import datetime, timedelta
from time import sleep
from pathlib import Path
import localconfig
from camera import Camera
from common import fmt_bytes, get_data_from_pic_stem, get_score
class Task:
def is_due(self, dt0, dt1):
"""
Returns the importance with which the Task thinks that a picture
should be taken in the intervall dt0 < t <= dt1.
"""
return 0
class TaskEvery5Minutes(Task):
def is_due(self, dt0, dt1):
ts0 = dt0.timestamp()
ts1 = dt1.timestamp()
if (ts0 // 300) < (ts1 // 300):
return 4
return 0
class TaskEveryFullHour(Task):
def is_due(self, dt0, dt1):
ts0 = dt0.timestamp()
ts1 = dt1.timestamp()
if (ts0 // 3600) < (ts1 // 3600):
return 6
return 0
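# Illustrative behaviour of the tasks above: is_due() compares the integer
# division of the two timestamps, so it fires exactly when the interval
# (dt0, dt1] crosses a boundary. For example, with dt0 = 12:04:59 and
# dt1 = 12:05:01 the 300-second buckets differ, so TaskEvery5Minutes returns
# importance 4; TaskEveryFullHour returns 6 only when a 3600-second boundary
# (a full hour) is crossed.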
def gather_pictures(clean_dir, now):
pics = []
for pic_path in Path(clean_dir).iterdir():
pics.append(
(
pic_path,
pic_path.stat().st_size,
get_score(*get_data_from_pic_stem(pic_path.stem), now),
)
)
# Sort the pictures by score.
pics.sort(key=lambda pic: -pic[2])
return pics
class TaskCleanupDisk(Task):
def __init__(self, clean_dir):
self.clean_dir = clean_dir
self.next_clean_dt = datetime(2000, 1, 1)
def clean(self, now):
print("Cleaning up disk space ...")
GiB = pow(1024, 3)
MIN_PICS_TOTAL = 1*GiB
MAX_PICS_TOTAL = 20*GiB
MIN_DISK_FREE = 2*GiB
disk_total, disk_used, disk_free = shutil.disk_usage("/")
# print(f"disk_total {fmt_bytes(disk_total):>10}")
# print(f"disk_used {fmt_bytes(disk_used):>10}")
# print(f"disk_free {fmt_bytes(disk_free):>10} ({fmt_bytes(MIN_DISK_FREE)})")
btd_disk = max(MIN_DISK_FREE - disk_free, 0)
print(f"free disk space: {fmt_bytes(disk_free)}, want at least: {fmt_bytes(MIN_DISK_FREE)} --> delete {fmt_bytes(btd_disk)}")
pics = gather_pictures(self.clean_dir, now)
pics_total_bytes = sum(pic[1] for pic in pics)
btd_pics = max(pics_total_bytes - MAX_PICS_TOTAL, 0)
print(f"{len(pics)} pictures of size: {fmt_bytes(pics_total_bytes)}, want at most: {fmt_bytes(MAX_PICS_TOTAL)} --> delete {fmt_bytes(btd_pics)}")
pic_bytes_to_delete = max(btd_disk, btd_pics)
print(f"--> deleting {fmt_bytes(pic_bytes_to_delete)} ...")
while pics and pic_bytes_to_delete > 0 and pics_total_bytes > MIN_PICS_TOTAL:
pic = pics.pop()
print(f" deleting {pic[0]},{pic[1]:>8} bytes, score {pic[2]}")
pic[0].unlink()
# pic[0].unlink(missing_ok=True) # Python 3.8+
pic_bytes_to_delete -= pic[1]
pics_total_bytes -= pic[1]
print(f"{len(pics)} pictures of size: {fmt_bytes(pics_total_bytes)} left")
if pic_bytes_to_delete > 0:
print(f"{fmt_bytes(pic_bytes_to_delete)} not deleted, MIN_PICS_TOTAL is {fmt_bytes(MIN_PICS_TOTAL)}")
def is_due(self, dt0, dt1):
if dt0 < self.next_clean_dt <= dt1:
print("Running TaskCleanupDisk!", dt0, dt1)
self.clean(dt1)
if self.next_clean_dt <= dt1:
self.next_clean_dt = datetime(dt1.year, dt1.month, dt1.day, 3, 33, 33)
if self.next_clean_dt <= dt1:
self.next_clean_dt += timedelta(days=1)
print("Next disk cleanup is scheduled for", self.next_clean_dt)
return 0
def upload_picture(filename, upload_url):
if not upload_url:
return
print(f"Uploading {filename} ...")
with open("/sys/class/thermal/thermal_zone0/temp") as temp_file:
# https://www.elektronik-kompendium.de/sites/raspberry-pi/1911241.htm
# https://raspberrypi.stackexchange.com/questions/41784/temperature-differences-between-cpu-gpu
cpu_temp = temp_file.readline().strip()
with open(filename, 'rb') as pic_file:
try:
r = requests.post(
upload_url,
data={
'camera': localconfig.CAMERA_NAME,
'password': localconfig.CAMERA_UPLOAD_PASSWORD,
'cpu_temp': cpu_temp,
},
files={'pic_file': pic_file},
allow_redirects=False,
timeout=10.0,
)
print(f"Uploaded picture to {upload_url}")
if r.status_code != 302:
print(f"Unexpected response: Expected status_code 302, got status_code {r.status_code}.")
print(r)
print(r.text[:2000])
except requests.exceptions.Timeout as e:
print(f"Requests raised a timeout exception: {e}")
except requests.exceptions.RequestException as e:
print(f"Requests raised an exception: {e}")
PICTURES_DIR = '/var/HallCam/pictures/'
def run_camera():
if not os.access(PICTURES_DIR, os.W_OK):
print(f"Cannot access {PICTURES_DIR}.")
else:
print(f"Access to {PICTURES_DIR} is good.")
print(f"localconfig.UPLOAD_URL = {localconfig.UPLOAD_URL}")
print("")
camera = Camera()
camera.pi_cam.start_preview()
sleep(2)
tasks = [
TaskEvery5Minutes(),
TaskEveryFullHour(),
# TaskExposureMonitor(),
TaskCleanupDisk(PICTURES_DIR),
]
print("Entering main loop, press CTRL+C to exit.")
try:
prev_dt = datetime.now()
while True:
curr_dt = datetime.now()
importance = 0
for task in tasks:
importance = max(importance, task.is_due(prev_dt, curr_dt))
if importance > 0:
filename = camera.capture_picture(curr_dt, importance, PICTURES_DIR)
upload_picture(filename, localconfig.UPLOAD_URL)
prev_dt = curr_dt
sleep(1)
except KeyboardInterrupt:
print("\nExiting ...")
camera.pi_cam.stop_preview()
if __name__ == "__main__":
lock_socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
try:
# The null byte (\0) means the socket is created in the abstract
# namespace instead of being created on the file system itself.
# https://stackoverflow.com/questions/788411/check-to-see-if-python-script-is-running
lock_socket.bind('\0' + 'HallCam')
# We got the lock.
run_camera()
except socket.error:
print("HallCam is already running.")
| python |
"""Support for P2000 sensors."""
import datetime
import logging
import feedparser
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_ICON,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect, dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util as util
from homeassistant.util.location import distance
_LOGGER = logging.getLogger(__name__)
BASE_URL = "http://p2000.brandweer-berkel-enschot.nl/homeassistant/rss.asp"
DEFAULT_INTERVAL = datetime.timedelta(seconds=10)
DATA_UPDATED = "p2000_data_updated"
CONF_REGIOS = "regios"
CONF_DISCIPLINES = "disciplines"
CONF_CAPCODES = "capcodes"
CONF_ATTRIBUTION = "P2000 Livemonitor 2021 HomeAssistant"
CONF_NOLOCATION = "nolocation"
CONF_CONTAINS = "contains"
DEFAULT_NAME = "P2000"
DEFAULT_ICON = "mdi:ambulance"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_REGIOS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DISCIPLINES): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
        vol.Optional(CONF_RADIUS, default=0): vol.Coerce(float),
vol.Optional(CONF_CAPCODES): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_NOLOCATION, default=False): cv.boolean,
vol.Optional(CONF_CONTAINS): cv.string,
vol.Optional(CONF_ICON, default=DEFAULT_ICON): cv.icon,
}
)
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the P2000 sensor."""
data = P2000Data(hass, config)
async_track_time_interval(hass, data.async_update, config[CONF_SCAN_INTERVAL])
async_add_devices(
[P2000Sensor(hass, data, config.get(CONF_NAME), config.get(CONF_ICON))], True
)
class P2000Data:
"""Handle P2000 object and limit updates."""
def __init__(self, hass, config):
"""Initialize the data object."""
self._hass = hass
self._lat = util.convert(config.get(CONF_LATITUDE, hass.config.latitude), float)
self._lon = util.convert(
config.get(CONF_LONGITUDE, hass.config.longitude), float
)
self._regios = config.get(CONF_REGIOS)
self._url = BASE_URL
self._nolocation = config.get(CONF_NOLOCATION)
self._radius = config.get(CONF_RADIUS)
self._capcodes = config.get(CONF_CAPCODES)
self._contains = config.get(CONF_CONTAINS)
self._disciplines = config.get(CONF_DISCIPLINES)
self._capcodelist = None
self._regiolist = None
self._disciplinelist = None
self._feed = None
self._restart = True
self._event_time = None
self._data = None
if self._capcodes:
self._capcodelist = self._capcodes.split(",")
if self._regios:
self._regiolist = self._regios.split(",")
if self._disciplines:
self._disciplinelist = self._disciplines.split(",")
@property
def latest_data(self):
"""Return the data object."""
return self._data
@staticmethod
def _convert_time(time):
try:
return datetime.datetime.strptime(
time.split(",")[1][:-6], " %d %b %Y %H:%M:%S"
)
except IndexError:
return None
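    # Illustrative example: assuming an RSS-style timestamp such as
    # "Mon, 01 Jan 2021 12:00:00 +0000", the split drops the weekday prefix and
    # the slice drops the trailing " +0000" offset, leaving " 01 Jan 2021 12:00:00",
    # which the " %d %b %Y %H:%M:%S" format parses to datetime(2021, 1, 1, 12, 0, 0).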
async def async_update(self, dummy):
"""Update data."""
self._feed = await self._hass.async_add_executor_job(
feedparser.parse, self._url
)
if not self._feed:
_LOGGER.debug("Failed to get feed data from %s", self._url)
return
if self._feed.bozo:
_LOGGER.debug("Error parsing feed data from %s", self._url)
return
_LOGGER.debug("Feed url: %s data: %s", self._url, self._feed)
if self._restart:
self._restart = False
self._event_time = self._convert_time(self._feed.entries[0]["published"])
_LOGGER.debug("Start fresh after a restart")
return
try:
for entry in reversed(self._feed.entries):
event_msg = None
event_capcode = None
event_time = self._convert_time(entry.published)
if event_time < self._event_time:
continue
self._event_time = event_time
# Fill data from feed
event_msg = entry.message
event_regioname = entry.regname
event_regio = entry.regcode.lstrip("0")
event_discipline = entry.dienst
event_capcode = entry.code
_LOGGER.debug(
"New P2000 event found: %s, at %s", event_msg, entry.published
)
# Check regio
if "regcode" in entry:
if self._regiolist:
_LOGGER.debug("Filtering on Regio(s) %s", self._regiolist)
regiofound = False
for regio in self._regiolist:
_LOGGER.debug(
"Searching for regio %s in %s",
regio,
event_regio,
)
if event_regio == regio:
_LOGGER.debug("Regio matched")
regiofound = True
break
_LOGGER.debug("Regio mismatch, discarding")
continue
if not regiofound:
continue
# Check discipline
if "dienst" in entry:
if self._disciplines:
if self._disciplinelist:
_LOGGER.debug(
"Filtering on Disciplines(s) %s", self._disciplinelist
)
disciplinefound = False
for discipline in self._disciplinelist:
_LOGGER.debug(
"Searching for discipline %s in %s",
discipline,
event_discipline,
)
if event_discipline == discipline:
_LOGGER.debug("Discipline matched")
disciplinefound = True
break
_LOGGER.debug("Discipline mismatch, discarding")
continue
if not disciplinefound:
continue
# Check radius or nolocation
if "lat" in entry and entry.lat:
event_lat = float(entry.lat)
event_lon = float(entry.lon)
event_dist = distance(self._lat, self._lon, event_lat, event_lon)
event_dist = int(round(event_dist))
if self._radius:
_LOGGER.debug(
"Filtering on Radius %s, calculated distance %d m ",
self._radius,
event_dist,
)
if event_dist > self._radius:
event_msg = ""
_LOGGER.debug("Radius filter mismatch, discarding")
continue
_LOGGER.debug("Radius filter matched")
else:
event_lat = 0.0
event_lon = 0.0
event_dist = 0
if not self._nolocation:
_LOGGER.debug("No location found, discarding")
continue
# Check capcodes if defined
if "code" in entry:
if self._capcodelist:
_LOGGER.debug("Filtering on Capcode(s) %s", self._capcodelist)
capfound = False
for capcode in self._capcodelist:
_LOGGER.debug(
"Searching for capcode %s in %s",
capcode.strip(),
event_capcode,
)
if event_capcode == capcode.strip():
_LOGGER.debug("Capcode filter matched")
capfound = True
break
_LOGGER.debug("Capcode filter mismatch, discarding")
continue
if not capfound:
continue
if self._contains:
_LOGGER.debug("Filtering on Contains string %s", self._contains)
if event_msg.find(self._contains) != -1:
_LOGGER.debug("Contains string filter matched")
else:
_LOGGER.debug("Contains string filter mismatch, discarding")
continue
if event_msg:
event = {}
event["msgtext"] = event_msg
event["latitude"] = event_lat
event["longitude"] = event_lon
event["distance"] = event_dist
event["msgtime"] = event_time
event["capcode"] = event_capcode
event["regio"] = event_regio
event["regioname"] = event_regioname
event["discipline"] = event_discipline
_LOGGER.debug("Event: %s", event)
self._data = event
dispatcher_send(self._hass, DATA_UPDATED + CONF_NAME)
except ValueError as err:
_LOGGER.error("Error parsing feed data %s", err)
self._data = None
class P2000Sensor(RestoreEntity):
"""Representation of a P2000 Sensor."""
def __init__(self, hass, data, name, icon):
"""Initialize a P2000 sensor."""
self._hass = hass
self._data = data
self._name = name
self._icon = icon
self._state = None
self.attrs = {}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend."""
data = self._data.latest_data
if data:
if data["discipline"] == "Ambulancediensten":
return "mdi:ambulance"
elif data["discipline"] == "Brandweerdiensten":
return "mdi:fire-truck"
elif data["discipline"] == "Politiediensten":
return "mdi:car-emergency"
elif data["discipline"] == "Gereserveerd":
return "mdi:car-emergency"
elif data["discipline"] == "Lifeliner":
return "mdi:helicopter"
return self._icon
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def should_poll(self):
"""Return the polling requirement for this sensor."""
return False
async def async_added_to_hass(self) -> None:
"""Handle entity which will be added."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if not state:
return
self._state = state.state
self.attrs = state.attributes
async_dispatcher_connect(
self._hass, DATA_UPDATED + CONF_NAME, self._schedule_immediate_update
)
@callback
def _schedule_immediate_update(self):
self.async_schedule_update_ha_state(True)
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
data = self._data.latest_data
if data:
attrs[ATTR_LONGITUDE] = data["longitude"]
attrs[ATTR_LATITUDE] = data["latitude"]
attrs["distance"] = data["distance"]
attrs["capcode"] = data["capcode"]
attrs["regio"] = data["regio"]
attrs["regio name"] = data["regioname"]
attrs["discipline"] = data["discipline"]
attrs["time"] = data["msgtime"]
attrs[ATTR_ATTRIBUTION] = CONF_ATTRIBUTION
self.attrs = attrs
return self.attrs
def update(self):
"""Update current values."""
data = self._data.latest_data
if data:
self._state = data["msgtext"]
_LOGGER.debug("State updated to %s", self._state)
| python |
# used for testing
"""
get the coordinates of cards and marks
used to get the range for function mark_crd()
check whether the calculation is correct
change some parameters in "card.py" "anti.py" if necessary
Check line 40 and 41, annotate line 37 if you would like test the
image from your phone, if you only want to test local images,
annotate line 36 and set the directory of your image
"""
from init import *
def cards(sh):
global quick, arts, buster
threshold = 0.95
quick = get_crd(sh, 'res/quick.png', threshold)
arts = get_crd(sh, 'res/arts.png', threshold)
buster = get_crd(sh, 'res/buster.png', threshold)
all_cards = quick + arts + buster
all_cards.sort()
print("cards: ", all_cards)
print("quick:", quick)
print("arts: ", arts)
print("buster: ", buster)
def marks(sh,):
restraint = get_restraint(sh)
resistance = get_resistance(sh)
print("restraint: ", restraint)
print("resistance: ", resistance)
def test():
# sh = screenshot()
sh = 'test/t1.jpeg'
print('-------------')
cards(sh) # show coordinates of cards
print('-------------')
marks(sh) # show coordinates of marks
print('-------------')
print("result: ", init_main(sh)) # show the result of calculation
print('-------------')
test()
| python |
from torchctr.datasets.criteo import get_criteo
# step 1: download dataset
get_criteo('datasets')
# step 2: read data
| python |
'''
| Filename : util_lstm_seqlabel.py
| Description : Utility functions for the lstm_seqlabel.py file.
| Author : Pushpendre Rastogi
| Created : Mon Oct 26 20:01:22 2015 (-0400)
| Last-Updated: Wed Dec 16 03:49:16 2015 (-0500)
| By: Pushpendre Rastogi
| Update #: 44
'''
import collections
import contextlib
import numpy
import random
import rasengan
import re
import sys
import theano
import time
def set_seed(seed):
''' Set the seed in both numpy and random module
'''
numpy.random.seed(seed)
random.seed(seed)
def is_invalid(arr):
return any([f(arr).any() for f in [numpy.isinf, numpy.isnan, numpy.isneginf]])
def is_there_a_dominating_row(mat):
di = None
for i in range(mat.shape[0]):
if all(all(mat[i] > mat[j])
for j in range(mat.shape[0])
if i != j):
di = i
return di
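# Illustrative example: a row "dominates" when every entry is strictly greater
# than the corresponding entry of every other row, e.g.
#   is_there_a_dominating_row(numpy.array([[5, 6], [1, 2]]))  # -> 0
#   is_there_a_dominating_row(numpy.array([[5, 1], [1, 2]]))  # -> None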
def print_domination(arc_scores):
print('Dominating Row: ', is_there_a_dominating_row(arc_scores.squeeze()))
return
def convert_id_to_word(corpus, idx2label):
return [[idx2label[word] for word in sentence]
for sentence
in corpus]
def conv_x(x, window_size, vocsize):
x = list(x)
x = [vocsize] + x + [vocsize + 1]
cwords = contextwin(x, window_size)
words = numpy.ndarray((len(x), window_size)).astype('int32')
for i, win in enumerate(cwords):
words[i] = win
return words[1:-1]
def conv_y(y):
return y
def pprint_per_line(d, l):
''' Pretty print the entries in a dictionary/list based on the
indices / keys contained in the list.
Params
------
d : A dict or a list.
l : A list of keys or indexes
'''
for k in l:
print (k, d[k])
return
def shuffle(lol):
'''
shuffle inplace each list in the same order by ensuring that we
use the same state for every run of shuffle.
lol :: list of list as input
'''
state = random.getstate()
for l in lol:
random.setstate(state)
random.shuffle(l)
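# Illustrative example: because every list is shuffled from the same captured
# random state, parallel lists stay aligned, e.g. after
#   shuffle([[1, 2, 3], ['a', 'b', 'c']])
# both lists are permuted in the same order, so element i of the first list
# still corresponds to element i of the second.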
def contextwin(l, win):
'''
win :: int corresponding to the size of the window
given a list of indexes composing a sentence
l :: array containing the word indexes
it will return a list of list of indexes corresponding
to context windows surrounding each word in the sentence
'''
assert (win % 2) == 1
assert win >= 1
l = list(l)
lpadded = win // 2 * [-1] + l + win // 2 * [-1]
out = [lpadded[i:(i + win)] for i in range(len(l))]
assert len(out) == len(l)
return out
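# Illustrative example:
#   contextwin([1, 2, 3], 3) == [[-1, 1, 2], [1, 2, 3], [2, 3, -1]]
# i.e. one window of size 3 centred on each word, padded with -1 at the
# sentence boundaries.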
def np_floatX(data):
return numpy.asarray(data, dtype=theano.config.floatX)
def get_shuffling_index_sorted_by_length(X, seed=None, shuffle=True):
'''
    Group the indices of the sequences in X by length so that sequences of the
    same length come together. Shuffle them by default but allow for not
    shuffling as well.
Params
------
X : X is a list of sequences.
seed : (default 10)
Returns
-------
Return a list of tuples where each tuple contains (length:l, list of indices:lst)
such that if the sequence X was sorted corresponding to lst then all the length l
elements from original X would come together.
'''
dd = collections.defaultdict(list)
for i, x in enumerate(X):
dd[len(x)].append(i)
if shuffle:
for k in dd:
with rasengan.reseed_ctm(seed):
random.shuffle(dd[k])
shuffled_idx = [(k, dd[k]) for k in sorted(dd.keys())]
return shuffled_idx
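# Illustrative example (with shuffle=False so the result is deterministic):
#   get_shuffling_index_sorted_by_length([[1], [2, 3], [4], [5, 6]], shuffle=False)
#   == [(1, [0, 2]), (2, [1, 3])]
# i.e. the indices of the length-1 sequences grouped together, followed by the
# indices of the length-2 sequences.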
def print_progress(percentage_complete, tic, epoch_id=None, carriage_return=True):
'''
Params
------
epoch_id : The current Epoch
percentage_complete :
tic :
Returns
-------
'''
eol = '\r' if carriage_return else '\n'
print ('[Testing] >> %2.2f%%' % (percentage_complete)
if epoch_id is None
else
'[learning] epoch %i >> %2.2f%%' % (epoch_id, percentage_complete)),
print('completed in %.2f (sec) <<%s' % (time.time() - tic, eol),)
sys.stdout.flush()
return
def duplicate_middle_word(words):
d = words.ndim
assert d == 3
return numpy.concatenate(
(words, words), axis=d-2)
def duplicate_label(labels):
d = labels.ndim
assert d == 2
return numpy.concatenate(
(labels, labels), axis=d-1)
def deduplicate_label(labels):
d = labels.ndim
assert d == 2 and labels.shape[d-1] == 2
return labels[:, :1]
def remove_int_at_end(s):
try:
return re.match('(.*)_\d+', s).group(1)
except AttributeError:
return s
@contextlib.contextmanager
def config_overide(msg, args):
assert ' ' not in msg
args.folder = args.folder + '_' + msg
rasengan.warn('NOTE: I set args.folder to ' + args.folder)
yield
pass
| python |
"""Implementation of Eiger Meta Writer
This module provides a subclass of the odin_data MetaWriter that handles Eiger-specific meta messages and writes them to disk.
Matt Taylor, Diamond Light Source
"""
import numpy as np
import time
import re
import ast
from odin_data.meta_writer.meta_writer import MetaWriter
import _version as versioneer
MAJOR_VER_REGEX = r"^([0-9]+)[\\.-].*|$"
MINOR_VER_REGEX = r"^[0-9]+[\\.-]([0-9]+).*|$"
PATCH_VER_REGEX = r"^[0-9]+[\\.-][0-9]+[\\.-]([0-9]+).|$"
class EigerMetaWriter(MetaWriter):
"""Eiger Meta Writer class.
Eiger Detector Meta Writer writes Eiger meta data to disk
"""
def __init__(self, logger, directory, acquisitionID):
"""Initalise the EigerMetaWriter object.
:param logger: Logger to use
:param directory: Directory to create the meta file in
:param acquisitionID: Acquisition ID of this acquisition
"""
super(EigerMetaWriter, self).__init__(logger, directory, acquisitionID)
self.add_dataset_definition("start_time", (0,), maxshape=(None,), dtype='int64', fillvalue=-1)
self.add_dataset_definition("stop_time", (0,), maxshape=(None,), dtype='int64', fillvalue=-1)
self.add_dataset_definition("real_time", (0,), maxshape=(None,), dtype='int64', fillvalue=-1)
self.add_dataset_definition("frame", (0,), maxshape=(None,), dtype='int64', fillvalue=-1)
self.add_dataset_definition("size", (0,), maxshape=(None,), dtype='int64', fillvalue=-1)
self.add_dataset_definition("hash", (0,), maxshape=(None,), dtype='S32', fillvalue=None)
self.add_dataset_definition("encoding", (0,), maxshape=(None,), dtype='S10', fillvalue=None)
self.add_dataset_definition("datatype", (0,), maxshape=(None,), dtype='S6', fillvalue=None)
self.add_dataset_definition("frame_series", (0,), maxshape=(None,), dtype='int64', fillvalue=-1)
self.add_dataset_definition("frame_written", (0,), maxshape=(None,), dtype='int64', fillvalue=-1)
self.add_dataset_definition("offset_written", (0,), maxshape=(None,), dtype='int64', fillvalue=-1)
self._num_frame_offsets_written = 0
self._current_frame_count = 0
self._need_to_write_data = False
self._arrays_created = False
self._close_after_write = False
self._frame_offset_dict = {}
self._frame_data_dict = {}
self._series_created = False
self._config_created = False
self._flatfield_created = False
self._pixel_mask_created = False
self._countrate_created = False
self._global_appendix_created = False
self.start_new_acquisition()
@staticmethod
def get_version():
version = versioneer.get_versions()["version"]
major_version = re.findall(MAJOR_VER_REGEX, version)[0]
minor_version = re.findall(MINOR_VER_REGEX, version)[0]
patch_version = re.findall(PATCH_VER_REGEX, version)[0]
short_version = major_version + "." + minor_version + "." + patch_version
version_dict = {}
version_dict["full"] = version
version_dict["major"] = major_version
version_dict["minor"] = minor_version
version_dict["patch"] = patch_version
version_dict["short"] = short_version
return version_dict
def start_new_acquisition(self):
"""Performs actions needed when the acquisition is started."""
self._frame_offset_dict.clear()
self._frame_data_dict.clear()
self._series_created = False
self._config_created = False
self._flatfield_created = False
self._pixel_mask_created = False
self._countrate_created = False
self._global_appendix_created = False
return
def handle_global_header_none(self, message):
"""Handle global header message with details flag set to None.
:param message: The message received
"""
self._logger.debug('Handling global header none for acqID ' + self._acquisition_id)
self._logger.debug(message)
if self._series_created:
self._logger.debug('series already created')
return
if not self.file_created:
self.create_file()
npa = np.array(message['series'])
self.create_dataset_with_data("series", data=npa)
self._series_created = True
return
def handle_global_header_config(self, header, config):
"""Handle global header config part message containing config data.
:param header: The header received
:param config: The config data
"""
self._logger.debug('Handling global header cfg for acqID ' + self._acquisition_id)
self._logger.debug(header)
self._logger.debug(config)
if not self.file_created:
self.create_file()
if self._config_created:
self._logger.debug('config already created')
else:
nps = np.str(config)
config_data = ast.literal_eval(np.str(config).decode("utf-8"))
self.create_dataset_with_data("config", data=nps)
for k in sorted(config_data):
self.create_dataset_with_data("_dectris/%s" %k, config_data[k])
self._config_created = True
if self._series_created:
self._logger.debug('series already created')
else:
npa = np.array(header['series'])
self.create_dataset_with_data("series", data=npa)
self._series_created = True
return
def handle_flatfield_header(self, header, flatfield):
"""Handle global header flatfield part message containing flatfield data.
:param header: The header received
:param flatfield: The flatfield data
"""
self._logger.debug('Handling flatfield header for acqID ' + self._acquisition_id)
self._logger.debug(header)
if self._flatfield_created:
self._logger.debug('flatfield already created')
return
if not self.file_created:
self.create_file()
self._flatfield_created = True
npa = np.frombuffer(flatfield, dtype=np.float32)
shape = header['shape']
self.create_dataset_with_data("flatfield", data=npa, shape=(shape[1], shape[0]))
return
def handle_mask_header(self, header, mask):
"""Handle global header pixel mask part message containing pixel mask data.
:param header: The header received
:param mask: The pixel mask data
"""
self._logger.debug('Handling mask header for acqID ' + self._acquisition_id)
self._logger.debug(header)
if self._pixel_mask_created:
self._logger.debug('pixel mask already created')
return
if not self.file_created:
self.create_file()
self._pixel_mask_created = True
npa = np.frombuffer(mask, dtype=np.uint32)
shape = header['shape']
self.create_dataset_with_data("mask", data=npa, shape=(shape[1], shape[0]))
return
def handle_countrate_header(self, header, countrate):
"""Handle global header count rate part message containing count rate data.
:param header: The header received
:param countrate: The count rate data
"""
self._logger.debug('Handling countrate header for acqID ' + self._acquisition_id)
self._logger.debug(header)
if self._countrate_created:
self._logger.debug('countrate already created')
return
if not self.file_created:
self.create_file()
self._countrate_created = True
npa = np.frombuffer(countrate, dtype=np.float32)
shape = header['shape']
self.create_dataset_with_data("countrate", data=npa, shape=(shape[1], shape[0]))
return
def handle_global_header_appendix(self, appendix):
"""Handle global header appendix part message.
:param appendix: The appendix data
"""
self._logger.debug('Handling global header appendix for acqID ' + self._acquisition_id)
if self._global_appendix_created:
self._logger.debug('global appendix already created')
return
if not self.file_created:
self.create_file()
self._global_appendix_created = True
nps = np.str(appendix)
self.create_dataset_with_data("globalAppendix", data=nps)
return
def handle_data(self, header):
"""Handle meta data message.
:param header: The header
"""
frame_id = header['frame']
# Check if we know the offset to write to yet, if so write the frame, if not store the data until we do know.
        if frame_id in self._frame_offset_dict:
self.write_frame_data(self._frame_offset_dict[frame_id], header)
del self._frame_offset_dict[frame_id]
if self._close_after_write:
self.close_file()
else:
self._frame_data_dict[frame_id] = header
return
def handle_image_appendix(self, header, appendix):
"""Handle meta data message appendix message part.
:param header: The header
:param appendix: The appendix data
"""
self._logger.debug('Handling image appendix for acqID ' + self._acquisition_id)
self._logger.debug(header)
self._logger.debug(appendix)
# Do nothing as can't write variable length dataset in swmr
return
def handle_end(self, message):
"""Handle end of series message.
:param message: The message
"""
self._logger.debug('Handling end for acqID ' + self._acquisition_id)
self._logger.debug(message)
# Do nothing with end message
return
def handle_frame_writer_start_acquisition(self, userHeader):
"""Handle frame writer plugin start acquisition message.
:param userHeader: The header
"""
self._logger.debug('Handling frame writer start acquisition for acqID ' + self._acquisition_id)
self._logger.debug(userHeader)
self.number_processes_running = self.number_processes_running + 1
if not self.file_created:
self.create_file()
if self._num_frames_to_write == -1:
self._num_frames_to_write = userHeader['totalFrames']
self.create_arrays()
return
def handle_frame_writer_create_file(self, userHeader, fileName):
"""Handle frame writer plugin create file message.
:param userHeader: The header
:param fileName: The file name
"""
self._logger.debug('Handling frame writer create file for acqID ' + self._acquisition_id)
self._logger.debug(userHeader)
self._logger.debug(fileName)
return
def handle_frame_writer_write_frame(self, message):
"""Handle frame writer plugin write frame message.
:param message: The message
"""
frame_number = message['frame']
offset_value = message['offset']
if not self._arrays_created:
self._logger.error('Arrays not created, cannot handle frame writer data')
return
offset_to_write_to = offset_value
if self._num_frame_offsets_written + 1 > self._num_frames_to_write:
self._data_set_arrays["frame_written"] = np.resize(self._data_set_arrays["frame_written"],
(self._num_frame_offsets_written + 1,))
self._data_set_arrays["offset_written"] = np.resize(self._data_set_arrays["offset_written"],
(self._num_frame_offsets_written + 1,))
self._data_set_arrays["frame_written"][self._num_frame_offsets_written] = frame_number
self._data_set_arrays["offset_written"][self._num_frame_offsets_written] = offset_to_write_to
self._num_frame_offsets_written = self._num_frame_offsets_written + 1
# Check if we have the data and/or appendix for this frame yet. If so, write it in the offset given
        if frame_number in self._frame_data_dict:
self.write_frame_data(offset_to_write_to, self._frame_data_dict[frame_number])
del self._frame_data_dict[frame_number]
else:
self._frame_offset_dict[frame_number] = offset_to_write_to
return
def create_arrays(self):
"""Create the data set arrays for all of the Eiger meta datasets."""
self._data_set_arrays["start_time"] = np.negative(np.ones((self._num_frames_to_write,), dtype=np.int64))
self._data_set_arrays["stop_time"] = np.negative(np.ones((self._num_frames_to_write,), dtype=np.int64))
self._data_set_arrays["real_time"] = np.negative(np.ones((self._num_frames_to_write,), dtype=np.int64))
self._data_set_arrays["frame"] = np.negative(np.ones((self._num_frames_to_write,), dtype=np.int64))
self._data_set_arrays["size"] = np.negative(np.ones((self._num_frames_to_write,), dtype=np.int64))
self._data_set_arrays["hash"] = np.empty(self._num_frames_to_write, dtype='S32')
self._data_set_arrays["encoding"] = np.empty(self._num_frames_to_write, dtype='S10')
self._data_set_arrays["datatype"] = np.empty(self._num_frames_to_write, dtype='S6')
self._data_set_arrays["frame_series"] = np.negative(np.ones((self._num_frames_to_write,), dtype=np.int64))
self._data_set_arrays["frame_written"] = np.negative(np.ones((self._num_frames_to_write,), dtype=np.int64))
self._data_set_arrays["offset_written"] = np.negative(np.ones((self._num_frames_to_write,), dtype=np.int64))
self._hdf5_datasets["start_time"].resize(self._num_frames_to_write, axis=0)
self._hdf5_datasets["stop_time"].resize(self._num_frames_to_write, axis=0)
self._hdf5_datasets["real_time"].resize(self._num_frames_to_write, axis=0)
self._hdf5_datasets["frame"].resize(self._num_frames_to_write, axis=0)
self._hdf5_datasets["size"].resize(self._num_frames_to_write, axis=0)
self._hdf5_datasets["hash"].resize(self._num_frames_to_write, axis=0)
self._hdf5_datasets["encoding"].resize(self._num_frames_to_write, axis=0)
self._hdf5_datasets["datatype"].resize(self._num_frames_to_write, axis=0)
self._hdf5_datasets["frame_series"].resize(self._num_frames_to_write, axis=0)
self._hdf5_file.swmr_mode = True
self._arrays_created = True
def handle_frame_writer_close_file(self):
"""Handle frame writer plugin close file message."""
self._logger.debug('Handling frame writer close file for acqID ' + self._acquisition_id)
# Do nothing
return
def close_file(self):
"""Close the file."""
if len(self._frame_offset_dict) > 0:
# Writers have finished but we haven't got all associated meta. Wait till it comes before closing
self._logger.info('Unable to close file as Frame Offset Dict Length = ' + str(len(self._frame_offset_dict)))
self._close_after_write = True
return
self.write_datasets()
if self._hdf5_file is not None:
self._logger.info('Closing file ' + self.full_file_name)
self._hdf5_file.close()
self._logger.info('Meta frames written: ' + str(self._current_frame_count) + ' of ' + str(self._num_frames_to_write))
self._hdf5_file = None
self.finished = True
def handle_frame_writer_stop_acquisition(self, userheader):
"""Handle frame writer plugin stop acquisition message.
:param userheader: The user header
"""
self._logger.debug('Handling frame writer stop acquisition for acqID ' + self._acquisition_id)
self._logger.debug(userheader)
if self.number_processes_running > 0:
self.number_processes_running = self.number_processes_running - 1
if self.number_processes_running == 0:
self._logger.info('Last processor ended for acqID ' + str(self._acquisition_id))
if self._current_frame_count >= self._num_frames_to_write:
self.close_file()
else:
self._logger.info(
'Not closing file as not all frames written (' + str(self.write_count) + ' of ' + str(
self._num_frames_to_write) + ')')
else:
self._logger.info('Processor ended, but not the last for acqID ' + str(self._acquisition_id))
return
def write_frame_data(self, offset, header):
"""Write the frame data to the arrays and flush if necessary.
:param offset: The offset to write to in the arrays
:param header: The data header
"""
if not self._arrays_created:
self._logger.error('Arrays not created, cannot write frame data')
return
if offset + 1 > self._current_frame_count:
self._current_frame_count = offset + 1
self._data_set_arrays["start_time"][offset] = header['start_time']
self._data_set_arrays["stop_time"][offset] = header['stop_time']
self._data_set_arrays["real_time"][offset] = header['real_time']
self._data_set_arrays["frame"][offset] = header['frame']
self._data_set_arrays["size"][offset] = header['size']
self._data_set_arrays["hash"][offset] = header['hash']
self._data_set_arrays["encoding"][offset] = header['encoding']
self._data_set_arrays["datatype"][offset] = header['type']
self._data_set_arrays["frame_series"][offset] = header['series']
self.write_count = self.write_count + 1
self._need_to_write_data = True
flush = False
if self.flush_timeout is not None:
if (time.time() - self._last_flushed) >= self.flush_timeout:
flush = True
elif (self.write_count % self.flush_frequency) == 0:
flush = True
if flush:
self.write_datasets()
# Reset timeout count to 0
self.write_timeout_count = 0
return
def write_datasets(self):
"""Write the datasets to the hdf5 file."""
if not self._arrays_created:
self._logger.warn('Arrays not created, cannot write datasets from frame data')
return
if self._need_to_write_data:
self._logger.info('Writing data to datasets at write count ' + str(self.write_count) + ' for acqID ' + str(self._acquisition_id))
self._hdf5_datasets["start_time"][0:self._num_frames_to_write] = self._data_set_arrays["start_time"]
self._hdf5_datasets["stop_time"][0:self._num_frames_to_write] = self._data_set_arrays["stop_time"]
self._hdf5_datasets["real_time"][0:self._num_frames_to_write] = self._data_set_arrays["real_time"]
self._hdf5_datasets["frame"][0:self._num_frames_to_write] = self._data_set_arrays["frame"]
self._hdf5_datasets["size"][0:self._num_frames_to_write] = self._data_set_arrays["size"]
self._hdf5_datasets["hash"][0:self._num_frames_to_write] = self._data_set_arrays["hash"]
self._hdf5_datasets["encoding"][0:self._num_frames_to_write] = self._data_set_arrays["encoding"]
self._hdf5_datasets["datatype"][0:self._num_frames_to_write] = self._data_set_arrays["datatype"]
self._hdf5_datasets["frame_series"][0:self._num_frames_to_write] = self._data_set_arrays["frame_series"]
self._hdf5_datasets["frame_written"].resize(self._num_frame_offsets_written, axis=0)
self._hdf5_datasets["frame_written"][0:self._num_frame_offsets_written] = self._data_set_arrays["frame_written"][0:self._num_frame_offsets_written]
self._hdf5_datasets["offset_written"].resize(self._num_frame_offsets_written, axis=0)
self._hdf5_datasets["offset_written"][0:self._num_frame_offsets_written] = self._data_set_arrays["offset_written"][0:self._num_frame_offsets_written]
self._hdf5_datasets["start_time"].flush()
self._hdf5_datasets["stop_time"].flush()
self._hdf5_datasets["real_time"].flush()
self._hdf5_datasets["frame"].flush()
self._hdf5_datasets["size"].flush()
self._hdf5_datasets["hash"].flush()
self._hdf5_datasets["encoding"].flush()
self._hdf5_datasets["datatype"].flush()
self._hdf5_datasets["frame_series"].flush()
self._hdf5_datasets["frame_written"].flush()
self._hdf5_datasets["offset_written"].flush()
self._last_flushed = time.time()
self._need_to_write_data = False
def stop(self):
"""Stop this acquisition."""
self._frame_offset_dict.clear()
self.close_file()
def process_message(self, message, userheader, receiver):
"""Process a meta message.
:param message: The message
:param userheader: The user header
:param receiver: The ZeroMQ socket the data was received on
"""
self._logger.debug('Eiger Meta Writer Handling message')
if message['parameter'] == "eiger-globalnone":
receiver.recv_json()
self.handle_global_header_none(message)
elif message['parameter'] == "eiger-globalconfig":
config = receiver.recv_json()
self.handle_global_header_config(userheader, config)
elif message['parameter'] == "eiger-globalflatfield":
flatfield = receiver.recv()
self.handle_flatfield_header(userheader, flatfield)
elif message['parameter'] == "eiger-globalmask":
mask = receiver.recv()
self.handle_mask_header(userheader, mask)
elif message['parameter'] == "eiger-globalcountrate":
countrate = receiver.recv()
self.handle_countrate_header(userheader, countrate)
elif message['parameter'] == "eiger-headerappendix":
appendix = receiver.recv()
self.handle_global_header_appendix(appendix)
elif message['parameter'] == "eiger-imagedata":
imageMetaData = receiver.recv_json()
self.handle_data(imageMetaData)
elif message['parameter'] == "eiger-imageappendix":
appendix = receiver.recv()
self.handle_image_appendix(userheader, appendix)
elif message['parameter'] == "eiger-end":
receiver.recv()
self.handle_end(message)
elif message['parameter'] == "createfile":
fileName = receiver.recv()
self.handle_frame_writer_create_file(userheader, fileName)
elif message['parameter'] == "closefile":
receiver.recv()
self.handle_frame_writer_close_file()
elif message['parameter'] == "startacquisition":
receiver.recv()
self.handle_frame_writer_start_acquisition(userheader)
elif message['parameter'] == "stopacquisition":
receiver.recv()
self.handle_frame_writer_stop_acquisition(userheader)
elif message['parameter'] == "writeframe":
value = receiver.recv_json()
self.handle_frame_writer_write_frame(value)
else:
self._logger.error('unknown parameter: ' + str(message))
value = receiver.recv()
self._logger.error('value: ' + str(value))
return
| python |
import os
def join_muspath(name: str):
return os.path.join("assets", "audio", "music", name)
menu1 = join_muspath("menu1.ogg")
menu2 = join_muspath("menu2.ogg")
piano1 = join_muspath("piano1.ogg")
MENU = [menu1, menu2, piano1]
| python |
from unittest import TestCase
from . import db_conn, es_conn, APP_DIR, DATABASE_URL
from .queries import CREATE_TEST_TABLE, DROP_TEST_TABLE
from .writer import Writer
from .scanner import Scanner
import os
import subprocess
import time
from importlib import import_module
from click.testing import CliRunner
from .command import *
class ESWrapTest(TestCase):
conn = db_conn(DATABASE_URL)
es = es_conn()
def setUp(self):
with self.conn.cursor() as c:
c.execute(CREATE_TEST_TABLE)
def tearDown(self):
with self.conn.cursor() as c:
c.execute(DROP_TEST_TABLE)
filename = "foo_table_es_mapping.py"
files = os.listdir('{}/{}'.format(os.getcwd(), APP_DIR))
if filename in files:
cmd = "rm {}/{}/{}".format(os.getcwd(), APP_DIR, filename)
subprocess.run([cmd], shell=True, check=True)
# Add method for clearing indices.
if self.es.indices.exists(index=['test_index']):
self.es.indices.delete(index=['test_index'])
def test_writer(self):
w = Writer(DATABASE_URL)
w.write_mapping('foo_table', 'foo_document')
files = os.listdir('{}/{}'.format(os.getcwd(), APP_DIR))
self.assertIn('foo_table_es_mapping.py', files)
module = import_module('{}.foo_table_es_mapping'.format(APP_DIR))
self.assertTrue(hasattr(module, 'foo_table_mapping'))
mapping = getattr(module, 'foo_table_mapping')
expected = {
'mappings': {
'foo_document': {
'properties': {
'foo': {'type': 'string'},
'id': {'type': 'integer'},
'bar': {'type': 'string'}
}
}
}
}
self.assertEqual(mapping, expected)
def test_scanner(self):
s = Scanner(DATABASE_URL)
props = s.build_props('foo_table')
self.assertEqual(props['id'], dict(type='integer'))
self.assertEqual(props['foo'], dict(type='string'))
self.assertEqual(props['bar'], dict(type='string'))
def test_create_index(self):
w = Writer(DATABASE_URL)
w.write_mapping('foo_table', 'foo_document')
module = import_module('{}.foo_table_es_mapping'.format(APP_DIR))
mapping = getattr(module, 'foo_table_mapping')
self.es.indices.create(index='test_index', body=mapping)
time.sleep(2)
self.assertTrue(self.es.indices.exists(index=['test_index']))
actual_mapping = self.es.indices.get_mapping(
index=['test_index'], doc_type=['foo_document'])
exp_mapping = {
'mappings': {
'foo_document': {
'properties': {
'foo': {'type': 'string'},
'id': {'type': 'integer'},
'bar': {'type': 'string'}
}
}
}
}
self.assertEqual(actual_mapping['test_index'], exp_mapping)
def test_click_app(self):
runner = CliRunner()
result = runner.invoke(cli, [
'--database-url={}'.format(DATABASE_URL),
'--file-path={}/{}'.format(os.getcwd(), APP_DIR),
'--table-name=foo_table',
'--document-type=foo_document'])
self.assertEqual(result.output, 'Writing map.\n')
self.assertEqual(result.exit_code, 0)
def test_click_badparam(self):
runner = CliRunner()
result = runner.invoke(cli, [])
self.assertTrue('The --database-url cannot be blank' in result.output)
| python |
"""
********************************************************************************
pyconmech
********************************************************************************
.. currentmodule:: pyconmech
This library provides python wrappers for efficient evaluation of construction mechanics.
.. toctree::
:maxdepth: 3
pyconmech.frame_analysis
pyconmech.database
"""
from __future__ import print_function
import os
import sys
import decimal
from .frame_analysis import StiffnessChecker
from .__version__ import __author__, __author_email__, __copyright__, __description__, __license__, __title__, __url__, __version__
__all__ = [
'__author__', '__author_email__', '__copyright__', '__description__',
'__license__', '__title__', '__url__', '__version__',
'raise_if_windows',
'raise_if_not_windows',
'raise_if_ironpython',
'raise_if_not_ironpython',
]
def is_windows():
"""Check if the operating system is Windows.
Returns
-------
bool
True if the OS is Windows. False otherwise
"""
return os.name == 'nt'
WINDOWS = is_windows()
def is_ironpython():
"""Check if the Python implementation is IronPython.
Returns
-------
bool
True if the implementation is IronPython. False otherwise
"""
return 'ironpython' in sys.version.lower()
IPY = is_ironpython()
def raise_if_not_windows():
if not WINDOWS:
raise
def raise_if_windows():
if WINDOWS:
raise
def raise_if_not_ironpython():
if not IPY:
raise
def raise_if_ironpython():
if IPY:
raise | python |
from src.utils.general import pd_utils
from datetime import date
import logging
import pandas as pd
pd.set_option('display.width', None)
class LCReviewer:
"""
LC Reviewer help review LC
"""
def __init__(self):
self.df = pd_utils.pd_read_csv('../../data/files/lc_record.csv')
self.review_df = pd_utils.pd_read_csv('../../data/files/lc_review.csv')
today_datestr = date.today().strftime("%Y-%m-%d")
if self.review_df.iloc[-1]['datestr'] != today_datestr:
new_row = {'row_num': self.review_df.iloc[-1]['row_num'], 'datestr': date.today().strftime("%Y-%m-%d")}
self.review_df = self.review_df.append(new_row, ignore_index=True)
self.cur_row = self.review_df.iloc[-1]['row_num']
self.today_num = 0
self.save_df()
def save_df(self):
pd_utils.pd_write_csv(self.review_df, '../../data/files/lc_review.csv')
def next(self):
self.cur_row = self.review_df.iloc[-1]['row_num']
if self.cur_row >= len(self.df):
print("Good job! All LC review done.")
return
self.save()
row = self.df.iloc[self.cur_row]
print("\nNext Question:\nNum:\t{}\nURL:\t{}\nDate:\t{}\nToday:\t{}\n".format(
row['lc_num'], row['lc_url'], row['date'], self.today_num))
def save(self):
self.cur_row += 1
self.review_df.iloc[-1, self.review_df.columns.get_loc('row_num')] = self.cur_row
self.today_num = self.cur_row - self.review_df.iloc[-2]['row_num']
self.save_df()
if __name__ == "__main__":
lc_reviewer = LCReviewer()
print("Start review")
while True:
lc_reviewer.next()
receive = input("input: ")
if receive == "exit":
break
| python |
from localground.apps.site.api import serializers
from localground.apps.site import models
from localground.apps.site.api.views.abstract_views import \
MediaList, MediaInstance
class MapImageList(MediaList):
ext_whitelist = ['jpg', 'jpeg', 'gif', 'png']
serializer_class = serializers.MapImageSerializerCreate
model = models.MapImage
class MapImageInstance(MediaInstance):
serializer_class = serializers.MapImageSerializerUpdate
model = models.MapImage
| python |
import pytest
from rasa.shared.nlu.training_data import util
from rasa.nlu.config import RasaNLUModelConfig
import rasa.shared.nlu.training_data.loading
from rasa.nlu.train import Trainer, Interpreter
from rasa.utils.tensorflow.constants import (
EPOCHS,
MASKED_LM,
NUM_TRANSFORMER_LAYERS,
TRANSFORMER_SIZE,
)
from rasa.nlu.selectors.response_selector import ResponseSelector
@pytest.mark.parametrize(
"pipeline",
[
[
{"name": "WhitespaceTokenizer"},
{"name": "CountVectorsFeaturizer"},
{"name": "ResponseSelector", EPOCHS: 1},
],
[
{"name": "WhitespaceTokenizer"},
{"name": "CountVectorsFeaturizer"},
{
"name": "ResponseSelector",
EPOCHS: 1,
MASKED_LM: True,
TRANSFORMER_SIZE: 256,
NUM_TRANSFORMER_LAYERS: 1,
},
],
],
)
def test_train_selector(pipeline, component_builder, tmpdir):
# use data that include some responses
training_data = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa.md"
)
training_data_responses = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa-responses.md"
)
training_data = training_data.merge(training_data_responses)
nlu_config = RasaNLUModelConfig({"language": "en", "pipeline": pipeline})
trainer = Trainer(nlu_config)
trainer.train(training_data)
persisted_path = trainer.persist(tmpdir)
assert trainer.pipeline
loaded = Interpreter.load(persisted_path, component_builder)
parsed = loaded.parse("hello")
assert loaded.pipeline
assert parsed is not None
assert (parsed.get("response_selector").get("all_retrieval_intents")) == [
"chitchat"
]
assert (
parsed.get("response_selector")
.get("default")
.get("response")
.get("intent_response_key")
) is not None
assert (
parsed.get("response_selector")
.get("default")
.get("response")
.get("template_name")
) is not None
assert (
parsed.get("response_selector")
.get("default")
.get("response")
.get("response_templates")
) is not None
ranking = parsed.get("response_selector").get("default").get("ranking")
assert ranking is not None
for rank in ranking:
assert rank.get("confidence") is not None
assert rank.get("intent_response_key") is not None
@pytest.mark.parametrize(
"use_text_as_label, label_values",
[
[False, ["chitchat/ask_name", "chitchat/ask_weather"]],
[True, ["I am Mr. Bot", "It's sunny where I live"]],
],
)
def test_ground_truth_for_training(use_text_as_label, label_values):
# use data that include some responses
training_data = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa.md"
)
training_data_responses = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa-responses.md"
)
training_data = training_data.merge(training_data_responses)
response_selector = ResponseSelector(
component_config={"use_text_as_label": use_text_as_label}
)
response_selector.preprocess_train_data(training_data)
assert response_selector.responses == training_data.responses
assert (
sorted(list(response_selector.index_label_id_mapping.values())) == label_values
)
@pytest.mark.parametrize(
"predicted_label, train_on_text, resolved_intent_response_key",
[
["chitchat/ask_name", False, "chitchat/ask_name"],
["It's sunny where I live", True, "chitchat/ask_weather"],
],
)
def test_resolve_intent_response_key_from_label(
predicted_label, train_on_text, resolved_intent_response_key
):
# use data that include some responses
training_data = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa.md"
)
training_data_responses = rasa.shared.nlu.training_data.loading.load_data(
"data/examples/rasa/demo-rasa-responses.md"
)
training_data = training_data.merge(training_data_responses)
response_selector = ResponseSelector(
component_config={"use_text_as_label": train_on_text}
)
response_selector.preprocess_train_data(training_data)
label_intent_response_key = response_selector._resolve_intent_response_key(
{"id": hash(predicted_label), "name": predicted_label}
)
assert resolved_intent_response_key == label_intent_response_key
assert (
response_selector.responses[
util.intent_response_key_to_template_key(label_intent_response_key)
]
== training_data.responses[
util.intent_response_key_to_template_key(resolved_intent_response_key)
]
)
| python |
"""Define _TSP Models
Time Series Predictions (TSPs) are attempt to predict what will happen based
on what has happened before. While there are a plethora of ways to do this,
the teaspoon module foucsses on using the last few observations to predict
the next and mechanisms to combine several of these predictions.
"""
import multiprocessing
import pandas as pd
import numpy as np
def ts_to_labels(_ts, _n, col=None):
"""Convert a time series iterable into a set of features and labels ready
for training.
Args:
_ts (array-like): time series to be used for training.
_n (int): number of step features for each label.
col (any): column identifier for dataframe time series, in case only
a subsection of it will be used for training.
"""
_ts = _ts if isinstance(_ts, pd.DataFrame) \
else pd.DataFrame(_ts, columns=["x"])
_x, _y = list(), list()
_ts.rolling(_n+1).apply(append_window,
args=(_x, _y, _n),
kwargs={"col": col})
return np.array(_x), np.array(_y)
def append_window(_w, _x, _y, _n, col=None):
"""Helper function to append the features and labels from a time series
rolling window into a feature and label array.
Args:
_w (pd.DataFrame or pd.Series): time series data window element of
the .rolling(_n+1) method.
_x (list): feature list to append features to.
        _y (list): label list to append labels to.
_n (int): number of step features for each label.
col (any): column identifier for dataframe time series, in case only
a subsection of it will be used for training.
"""
_x.append(np.array(_w.iloc[:_n]))
_y.append(np.array(_w.iloc[_n]) if col is None
else np.array(_w.iloc[_n][col]))
return 1
class SimpleModelWrapper:
"""Wrapper object used to "translate" a model's core functionaliy into
one that can be used in _TSP instances.
This wrapper by default simply calls an alternative function as specifed
upon initialization, with assumed positional arguments.
This class can be inheritted to incorporate more complex mechanics of
whichever model is being used.
Attributes:
_model (any): model with fit and predict capabilities.
_fit_attr (str): model attribute used for fitting.
_predict_attr (str): model attribute used for predicting values.
"""
def __init__(self, model, fit_attr="fit", predict_attr="predict"):
"""Initialize object instance.
Args:
model (any): model with fit and predict capabilities.
fit_attr (str), default "fit": model attribute used for fitting.
predict_attr (str), default "predict": model attribute used for
predicting values.
Raise:
TypeError: if fit_attr or predict_attr are not strings.
"""
self._model = model
if not isinstance(fit_attr, str):
raise TypeError(f"fit_attr parameter must be {str}, \
not {type(fit_attr)}")
self._fit_attr = fit_attr
if not isinstance(predict_attr, str):
raise TypeError(f"predict_attr parameter must be {str}, \
not {type(predict_attr)}")
self._predict_attr = predict_attr
def fit(self, features, labels, *args, **kwargs):
"""Fit model(s)
Args:
features (array or matrix like): features used for fitting
labels (array or matrix like): labels used for fitting
*args, **kwargs: arguments used for fitting
"""
return self._model.__getattribute__(self._fit_attr)(
features,
labels,
*args,
**kwargs
)
def predict(self, features, *args, **kwargs):
"""Predict value(s)
Args:
features (array or matrix like): features used for fitting
*args, **kwargs: arguments used for fitting
"""
return self._model.__getattribute__(self._predict_attr)(
features,
*args,
**kwargs
)
class _TSP:
"""Abstract Time Series Prediction class.
Attributes:
_model (any with "fit" and "predict" parameter): model that takes in
past steps and predicts future ones.
"""
def fit(self, _ts, *args, **kwargs):
"""Fit model from data.
Args:
_ts (array-like): time series data used to fit the model.
"""
raise NotImplementedError()
def predict(self, _ts, *args, start=None, horizon=1, **kwargs):
"""Predict future steps from past ones.
Args:
_ts (array-like): time series to get past steps from.
start (any), Optional, None by default: first step to predict
from.
horizon (int), 1 by default: how many steps ahead to predict.
"""
raise NotImplementedError()
@property
def model(self):
"""Define model fetching mechanism to ensure model must be set to
be accessed
Raises:
AttributeError: if model is not set.
"""
if self._model is None:
raise AttributeError("model attribute is not set")
return self._model
@model.setter
def model(self, new_model):
"""Define model setting mechanism to ensure model can be fit and
used for prediction.
Raises:
AttributeError: if model does not have a "fit" or "predict"
parameter.
"""
if not hasattr(new_model, "fit"):
raise AttributeError("specified model must have a 'fit' \
attribute")
if not hasattr(new_model, "predict"):
raise AttributeError("specified model must have a 'predict' \
attribute")
self._model = new_model
class UTSP(_TSP):
"""Univarate Time Series Prediction model.
This is used to predict the next step given a one-dimentional array.
Attributes:
_n (int): how many past steps considered for predicting the next.
"""
def __init__(self, model, n):
"""Initialize model parameters.
Args:
n (int): how many past steps considered for predicting the next.
            model (any): fittable model that takes in {n} one-dimensional
inputs and returns a single value for the predicted next
step.
"""
self.model = model
self._n = n
def fit(self, _ts, *args, shuffle=True, **kwargs):
        if (len(_ts.shape) == 2 and _ts.shape[1] != 1) \
                or (len(_ts.shape) > 2):
raise ValueError(f"input time series must be a 1D array, not \
{len(_ts.shape)}D")
_x, _y = ts_to_labels(_ts, self._n)
_x = _x.reshape(-1, 1) if len(_x.shape) == 1 else _x
_y = _y.reshape(-1, 1)
if shuffle:
concat = np.concatenate((_x, _y), axis=1)
np.random.shuffle(concat)
_x, _y = np.split(concat, [self._n], axis=1)
self.model.fit(_x, _y, *args, **kwargs)
def predict(self, _ts, *args, start=None, horizon=1, **kwargs):
        if (len(_ts.shape) == 2 and _ts.shape[1] != 1) \
                or (len(_ts.shape) > 2):
raise ValueError(f"input time series must be a 1D array, not \
{len(_ts.shape)}D")
        if len(_ts) < self._n:
            raise ValueError(f"input must have at least {self._n} items.")
ret_x, ret_pred = [], []
curr_x, curr_pred = np.empty(0), None
if start is None:
            curr_x = _ts[-self._n:]
else:
            if len(_ts[start:]) < self._n:
                raise ValueError(f"specify a start with more than {self._n} \
                    items ahead of it.")
curr_x = _ts[start:start+self._n]
curr_x = np.array(curr_x)
for _ in range(horizon):
curr_pred = self.model.predict(np.array(curr_x.reshape(1, -1)),
*args,
**kwargs)
ret_x.append(curr_x)
ret_pred.append(curr_pred)
curr_x[:self._n-1], curr_x[self._n-1] = curr_x[1:], curr_pred
return np.array(ret_x), np.array(ret_pred)
class MTSP(_TSP):
"""Multivariate Time Series Prediction models.
These models are highly flexible ways of predicting future values based
    on a 2D array of last steps over multiple features. While this is
    commonly used to look a single step ahead, users can specify more
    granularly how each variable of a step ahead should be predicted.
Attributes:
_n (int): number of steps before used to predict a step ahead.
        _col (str): column this model tries to predict.
        _submodels (dict): dictionary of submodels used to predict a column's
            value based on previous steps. These must be either UTSP
            models for single columns or MTSP for multiple columns. This
            dictionary should have a tuple (multiple columns) or string
            (single column) as keys and _TSP instances as values.
        _min_cols (list): all columns specified by the initialization.
        n_jobs (int): number of jobs to run for fitting and predicting.
"""
def __init__(self,
model,
n,
col,
submodels=None,
n_jobs=1):
"""
Args:
n (int): number of steps before used to predict a step ahead.
            col (str): column this model tries to predict.
            submodels (dict): dictionary of submodels used to predict any
                dataframe variable in a custom way. These must be either UTSP
                models for single columns or MTSP for multiple columns. This
                dictionary should have a tuple (multiple columns) or string
                (single column) as keys and _TSP instances as values. This
                variable will be filled upon fitting to account for
                unspecified columns.
            n_jobs (int): number of jobs to run for fitting and predicting.
"""
self.model = model
min_cols = set()
self._col = col
min_cols.add(self._col)
self._n = n
        if submodels is None:
            submodels = {}
        if not isinstance(submodels, dict):
            raise TypeError(f"submodels parameter must be of type \
                {dict} not {type(submodels)}")
        self._submodels = submodels
        for col_name, tsp in submodels.items():
            if isinstance(col_name, tuple):
                if not isinstance(tsp, MTSP):
                    raise TypeError(f"multistep model for column {col_name} \
                        must be of type {MTSP} not {type(tsp)} if \
                        predicting based on multiple variables")
                col1, col2 = col_name
                min_cols.add(col1)
                if isinstance(col2, (tuple, list)):
                    min_cols.update(col2)
                else:
                    min_cols.add(col2)
            else:
                if not isinstance(tsp, UTSP):
                    raise TypeError(f"multistep model for column {col_name} \
                        must be of type {UTSP} not {type(tsp)} if \
                        predicting based on a single variable")
                min_cols.add(col_name)
self._min_cols = list(min_cols)
self.n_jobs = n_jobs
def fit(self, _ts, *args, **kwargs):
if not isinstance(_ts, pd.DataFrame):
raise TypeError(f"argument _ts must be of type {pd.DataFrame} \
not {type(_ts)}")
if not all([col_name in _ts.columns
for col_name in self._min_cols]):
raise ValueError(f"time series should have the following columns \
specified upon model initialization: {self._min_cols}")
_x, _y = ts_to_labels(_ts, self._n)
self.model.fit(_x,
_y.reshape(-1, 1),
*args,
**kwargs)
with multiprocessing.Pool(processes=self.n_jobs) as pool:
            results = [pool.apply_async(tsp.fit, (_ts[col_name],))
                       for col_name, tsp in self._submodels.items()]
for res in results:
res.get()
def predict(self, _ts, *args, start=None, horizon=1, **kwargs):
if not isinstance(_ts, pd.DataFrame):
raise TypeError(f"argument _ts must be of type {pd.DataFrame} \
not {type(_ts)}")
        if len(_ts) < self._n:
            raise ValueError(f"input must have at least {self._n} items.")
if not all([col_name in _ts.columns
for col_name in self._min_cols]):
raise ValueError(f"time series should have the following columns \
specified upon model initialization: {self._min_cols}")
ret_x, ret_pred = [], []
curr_x, curr_pred = np.empty(0), None
if start is None:
start = len(_ts) - self._n
        if len(_ts.iloc[start:]) < self._n:
            raise ValueError(f"specify a start with more than {self._n} \
                items ahead of it.")
# we will append to the time series, so we create a copy now.
# copy will only have the necessary number of steps behind to
# save memory.
_ts = _ts.copy().iloc[start:
-max([sm._n for sm in self._submodels.values()]
+ [self._n])]
curr_x = _ts.iloc[start:start+self._n].values
col_names_idx = {col: i for i, col in enumerate(_ts.columns)}
pred_cols = col_names_idx[self._col] if isinstance(self._col, str) \
else [col_names_idx[c] for c in self._col]
for _ in range(horizon):
curr_pred = self.model.predict(curr_x.reshape(1, -1),
*args,
**kwargs)
ret_x.append(curr_x)
ret_pred.append(curr_pred)
new_step = curr_x[-1]
new_step[pred_cols] = curr_pred
            for col_name, tsp in self._submodels.items():
# TODO: parallelize
if isinstance(col_name, tuple):
col_name, col_sl = col_name
else:
col_sl = col_name
new_step[col_names_idx[col_name]] = tsp.predict(_ts[col_sl])
curr_x[:self._n-1], curr_x[-1] = curr_x[1:], new_step
_ts[len(_ts)] = new_step
return np.array(ret_x), np.array(ret_pred)
    @property
    def n_jobs(self):
        """Get n_jobs attribute"""
        return self._n_jobs
@n_jobs.setter
def n_jobs(self, _n):
"""Set n_jobs attribute ensuring it new value is an integer"""
if not isinstance(_n, int):
raise TypeError(f"attribute n_jobs must be of type {int} \
not {type(_n)}")
self._n_jobs = _n
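# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module).
# It exercises ts_to_labels and SimpleModelWrapper with a deliberately trivial
# "predict the window mean" model; any object exposing fit/predict methods
# (e.g. a scikit-learn regressor) could be wrapped the same way.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    class _MeanModel:
        """Toy model: ignores training and predicts the mean of each window."""
        def fit(self, features, labels):
            return self
        def predict(self, features):
            return features.mean(axis=1)
    series = np.arange(1.0, 11.0)      # 1, 2, ..., 10
    x, y = ts_to_labels(series, 3)     # 3 past steps per label
    print("features shape:", x.shape)  # expected (7, 3)
    print("labels shape:", y.shape)    # expected (7,)
    wrapped = SimpleModelWrapper(_MeanModel())
    wrapped.fit(x, y)
    print("prediction for last window:", wrapped.predict(x[-1:]))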
| python |
class AccessDeniedError(Exception):
pass
| python |
#!/usr/bin/env python3
import numpy as np
import json, logging
import pylab as plt
from frbpa.utils import get_phase, get_params
logging_format = '%(asctime)s - %(funcName)s -%(name)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=logging_format)
def make_obs_phase_plot(data_json, period, ref_mjd=None, nbins=40, save=False, show=False):
"""
Generates burst phase and observation phase distribution plot for a given period.
:param data_json: json file with data
:param period: period to use for phase calculation
:param ref_mjd: reference MJD to use
:param nbins: number of bins in the phase histogram
:param save: to save the plot
:param show: to show the plot
"""
with open(data_json, 'r') as f:
data = json.load(f)
assert 'obs_duration' in data.keys()
assert 'bursts' in data.keys()
assert 'obs_startmjds' in data.keys()
burst_dict = data['bursts']
obs_duration_dict = data['obs_duration']
obs_startmjds_dict = data['obs_startmjds']
assert len(obs_duration_dict.keys()) == len(obs_startmjds_dict.keys())
assert len(obs_duration_dict.keys()) < 20
assert len(burst_dict.keys()) < 10
new_obs_startmjds_dict = {}
new_obs_duration_dict = {}
for k in obs_startmjds_dict.keys():
start_times = obs_startmjds_dict[k]
durations = obs_duration_dict[k]
new_start_times = []
new_durations = []
for i, t in enumerate(start_times):
new_start_times.append(t)
new_durations.append(durations[i]//2)
new_start_times.append(t + (durations[i]//2)/(60*60*24))
new_durations.append(durations[i]//2)
new_obs_startmjds_dict[k] = new_start_times
new_obs_duration_dict[k] = new_durations
obs_duration_dict = new_obs_duration_dict
obs_startmjds_dict = new_obs_startmjds_dict
bursts = []
for k in burst_dict.keys():
bursts = bursts + burst_dict[k]
obs_duration = []
for k in obs_duration_dict.keys():
obs_duration = obs_duration + obs_duration_dict[k]
obs_startmjds = []
for k in obs_startmjds_dict.keys():
obs_startmjds = obs_startmjds + obs_startmjds_dict[k]
assert len(obs_startmjds) == len(obs_duration)
bursts = np.array(bursts)
obs_duration = np.array(obs_duration)
obs_startmjds = np.array(obs_startmjds)
obs_start_phases = get_phase(obs_startmjds, period, ref_mjd=ref_mjd)
hist, bin_edges_obs = np.histogram(obs_start_phases, bins=nbins)
obs_start_phases_dict = {}
duration_per_phase_dict = {}
for k in obs_startmjds_dict.keys():
obs_start_phases_dict[k] = get_phase(np.array(obs_startmjds_dict[k]),
period)
durations = np.array(obs_duration_dict[k])
start_phases = obs_start_phases_dict[k]
d_hist = []
for i in range(len(bin_edges_obs)):
if i>0:
d_hist.append(durations[(start_phases < bin_edges_obs[i]) &
(start_phases > bin_edges_obs[i-1])].sum())
duration_per_phase_dict[k] = np.array(d_hist)/(60*60)
obs_duration = np.array(obs_duration)
duration_hist = []
for i in range(len(bin_edges_obs)):
if i>0:
duration_hist.append(obs_duration[(obs_start_phases < bin_edges_obs[i]) &
(obs_start_phases > bin_edges_obs[i-1])].sum())
duration_hist = np.array(duration_hist)/(60*60)
bin_mids = (bin_edges_obs[:-1] + bin_edges_obs[1:])/2
phase_lst = []
for k in burst_dict.keys():
phase_lst.append(list(get_phase(np.array(burst_dict[k]), period)))
cm = plt.cm.get_cmap('tab20').colors
burst_hist_colors = []
obs_hist_colors = []
e = 0
o = 1
for k in obs_duration_dict.keys():
if k in burst_dict.keys():
color = cm[e]
e += 2
burst_hist_colors.append(color)
else:
color = cm[o]
o += 2
obs_hist_colors.append(color)
fig, ax = plt.subplots(2, 1, sharex=True)
ax1 = ax[0]
ax1_right = ax1.twinx()
ax1.hist(phase_lst, bins=bin_edges_obs, stacked=True, density=False, label=burst_dict.keys(),
edgecolor='black', linewidth=0.5, color=burst_hist_colors)
ax1.set_xlabel('Phase')
ax1.set_ylabel('No. of Bursts')
ax1_right.scatter(bin_mids, duration_hist, label='Obs duration', c='k', alpha=0.5)
ax1_right.set_ylabel('Observation Duration (hrs)')
ax1.legend()
ax1_right.legend(loc=2)
ax2 = ax[1]
cum_ds = np.zeros(nbins)
for i, k in enumerate(duration_per_phase_dict):
d = duration_per_phase_dict[k]
ax2.bar(bin_edges_obs[:-1], d, width=bin_edges_obs[1]-bin_edges_obs[0], align='edge', bottom=cum_ds,
alpha=1, label=k, edgecolor='black', linewidth=0.2, color=obs_hist_colors[i])
cum_ds += d
ax2.set_xlabel('Phase')
ax2.set_ylabel('Observation Duration (hrs)')
ax2.legend()
plt.tight_layout()
if save:
plt.savefig('burst_obs_phase_hist.png', bbox_inches='tight')
plt.savefig('burst_obs_phase_hist.pdf', bbox_inches='tight')
if show:
plt.show()
def make_phase_plot(data_json, period, ref_mjd=None, nbins=40, cmap=None, title=None, save=False, show=False):
"""
Generates burst phase distribution plot at a given period.
:param data_json: json file with data
:param period: period to use for phase calculation
:param ref_mjd: reference MJD to use
:param nbins: number of bins in the phase histogram
:param cmap: matplotlib colormap to use
:param title: title of the plot
:param save: to save the plot
:param show: to show the plot
"""
with open(data_json, 'r') as f:
data = json.load(f)
burst_dict = data['bursts']
all_bursts = []
for k in burst_dict.keys():
all_bursts += burst_dict[k]
if not ref_mjd:
ref_mjd = np.min(all_bursts)
l = []
for k in burst_dict:
l.append(get_phase(np.array(burst_dict[k]), period, ref_mjd=ref_mjd))
refphases = np.linspace(0,1,1000)
_, bin_edges = np.histogram(refphases, bins=nbins)
names = burst_dict.keys()
num_colors = len(names)
plt.figure(figsize=(10,8))
if not cmap:
if num_colors < 20:
cmap = 'tab20'
colors = plt.get_cmap(cmap).colors[:num_colors]
else:
cmap = 'jet'
cm = plt.get_cmap(cmap)
colors = [cm(1.*i/num_colors) for i in range(num_colors)]
params = get_params()
plt.rcParams.update(params)
_ = plt.hist(l, bins=bin_edges, stacked=True, density=False, label=names, edgecolor='black',
linewidth=0.5, color=colors)
plt.xlabel('Phase')
plt.ylabel('No. of Bursts')
if not title:
title = f'Burst phases of {len(all_bursts)} bursts at a period of {period} days'
plt.title(title)
plt.legend()
if save:
plt.savefig('burst_phase_histogram.png', bbox_inches='tight')
if show:
plt.show()
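# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module).
# It writes a tiny in-memory burst dictionary to a temporary JSON file and
# plots its phase histogram; the MJDs and the 16.35-day period are arbitrary
# placeholder values chosen only for the demonstration.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import tempfile
    demo_data = {'bursts': {'CHIME': [58365.2, 58381.6, 58398.1],
                            'ARTS': [58370.4, 58386.9]}}
    with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as tmp:
        json.dump(demo_data, tmp)
    make_phase_plot(tmp.name, period=16.35, nbins=8, show=True)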
| python |
import FWCore.ParameterSet.Config as cms
from DQMServices.Core.DQMEDAnalyzer import DQMEDAnalyzer
from RecoMuon.TrackingTools.MuonServiceProxy_cff import MuonServiceProxy
from DQMOffline.Muon.gemEfficiencyAnalyzerCosmicsDefault_cfi import gemEfficiencyAnalyzerCosmicsDefault as _gemEfficiencyAnalyzerCosmicsDefault
gemEfficiencyAnalyzerCosmics = _gemEfficiencyAnalyzerCosmicsDefault.clone(
ServiceParameters = MuonServiceProxy.ServiceParameters.clone(),
muonTag = cms.InputTag('muons'),
name = cms.untracked.string('Cosmic 2-Leg STA Muon'),
folder = cms.untracked.string('GEM/Efficiency/type1'),
)
gemEfficiencyAnalyzerCosmicsOneLeg = _gemEfficiencyAnalyzerCosmicsDefault.clone(
ServiceParameters = MuonServiceProxy.ServiceParameters.clone(),
muonTag = cms.InputTag('muons1Leg'),
name = cms.untracked.string('Cosmic 1-Leg STA Muon'),
folder = cms.untracked.string('GEM/Efficiency/type2'),
)
from Configuration.Eras.Modifier_phase2_GEM_cff import phase2_GEM
phase2_GEM.toModify(gemEfficiencyAnalyzerCosmics,
etaNbins=cms.untracked.int32(15),
etaUp=cms.untracked.double(3.0))
phase2_GEM.toModify(gemEfficiencyAnalyzerCosmicsOneLeg,
etaNbins=cms.untracked.int32(15),
etaUp=cms.untracked.double(3.0))
| python |
# -*- coding: utf-8 -*-
"""
This module implements various utilities for WSGI applications. Most of
them are used by the request and response wrappers but especially for
middleware development it makes sense to use them without the wrappers.
"""
import re
import os
import sys
import pkgutil
from ._compat import unichr, text_type, string_types, reraise, PY2, to_unicode, to_native, BytesIO
try:
    import simplejson as json
except ImportError:
    import json
import functools
if PY2:
from urlparse import urljoin, SplitResult as UrlSplitResult
from urllib import urlencode, quote as urlquote, unquote as urlunquote
else:
from urllib.parse import urljoin, SplitResult as UrlSplitResult
from urllib.parse import urlencode, quote as urlquote, unquote as urlunquote
urlunquote = functools.partial(urlunquote, encoding='latin1')
def urldecode(qs):
r = []
for pair in qs.replace(';', '&').split('&'):
if not pair: continue
nv = pair.split('=', 1)
if len(nv) != 2: nv.append('')
key = urlunquote(nv[0].replace('+', ' '))
value = urlunquote(nv[1].replace('+', ' '))
r.append((key, value))
return r
class ConfigDict(dict):
def __contains__(self, k):
try:
return dict.__contains__(self, k) or hasattr(self, k)
except:
return False
# only called if k not found in normal places
def __getattr__(self, k):
try:
# Throws exception if not in prototype chain
return object.__getattribute__(self, k)
except AttributeError:
try:
return self[k]
except KeyError:
raise AttributeError(k)
def __setattr__(self, k, v):
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
self[k] = v
except:
raise AttributeError(k)
else:
object.__setattr__(self, k, v)
def __delattr__(self, k):
try:
# Throws exception if not in prototype chain
object.__getattribute__(self, k)
except AttributeError:
try:
del self[k]
except KeyError:
raise AttributeError(k)
else:
object.__delattr__(self, k)
class cached_property(object):
""" A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property. """
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls=None):
if obj is None: return self
if self.func.__name__ not in obj.__dict__:
obj.__dict__[self.func.__name__] = self.func(obj)
value = obj.__dict__[self.func.__name__]
return value
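# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the original module; the
# relative imports above mean it runs only with the package importable).
# cached_property evaluates the wrapped method once per instance and then
# serves the stored value from the instance __dict__ on later accesses.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _Demo(object):
        def __init__(self):
            self.calls = 0
        @cached_property
        def answer(self):
            self.calls += 1
            return 42
    demo = _Demo()
    assert demo.answer == 42 and demo.answer == 42
    assert demo.calls == 1  # the property body ran only once
    assert urldecode('a=1&b=hello%20world') == [('a', '1'), ('b', 'hello world')]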
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from ... import opcodes as OperandDef
from ..datasource import tensor as astensor
from .core import TensorFFTMixin, validate_fft, TensorHermitianFFT
class TensorIHFFT(TensorHermitianFFT, TensorFFTMixin):
_op_type_ = OperandDef.IHFFT
def __init__(self, n=None, axis=-1, norm=None, **kw):
super().__init__(_n=n, _axis=axis, _norm=norm, **kw)
@classmethod
def _get_shape(cls, op, shape):
new_shape = list(shape)
shape = op.n if op.n is not None else shape[op.axis]
if shape % 2 == 0:
shape = (shape // 2) + 1
else:
shape = (shape + 1) // 2
new_shape[op.axis] = shape
return tuple(new_shape)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal that has Hermitian symmetry.
Parameters
----------
a : array_like
Input tensor.
n : int, optional
Length of the inverse FFT, the number of points along
transformation axis in the input to use. If `n` is smaller than
the length of the input, the input is cropped. If it is larger,
the input is padded with zeros. If `n` is not given, the length of
the input along the axis specified by `axis` is used.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
norm : {None, "ortho"}, optional
Normalization mode (see `numpy.fft`). Default is None.
Returns
-------
out : complex Tensor
The truncated or zero-padded input, transformed along the axis
indicated by `axis`, or the last one if `axis` is not specified.
The length of the transformed axis is ``n//2 + 1``.
See also
--------
hfft, irfft
Notes
-----
`hfft`/`ihfft` are a pair analogous to `rfft`/`irfft`, but for the
opposite case: here the signal has Hermitian symmetry in the time
domain and is real in the frequency domain. So here it's `hfft` for
which you must supply the length of the result if it is to be odd:
    * even: ``ihfft(hfft(a, 2*len(a) - 2)) == a``, within roundoff error,
    * odd: ``ihfft(hfft(a, 2*len(a) - 1)) == a``, within roundoff error.
Examples
--------
>>> import mars.tensor as mt
>>> spectrum = mt.array([ 15, -4, 0, -1, 0, -4])
>>> mt.fft.ifft(spectrum).execute()
array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
>>> mt.fft.ihfft(spectrum).execute()
array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
"""
a = astensor(a)
validate_fft(a, axis=axis, norm=norm)
op = TensorIHFFT(n=n, axis=axis, norm=norm, dtype=np.dtype(np.complex_))
return op(a)
| python |
import pandas as pd
from bokeh.models import ColumnDataSource
from bokeh.models import TableColumn, DataTable
from .base import TableView
from ..models import concurrent_requests
class OverviewTable(TableView):
def __init__(self, df, df_downloads):
super().__init__(df)
self.df_downloads = df_downloads
def data(self):
# requests
total = self.df.uuid.count()
failed = self.df.loc[self.df["status"] == 5].uuid.count()
# requests per day
counts = self.df.groupby(pd.Grouper(key="time_start", freq="1D")).uuid.count()
# duration
duration = self.df["time_end"] - self.df["time_start"]
duration = duration.dt.seconds
duration = duration[lambda x: x > 0]
# concurrency
cdf = concurrent_requests(self.df)
running = cdf.groupby(pd.Grouper(key="time", freq="1D")).running.max()
# downloads
downloads = self.df_downloads.groupby(
pd.Grouper(key="datetime", freq="1D")
).request_type.count()
# data transfer
tdf = self.df_downloads.groupby(pd.Grouper(key="datetime", freq="1D")).sum()
tdf["size"] = tdf["size"].apply(lambda x: x / 1024**3)
transfer = tdf["size"]
data_ = dict(
property=[
"Total Requests",
"Failed Requests",
"Requests per day (min/max/median)",
"Duration (min/max/median)",
"Concurrency per day (min/max/median)",
"Downloads per day (min/max/median)",
"Data transfer per day (min/max/median)",
"Total data transfer",
"Data transfer per request",
],
value=[
total,
failed,
f"{counts.min()} / {counts.max()} / {counts.median()}",
f"{duration.min()} / {duration.max()} / {duration.median()}",
f"{running.min()} / {running.max()} / {running.median()}",
f"{downloads.min()} / {downloads.max()} / {downloads.median()}",
f"{transfer.min():.2f} GB / {transfer.max():.2f} GB / {transfer.median():.2f} GB",
f"{transfer.sum():.2f} GB",
f"{transfer.sum() / (total - failed):.2f} GB",
],
)
return data_
def table(self):
columns = [
TableColumn(field="property", title="Property"),
TableColumn(field="value", title="Value"),
]
table = DataTable(source=ColumnDataSource(self.data()), columns=columns)
return table
| python |
import os
import json
from flask import Flask
from flask_bootstrap import Bootstrap
from oidc_rp.client import Client
client_config = {}
with open('../client.json', 'r') as f:
client_config = json.loads(f.read())
client = Client(client_config)
app = Flask(__name__)
# SECRET_KEY
## Insert your secret key
# To generate a secret key in a python shell:
## >>> import os
## >>> os.urandom(24)
app.secret_key = '\x8c:\x03\xbd\xb6\xa4\r\xa0\xf1+o\x08\xa3OU\x92u\xf4(k\x12\xf9?\xad'
bootstrap = Bootstrap(app)
from oidc_rp import webserver
| python |
#!/usr/bin/python
#-------------------------------------------------------------------------------
#License GPL v3.0
#Author: Alexandre Manhaes Savio <[email protected]>
#Grupo de Inteligencia Computational <www.ehu.es/ccwintco>
#Universidad del Pais Vasco UPV/EHU
#Use this at your own risk!
#2012-07-26
#-------------------------------------------------------------------------------
from IPython.core.debugger import Tracer; debug_here = Tracer()
import os, sys, argparse
import numpy as np
import nibabel as nib
import scipy.io as sio
import aizkolari_utils as au
import aizkolari_export as ae
def set_parser():
parser = argparse.ArgumentParser(description='Saves a file with feature sets extracted from NIFTI files. The format of this file can be selected to be used in different software packages, including Numpy binary format, Weka, Octave/Matlab and SVMPerf.')
parser.add_argument('-s', '--subjsf', dest='subjs', required=True, help='list file with the subjects for the analysis. Each line: <class_label>,<subject_file>')
parser.add_argument('-o', '--outdir', dest='outdir', required=True,
help='''name of the output directory where the results will be saved. \n
In this directory the following files will be created:
        - included_subjects: list of full paths to the subjects included in the feature set.
        - excluded_subjects: list of full paths to the subjects excluded from the feature set, if any.
        - included_subjlabels: list of class labels of each subject in included_subjects.
        - excluded_subjlabels: list of class labels of each subject in excluded_subjects, if any.
        - features.*: file containing an NxM matrix with the features extracted from subjects (N: subj number, M: feat number).
''')
parser.add_argument('-m', '--mask', dest='mask', required=True,
help='Mask file to extract feature voxels, any voxel with values > 0 will be included in the extraction.')
parser.add_argument('-d', '--datadir', dest='datadir', required=False,
help='folder path where the subjects are, if the absolute path is not included in the subjects list file.', default='')
parser.add_argument('-p', '--prefix', dest='prefix', default='', required=False,
help='Prefix for the output filenames.')
parser.add_argument('-e', '--exclude', dest='exclude', default='', required=False,
help='subject list mask, i.e., text file where each line has 0 or 1 indicating with 1 which subject should be excluded in the measure. To help calculating measures for cross-validation folds.')
parser.add_argument('-t', '--type', dest='type', default='numpybin', choices=['numpybin','octave','arff', 'svmperf'], required=False,
                        help='type of the output file. Allowed: numpybin (Numpy binary file), octave (Octave/Matlab binary file using Scipy.io.savemat), arff (Weka text file), svmperf (.dat file for SVMPerf).')
parser.add_argument('-n', '--name', dest='dataname', default='aizkolari_extracted', required=False,
help='Name of the dataset. It is used for internal usage in SVMPerf and Weka.')
parser.add_argument('-k', '--scale', dest='scale', default=False, action='store_true', required=False,
help='This option will enable Range scaling of the non-excluded data and save a .range file with the max and min of the scaled dataset to scale other dataset with the same transformation.')
parser.add_argument('--scale_min', dest='scale_min', default=-1, type=int, required=False, help='Minimum value for the new scale range.')
parser.add_argument('--scale_max', dest='scale_max', default= 1, type=int, required=False, help='Maximum value for the new scale range.')
parser.add_argument('-r', '--thrP', dest='thresholdP', default='', required=False,
help='use following percentage (0-100) of ROBUST RANGE to threshold mask image (zero anything below the number). One or quoted list of floats separated by blank space.')
parser.add_argument('-b', '--thr', dest='lthreshold', default='', required=False,
help='use following number to threshold mask image (zero anything below the number).')
parser.add_argument('-u', '--uthr', dest='uthreshold', default='', required=False,
help='use following number to upper-threshold mask image (zero anything above the number).')
parser.add_argument('-a', '--abs', dest='absolute', action='store_true', required=False,
help='use absolute value of mask before thresholding.')
parser.add_argument('-l', '--leave', dest='leave', default=-1, required=False, type=int, help='index from subject list (counting from 0) indicating one subject to be left out of the training set. For leave-one-out measures.')
parser.add_argument('-v', '--verbosity', dest='verbosity', required=False, type=int, default=2, help='Verbosity level: Integer where 0 for Errors, 1 for Progression reports, 2 for Debug reports')
return parser
#-------------------------------------------------------------------------------
def get_out_extension (otype):
if otype == 'numpybin':
ext = au.numpyio_ext()
elif otype == 'octave':
ext = au.octaveio_ext()
elif otype == 'svmperf':
ext = au.svmperfio_ext()
elif otype == 'arff':
ext = au.wekaio_ext()
else:
err = 'get_out_extension: Extension type not supported: ' + otype
raise Exception(err)
return ext
#-------------------------------------------------------------------------------
def get_filepath (outdir, filename, otype):
filename = outdir + os.path.sep + filename
try:
filename += get_out_extension(otype)
except Exception, err:
au.log.error (str(err))
sys.exit(-1)
return filename
#-------------------------------------------------------------------------------
def rescale (data, range_min, range_max, data_min=np.NaN, data_max=np.NaN):
if np.isnan(data_min):
dmin = float(data.min())
else:
dmin = float(data_min)
if np.isnan(data_max):
dmax = float(data.max())
else:
dmax = float(data_max)
try:
        slope = float(range_max - range_min) / (dmax - dmin)
        offset = float(range_min*dmax - range_max*dmin) / (dmax - dmin)
        d = data*slope + offset
except Exception, err:
au.log.error (str(err))
sys.exit(-1)
return d, dmin, dmax
#-------------------------------------------------------------------------------
def write_scalingrange_file (fname, dmin, dmax, scale_min, scale_max):
f = open (fname, 'w')
f.write('#data_min, data_max, range_min, range_max')
f.write('\n')
f.write(str(dmin) + ',' + str(dmax) + ',' + str(scale_min) + ',' + str(scale_max))
f.close()
#-------------------------------------------------------------------------------
def save_data (outdir, prefix, dataname, otype, excluding, leave, feats, labels, exclfeats, exclulabels, dmin, dmax, scale, scale_min, scale_max, lthr, uthr, thrp, absolute):
#setting output file name
ofname = au.feats_str()
if leave > -1:
ofname += '.' + au.excluded_str() + str(leave)
if absolute: ofname += '.' + au.abs_str()
if lthr: ofname += '.lthr_' + str(lthr)
if uthr: ofname += '.uthr_' + str(uthr)
if thrp: ofname += '.thrP_' + str(thrp)
if scale: ofname += '.' + au.scaled_str()
    exclfilename = ''
    if excluding:
        excl_ofname = au.excluded_str() + '_' + ofname
    if prefix:
        ofname = prefix + '_' + ofname
        if excluding:
            excl_ofname = prefix + '_' + excl_ofname
    if excluding:
        exclfilename = get_filepath (outdir, excl_ofname, otype)
    filename = get_filepath (outdir, ofname, otype)
#writing in a text file the scaling values of this training set
if scale:
write_scalingrange_file (outdir + os.path.sep + ofname + '.scaling_range', dmin, dmax, scale_min, scale_max)
#saving binary file depending on output type
if otype == 'numpybin':
np.save (filename, feats)
if excluding:
np.save (exclfilename, exclfeats)
elif otype == 'octave':
sio.savemat (filename, {au.feats_str(): feats, au.labels_str(): labels})
if excluding:
exclulabels[exclulabels == 0] = -1
sio.savemat (exclfilename, {au.feats_str(): exclfeats, au.labels_str(): exclulabels})
elif otype == 'svmperf':
labels[labels == 0] = -1
ae.write_svmperf_dat(filename, dataname, feats, labels)
if excluding:
exclulabels[exclulabels == 0] = -1
ae.write_svmperf_dat(exclfilename, dataname, exclfeats, exclulabels)
elif otype == 'arff':
        featnames = np.arange(feats.shape[1]) + 1
ae.write_arff (filename, dataname, featnames, feats, labels)
if excluding:
ae.write_arff (exclfilename, dataname, featnames, exclfeats, exclulabels)
else:
err = 'Output method not recognised!'
au.log.error(err)
sys.exit(-1)
return [filename, exclfilename]
#-------------------------------------------------------------------------------
def extract_features (subjs, exclusubjs, mask, maskf, scale, scale_min, scale_max):
#population features
nsubjs = len(subjs)
s = nib.load(subjs[0])
subjsiz = np.prod (s.shape)
stype = s.get_data_dtype()
#loading subject data
data = np.empty([nsubjs, subjsiz], dtype=stype)
#number of voxels > 0 in mask
mask = mask.flatten()
nfeats = np.sum(mask > 0)
#reading each subject and saving the features in a vector
feats = np.empty([nsubjs, nfeats], dtype=stype)
#extracting features from non-excluded subjects
c = 0
for s in subjs:
au.log.debug("Reading " + s)
#check geometries
au.check_has_same_geometry (s, maskf)
#load subject
subj = nib.load(s).get_data().flatten()
#mask data and save it
feats[c,:] = subj[mask > 0]
c += 1
#scaling if asked
dmin = scale_min
dmax = scale_max
if scale:
au.log.info("Scaling data.")
[feats, dmin, dmax] = rescale(feats, scale_min, scale_max)
#extracting features from excluded subjects
exclfeats = []
if exclusubjs:
au.log.info("Processing excluded subjects.")
nexcl = len(exclusubjs)
exclfeats = np.empty([nexcl, nfeats], dtype=stype)
c = 0
for s in exclusubjs:
au.log.debug("Reading " + s)
#check geometries
au.check_has_same_geometry (s, maskf)
#load subject
subj = nib.load(s).get_data().flatten()
#mask data and save it
exclfeats[c,:] = subj[mask > 0]
c += 1
if scale:
[exclfeats, emin, emax] = rescale(exclfeats, scale_min, scale_max, dmin, dmax)
return [feats, exclfeats, dmin, dmax]
#-------------------------------------------------------------------------------
## START EXTRACT FEATSET
#-------------------------------------------------------------------------------
def main():
#parsing arguments
parser = set_parser()
try:
args = parser.parse_args ()
except argparse.ArgumentError, exc:
au.log.error (exc.message + '\n' + exc.argument)
        parser.error(str(exc))
return -1
subjsf = args.subjs.strip ()
outdir = args.outdir.strip ()
datadir = args.datadir.strip ()
excluf = args.exclude.strip ()
otype = args.type.strip ()
dataname = args.dataname.strip()
maskf = args.mask.strip()
prefix = args.prefix.strip()
leave = args.leave
scale = args.scale
scale_min = args.scale_min
scale_max = args.scale_max
thrps = args.thresholdP.strip().split()
lthr = args.lthreshold.strip()
uthr = args.uthreshold.strip()
absolute = args.absolute
au.setup_logger(args.verbosity)
#checking number of files processed
if not os.path.exists(maskf):
err = 'Mask file not found: ' + maskf
au.log.error(err)
sys.exit(-1)
#number of subjects
subjsnum = au.file_len(subjsf)
#reading subjects list
subjlabels = np.zeros(subjsnum, dtype=int)
subjslist = {}
subjfile = open(subjsf, 'r')
c = 0
for s in subjfile:
line = s.strip().split(',')
subjlabels[c] = int(line[0])
subjfname = line[1].strip()
if not os.path.isabs(subjfname) and datadir:
subjslist[c] = datadir + os.path.sep + subjfname
else:
subjslist[c] = subjfname
c += 1
subjfile.close()
#excluding if excluf or leave > -1
subjmask = []
excluding = False
if excluf:
excluding = True
subjmask = np.loadtxt(excluf, dtype=int)
else:
subjmask = np.zeros(subjsnum, dtype=int)
if leave > -1:
excluding = True
subjmask[leave] = 1
subjs = [ subjslist[elem] for elem in subjslist if subjmask[elem] == 0]
labels = subjlabels[subjmask == 0]
exclusubjs = [ subjslist[elem] for elem in subjslist if subjmask[elem] == 1]
exclulabels = subjlabels[subjmask == 1]
if not excluding:
exclusubjs = []
#mask process
au.log.info('Processing ' + maskf)
#loading mask and masking it with globalmask
mask = nib.load(maskf).get_data()
#thresholding
if absolute: mask = np.abs(mask)
if lthr: mask[mask < lthr] = 0
if uthr: mask[mask > uthr] = 0
if thrps:
for t in thrps:
au.log.info ("Thresholding " + maskf + " with robust range below " + str(t) + " percent.")
thrm = au.threshold_robust_range (mask, t)
au.log.info ("Extracting features.")
[feats, exclfeats, dmin, dmax] = extract_features (subjs, exclusubjs, thrm, maskf, scale, scale_min, scale_max)
au.log.info ("Saving data files.")
[filename, exclfilename] = save_data (outdir, prefix, dataname, otype, excluding, leave, feats, labels, exclfeats, exclulabels, dmin, dmax, scale, scale_min, scale_max, lthr, uthr, t, absolute)
else:
au.log.info ("Extracting features.")
[feats, exclfeats, dmin, dmax] = extract_features (subjs, exclusubjs, mask, maskf, scale, scale_min, scale_max)
au.log.info ("Saving data files.")
[filename, exclfilename] = save_data (outdir, prefix, dataname, otype, excluding, leave, feats, labels, exclfeats, exclulabels, dmin, dmax, scale, scale_min, scale_max, lthr, uthr, thrps, absolute)
au.log.info ("Saved " + filename)
if excluding:
au.log.info ("Saved " + exclfilename)
#saving description files
np.savetxt(filename + '.' + au.subjectfiles_str(), subjs, fmt='%s')
np.savetxt(filename + '.' + au.labels_str(), labels, fmt='%i')
if excluding:
np.savetxt(exclfilename + '.' + au.subjectfiles_str(), exclusubjs, fmt='%s')
np.savetxt(exclfilename + '.' + au.labels_str(), exclulabels, fmt='%i')
return 1
#-------------------------------------------------------------------------------
## END EXTRACT FEATSET
#-------------------------------------------------------------------------------
if __name__ == "__main__":
sys.exit(main())
| python |
import unittest
from mock import Mock, patch
import repstruct.dataset as dataset
import repstruct.configuration as configuration
class TestDataSet(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def __assertProperties(self, instance):
t = type(instance)
property_names = [item for item in dir(t) if
isinstance(getattr(t, item), property)]
new_value = 'new_value'
for property_name in property_names:
setattr(instance, property_name, new_value)
self.assertEqual(new_value, getattr(instance, property_name))
def testDataSet(self):
tag = 'test_tag'
data = dataset.DataSet(tag)
self.assertEqual(tag, data.tag)
self.assertTrue(type(data.collection) is dataset.CollectionDataSet)
self.assertTrue(type(data.feature) is dataset.FeatureDataSet)
self.assertTrue(type(data.descriptor) is dataset.DescriptorDataSet)
self.assertTrue(type(data.pca) is dataset.PcaDataSet)
self.assertTrue(type(data.analysis) is dataset.AnalysisDataSet)
self.assertTrue(type(data.plot) is dataset.PlotDataSet)
self.__assertProperties(data)
@patch('os.makedirs')
@patch('os.path.exists')
def testDataSetBase(self, exists_mock, makedirs_mock):
path = 'path'
folder = 'folder'
config = 'config'
folder_path = path + '/' + folder
exists_mock.return_value = False
data = dataset.DataSetBase(path, folder, config)
self.assertEqual(folder_path, data.path)
self.assertEqual(config, data.config)
self.__assertProperties(data)
exists_mock.assert_called_with(folder_path)
makedirs_mock.assert_called_with(folder_path)
@patch('os.makedirs')
@patch('os.path.exists')
def testDataSetBaseNoPath(self, exists_mock, makedirs_mock):
path = None
folder = None
config = None
exists_mock.return_value = False
data = dataset.DataSetBase(path, folder, config)
self.assertEqual(None, data.path)
self.assertEqual(None, data.config)
self.assertEqual(0, exists_mock.call_count)
self.assertEqual(0, makedirs_mock.call_count)
@patch('numpy.savez')
@patch('os.path.join')
def testDataSetBaseSave(self, join_mock, save_mock):
file_name = 'fn'
argument = 'arg'
join_mock.return_value = file_name + '.npz'
data = dataset.DataSetBase(None, None, None)
data._save(file_name, arg=argument)
save_mock.assert_called_with(join_mock.return_value, arg=argument)
@patch('numpy.load')
@patch('os.path.join')
def testDataSetBaseLoad(self, join_mock, load_mock):
file_name = 'fn'
join_mock.return_value = file_name + '.npz'
data = dataset.DataSetBase(None, None, None)
data._load(file_name)
load_mock.assert_called_with(join_mock.return_value)
@patch('os.path.isfile')
@patch('os.listdir')
@patch('os.path.join')
def testCollectionDataSetImages(self, join_mock, listdir_mock, isfile_mock):
data = dataset.CollectionDataSet(None, None)
        ims = ['im1.jpg', 'im2.jpg', 'no_im.txt']
listdir_mock.return_value = ims
join_mock.return_value = ''
isfile_mock.return_value = True
result = data.images()
        self.assertSequenceEqual(ims[:2], list(result))
def testFeatureDataSetSave(self):
data = dataset.FeatureDataSet(None, None)
data._save = Mock()
im = 'im'
locations = 'locations'
descriptors = 'descriptors'
data.save(im, locations, descriptors)
data._save.assert_called_with(im + '.sift', locations=locations, descriptors=descriptors)
def testFeatureDataSetLoad(self):
data = dataset.FeatureDataSet(None, None)
result = {'locations': 1, 'descriptors': 0}
data._load = Mock(return_value=result)
im = 'im'
data.load(im)
data._load.assert_called_with(im + '.sift')
def testDescriptorDataSetSave(self):
data = dataset.DescriptorDataSet(None)
data._save = Mock()
im = 'im'
descriptors = 'descriptors'
desc_colors = 'desc_colors'
rand_colors = 'rand_colors'
data.save(im, descriptors, desc_colors, rand_colors)
data._save.assert_called_with(im + '.descriptors',
descriptors=descriptors,
descriptor_colors=desc_colors,
random_colors=rand_colors)
def testDescriptorDataSetLoad(self):
data = dataset.DescriptorDataSet(None)
im = 'im'
result = {'descriptors': 0, 'descriptor_colors': 1, 'random_colors': 2}
data._load = Mock(return_value=result)
data.load(im)
data._load.assert_called_with(im + '.descriptors')
def testPcaDataSetSave(self):
data = dataset.PcaDataSet(None, None)
data._save = Mock()
images = 'images'
pc_projections = 'pc_projections'
principal_components = 'principal_components'
data.save(images, pc_projections, principal_components)
data._save.assert_called_with('principal_components',
images=images,
pc_projections=pc_projections,
principal_components=principal_components)
def testPcaDataSetLoad(self):
data = dataset.PcaDataSet(None, None)
result = {'images': 0, 'pc_projections': 0, 'principal_components': 0}
data._load = Mock(return_value=result)
data.load()
data._load.assert_called_with('principal_components')
def testAnalysisDataSetSaveClosest(self):
data = dataset.AnalysisDataSet(None, None)
data._save = Mock()
closest_group = 'closest_group'
representative = 'representative'
data.save_closest(closest_group, representative)
data._save.assert_called_with('closest',
closest_group=closest_group,
representative=representative)
def testAnalysisDataSetLoadClosest(self):
data = dataset.AnalysisDataSet(None, None)
result = {'closest_group': 0, 'representative': 1}
data._load = Mock(return_value=result)
data.load_closest()
data._load.assert_called_with('closest')
def testAnalysisDataSetSaveStructures(self):
data = dataset.AnalysisDataSet(None, None)
data._save = Mock()
centroids = 'centroids'
structures = 'structures'
data.save_structures(centroids, structures)
data._save.assert_called_with('structures',
centroids=centroids,
structures=structures)
    def testAnalysisDataSetLoadStructures(self):
data = dataset.AnalysisDataSet(None, None)
result = {'centroids': 0, 'structures': 1}
data._load = Mock(return_value=result)
data.load_structures()
data._load.assert_called_with('structures')
def testAnalysisDataSetSaveScoredStructures(self):
data = dataset.AnalysisDataSet(None, None)
data._save = Mock()
scored_structures = 'scored_structures'
data.save_scored_structures(scored_structures)
data._save.assert_called_with('scored_structures',
scored_structures=scored_structures)
    def testAnalysisDataSetLoadScoredStructures(self):
data = dataset.AnalysisDataSet(None, None)
result = {'scored_structures': 0}
data._load = Mock(return_value=result)
data.load_scored_structures()
data._load.assert_called_with('scored_structures')
def testCollectionDataSetConfigType(self):
data = dataset.CollectionDataSet(None, None)
self.assertTrue(type(data.config) is configuration.CollectionConfiguration)
def testFeatureDataSetConfigType(self):
data = dataset.FeatureDataSet(None, None)
self.assertTrue(type(data.config) is configuration.FeatureConfiguration)
def testDescriptorDataSetConfigType(self):
data = dataset.DescriptorDataSet(None)
self.assertTrue(type(data.config) is dict)
def testPcaDataSetConfigType(self):
data = dataset.PcaDataSet(None, None)
self.assertTrue(type(data.config) is configuration.PcaConfiguration)
def testAnalysisDataSetConfigType(self):
data = dataset.AnalysisDataSet(None, None)
self.assertTrue(type(data.config) is configuration.AnalysisConfiguration)
def testPlotDataSetConfigType(self):
data = dataset.PlotDataSet(None, None)
self.assertTrue(type(data.config) is configuration.PlotConfiguration)
if __name__ == '__main__':
unittest.main()
| python |
# -*- coding: utf-8 -*-
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('heroes', '0001_initial'),
('actions', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='metaactionmember',
name='hero',
field=models.ForeignKey(related_name='+', on_delete=django.db.models.deletion.PROTECT, to='heroes.Hero'),
preserve_default=True,
),
]
| python |
"""
*Demon*
The managers of the brain with a policy and foresight.
They have the power to interpret fractums.
"""
from fractum import Fractum
from abc import ABCMeta
from dataclasses import dataclass
@dataclass
class Demon(
Fractum,
metaclass=ABCMeta,
):
depth: int
age: float
gamma: float
shall_amplify: bool
# abstract type AbstractPlayer end
######
###### AlphaZero player
######
# function guess_mcts_arena_params(env::Env)
# p = env.params
# return isnothing(p.arena) ? p.self_play.mcts : p.arena.mcts
# end
# function guess_use_gpu(env::Env)
# p = env.params
# return isnothing(p.arena) ? p.self_play.sim.use_gpu : p.arena.sim.use_gpu
# end
| python |
# -*- coding: utf-8; -*-
#
# Copyright (c) 2014 Georgi Valkov. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. Neither the name of author nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL GEORGI
# VALKOV BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
__author__ = 'Georgi Valkov'
__version__ = '0.1.1'
__license__ = 'Revised BSD License'
"""
HelpFormatter classes for optparse:
CompactHelpFormatter:
A less verbose and neater looking optparse help formatter.
CompactColorHelpFormatter:
A less verbose and neater looking optparse help formatter that can
colorize options and headings.
Usage:
from optparse import OptionParser
from optparse_mooi import *
fmt = CompactHelpFormatter(
metavar_format=' <{}>',
metavar_column=None,
option_separator=', ',
align_long_opts=False,
help_string_formatter=None,
preformatted_description=True,
preformatted_epilog=True
)
fmt = CompactColorHelpFormatter(
heading_color='white-bold',
usage_color='white-bold',
shopt_color=None,
lopt_color=None,
description_color=None,
epilog_color=None,
metavar_color=None,
help_color=None,
option_colormap=None
)
parser = OptionParser(formatter=fmt)
"""
import os, re
from optparse import IndentedHelpFormatter
from functools import partial
class CompactHelpFormatter(IndentedHelpFormatter):
"""A less verbose and neater looking optparse help formatter."""
def __init__(self,
metavar_format=' <{}>',
metavar_column=None,
option_separator=', ',
align_long_opts=False,
help_string_formatter=None,
preformatted_description=True,
preformatted_epilog=True,
*args, **kw):
"""
:arg metavar_format:
Evaluated as `metavar_format.format(metavar)` if string.
If callable, evaluated as `metavar_format(metavar)`.
:arg metavar_column:
Column to which all metavars should be aligned.
:arg option_separator:
String between short and long option. E.g: ', ' -> '-f, --format'.
:arg align_long_opts:
Align all long options on the current indent level to the same
column. For example:
align_long_opts=False align_long_opts=True
-------------------------- --------------------------
-h, --help show this ... -h, --help show this ...
--fast avoid slow ... --fast avoid slow ...
        :arg help_string_formatter:
           Function to call on the help string after expansion. Called
           as `help_string_formatter(help, option)`.
:arg preformatted_description:
If True, description will be displayed as-is, instead of
text-wrapping it first.
        :arg preformatted_epilog:
If True, epilog will be displayed as-is, instead of
text-wrapping it first.
:arg width:
Maximum help message width. Defaults to 78 unless $COLUMNS is set.
"""
if not callable(metavar_format):
func = partial(format_option_metavar, fmt=metavar_format)
else:
func = metavar_format
self.metavar_format = func
self.metavar_column = metavar_column
self.align_long_opts = align_long_opts
self.option_separator = option_separator
self.help_string_formatter = help_string_formatter
if 'width' not in kw:
try:
kw['width'] = int(os.environ['COLUMNS']) - 2
except (KeyError, ValueError):
kw['width'] = 78
kw['max_help_position'] = kw.get('max_help_position', kw['width'])
kw['indent_increment'] = kw.get('indent_increment', 1)
kw['short_first'] = kw.get('short_first', 1)
# leave full control of description and epilog to us
self.preformatted_description = preformatted_description
self.preformatted_epilog = preformatted_epilog
IndentedHelpFormatter.__init__(self, *args, **kw)
def format_option_strings(self, option):
opts = format_option_strings(
option,
self.metavar_format,
self.option_separator,
self.align_long_opts,
)
if not option.takes_value():
return ''.join(opts)
if not self.metavar_column:
return ''.join(opts)
# align metavar to self.metavar_column
lpre = sum(len(i) for i in opts[:-1])
lpre += self.current_indent * self.indent_increment
opts.insert(-1, ' '*(self.metavar_column - lpre))
return ''.join(opts)
def expand_default(self, option):
help = IndentedHelpFormatter.expand_default(self, option)
if callable(self.help_string_formatter):
return self.help_string_formatter(help, option)
return help
def format_usage(self, usage, raw=False):
# If there is no description, ensure that there is only one
# newline between the usage string and the first heading.
msg = usage if raw else 'Usage: %s' % usage
if self.parser.description:
msg += '\n'
return msg
def format_heading(self, heading):
if heading == 'Options':
return '\n'
return heading + ':\n'
def format_description(self, description):
if self.preformatted_description:
return description if description else ''
else:
return IndentedHelpFormatter.format_description(self, description)
def format_epilog(self, epilog):
if self.preformatted_epilog:
return epilog if epilog else ''
else:
return IndentedHelpFormatter.format_epilog(self, epilog)
def format_option_strings(option, metavar_format, separator, align_long_opts=False):
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, separator)
if not option._short_opts and align_long_opts:
opts.insert(0, ' %*s' % (len(separator), ''))
if option.takes_value():
opts.append(metavar_format(option))
return opts
def format_option_metavar(option, fmt):
metavar = option.metavar or option.dest.lower()
return fmt.format(metavar)
def get_optimal_max_help_position(formatter, parser):
from itertools import chain
max_width = 0
options = [parser.option_list]
if hasattr(parser, 'option_groups'):
options.extend(i.option_list for i in parser.option_groups)
for option in chain(*options):
formatted = len(formatter.format_option_strings(option))
max_width = formatted if formatted > max_width else max_width
return max_width
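def _example_metavar_alignment():  # pragma: no cover
    """
    A hedged usage sketch, not part of the original module; the option names
    below are illustrative assumptions. It shows one way the helper above can
    be used to derive a ``metavar_column`` from the widest formatted option
    string so that all metavars line up.
    """
    from optparse import OptionParser
    fmt = CompactHelpFormatter()
    parser = OptionParser(formatter=fmt)
    parser.add_option('-f', '--format', metavar='FMT', help='output format')
    parser.add_option('-o', '--output', metavar='FILE', help='output file')
    # Align every metavar just past the widest '-x, --xxx' prefix.
    fmt.metavar_column = get_optimal_max_help_position(fmt, parser)
    parser.print_help()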
class CompactColorHelpFormatter(CompactHelpFormatter):
"""
A less verbose and neater looking optparse help formatter that can
colorize options and headings. Works only on ANSI capable terminals.
"""
def __init__(self,
heading_color='white-bold',
usage_color='white-bold',
shopt_color=None,
lopt_color=None,
description_color=None,
epilog_color=None,
metavar_color=None,
help_color=None,
option_colormap=None,
*args, **kw):
"""
Accepts all arguments that `CompactHelpFormatter` accepts in
addition to:
:arg heading_color:
Color to use for headings (such as group names).
:arg usage_color:
Color to use for the usage line.
:arg shopt_color:
            Color to use for all short options.
:arg lopt_color:
Color to use for all long options.
:arg epilog_color:
Color to use for the epilog section.
:arg description_color:
            Color to use for the description section.
:arg metavar_color:
Color to use for all metavars.
:arg help_color:
Color to use for all help messages.
:arg option_colormap:
A mapping of option flags to colors. For example:
option_colormap = {
                # -h, -v, -j in white, their long opts in green,
# metavar in red and help message in bold red.
('-h', '-v', '-j'): ('white', 'green', 'red', 'red-bold'),
# --quiet's help message in blue
'--quiet': (None, None, None, 'blue'),
}
Keys are short or long opts, or a list of short or long
opts. Values specify the color to be applied to the short
opt, long opt, metavar and help message (in that order).
Available colors:
black, red, green, yellow, blue, purple, cyan, white
Available modifiers:
bold, underline
Example color specifiers:
red-bold, red-bold-underline, red-underline
"""
# regex for stripping ansi escape codes from strings
        self.re_ansi = re.compile(r'\033\[([14];)*\d\d?m')
colors = {
'black': '30', 'red': '31',
'green': '32', 'yellow': '33',
'blue': '34', 'purple': '35',
'cyan': '36', 'white': '37',
}
# color spec to partial(ansiwrap, ...)
# 'white-bold' -> #(ansiwrap(37, %, True))
# 'red' -> #(ansiwrap(31, %))
# None -> #(str(%))
# 'red-bold-underline' -> #(ansiwrap(31, %, True, True))
def _ansiwrap(color):
if not color: return str
spec = color.split('-')
color = colors[spec[0]]
bold, uline = 'bold' in spec, 'underline' in spec
return partial(ansiwrap, color, bold=bold, underline=uline)
self.heading_color = _ansiwrap(heading_color)
self.shopt_color = _ansiwrap(shopt_color)
self.lopt_color = _ansiwrap(lopt_color)
self.usage_color = _ansiwrap(usage_color)
self.help_color = _ansiwrap(help_color)
self.metavar_color = _ansiwrap(metavar_color)
self.epilog_color = _ansiwrap(epilog_color)
self.description_color = _ansiwrap(description_color)
self.colormap = {}
# flatten all keys and ensure that values is a four element list
option_colormap = option_colormap if option_colormap else {}
for opts, val in option_colormap.items():
f = [_ansiwrap(i) if i else None for i in val]
f = f + [None] * (4 - len(f))
if not isseq(opts):
self.colormap[opts] = f
else:
for opt in opts:
self.colormap[opt] = f
CompactHelpFormatter.__init__(self, *args, **kw)
def format_option(self, option):
result = CompactHelpFormatter.format_option(self, option)
shopt, lopt, meta, help = find_color(option, self.colormap)
if option._short_opts and (shopt or self.shopt_color):
re_short = rx_short(option._short_opts)
shopt = shopt or self.shopt_color
result = re_short.sub(shopt(r'\1'), result, 0)
if option._long_opts and (lopt or self.lopt_color):
re_long = rx_long(option._long_opts)
lopt = lopt or self.lopt_color
result = re_long.sub(lopt(r'\1'), result, 0)
if option.takes_value() and (meta or self.metavar_color):
var = self.metavar_format(option)
meta = meta or self.metavar_color
result = result.replace(var, meta(var), 1)
if option.help and (help or self.help_color):
            l1 = r'( %s.*$)(\s*^.*)*' % re.escape(option.help[:4])
re_help = re.compile(l1, re.MULTILINE)
help = help or self.help_color
            result = re_help.sub(help(r'\g<0>'), result)
return result
def format_heading(self, heading):
if heading == 'Options':
return '\n'
heading = self.heading_color(heading)
heading = CompactHelpFormatter.format_heading(self, heading)
return heading
def format_usage(self, usage):
usage = self.usage_color('Usage: %s' % usage)
usage = CompactHelpFormatter.format_usage(self, usage, True)
return usage
def format_description(self, description):
description = self.description_color(description if description else '')
return CompactHelpFormatter.format_description(self, description)
def format_epilog(self, epilog):
epilog = self.epilog_color(epilog if epilog else '')
return CompactHelpFormatter.format_epilog(self, epilog)
# --- utility functions ---------------------------------------------
def find_color(option, cmap):
g1 = (i for i in option._short_opts if i in cmap)
g2 = (i for i in option._long_opts if i in cmap)
res = next(g1, None) or next(g2, None)
return cmap.get(res, [None]*4)
def rx_short(it):
rx = ''.join(i[1] for i in it)
if rx:
rx = '( -[%s])' % rx
return re.compile(rx)
def rx_long(it):
rx = '|'.join(i[2:] for i in it)
if rx:
rx = '(--%s)' % rx
return re.compile(rx)
def ansiwrap(code, text, bold=True, underline=False):
code = "4;%s" % code if underline else code
code = "1;%s" % code if bold else code
return "\033[%sm%s\033[0m" % (code, text)
def isseq(it):
return isinstance(it, (list, tuple, set))
__all__ = (
    'CompactHelpFormatter',
    'CompactColorHelpFormatter',
)
| python |
"""
Módulo que contêm as views de usuário
"""
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import PasswordChangeForm
from core.funcoes import arruma_url_page
from core.models import *
from core.forms import *
@login_required(login_url="/admin")
def change_pw_form(request):
""" Pagina para trocar senha do user """
success = request.GET.get('success', False)
if request.method == 'POST':
form_pw = PasswordChangeForm(request.user, request.POST)
if form_pw.is_valid():
user = form_pw.save()
update_session_auth_hash(request, user)
url = str(request.path_info) + str('?success=True')
return HttpResponseRedirect(url)
else:
form_pw = PasswordChangeForm(request.user)
    # Check whether any field has errors, so the template can flag them
form_errors = False
for field in form_pw:
if field.errors:
form_errors = True
context = {
"form_pw": form_pw,
"success": success,
"form_errors": form_errors
}
return render(request, "usuario/forms/AlterarSenha.html", context)
@login_required(login_url="/admin")
def update_user_form(request):
""" Página para atualizar as infos de login do usuário """
success = request.GET.get('success', False)
if request.method == 'POST':
form_user_info = UpdateInfoForm(request.POST, instance=request.user)
if form_user_info.is_valid():
user = form_user_info.save()
update_session_auth_hash(request, user)
url = str(request.path_info) + str('?success=True')
return HttpResponseRedirect(url)
else:
form_user_info = UpdateInfoForm(instance=request.user)
    # Check whether any field has errors, so the template can flag them
form_errors = False
for field in form_user_info:
if field.errors:
form_errors = True
context = {
"form_user_info": form_user_info,
"form_errors": form_errors,
"success": success
}
return render(request, "usuario/forms/AlterarInfo.html", context)
@login_required(login_url="/admin")
def user_main(request):
""" Página do usuário """
usuario = request.user
context = {
"user_usuario": usuario.get_username(),
"user_name": usuario.get_full_name(),
"user_email": usuario.email
}
return render(request, "usuario/user_main.html", context)
| python |
#!/usr/bin/env python
#-*- coding: utf-8 -*-
from terminals import MTerminal
from sys import argv
"""Summary of module 'launcher' here.
This is the entry point of the entire laserjet program.
The Launcher class launches a 'Batch' task or a 'Play' task depending on the
options:
- Batch options (e.g. '-exec', '-sync', '-fetch', '-inspect') accomplish
tasks on every remote node.
- Play options (e.g. '-deploy') conditionally execute actions following
a playbook.
"""
__version__ = "0.1"
__author__ = "yyg"
__all__ = []
# Exceptions
class NoneOptionException(Exception):
"""Exception raised by Launcher._get_option()."""
pass
class WrongOptionException(Exception):
"""Exception raise by Launcher._get_option()"""
pass
class Launcher(object):
def __init__(self):
self.laserjet_terminal = MTerminal()
def _get_option(self):
pass
| python |
# -*- test-case-name: admin.test.test_packaging -*-
# Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Helper utilities for Flocker packaging.
"""
import platform
import sys
import os
from subprocess import check_output, check_call, CalledProcessError, call
from tempfile import mkdtemp
from textwrap import dedent, fill
from eliot import Logger, start_action, to_file
from twisted.python.constants import ValueConstant, Values
from twisted.python.filepath import FilePath
from twisted.python import usage, log
from characteristic import attributes, Attribute
import virtualenv
from flocker.common.version import make_rpm_version
class PackageTypes(Values):
"""
Constants representing supported target packaging formats.
"""
RPM = ValueConstant('rpm')
DEB = ValueConstant('deb')
# Associate package formats with platform operating systems.
PACKAGE_TYPE_MAP = {
PackageTypes.RPM: ('centos',),
PackageTypes.DEB: ('ubuntu',),
}
PACKAGE_NAME_FORMAT = {
PackageTypes.RPM: '{}-{}-{}.{}.rpm',
PackageTypes.DEB: '{}_{}-{}_{}.deb',
}
ARCH = {
'all': {
PackageTypes.RPM: 'noarch',
PackageTypes.DEB: 'all',
},
'native': { # HACK
PackageTypes.RPM: 'x86_64',
PackageTypes.DEB: 'amd64',
},
}
# Path from the root of the source tree to the directory holding possible build
# targets. A build target is a directory containing a Dockerfile.
BUILD_TARGETS_SEGMENTS = [b"admin", b"build_targets"]
PACKAGE_ARCHITECTURE = {
'clusterhq-flocker-cli': 'all',
'clusterhq-flocker-node': 'all',
'clusterhq-flocker-docker-plugin': 'all',
'clusterhq-python-flocker': 'native',
}
def package_filename(package_type, package, architecture, rpm_version):
package_name_format = PACKAGE_NAME_FORMAT[package_type]
return package_name_format.format(
package, rpm_version.version,
rpm_version.release, ARCH[architecture][package_type])
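# Hedged example of the naming scheme above (the version/release values are
# illustrative assumptions, not taken from this source):
#   package_filename(PackageTypes.RPM, 'clusterhq-flocker-cli', 'all', v)
#       -> 'clusterhq-flocker-cli-1.15.0-1.noarch.rpm'
#   package_filename(PackageTypes.DEB, 'clusterhq-flocker-cli', 'all', v)
#       -> 'clusterhq-flocker-cli_1.15.0-1_all.deb'
# where ``v`` has ``version='1.15.0'`` and ``release='1'``.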
@attributes(['name', 'version'])
class Distribution(object):
"""
A linux distribution.
:ivar bytes name: The name of the distribution.
:ivar bytes version: The version of the distribution.
"""
@classmethod
def _get_current_distribution(klass):
"""
:return: A ``Distribution`` representing the current platform.
"""
name, version, _ = (
platform.linux_distribution(full_distribution_name=False))
return klass(name=name.lower(), version=version)
def package_type(self):
distribution_name = self.name.lower()
for package_type, distribution_names in PACKAGE_TYPE_MAP.items():
if distribution_name.lower() in distribution_names:
return package_type
else:
raise ValueError("Unknown distribution.", distribution_name)
def native_package_architecture(self):
"""
:return: The ``bytes`` representing the native package architecture for
this distribution.
"""
return ARCH['native'][self.package_type()]
DISTRIBUTION_NAME_MAP = {
'centos-7': Distribution(name="centos", version="7"),
'ubuntu-14.04': Distribution(name="ubuntu", version="14.04"),
'ubuntu-16.04': Distribution(name="ubuntu", version="16.04"),
}
CURRENT_DISTRIBUTION = Distribution._get_current_distribution()
def _native_package_type():
"""
:return: The ``bytes`` name of the native package format for this platform.
"""
distribution_name = CURRENT_DISTRIBUTION.name.lower()
for package_type, distribution_names in PACKAGE_TYPE_MAP.items():
if distribution_name.lower() in distribution_names:
return package_type
else:
raise ValueError("Unknown distribution.", distribution_name)
@attributes(['steps'])
class BuildSequence(object):
"""
Run the supplied ``steps`` consecutively.
:ivar tuple steps: A sequence of steps.
"""
logger = Logger()
_system = u"packaging:buildsequence:run"
def run(self):
for step in self.steps:
with start_action(self.logger, self._system, step=repr(step)):
step.run()
def run_command(args, added_env=None, cwd=None):
"""
Run a subprocess and return its output. The command line and its
environment are logged for debugging purposes.
    :param dict added_env: Additional environment variables to pass.
:return: The output of the command.
"""
log.msg(
format="Running %(args)r with environment %(env)r "
"and working directory %(cwd)s",
args=args, env=added_env, cwd=cwd)
if added_env:
env = os.environ.copy()
        env.update(added_env)
else:
env = None
try:
return check_output(args=args, env=env, cwd=cwd,)
except CalledProcessError as e:
        print e.output
        raise
@attributes([
Attribute('package'),
Attribute('compare', default_value=None),
Attribute('version', default_value=None)])
class Dependency(object):
"""
A package dependency.
:ivar bytes package: The name of the dependency package.
:ivar bytes compare: The operator to use when comparing required and
available versions of the dependency package.
:ivar bytes version: The version of the dependency package.
"""
def __init__(self):
"""
:raises ValueError: If ``compare`` and ``version`` values are not
compatible.
"""
if (self.compare is None) != (self.version is None):
raise ValueError(
"Must specify both or neither compare and version.")
def format(self, package_type):
"""
:return: A ``bytes`` representation of the desired version comparison
which can be parsed by the package management tools associated with
``package_type``.
:raises: ``ValueError`` if supplied with an unrecognised
``package_type``.
"""
if package_type == PackageTypes.DEB:
if self.version:
return "%s (%s %s)" % (
self.package, self.compare, self.version)
else:
return self.package
elif package_type == PackageTypes.RPM:
if self.version:
return "%s %s %s" % (self.package, self.compare, self.version)
else:
return self.package
else:
raise ValueError("Unknown package type.")
# We generate four packages. ``clusterhq-python-flocker`` contains the
# entire code base. ``clusterhq-flocker-cli``,
# ``clusterhq-flocker-docker-plugin`` and ``clusterhq-flocker-node`` are
# meta packages which symlink only the relevant scripts and load only the
# dependencies required to satisfy those scripts. This map represents the
# dependencies for each of those packages and accounts for differing
# dependency package names and versions on various platforms.
DEPENDENCIES = {
'python': {
'centos': (
Dependency(package='python'),
),
'ubuntu': (
Dependency(package='python2.7'),
),
},
'node': {
'centos': (
Dependency(package='/usr/sbin/iptables'),
Dependency(package='openssh-clients'),
Dependency(package='lshw'),
),
'ubuntu': (
Dependency(package='iptables'),
Dependency(package='openssh-client'),
Dependency(package='lshw'),
),
},
# For now the plan is to tell users to install Docker themselves,
# since packaging is still in flux, with different packages from
# vendor and OS:
'docker-plugin': {
'centos': (),
'ubuntu': (),
},
'cli': {
'centos': (
Dependency(package='openssh-clients'),
),
'ubuntu': (
Dependency(package='openssh-client'),
),
},
}
def make_dependencies(package_name, package_version, distribution):
"""
Add the supplied version of ``python-flocker`` to the base dependency lists
defined in ``DEPENDENCIES``.
:param bytes package_name: The name of the flocker package to generate
dependencies for.
:param bytes package_version: The flocker version.
:param Distribution distribution: The distribution for which to
generate dependencies.
:return: A list of ``Dependency`` instances.
"""
dependencies = DEPENDENCIES[package_name][distribution.name]
if package_name in ('node', 'cli', 'docker-plugin'):
dependencies += (
Dependency(
package='clusterhq-python-flocker',
compare='=',
version=package_version),)
return dependencies
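# Hedged example (the version string is an assumption): for the 'cli' package
# on Ubuntu, make_dependencies('cli', '1.15.0', Distribution(name='ubuntu',
# version='16.04')) yields the base 'openssh-client' dependency plus an
# exact-version pin on 'clusterhq-python-flocker' at 1.15.0.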
def create_virtualenv(root):
"""
Create a virtualenv in ``root``.
:param FilePath root: The directory in which to install a virtualenv.
:returns: A ``VirtualEnv`` instance.
"""
# We call ``virtualenv`` as a subprocess rather than as a library, so that
# we can turn off Python byte code compilation.
run_command(
['virtualenv', '--python=/usr/bin/python2.7', '--quiet', root.path],
added_env=dict(PYTHONDONTWRITEBYTECODE='1')
)
# XXX: Virtualenv doesn't link to pyc files when copying its bootstrap
# modules. See https://github.com/pypa/virtualenv/issues/659
for module_name in virtualenv.REQUIRED_MODULES:
py_base = root.descendant(
['lib', 'python2.7', module_name])
py = py_base.siblingExtension('.py')
if py.exists() and py.islink():
pyc = py_base.siblingExtension('.pyc')
py_target = py.realpath()
pyc_target = FilePath(
py_target.splitext()[0]).siblingExtension('.pyc')
if pyc.exists():
pyc.remove()
if pyc_target.exists():
pyc_target.linkTo(pyc)
return VirtualEnv(root=root)
@attributes(['virtualenv'])
class InstallVirtualEnv(object):
"""
Install a virtualenv in the supplied ``target_path``.
:ivar FilePath target_path: The path to a directory in which to create the
virtualenv.
"""
_create_virtualenv = staticmethod(create_virtualenv)
def run(self):
self._create_virtualenv(root=self.virtualenv.root)
@attributes(['name', 'version'])
class PythonPackage(object):
"""
A model representing a single pip installable Python package.
:ivar bytes name: The name of the package.
:ivar bytes version: The version of the package.
"""
@attributes(['root'])
class VirtualEnv(object):
"""
A model representing a virtualenv directory.
"""
def install(self, package_uri):
"""
Install package and its dependencies into this virtualenv.
"""
# We can't just call pip directly, because in the virtualenvs created
# in tests, the shebang line becomes too long and triggers an
# error. See http://www.in-ulm.de/~mascheck/various/shebang/#errors
python_path = self.root.child('bin').child('python').path
run_command(
[python_path, '-m', 'pip', '--quiet', 'install', package_uri],
)
@attributes(['virtualenv', 'package_uri'])
class InstallApplication(object):
"""
Install the supplied ``package_uri`` using the supplied ``virtualenv``.
:ivar VirtualEnv virtualenv: The virtual environment in which to install
``package``.
:ivar bytes package_uri: A pip compatible URI.
"""
def run(self):
self.virtualenv.install(self.package_uri)
@attributes(['links'])
class CreateLinks(object):
"""
Create symlinks to the files in ``links``.
"""
def run(self):
"""
If link is a directory, the target filename will be used as the link
name within that directory.
"""
for target, link in self.links:
if link.isdir():
name = link.child(target.basename())
else:
name = link
target.linkTo(name)
@attributes(['virtualenv', 'package_name'])
class GetPackageVersion(object):
"""
Record the version of ``package_name`` installed in ``virtualenv_path`` by
examining ``<package_name>.__version__``.
:ivar VirtualEnv virtualenv: The ``virtualenv`` containing the package.
:ivar bytes package_name: The name of the package whose version will be
recorded.
:ivar version: The version string of the supplied package. Default is
        ``None`` until the step has been run.
    :raises: If the supplied ``package_name`` is not installed in the
        virtualenv.
"""
version = None
def run(self):
python_path = self.virtualenv.root.child('bin').child('python').path
output = check_output(
[python_path,
'-c', '; '.join([
'from sys import stdout',
'stdout.write(__import__(%r).__version__)' % self.package_name
])])
self.version = output
@attributes([
'package_type', 'destination_path', 'source_paths', 'name', 'prefix',
'epoch', 'rpm_version', 'license', 'url', 'vendor', 'maintainer',
'architecture', 'description', 'dependencies', 'category',
Attribute('directories', default_factory=list),
Attribute('after_install', default_value=None),
])
class BuildPackage(object):
"""
Use ``fpm`` to build an RPM file from the supplied ``source_path``.
:ivar package_type: A package type constant from ``PackageTypes``.
:ivar FilePath destination_path: The path in which to save the resulting
RPM package file.
:ivar dict source_paths: A dictionary mapping paths in the filesystem to
the path in the package.
:ivar bytes name: The name of the package.
:ivar FilePath prefix: The path beneath which the packaged files will be
installed.
:ivar bytes epoch: An integer string tag used to help RPM determine version
number ordering.
:ivar rpm_version rpm_version: An object representing an RPM style version
containing a release and a version attribute.
:ivar bytes license: The name of the license under which this package is
released.
:ivar bytes url: The URL of the source of this package.
:ivar unicode vendor: The name of the package vendor.
:ivar bytes maintainer: The email address of the package maintainer.
:ivar bytes architecture: The OS architecture for which this package is
targeted. Default ``None`` means architecture independent.
:ivar unicode description: A description of the package.
:ivar unicode category: The category of the package.
:ivar list dependencies: The list of dependencies of the package.
:ivar list directories: List of directories the package should own.
"""
def run(self):
architecture = self.architecture
command = [
'fpm',
'--force',
'-s', 'dir',
'-t', self.package_type.value,
'--package', self.destination_path.path,
'--name', self.name,
'--prefix', self.prefix.path,
'--version', self.rpm_version.version,
'--iteration', self.rpm_version.release,
'--license', self.license,
'--url', self.url,
'--vendor', self.vendor,
'--maintainer', self.maintainer,
'--architecture', architecture,
'--description', self.description,
'--category', self.category,
]
if not (self.package_type is PackageTypes.DEB and self.epoch == '0'):
# Leave epoch unset for deb's with epoch 0
command.extend(['--epoch', self.epoch])
for requirement in self.dependencies:
command.extend(
['--depends', requirement.format(self.package_type)])
for directory in self.directories:
command.extend(
['--directories', directory.path])
if self.after_install is not None:
command.extend(
['--after-install', self.after_install.path])
for source_path, package_path in self.source_paths.items():
# Think of /= as a separate operator. It causes fpm to copy the
            # content of the directory rather than the directory itself.
command.append(
"%s/=%s" % (source_path.path, package_path.path))
run_command(command)
@attributes(['package_version_step'])
class DelayedRpmVersion(object):
"""
Pretend to be an ``rpm_version`` instance providing a ``version`` and
``release`` attribute.
The values of these attributes will be calculated from the Python version
string read from a previous ``GetPackageVersion`` build step.
:ivar GetPackageVersion package_version_step: An instance of
``GetPackageVersion`` whose ``run`` method will have been called and
from which the version string will be read.
"""
_rpm_version = None
@property
def rpm_version(self):
"""
:return: An ``rpm_version`` and cache it.
"""
if self._rpm_version is None:
self._rpm_version = make_rpm_version(
self.package_version_step.version
)
return self._rpm_version
@property
def version(self):
"""
:return: The ``version`` string.
"""
return self.rpm_version.version
@property
def release(self):
"""
:return: The ``release`` string.
"""
return self.rpm_version.release
def __str__(self):
return self.rpm_version.version + '-' + self.rpm_version.release
IGNORED_WARNINGS = {
PackageTypes.RPM: (
# Ignore the summary line rpmlint prints.
# We always check a single package, so we can hardcode the numbers.
'1 packages and 0 specfiles checked;',
        # This isn't a distribution package so we deliberately install in /opt
'dir-or-file-in-opt',
# We don't care enough to fix this
'python-bytecode-inconsistent-mtime',
# /opt/flocker/lib/python2.7/no-global-site-packages.txt will be empty.
'zero-length',
# cli/node packages have symlink to base package
'dangling-symlink',
# Should be fixed
'no-documentation',
'no-manual-page-for-binary',
# changelogs are elsewhere
'no-changelogname-tag',
# virtualenv's interpreter is correct.
'wrong-script-interpreter',
# rpmlint on CentOS 7 doesn't see python in the virtualenv.
'no-binary',
# These are in our dependencies.
'incorrect-fsf-address',
'pem-certificate',
'non-executable-script',
'devel-file-in-non-devel-package',
'unstripped-binary-or-object',
# Firewall and systemd configuration live in /usr/lib
'only-non-binary-in-usr-lib',
# We don't allow configuring ufw firewall applications.
'non-conffile-in-etc /etc/ufw/applications.d/flocker-control',
# Upstart control files are not installed as conffiles.
'non-conffile-in-etc /etc/init/flocker-dataset-agent.conf',
'non-conffile-in-etc /etc/init/flocker-container-agent.conf',
'non-conffile-in-etc /etc/init/flocker-control.conf',
'non-conffile-in-etc /etc/init/flocker-docker-plugin.conf',
# rsyslog files are not installed as conffiles.
'non-conffile-in-etc /etc/rsyslog.d/flocker.conf',
# Cryptography hazmat bindings
'package-installs-python-pycache-dir opt/flocker/lib/python2.7/site-packages/cryptography/hazmat/bindings/__pycache__/', # noqa
# /opt/flocker/lib/python2.7/site-packages/sphinx/locale/.tx
'hidden-file-or-dir',
# /opt/flocker/lib/python2.7/site-packages/pbr/tests/testpackage/doc/source/conf.py
'script-without-shebang',
# E.g.
# /opt/flocker/lib/python2.7/site-packages/sphinx/locale/bn/LC_MESSAGES/sphinx.mo
'file-not-in-%lang',
# Twisted 16.6 includes an executable C source file.
# https://twistedmatrix.com/trac/ticket/8921
'spurious-executable-perm /opt/flocker/lib/python2.7/site-packages/twisted/internet/iocpreactor/iocpsupport/iocpsupport.c', # noqa
),
# See https://www.debian.org/doc/manuals/developers-reference/tools.html#lintian # noqa
PackageTypes.DEB: (
        # This isn't a distribution package so we deliberately install in /opt
'dir-or-file-in-opt',
# This isn't a distribution package, so the precise details of the
# distro portion of the version don't need to be followed.
'debian-revision-not-well-formed',
# virtualenv's interpreter is correct.
'wrong-path-for-interpreter',
# Virtualenv creates symlinks for local/{bin,include,lib}. Ignore them.
'symlink-should-be-relative',
# We depend on python2.7 which depends on libc
'missing-dependency-on-libc',
# We are installing in a virtualenv, so we can't easily use debian's
# bytecompiling infrastructure. It doesn't provide any benefit, either.
'package-installs-python-bytecode',
# https://github.com/jordansissel/fpm/issues/833
('file-missing-in-md5sums '
'usr/share/doc/'),
# lintian expects python dep for .../python shebang lines.
        # We are in a virtualenv that points at python2.7 explicitly and has
# that dependency.
'python-script-but-no-python-dep',
# Should be fixed
'binary-without-manpage',
'no-copyright-file',
# These are in our dependencies.
'script-not-executable',
'embedded-javascript-library',
'extra-license-file',
'unstripped-binary-or-object',
# Werkzeug installs various images with executable permissions.
# https://github.com/mitsuhiko/werkzeug/issues/629
# Fixed upstream, but not released.
'executable-not-elf-or-script',
        # libffi installs shared libraries with the executable bit set, e.g.:
# '14:59:26 E: clusterhq-python-flocker: shlib-with-executable-bit
# opt/flocker/lib/python2.7/site-packages/.libs_cffi_backend/libffi-72499c49.so.6.0.4
'shlib-with-executable-bit',
# Our omnibus packages are never going to be used by upstream so
# there's no bug to close.
# https://lintian.debian.org/tags/new-package-should-close-itp-bug.html
'new-package-should-close-itp-bug',
# We don't allow configuring ufw firewall applications.
('file-in-etc-not-marked-as-conffile '
'etc/ufw/applications.d/flocker-control'),
# Upstart control files are not installed as conffiles.
'file-in-etc-not-marked-as-conffile etc/init/flocker-dataset-agent.conf', # noqa
'file-in-etc-not-marked-as-conffile etc/init/flocker-container-agent.conf', # noqa
'file-in-etc-not-marked-as-conffile etc/init/flocker-control.conf',
'file-in-etc-not-marked-as-conffile etc/init/flocker-docker-plugin.conf', # noqa
# rsyslog files are not installed as conffiles.
'file-in-etc-not-marked-as-conffile etc/rsyslog.d/flocker.conf',
# Cryptography hazmat bindings
'package-installs-python-pycache-dir opt/flocker/lib/python2.7/site-packages/cryptography/hazmat/bindings/__pycache__/', # noqa
# files included by netaddr - we put the whole python we need in the
# flocker package, and lint complains. See:
# https://lintian.debian.org/tags/package-installs-ieee-data.html
"package-installs-ieee-data opt/flocker/lib/python2.7/site-packages/"
"netaddr/eui/iab.idx",
"package-installs-ieee-data opt/flocker/lib/python2.7/site-packages/"
"netaddr/eui/iab.txt",
"package-installs-ieee-data opt/flocker/lib/python2.7/site-packages/"
"netaddr/eui/oui.idx",
"package-installs-ieee-data opt/flocker/lib/python2.7/site-packages/"
"netaddr/eui/oui.txt",
"package-contains-timestamped-gzip",
"systemd-service-file-outside-lib",
# The binaries in ManyLinux wheel files are not compiled using Debian
# compile flags especially those related to hardening:
# https://wiki.debian.org/Hardening
# These are important security precautions which we should enforce in
# our packages.
# Remove this once binary wheel files have been hardened upstream or
# alternatively consider compiling from source rather than installing
# wheels from PyPI:
# https://github.com/pypa/manylinux/issues/59
"hardening-no-relro",
# Ubuntu Wily lintian complains about missing changelog.
# https://lintian.debian.org/tags/debian-changelog-file-missing-or-wrong-name.html
"debian-changelog-file-missing-or-wrong-name",
# The alabaster package contains some Google AdSense bugs.
# https://lintian.debian.org/tags/privacy-breach-google-adsense.html
"privacy-breach-google-adsense",
# Only occurs when building locally
"non-standard-dir-perm",
"non-standard-file-perm",
# Sphinx 1.5.1 contains various untracked files.
# https://github.com/sphinx-doc/sphinx/issues/3256
"macos-ds-store-file-in-package opt/flocker/lib/python2.7/site-packages/sphinx/locale/.DS_Store", # noqa
),
}
@attributes([
'package_type',
'destination_path',
'epoch',
'rpm_version',
'package',
'architecture',
])
class LintPackage(object):
"""
Run package linting tool against a package and fail if there are any errors
or warnings that aren't whitelisted.
"""
output = sys.stdout
@staticmethod
def check_lint_output(warnings, ignored_warnings):
"""
Filter the output of a linting tool against a list of ignored
warnings.
:param list warnings: List of warnings produced.
:param list ignored_warnings: List of warnings to ignore. A warning is
            ignored if it has a substring matching something in this list.
"""
unacceptable = []
for warning in warnings:
# Ignore certain warning lines
for ignored in ignored_warnings:
if ignored in warning:
break
else:
unacceptable.append(warning)
return unacceptable
def run(self):
filename = package_filename(
package_type=self.package_type,
package=self.package, rpm_version=self.rpm_version,
architecture=self.architecture)
output_file = self.destination_path.child(filename)
try:
check_output([
{
PackageTypes.RPM: 'rpmlint',
PackageTypes.DEB: 'lintian',
}[self.package_type],
output_file.path,
])
except CalledProcessError as e:
results = self.check_lint_output(
warnings=e.output.splitlines(),
ignored_warnings=IGNORED_WARNINGS[self.package_type],
)
if results:
self.output.write("Package errors (%s):\n" % (self.package))
self.output.write('\n'.join(results) + "\n")
raise SystemExit(1)
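# Hedged illustration of the whitelist filtering above (the warning strings
# are assumptions): given rpmlint lines such as
#   'clusterhq-python-flocker.x86_64: W: dir-or-file-in-opt /opt/flocker'
#   'clusterhq-python-flocker.x86_64: E: some-new-error /opt/flocker/bin/x'
# check_lint_output() keeps only the second line, because the first contains
# the whitelisted 'dir-or-file-in-opt' substring from IGNORED_WARNINGS.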
class PACKAGE(Values):
"""
Constants for ClusterHQ specific metadata that we add to all three
packages.
"""
EPOCH = ValueConstant(b'0')
LICENSE = ValueConstant(b'ASL 2.0')
URL = ValueConstant(b'https://clusterhq.com')
VENDOR = ValueConstant(b'ClusterHQ')
MAINTAINER = ValueConstant(b'ClusterHQ <[email protected]>')
class PACKAGE_PYTHON(PACKAGE):
DESCRIPTION = ValueConstant(
'Flocker: a container data volume manager for your ' +
'Dockerized applications\n' +
fill('This is the base package of scripts and libraries.', 79)
)
class PACKAGE_CLI(PACKAGE):
DESCRIPTION = ValueConstant(
'Flocker: a container data volume manager for your' +
' Dockerized applications\n' +
fill('This meta-package contains links to the Flocker client '
'utilities, and has only the dependencies required to run '
'those tools', 79)
)
class PACKAGE_NODE(PACKAGE):
DESCRIPTION = ValueConstant(
'Flocker: a container data volume manager for your' +
' Dockerized applications\n' +
fill('This meta-package contains links to the Flocker node '
'utilities, and has only the dependencies required to run '
'those tools', 79)
)
class PACKAGE_DOCKER_PLUGIN(PACKAGE):
DESCRIPTION = ValueConstant(
'Flocker volume plugin for Docker\n' +
fill('This meta-package contains links to the Flocker Docker plugin',
79)
)
def omnibus_package_builder(
distribution, destination_path, package_uri,
package_files, target_dir=None):
"""
Build a sequence of build steps which when run will generate a package in
``destination_path``, containing the package installed from ``package_uri``
and all its dependencies.
The steps are:
    * Create a clean virtualenv into which Flocker will be installed.
    * Install Flocker and all its dependencies in the virtualenv.
    * Find the version of the installed Flocker package, as reported by
      its ``__version__`` attribute.
    * Build operating system packages (RPM or DEB) from the virtualenv
      directory using ``fpm``, and lint the results.
    :param Distribution distribution: The distribution for which to build
        packages.
    :param FilePath destination_path: The path to a directory in which to save
        the resulting package files.
    :param bytes package_uri: A ``pip install`` compatible package URI.
    :param FilePath package_files: Directory containing system-level files
        to be installed with the packages.
:param FilePath target_dir: An optional path in which to create the
virtualenv from which the package will be generated. Default is a
temporary directory created using ``mkdtemp``.
:return: A ``BuildSequence`` instance containing all the required build
steps.
"""
if target_dir is None:
target_dir = FilePath(mkdtemp())
flocker_shared_path = target_dir.child('flocker-shared')
flocker_shared_path.makedirs()
flocker_cli_path = target_dir.child('flocker-cli')
flocker_cli_path.makedirs()
flocker_node_path = target_dir.child('flocker-node')
flocker_node_path.makedirs()
flocker_docker_plugin_path = target_dir.child('flocker-docker-plugin')
flocker_docker_plugin_path.makedirs()
empty_path = target_dir.child('empty')
empty_path.makedirs()
# Flocker is installed in /opt.
# See http://fedoraproject.org/wiki/Packaging:Guidelines#Limited_usage_of_.2Fopt.2C_.2Fetc.2Fopt.2C_and_.2Fvar.2Fopt # noqa
virtualenv_dir = FilePath('/opt/flocker')
virtualenv = VirtualEnv(root=virtualenv_dir)
get_package_version_step = GetPackageVersion(
virtualenv=virtualenv, package_name='flocker')
rpm_version = DelayedRpmVersion(
package_version_step=get_package_version_step)
category = {
PackageTypes.RPM: 'Applications/System',
PackageTypes.DEB: 'admin',
}[distribution.package_type()]
return BuildSequence(
steps=(
InstallVirtualEnv(virtualenv=virtualenv),
InstallApplication(
virtualenv=virtualenv,
package_uri='-r/flocker/requirements/flocker.txt'
),
InstallApplication(virtualenv=virtualenv,
package_uri=package_uri),
# get_package_version_step must be run before steps that reference
# rpm_version
get_package_version_step,
CreateLinks(
links=[
(FilePath('/opt/flocker/bin/eliot-prettyprint'),
flocker_shared_path),
(FilePath('/opt/flocker/bin/eliot-tree'),
flocker_shared_path),
],
),
BuildPackage(
package_type=distribution.package_type(),
destination_path=destination_path,
source_paths={virtualenv_dir: virtualenv_dir,
flocker_shared_path: FilePath("/usr/bin")},
name='clusterhq-python-flocker',
prefix=FilePath('/'),
epoch=PACKAGE.EPOCH.value,
rpm_version=rpm_version,
license=PACKAGE.LICENSE.value,
url=PACKAGE.URL.value,
vendor=PACKAGE.VENDOR.value,
maintainer=PACKAGE.MAINTAINER.value,
architecture=PACKAGE_ARCHITECTURE['clusterhq-python-flocker'],
description=PACKAGE_PYTHON.DESCRIPTION.value,
category=category,
dependencies=make_dependencies(
'python', rpm_version, distribution),
directories=[virtualenv_dir],
),
LintPackage(
package_type=distribution.package_type(),
destination_path=destination_path,
epoch=PACKAGE.EPOCH.value,
rpm_version=rpm_version,
package='clusterhq-python-flocker',
architecture=PACKAGE_ARCHITECTURE['clusterhq-python-flocker'],
),
# flocker-cli steps
# First, link command-line tools that should be available. If you
# change this you may also want to change entry_points in setup.py.
CreateLinks(
links=[
(FilePath('/opt/flocker/bin/flocker'),
flocker_cli_path),
(FilePath('/opt/flocker/bin/flocker-ca'),
flocker_cli_path),
]
),
BuildPackage(
package_type=distribution.package_type(),
destination_path=destination_path,
source_paths={flocker_cli_path: FilePath("/usr/bin")},
name='clusterhq-flocker-cli',
prefix=FilePath('/'),
epoch=PACKAGE.EPOCH.value,
rpm_version=rpm_version,
license=PACKAGE.LICENSE.value,
url=PACKAGE.URL.value,
vendor=PACKAGE.VENDOR.value,
maintainer=PACKAGE.MAINTAINER.value,
architecture=PACKAGE_ARCHITECTURE['clusterhq-flocker-cli'],
description=PACKAGE_CLI.DESCRIPTION.value,
category=category,
dependencies=make_dependencies(
'cli', rpm_version, distribution),
),
LintPackage(
package_type=distribution.package_type(),
destination_path=destination_path,
epoch=PACKAGE.EPOCH.value,
rpm_version=rpm_version,
package='clusterhq-flocker-cli',
architecture=PACKAGE_ARCHITECTURE['clusterhq-flocker-cli'],
),
# flocker-node steps
# First, link command-line tools that should be available. If you
# change this you may also want to change entry_points in setup.py.
CreateLinks(
links=[
(FilePath('/opt/flocker/bin/flocker-volume'),
flocker_node_path),
(FilePath('/opt/flocker/bin/flocker-control'),
flocker_node_path),
(FilePath('/opt/flocker/bin/flocker-container-agent'),
flocker_node_path),
(FilePath('/opt/flocker/bin/flocker-dataset-agent'),
flocker_node_path),
(FilePath('/opt/flocker/bin/flocker-diagnostics'),
flocker_node_path),
(FilePath('/opt/flocker/bin/flocker-benchmark'),
flocker_node_path),
(FilePath('/opt/flocker/bin/flocker-node-era'),
flocker_node_path),
]
),
BuildPackage(
package_type=distribution.package_type(),
destination_path=destination_path,
source_paths={
flocker_node_path: FilePath("/usr/sbin"),
# CentOS firewall configuration
package_files.child('firewalld-services'):
FilePath("/usr/lib/firewalld/services/"),
# Ubuntu firewall configuration
package_files.child('ufw-applications.d'):
FilePath("/etc/ufw/applications.d/"),
# SystemD configuration
package_files.child('systemd'):
FilePath('/usr/lib/systemd/system'),
# Upstart configuration
package_files.child('upstart'):
FilePath('/etc/init'),
# rsyslog configuration
package_files.child(b'rsyslog'):
FilePath(b'/etc/rsyslog.d'),
# Flocker Control State dir
empty_path: FilePath('/var/lib/flocker/'),
},
name='clusterhq-flocker-node',
prefix=FilePath('/'),
epoch=PACKAGE.EPOCH.value,
rpm_version=rpm_version,
license=PACKAGE.LICENSE.value,
url=PACKAGE.URL.value,
vendor=PACKAGE.VENDOR.value,
maintainer=PACKAGE.MAINTAINER.value,
architecture=PACKAGE_ARCHITECTURE['clusterhq-flocker-node'],
description=PACKAGE_NODE.DESCRIPTION.value,
category=category,
dependencies=make_dependencies(
'node', rpm_version, distribution),
after_install=package_files.child('after-install.sh'),
directories=[FilePath('/var/lib/flocker/')],
),
LintPackage(
package_type=distribution.package_type(),
destination_path=destination_path,
epoch=PACKAGE.EPOCH.value,
rpm_version=rpm_version,
package='clusterhq-flocker-node',
architecture=PACKAGE_ARCHITECTURE['clusterhq-flocker-node'],
),
# flocker-docker-plugin steps
# First, link command-line tools that should be available. If you
# change this you may also want to change entry_points in setup.py.
CreateLinks(
links=[
(FilePath('/opt/flocker/bin/flocker-docker-plugin'),
flocker_docker_plugin_path),
]
),
BuildPackage(
package_type=distribution.package_type(),
destination_path=destination_path,
source_paths={
flocker_docker_plugin_path: FilePath("/usr/sbin"),
# SystemD configuration
package_files.child('docker-plugin').child('systemd'):
FilePath('/usr/lib/systemd/system'),
# Upstart configuration
package_files.child('docker-plugin').child('upstart'):
FilePath('/etc/init'),
},
name='clusterhq-flocker-docker-plugin',
prefix=FilePath('/'),
epoch=PACKAGE.EPOCH.value,
rpm_version=rpm_version,
license=PACKAGE.LICENSE.value,
url=PACKAGE.URL.value,
vendor=PACKAGE.VENDOR.value,
maintainer=PACKAGE.MAINTAINER.value,
architecture=PACKAGE_ARCHITECTURE[
'clusterhq-flocker-docker-plugin'],
description=PACKAGE_DOCKER_PLUGIN.DESCRIPTION.value,
category=category,
dependencies=make_dependencies(
'docker-plugin', rpm_version, distribution),
),
LintPackage(
package_type=distribution.package_type(),
destination_path=destination_path,
epoch=PACKAGE.EPOCH.value,
rpm_version=rpm_version,
package='clusterhq-flocker-docker-plugin',
architecture=PACKAGE_ARCHITECTURE[
'clusterhq-flocker-docker-plugin'],
),
)
)
@attributes(['tag', 'build_directory'])
class DockerBuild(object):
"""
Build a docker image and tag it.
:ivar bytes tag: The tag name which will be assigned to the generated
docker image.
:ivar FilePath build_directory: The directory containing the ``Dockerfile``
to build.
"""
def run(self):
check_call(
['docker', 'build',
'--pull', '--tag', self.tag,
self.build_directory.path])
@attributes(['tag', 'volumes', 'command'])
class DockerRun(object):
"""
Run a docker image with the supplied volumes and command line arguments.
:ivar bytes tag: The tag name of the image to run.
:ivar dict volumes: A dict mapping ``FilePath`` container path to
``FilePath`` host path for each docker volume.
:ivar list command: The command line arguments which will be supplied to
the docker image entry point.
"""
def run(self):
volume_options = []
for container, host in self.volumes.iteritems():
volume_options.extend(
['--volume', '%s:%s' % (host.path, container.path)])
result = call(
['docker', 'run', '--rm'] +
volume_options + [self.tag] + self.command)
if result:
raise SystemExit(result)
def available_distributions(flocker_source_path):
"""
Determine the distributions for which packages can be built.
:param FilePath flocker_source_path: The top-level directory of a Flocker
source checkout. Distributions will be inferred from the build targets
available in this checkout.
:return: A ``set`` of ``bytes`` giving distribution names which can be
used with ``build_in_docker`` (and therefore with the
``--distribution`` command line option of ``build-package``).
"""
return set(
path.basename()
for path
in flocker_source_path.descendant(BUILD_TARGETS_SEGMENTS).children()
if path.isdir() and path.child(b"Dockerfile").exists()
)
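# Hedged sketch (the directory names are assumptions inferred from
# DISTRIBUTION_NAME_MAP above): with Dockerfiles under
# admin/build_targets/<name>/ the call
#   available_distributions(FilePath('/path/to/flocker'))
# would return something like set(['centos-7', 'ubuntu-14.04', 'ubuntu-16.04']).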
def build_in_docker(destination_path, distribution, top_level, package_uri):
"""
Build a flocker package for a given ``distribution`` inside a clean docker
container of that ``distribution``.
:param FilePath destination_path: The directory to which the generated
packages will be copied.
:param bytes distribution: The distribution name for which to build a
package.
:param FilePath top_level: The Flocker source code directory.
:param bytes package_uri: The ``pip`` style python package URI to install.
"""
if destination_path.exists() and not destination_path.isdir():
raise ValueError("go away")
volumes = {
FilePath('/output'): destination_path,
FilePath('/flocker'): top_level,
}
# Special case to allow building the currently checked out Flocker code.
if package_uri == top_level.path:
package_uri = '/flocker'
tag = "clusterhq/build-%s" % (distribution,)
build_targets_directory = top_level.descendant(BUILD_TARGETS_SEGMENTS)
build_directory = build_targets_directory.child(distribution)
# The <src> path must be inside the context of the build; you cannot COPY
# ../something /something, because the first step of a docker build is to
# send the context directory (and subdirectories) to the docker daemon.
# To work around this, we copy a shared requirements file into the build
# directory.
requirements_directory = top_level.child('requirements')
requirements_directory.copyTo(
build_directory.child('requirements')
)
return BuildSequence(
steps=[
DockerBuild(
tag=tag,
build_directory=build_directory
),
DockerRun(
tag=tag,
volumes=volumes,
command=[package_uri]
),
])
class DockerBuildOptions(usage.Options):
"""
Command line options for the ``build-package-entrypoint`` tool.
"""
synopsis = 'build-package-entrypoint [options] <package-uri>'
optParameters = [
['destination-path', 'd', '.',
'The path to a directory in which to create package files and '
'artifacts.'],
]
longdesc = dedent("""\
Arguments:
<package-uri>: The Python package url or path to install using ``pip``.
""")
def parseArgs(self, package_uri):
"""
The Python package to install.
"""
self['package-uri'] = package_uri
def postOptions(self):
"""
Coerce paths to ``FilePath``.
"""
self['destination-path'] = FilePath(self['destination-path'])
class DockerBuildScript(object):
"""
Check supplied command line arguments, print command line argument errors
to ``stderr`` otherwise build the RPM package.
:ivar build_command: The function responsible for building the
package. Allows the command to be overridden in tests.
"""
build_command = staticmethod(omnibus_package_builder)
def __init__(self, sys_module=None):
"""
:param sys_module: A ``sys`` like object whose ``argv``, ``stdout`` and
``stderr`` will be used in the script. Can be overridden in tests
to make assertions about the script argument parsing and output
printing. Default is ``sys``.
"""
if sys_module is None:
sys_module = sys
self.sys_module = sys_module
def main(self, top_level=None, base_path=None):
"""
Check command line arguments and run the build steps.
:param FilePath top_level: The top-level of the flocker repository.
:param base_path: ignored.
"""
to_file(self.sys_module.stderr)
options = DockerBuildOptions()
try:
options.parseOptions(self.sys_module.argv[1:])
except usage.UsageError as e:
self.sys_module.stderr.write("%s\n" % (options,))
self.sys_module.stderr.write("%s\n" % (e,))
raise SystemExit(1)
# Currently we add system control files for both EL and Debian-based
# systems. We should probably be more specific. See FLOC-1736.
self.build_command(
distribution=CURRENT_DISTRIBUTION,
destination_path=options['destination-path'],
package_uri=options['package-uri'],
package_files=top_level.descendant(['admin', 'package-files']),
).run()
docker_main = DockerBuildScript().main
class BuildOptions(usage.Options):
"""
Command line options for the ``build-package`` tool.
"""
synopsis = 'build-package [options] <package-uri>'
optParameters = [
['destination-path', 'd', '.',
'The path to a directory in which to create package files and '
'artifacts.'],
['distribution', None, None,
# {} is formatted in __init__
'The target distribution. One of {}'],
]
longdesc = dedent("""\
Arguments:
<package-uri>: The Python package url or path to install using ``pip``.
""")
def __init__(self, distributions):
"""
:param distributions: An iterable of the names of distributions which
are acceptable as values for the ``--distribution`` parameter.
"""
usage.Options.__init__(self)
self.docs["distribution"] = self.docs["distribution"].format(
', '.join(sorted(distributions))
)
def parseArgs(self, package_uri):
"""
The Python package to install.
"""
self['package-uri'] = package_uri
def postOptions(self):
"""
Coerce paths to ``FilePath`` and select a suitable ``native``
``package-type``.
"""
self['destination-path'] = FilePath(self['destination-path'])
if self['distribution'] is None:
raise usage.UsageError('Must specify --distribution.')
class BuildScript(object):
"""
Check supplied command line arguments, print command line argument errors
to ``stderr`` otherwise build the RPM package.
:ivar build_command: The function responsible for building the
package. Allows the command to be overridden in tests.
"""
build_command = staticmethod(build_in_docker)
def __init__(self, sys_module=None):
"""
:param sys_module: A ``sys`` like object whose ``argv``, ``stdout`` and
``stderr`` will be used in the script. Can be overridden in tests
to make assertions about the script argument parsing and output
printing. Default is ``sys``.
"""
if sys_module is None:
sys_module = sys
self.sys_module = sys_module
def main(self, top_level=None, base_path=None):
"""
Check command line arguments and run the build steps.
:param top_level: The path to the root of the checked out flocker
directory.
:param base_path: ignored.
"""
to_file(self.sys_module.stderr)
distributions = available_distributions(top_level)
options = BuildOptions(distributions)
try:
options.parseOptions(self.sys_module.argv[1:])
except usage.UsageError as e:
self.sys_module.stderr.write("%s\n" % (options,))
self.sys_module.stderr.write("%s\n" % (e,))
raise SystemExit(1)
self.build_command(
destination_path=options['destination-path'],
package_uri=options['package-uri'],
top_level=top_level,
distribution=options['distribution'],
).run()
main = BuildScript().main
| python |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class UsageStatistics(Model):
"""Statistics related to pool usage information.
:param start_time: The start time of the time range covered by the
statistics.
:type start_time: datetime
:param last_update_time: The time at which the statistics were last
updated. All statistics are limited to the range between startTime and
lastUpdateTime.
:type last_update_time: datetime
:param dedicated_core_time: The aggregated wall-clock time of the
dedicated compute node cores being part of the pool.
:type dedicated_core_time: timedelta
"""
_validation = {
'start_time': {'required': True},
'last_update_time': {'required': True},
'dedicated_core_time': {'required': True},
}
_attribute_map = {
'start_time': {'key': 'startTime', 'type': 'iso-8601'},
'last_update_time': {'key': 'lastUpdateTime', 'type': 'iso-8601'},
'dedicated_core_time': {'key': 'dedicatedCoreTime', 'type': 'duration'},
}
def __init__(self, start_time, last_update_time, dedicated_core_time):
self.start_time = start_time
self.last_update_time = last_update_time
self.dedicated_core_time = dedicated_core_time
| python |
#!/usr/bin/env python
import getopt, getpass, os, subprocess, sys
import github
def _shell(cmd):
print '$ {}'.format(cmd)
subprocess.check_output(cmd, shell = True)
print
def main(args):
username = None
password = None
fetchGists = False
# Parse options
opts, _ = getopt.getopt(args, 'gau:', ['gists', 'auth', 'user='])
for opt, arg in opts:
if opt in ('-a', '--auth'):
username = raw_input('Username? ')
password = getpass.getpass('Password? ')
elif opt in ('-u', '--user'):
if ':' in arg:
username, password = arg.split(':')
else:
username = arg
elif opt in ('-g', '--gists'):
fetchGists = True
# Exit if no username set
if not username:
print 'Please provide a username with -u or ask for a username prompt with -a.'
sys.exit(0)
    # Anonymous access if no password set
if not password:
API = github.GitHub()
if fetchGists:
repos = API.users(username).gists()
else:
repos = API.users(username).repos()
    # Authenticate if password set
else:
API = github.GitHub(username = username, password = password)
if fetchGists:
repos = API.gists()
else:
repos = API.user().repos()
# Iterate repos and clone
repos = repos.get()
for repo in repos:
if fetchGists:
url = repo.git_pull_url
path = repo.id
else:
url = repo.ssh_url
path = repo.name
# Don't clone if it already exists in this directory
if not os.path.exists(path):
_shell('git clone {}'.format(url))
else:
print '{} exists, aborting clone'.format(path)
if __name__ == '__main__':
main(sys.argv[1:])
| python |
"""CreateAnimalTable Migration."""
from masoniteorm.migrations import Migration
class CreateAnimalTable(Migration):
def up(self):
"""
Run the migrations.
"""
with self.schema.create("animals") as table:
table.increments("id")
table.string("name")
table.string("scientific_name")
table.string("url")
table.string("class")
table.string("lifespan")
table.string("origin")
table.string("fun_fact")
table.timestamps()
def down(self):
"""
Revert the migrations.
"""
self.schema.drop("animals")
| python |
from typing import Dict, Any
import requests
import logging
from requests.exceptions import HTTPError, ConnectionError
from zulip_bots.custom_exceptions import ConfigValidationError
GIPHY_TRANSLATE_API = 'http://api.giphy.com/v1/gifs/translate'
GIPHY_RANDOM_API = 'http://api.giphy.com/v1/gifs/random'
class GiphyHandler:
"""
This plugin posts a GIF in response to the keywords provided by the user.
Images are provided by Giphy, through the public API.
The bot looks for messages starting with @mention of the bot
and responds with a message with the GIF based on provided keywords.
It also responds to private messages.
"""
def usage(self) -> str:
return '''
This plugin allows users to post GIFs provided by Giphy.
Users should preface keywords with the Giphy-bot @mention.
The bot responds also to private messages.
'''
@staticmethod
def validate_config(config_info: Dict[str, str]) -> None:
query = {'s': 'Hello',
'api_key': config_info['key']}
try:
data = requests.get(GIPHY_TRANSLATE_API, params=query)
data.raise_for_status()
except ConnectionError as e:
raise ConfigValidationError(str(e))
except HTTPError as e:
error_message = str(e)
if data.status_code == 403:
error_message += ('This is likely due to an invalid key.\n'
'Follow the instructions in doc.md for setting an API key.')
raise ConfigValidationError(error_message)
def initialize(self, bot_handler: Any) -> None:
self.config_info = bot_handler.get_config_info('giphy')
def handle_message(self, message: Dict[str, str], bot_handler: Any) -> None:
bot_response = get_bot_giphy_response(
message,
bot_handler,
self.config_info
)
bot_handler.send_reply(message, bot_response)
class GiphyNoResultException(Exception):
pass
def get_url_gif_giphy(keyword: str, api_key: str) -> str:
    # Return a URL for a Giphy GIF based on the keywords given.
    # In case of error, e.g. failure to fetch a GIF URL, it raises
    # ConnectionError or GiphyNoResultException.
query = {'api_key': api_key}
if len(keyword) > 0:
query['s'] = keyword
url = GIPHY_TRANSLATE_API
else:
url = GIPHY_RANDOM_API
try:
data = requests.get(url, params=query)
except requests.exceptions.ConnectionError: # Usually triggered by bad connection.
logging.exception('Bad connection')
raise
data.raise_for_status()
try:
gif_url = data.json()['data']['images']['original']['url']
except (TypeError, KeyError): # Usually triggered by no result in Giphy.
raise GiphyNoResultException()
return gif_url
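# Hedged usage sketch (the API key is a placeholder assumption):
#   get_url_gif_giphy('happy dance', '<giphy-api-key>')
# returns a GIF URL string on success, or raises ConnectionError /
# GiphyNoResultException on failure.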
def get_bot_giphy_response(message: Dict[str, str], bot_handler: Any, config_info: Dict[str, str]) -> str:
    # Each exception raised by get_url_gif_giphy has a specific reply.
    # The bot will post the appropriate message for the error.
keyword = message['content']
try:
gif_url = get_url_gif_giphy(keyword, config_info['key'])
except requests.exceptions.ConnectionError:
return ('Uh oh, sorry :slightly_frowning_face:, I '
'cannot process your request right now. But, '
'let\'s try again later! :grin:')
except GiphyNoResultException:
return ('Sorry, I don\'t have a GIF for "%s"! '
':astonished:' % (keyword))
return ('[Click to enlarge](%s)'
'[](/static/images/interactive-bot/giphy/powered-by-giphy.png)'
% (gif_url))
handler_class = GiphyHandler
| python |
import os
import tbs.logger.log as logger
import tbs.helper.filedescriptor as fd
def checkRoot(message):
"""
Check if the user is root otherwise error out
"""
if os.geteuid() != 0:
logger.log(message, logger.LOG_ERROR)
raise Exception("You need root privileges to do this operation.")
def inCorrectDirectory(subpath="toslive"):
"""
try to check if the current directory is the directory containing the build files
"""
# check if the current repo is correct
result = fd.CMD(["git", "remote", "-v"]).execute()
if not result.exitcode == 0:
logger.log("Something went wrong when scanning the current directory for build files")
raise Exception(result.stderr)
if not "ODEX-TOS/tos-live" in result.stdout:
logger.log("Current directory does not contain build files, downloading files")
return False
result = fd.CMD(["git", "rev-parse", "--show-toplevel"]).execute()
if not result.exitcode == 0:
logger.log("Could not move to the correct location in the repo")
raise Exception(result.stderr)
os.chdir(result.stdout+"/"+subpath)
    return True
 | python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
acq400.py interface to one acq400 appliance instance
- enumerates all site services, available as uut.sX.knob
- simple property interface allows natural "script-like" usage
- eg::
uut1.s0.set_arm = 1
- equivalent to running this on a logged in shell session on the UUT::
set.site1 set_arm=1
- monitors transient status on uut, provides blocking events
- read_channels() - reads all data from channel data service.
Created on Sun Jan 8 12:36:38 2017
@author: pgm
"""
import threading
import re
import os
import errno
import signal
import sys
from . import netclient
import numpy as np
import socket
import timeit
import time
class AcqPorts:
"""server port constants"""
TSTAT = 2235
STREAM = 4210
SITE0 = 4220
SEGSW = 4250
SEGSR = 4251
DPGSTL = 4521
GPGSTL= 4541
GPGDUMP = 4543
WRPG = 4606
BOLO8_CAL = 45072
DATA0 = 53000
MULTI_EVENT_TMP = 53555
MULTI_EVENT_DISK = 53556
LIVETOP = 53998
ONESHOT = 53999
AWG_ONCE = 54201
AWG_AUTOREARM = 54202
MGTDRAM = 53990
MGTDRAM_PULL_DATA = 53991
class AcqSites:
# site service at AcqPorts.SITE0+ AcqSites.SITEi
SITE0 = 0
SITE1 = 1
SITE2 = 2
SITE3 = 3
SITE4 = 4
SITE5 = 5
SITE6 = 6
SITE_CA = 13
SITE_CB = 12
SITE_DSP = 14
class SF:
"""state constants"""
STATE = 0
PRE = 1
POST = 2
ELAPSED = 3
DEMUX = 5
class STATE:
"""transient states"""
IDLE = 0
ARM = 1
RUNPRE = 2
RUNPOST = 3
POPROCESS = 4
CLEANUP = 5
@staticmethod
def str(st):
if st==STATE.IDLE:
return "IDLE"
if st==STATE.ARM:
return "ARM"
if st==STATE.RUNPRE:
return "RUNPRE"
if st==STATE.RUNPOST:
return "RUNPOST"
if st==STATE.POPROCESS:
return "POPROCESS"
if st==STATE.CLEANUP:
return "CLEANUP"
return "UNDEF"
class Signals:
EXT_TRG_DX = 'd0'
INT_TRG_DX = 'd1'
MB_CLK_DX = 'd1'
class StreamClient(netclient.Netclient):
"""handles live streaming data"""
def __init__(self, addr):
print("worktodo")
class RawClient(netclient.Netclient):
""" handles raw data from any service port
"""
def __init__(self, addr, port):
netclient.Netclient.__init__(self, addr, port)
def read(self, nelems, data_size=2, ncols=1, maxbuf=0x400000):
"""read ndata from channel data server, return as np array.
Args:
nelems number of data elements, each data_size*ncols
nelems <=0 :: read until the end
data_size : 2|4 short or int
ncols : optional, to create a 2D array
"""
_dtype = np.dtype('i4' if data_size == 4 else 'i2') # hmm, what if unsigned?
if nelems <= 0:
nelems = 0x80000000 #2GB approximates infinity. what is infinity in python?
bytestogo = nelems * data_size * ncols
total_buf = ""
while bytestogo > 0:
new_buf = self.sock.recv(bytestogo)
if not new_buf:
break # end of file
bytestogo = bytestogo - len(new_buf)
total_buf += new_buf # still dubious of append :-)
return np.frombuffer(total_buf, _dtype)
def get_blocks(self, nelems, data_size=2, ncols=1):
block = np.array([1])
while len(block) > 0:
block = self.read(nelems, data_size=data_size, ncols=ncols)
if len(block) > 0:
yield block
class MgtDramPullClient(RawClient):
def __init__(self, addr):
RawClient.__init__(self, addr, AcqPorts.MGTDRAM_PULL_DATA)
class ChannelClient(netclient.Netclient):
"""handles post shot data for one channel.
Args:
addr (str) : ip address or dns name
ch (int) : channel number 1..N
"""
def __init__(self, addr, ch):
netclient.Netclient.__init__(self, addr, AcqPorts.DATA0+ch)
# on Linux, recv returns on ~mtu
# on Windows, it may buffer up, and it's very slow unless we use a larger buffer
def read(self, ndata, data_size=2, maxbuf=0x400000):
"""read ndata from channel data server, return as np array.
Args:
ndata (int): number of elements
data_size : 2|4 short or int
            maxbuf : max bytes to read per recv() call (default 0x400000)
Returns:
np: data array
* TODO buffer +=
this is probably horribly inefficient probably better::
retbuf = np.array(dtype, ndata)
retbuf[cursor].
"""
_dtype = np.dtype('i4' if data_size == 4 else 'i2')
total_buffer = buffer = self.sock.recv(maxbuf)
if int(ndata) == 0 or int(ndata) == -1:
while True:
buffer = self.sock.recv(maxbuf)
if not buffer:
return np.frombuffer(total_buffer, dtype=_dtype, count=-1)
total_buffer += buffer
while len(buffer) < ndata*data_size:
buffer += self.sock.recv(maxbuf)
return np.frombuffer(buffer, dtype=_dtype, count=ndata)
class ExitCommand(Exception):
pass
def signal_handler(signal, frame):
raise ExitCommand()
class Statusmonitor:
""" monitors the status channel
Efficient event-driven monitoring in a separate thread
"""
st_re = re.compile(r"([0-9]) ([0-9]+) ([0-9]+) ([0-9]+) ([0-9])+" )
def __repr__(self):
return repr(self.logclient)
def st_monitor(self):
while self.quit_requested == False:
st = self.logclient.poll()
match = self.st_re.search(st)
# status is a match. need to look at group(0). It's NOT a LIST!
if match:
statuss = match.groups()
status1 = [int(x) for x in statuss]
if self.trace > 1:
print("%s <%s" % (repr(self), status1))
if self.status != None:
# print("Status check %s %s" % (self.status0[0], status[0]))
if self.status[SF.STATE] != 0 and status1[SF.STATE] == 0:
if self.trace:
print("%s STOPPED!" % (self.uut))
self.stopped.set()
self.armed.clear()
# print("status[0] is %d" % (status[0]))
if status1[SF.STATE] == 1:
if self.trace:
print("%s ARMED!" % (self.uut))
self.armed.set()
self.stopped.clear()
if self.status[SF.STATE] == 0 and status1[SF.STATE] > 1:
if self.trace:
print("ERROR: %s skipped ARM %d -> %d" % (self.uut, self.status[0], status1[0]))
self.quit_requested = True
os.kill(self.main_pid, signal.SIGINT)
sys.exit(1)
self.status = status1
elif self.trace > 1:
print("%s <%s>" % (repr(self), st))
def get_state(self):
return self.status[SF.STATE]
def wait_event(self, ev, descr):
# print("wait_%s 02 %d" % (descr, ev.is_set()))
while ev.wait(0.1) == False:
if self.quit_requested:
print("QUIT REQUEST call exit %s" % (descr))
sys.exit(1)
# print("wait_%s 88 %d" % (descr, ev.is_set()))
ev.clear()
# print("wait_%s 99 %d" % (descr, ev.is_set()))
def wait_armed(self):
"""
blocks until uut is ARMED
"""
self.wait_event(self.armed, "armed")
def wait_stopped(self):
"""
blocks until uut is STOPPED
"""
self.wait_event(self.stopped, "stopped")
trace = int(os.getenv("STATUSMONITOR_TRACE", "0"))
def __init__(self, _uut, _status):
self.quit_requested = False
self.trace = Statusmonitor.trace
self.uut = _uut
self.main_pid = os.getpid()
self.status = _status
self.stopped = threading.Event()
self.armed = threading.Event()
self.logclient = netclient.Logclient(_uut, AcqPorts.TSTAT)
self.st_thread = threading.Thread(target=self.st_monitor)
self.st_thread.setDaemon(True)
self.st_thread.start()
class NullFilter:
def __call__ (self, st):
print(st)
null_filter = NullFilter()
class ProcessMonitor:
st_re = re.compile(r"^END" )
def st_monitor(self):
while self.quit_requested == False:
st = self.logclient.poll()
self.output_filter(st)
match = self.st_re.search(st)
if match:
self.quit_requested = True
def __init__(self, _uut, _filter):
self.quit_requested = False
self.output_filter = _filter
self.logclient = netclient.Logclient(_uut, AcqPorts.MGTDRAM)
self.logclient.termex = re.compile("(\n)")
self.st_thread = threading.Thread(target=self.st_monitor)
self.st_thread.setDaemon(True)
self.st_thread.start()
class Acq400:
"""
host-side proxy for Acq400 uut.
discovers and maintains all site servers
maintains a monitor thread on the monitor port
handles multiple channel post shot upload
Args:
_uut (str) : ip-address or dns name
monitor=True (bool) : set false to stub monitor,
useful for tracing on a second connection to an active system.
"""
def init_site_client(self, site):
svc = netclient.Siteclient(self.uut, AcqPorts.SITE0+site)
self.svc["s%d" % site] = svc
self.modules[site] = svc
if self.awg_site == 0 and svc.module_name.startswith("ao"):
self.awg_site = site
self.mod_count += 1
@classmethod
def create_uuts(cls, uut_names):
""" create_uuts(): factory .. create them in parallel
*** Experimental Do Not Use ***
"""
uuts = []
uut_threads = {}
for uname in uut_names:
uut_threads[uname] = \
threading.Thread(\
target=lambda u, l: l.append(cls(u)), \
args=(uname, uuts))
for uname in uut_names:
uut_threads[uname].start()
for t in uut_threads:
uut_threads[t].join(10.0)
return uuts
def __init__(self, _uut, monitor=True):
self.NL = re.compile(r"(\n)")
self.uut = _uut
self.trace = 0
self.save_data = None
self.svc = {}
self.modules = {}
self.mod_count = 0
# channel index from 1,..
self.cal_eslo = [0, ]
self.cal_eoff = [0, ]
self.mb_clk_min = 4000000
s0 = self.svc["s0"] = netclient.Siteclient(self.uut, AcqPorts.SITE0)
sl = s0.SITELIST.split(",")
sl.pop(0)
self.awg_site = 0
site_enumerators = {}
for sm in sl:
site_enumerators[sm] = \
threading.Thread(target=self.init_site_client,\
args=(int(sm.split("=").pop(0)),)\
)
for sm in sl:
site_enumerators[sm].start()
for sm in sl:
# print("join {}".format(site_enumerators[sm]))
site_enumerators[sm].join(10.0)
# init _status so that values are valid even if this Acq400 doesn't run a shot ..
_status = [int(x) for x in s0.state.split(" ")]
if monitor:
self.statmon = Statusmonitor(self.uut, _status)
def __getattr__(self, name):
if self.svc.get(name) != None:
return self.svc.get(name)
else:
msg = "'{0}' object has no attribute '{1}'"
raise AttributeError(msg.format(type(self).__name__, name))
def state(self):
return self.statmon.status[SF.STATE]
def post_samples(self):
return self.statmon.status[SF.POST]
def pre_samples(self):
return self.statmon.status[SF.PRE]
def elapsed_samples(self):
return self.statmon.status[SF.ELAPSED]
def demux_status(self):
return self.statmon.status[SF.DEMUX]
def samples(self):
return self.pre_samples() + self.post_samples()
def get_aggregator_sites(self):
return self.s0.aggregator.split(' ')[1].split('=')[1].split(',')
def fetch_all_calibration(self):
print("Fetching calibration data")
for m in (self.modules[int(c)] for c in self.get_aggregator_sites()):
self.cal_eslo.extend(m.AI_CAL_ESLO.split(' ')[3:])
self.cal_eoff.extend(m.AI_CAL_EOFF.split(' ')[3:])
def scale_raw(self, raw, volts=False):
for (sx, m) in list(self.modules.items()):
if m.MODEL.startswith("ACQ43"):
rshift = 8
elif m.data32 == '1':
# volts calibration is normalised to 24b
if m.adc_18b == '1':
rshift = 14 - (8 if volts else 0)
else:
rshift = 16 - (8 if volts else 0)
else:
rshift = 0
break
return np.right_shift(raw, rshift)
def chan2volts(self, chan, raw):
""" chan2volts(self, chan, raw) returns calibrated volts for channel
Args:
chan: 1..nchan
raw: raw bits to convert.
"""
if len(self.cal_eslo) == 1:
self.fetch_all_calibration()
eslo = float(self.cal_eslo[chan])
eoff = float(self.cal_eoff[chan])
return np.add(np.multiply(raw, eslo), eoff)
def read_chan(self, chan, nsam = 0):
if chan != 0 and nsam == 0:
nsam = self.pre_samples()+self.post_samples()
cc = ChannelClient(self.uut, chan)
ccraw = cc.read(nsam, data_size=(4 if self.s0.data32 == '1' else 2))
if self.save_data:
try:
os.makedirs(self.save_data)
except OSError as exception:
if exception.errno != errno.EEXIST:
raise
with open("%s/%s_CH%02d" % (self.save_data, self.uut, chan), 'wb') as fid:
ccraw.tofile(fid, '')
return ccraw
def nchan(self):
return int(self.s0.NCHAN)
def read_channels(self, channels=(), nsam=0):
"""read all channels post shot data.
Returns:
chx (list) of np arrays.
"""
if channels == ():
channels = list(range(1, self.nchan()+1))
elif type(channels) == int:
channels = (channels,)
# print("channels {}".format(channels))
chx = []
for ch in channels:
if self.trace:
print("%s CH%02d start.." % (self.uut, ch))
start = timeit.default_timer()
chx.append(self.read_chan(ch, nsam))
if self.trace:
tt = timeit.default_timer() - start
print("%s CH%02d complete.. %.3f s %.2f MB/s" %
(self.uut, ch, tt, len(chx[-1])*2/1000000/tt))
return chx
# DEPRECATED
def load_segments(self, segs):
with netclient.Netclient(self.uut, AcqPorts.SEGSW) as nc:
for seg in segs:
nc.sock.send((seg+"\n").encode())
# DEPRECATED
def show_segments(self):
with netclient.Netclient(self.uut, AcqPorts.SEGSR) as nc:
while True:
buf = nc.sock.recv(1024)
if buf:
print(buf)
else:
break
def clear_counters(self):
for s in self.svc:
self.svc[s].sr('*RESET=1')
def set_sync_routing_master(self, clk_dx="d1", trg_dx="d0"):
self.s0.SIG_SYNC_OUT_CLK = "CLK"
self.s0.SIG_SYNC_OUT_CLK_DX = clk_dx
self.s0.SIG_SYNC_OUT_TRG = "TRG"
self.s0.SIG_SYNC_OUT_TRG_DX = trg_dx
def set_sync_routing_slave(self):
self.set_sync_routing_master()
self.s0.SIG_SRC_CLK_1 = "HDMI"
self.s0.SIG_SRC_TRG_0 = "HDMI"
def set_sync_routing(self, role):
# deprecated
# set sync mode on HDMI daisychain
# valid roles: master or slave
if role == "master":
self.set_sync_routing_master()
elif role == "slave":
self.set_sync_routing_slave()
else:
raise ValueError("undefined role {}".format(role))
def set_mb_clk(self, hz=4000000, src="zclk", fin=1000000):
hz = int(hz)
if src == "zclk":
self.s0.SIG_ZCLK_SRC = "INT33M"
self.s0.SYS_CLK_FPMUX = "ZCLK"
self.s0.SIG_CLK_MB_FIN = 33333000
elif src == "xclk":
self.s0.SYS_CLK_FPMUX = "XCLK"
self.s0.SIG_CLK_MB_FIN = 32768000
else:
self.s0.SYS_CLK_FPMUX = "FPCLK"
self.s0.SIG_CLK_MB_FIN = fin
if hz >= self.mb_clk_min:
self.s0.SIG_CLK_MB_SET = hz
self.s1.CLKDIV = '1'
else:
for clkdiv in range(1,2000):
if hz*clkdiv >= self.mb_clk_min:
self.s0.SIG_CLK_MB_SET = hz*clkdiv
self.s1.CLKDIV = clkdiv
return
raise ValueError("frequency out of range {}".format(hz))
def load_stl(self, stl, port, trace = False, wait_eof = False):
termex = re.compile("\n")
with netclient.Netclient(self.uut, port) as nc:
lines = stl.split("\n")
for ll in lines:
if trace:
print("> {}".format(ll))
if len(ll) < 2:
if trace:
print("skip blank")
continue
if ll.startswith('#'):
if trace:
print("skip comment")
continue
nc.sock.send((ll+"\n").encode())
rx = nc.sock.recv(4096)
if trace:
print("< {}".format(rx))
nc.sock.send("EOF\n".encode())
nc.sock.shutdown(socket.SHUT_WR)
wait_end = True
while wait_end:
rx = nc.sock.recv(4096)
if trace:
print("< {}".format(rx))
if (str(rx).find("EOF")) != -1:
break
wait_end = wait_eof
def load_gpg(self, stl, trace = False):
self.load_stl(stl, AcqPorts.GPGSTL, trace, True)
def load_dpg(self, stl, trace = False):
self.load_stl(stl, AcqPorts.DPGSTL, trace)
def load_wrpg(self, stl, trace = False):
self.load_stl(stl, AcqPorts.WRPG, trace, True)
class AwgBusyError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def load_awg(self, data, autorearm=False):
if self.awg_site > 0:
if self.modules[self.awg_site].task_active == '1':
raise self.AwgBusyError("awg busy")
port = AcqPorts.AWG_AUTOREARM if autorearm else AcqPorts.AWG_ONCE
with netclient.Netclient(self.uut, port) as nc:
nc.sock.send(data)
nc.sock.shutdown(socket.SHUT_WR)
while True:
rx = nc.sock.recv(128)
if not rx or rx.startswith(b"DONE"):
break
nc.sock.close()
def run_service(self, port, eof="EOF", prompt='>'):
txt = ""
with netclient.Netclient(self.uut, port) as nc:
while True:
rx = nc.receive_message(self.NL, 256)
txt += rx
txt += "\n"
print("{}{}".format(prompt, rx))
if rx.startswith(eof):
break
nc.sock.shutdown(socket.SHUT_RDWR)
nc.sock.close()
return txt
def run_oneshot(self):
with netclient.Netclient(self.uut, AcqPorts.ONESHOT) as nc:
while True:
rx = nc.receive_message(self.NL, 256)
print("{}> {}".format(self.s0.HN, rx))
if rx.startswith("SHOT_COMPLETE"):
break
nc.sock.shutdown(socket.SHUT_RDWR)
nc.sock.close()
def run_livetop(self):
with netclient.Netclient(self.uut, AcqPorts.LIVETOP) as nc:
print(nc.receive_message(self.NL, 256))
nc.sock.shutdown(socket.SHUT_RDWR)
nc.sock.close()
def disable_trigger(self):
#master.s0.SIG_SRC_TRG_0 = 'NONE'
#master.s0.SIG_SRC_TRG_1 = 'NONE'
self.s0.SIG_SRC_TRG_0 = 'HOSTB'
self.s0.SIG_SRC_TRG_1 = 'HOSTA'
def enable_trigger(self, trg_0='EXT', trg_1='STRIG'):
if trg_0 is not None:
self.s0.SIG_SRC_TRG_0 = trg_0
if trg_1 is not None:
self.s0.SIG_SRC_TRG_1 = trg_1
def configure_post(self, role, trigger=[1,1,1], post=100000):
"""
Configure UUT for a regular transient capture. Default: internal soft
trigger starts the capture.
"Role" is a mandatory argument. For master systems, role should be the
string "master", if the system is a slave then role should be the string
"slave"
Default post samples: 100k.
"""
print(trigger)
self.s0.transient = "PRE=0 POST={} SOFT_TRIGGER={}".format(post, trigger[1])
self.s1.TRG = 1
if role == "slave" or trigger[1] == 0:
self.s1.TRG_DX = 0
else:
self.s1.TRG_DX = 1
self.s1.TRG_SENSE = trigger[2]
self.s1.EVENT0 = 0
self.s1.EVENT0_DX = 0
self.s1.EVENT0_SENSE = 0
self.s1.RGM = 0
self.s1.RGM_DX = 0
self.s1.RGM_SENSE = 0
self.s1.RGM = 0 # Make sure RGM mode is turned off.
self.s0.SIG_EVENT_SRC_0 = 0
return None
def configure_pre_post(self, role, trigger=[1,1,1], event=[1,1,1], pre=50000, post=100000):
"""
        Configure UUT for pre/post mode. Default: a soft trigger starts the
        data flow and the event is triggered by a hard external trigger.
        "Role" is a mandatory argument. For master systems, role should be the
        string "master"; if the system is a slave then role should be the
        string "slave".
        Default pre trigger samples: 50k.
        Default post trigger samples: 100k.
"""
if pre > post:
print("PRE samples cannot be greater than POST samples. Config not set.")
return None
trg = 1 if trigger[1] == 1 else 0
self.s0.transient = "PRE={} POST={} SOFT_TRIGGER={}".format(pre, post, trg)
self.s1.TRG = trigger[0]
if role == "slave" or trigger[1] == 0:
self.s1.TRG_DX = 0
else:
self.s1.TRG_DX = 1
self.s1.TRG_SENSE = trigger[2]
self.s1.EVENT0 = event[0]
self.s1.EVENT0_DX = event[1]
self.s1.EVENT0_SENSE = event[2]
self.s1.RGM = 0
self.s1.RGM_DX = 0
self.s1.RGM_SENSE = 0
self.s1.RGM = 0 # Make sure RGM mode is turned off.
self.s0.SIG_EVENT_SRC_0 = 0
return None
def configure_rtm(self, role, trigger=[1,1,1], event=[1,1,1], post=50000, rtm_translen=5000, gpg=0):
"""
        Configure UUT for RTM mode. Default: an external trigger starts the
        capture and takes 5000 samples; each subsequent trigger gives us
        another 5000 samples.
        "Role" is a mandatory argument. For master systems, role should be the
        string "master"; if the system is a slave then role should be the
        string "slave".
        Default rtm_translen: 5k samples.
        Default post: 50k samples.
GPG can be used in RTM mode as the Event. If you are using the GPG
then this function can put the GPG output onto the event bus (to use as
an Event for RTM).
"""
self.s0.transient = "PRE=0 POST={}".format(post)
self.s1.rtm_translen = rtm_translen
self.s1.TRG = 1
if role == "slave" or trigger[1] == 0:
self.s1.TRG_DX = 0
else:
self.s1.TRG_DX = 1
self.s1.TRG_SENSE = trigger[2]
self.s1.EVENT0 = event[0]
self.s1.EVENT0_DX = event[1]
self.s1.EVENT0_SENSE = event[2]
self.s1.RGM = 3
self.s1.RGM_DX = 0
self.s1.RGM_SENSE = 1
self.s0.SIG_EVENT_SRC_0 = 1 if gpg == 1 else 0
return None
def configure_transient(self, pre=0, post=100000,
sig_DX='d0', auto_soft_trigger=0, demux=1, edge='rising'):
"""
Configure uut for transient capture.
sig_DX is the signal line responsible for TRIGGER or EVENT depending on mode;
function makes appropriate selection.
Function is aware of sync_role and sets sig_DX accordingly
"""
sync_role = self.s0.sync_role
        if sync_role == 'role not set' or sync_role == 'slave':
sig_DX = 'd0'
sigdef = "1,{},{}".format(sig_DX[1], 1 if edge == 'rising' else 0)
if pre > 0:
self.s1.event0 = sigdef
self.s1.trg = '1,1,1'
else:
self.s1.event0 = '0,0,0'
self.s1.trg = sigdef
self.s0.transient = "PRE={} POST={} SOFT_TRIGGER={} DEMUX={}".\
format(pre, post, auto_soft_trigger, demux)
def configure_rgm(self, role, trigger=[1,0,1], event=[1,1,1], post="100000", gpg=0):
"""
        Configure UUT for RGM mode. Default: an external trigger starts the
        capture and the system takes samples on every clock whenever the
        trigger is high.
        "Role" is a mandatory argument. For master systems, role should be the
        string "master"; if the system is a slave then role should be the
        string "slave".
        Default post: 100k samples.
GPG can be used in RGM mode as the Event. If you are using the GPG then
this function can put the GPG output onto the event bus (to use as an
Event for RGM).
"""
self.s0.transient = "PRE=0 POST={}".format(post)
self.s1.TRG = 1
if role == "slave" or trigger[1] == 0:
self.s1.TRG_DX = 0
else:
self.s1.TRG_DX = 1
self.s1.TRG_SENSE = trigger[2]
self.s1.EVENT0 = 0#event[0]
self.s1.EVENT0_DX = 0#event[1]
self.s1.EVENT0_SENSE = 0
self.s1.RGM = 2
self.s1.RGM_DX = 0
self.s1.RGM_SENSE = 1
self.s0.SIG_EVENT_SRC_0 = 1 if gpg == 1 else 0
return None
def get_demux_state(self):
"""
Returns the current state of demux. Beware: if demux is set after the
shot then this function will return the new state. There is no way to
determine what the state was during the previous shot.
"""
transient = self.s0.transient
demux_state = transient.split("DEMUX=",1)[1][0]
return int(demux_state)
def pull_plot(self, channels=(), demux=-1):
"""
Pulls data from 53000 or 5300X depending on the status of demux.
This function takes a tuple of integers and will return the
corresponding data from each 5300X port (if demux is on) and will return
the corresponding data filtered from 53000 if demux is off.
The user can also override the detected demux state if they want to: 1
is demux on and 0 is demux off. Default is -1 and means autodetect.
This function returns an array of the specified channels and plots the
data.
"""
data = []
if demux == -1:
demux = self.get_demux_state()
if demux == 1:
data = self.read_channels(channels)
elif demux == 0:
mux_data = self.read_muxed_data()
print("mux data = ", mux_data)
nchan = self.nchan()
if channels == ():
channels = list(range(1,nchan+1))
for ch in channels:
print("Channel - ", ch)
data.append(mux_data[ch-1::nchan])
import matplotlib.pyplot as plt
for channel in data:
plt.plot(channel)
plt.grid(True)
plt.show()
return data
def read_muxed_data(self):
"""
A function that returns data from port 53000. Only use if demux is
turned off. If demux is turned on then this function will not return the
expected muxed data. To check if demux is enabled use the
get_demux_state() function.
"""
data = self.read_channels((0), -1)
return data[0]
def pull_data(self):
"""
A function to pull data based on whatever demux is set to. Should be
entirely automated. The function will check what channels are AI
channels and pull the data from those channels.
"""
demux_state = self.get_demux_state()
channels = list(range(1, self.get_ai_channels()+1))
nchan = channels[-1]
if demux_state == 1:
data = self.read_channels(channels, -1)
elif demux_state == 0:
data = []
mux_data = self.read_muxed_data()
for ch in channels:
data.append(mux_data[ch-1::nchan])
return data
def get_ai_channels(self):
"""
Returns all of the AI channels. This is a more robust way to get the
total number of AI channels, as sometimes nchan can be set to include
the scratch pad.
"""
ai_channels = 0
site_types = self.get_site_types()
for ai_site in site_types["AISITES"]:
ai_site = "s{}".format(ai_site)
ai_channels += int(getattr(getattr(self, ai_site), "NCHAN"))
return ai_channels
def get_site_types(self):
"""
Returns a dictionary with keys AISITES, AOSITES, and DIOSITES with the
corresponding values as lists of the channels which are AI, AO, and DIO.
"""
AISITES = []
AOSITES = []
DIOSITES = []
for site in [1,2,3,4,5,6]:
try:
                module_name = getattr(self, 's{}'.format(site)).module_name
if module_name.startswith('acq'):
AISITES.append(site)
elif module_name.startswith('ao'):
AOSITES.append(site)
elif module_name.startswith('dio'):
DIOSITES.append(site)
except Exception:
continue
site_types = { "AISITES": AISITES, "AOSITES": AOSITES, "DIOSITES": DIOSITES }
return site_types
def get_es_indices(self, file_path="default", nchan="default", human_readable=0, return_hex_string=0):
"""
Returns the location of event samples.
get_es_indices will pull data from a system by default (it will also
read in a raw datafile) and reads through the data in order to find the
location of the event samples. The system will also return the raw
event sample data straight from the system.
If human_readable is set to 1 then the function will return the hex
interpretations of the event sample data. The indices will remain
unchanged.
If return_hex_string is set to 1 (provided human_readable has ALSO been
set) then the function will return one single string containing all of
the event samples.
Data returned by the function looks like:
[ [Event sample indices], [Event sample data] ]
"""
# a function that return the location of event samples.
# returns:
# [ [event sample indices], [ [event sample 1], ...[event sample N] ] ]
indices = []
event_samples = []
nchan = self.nchan() if nchan == "default" else nchan
if file_path == "default":
data = self.read_muxed_data()
data = np.array(data)
if data.dtype == np.int16:
# convert shorts back to raw bytes and then to longs.
data = np.frombuffer(data.tobytes(), dtype=np.uint32)
else:
data = np.fromfile(file_path, dtype=np.uint32)
if int(self.s0.data32) == 0:
nchan = nchan / 2 # "effective" nchan has halved if data is shorts.
nchan = int(nchan)
for index, sample in enumerate(data[0::nchan]):
# if sample == np.int32(0xaa55f154): # aa55
if sample == np.uint32(0xaa55f154): # aa55
indices.append(index)
event_samples.append(data[index*nchan:index*nchan + nchan])
if human_readable == 1:
# Change decimal to hex.
ii = 0
while ii < len(event_samples):
if type(event_samples[ii]) == np.ndarray:
event_samples[ii] = event_samples[ii].tolist()
for indice, channel in enumerate(event_samples[ii]):
event_samples[ii][indice] = '0x{0:08X}'.format(channel)
ll = int(len(event_samples[ii])/int(len(self.get_aggregator_sites())))
# print(event_samples[ii])
event_samples[ii] = [event_samples[ii][i:i + ll] for i in range(0, len(event_samples[ii]), ll)]
ii += 1
if return_hex_string == 1:
# Make a single string containing the hex values.
es_string = ""
for num, sample in enumerate(event_samples):
for i in range(len(sample[0])):
for x in event_samples[num]:
es_string = es_string + str(x[i]) + " "
es_string = es_string + "\n"
es_string = es_string + "\n"
event_samples = es_string
return [indices, event_samples]
def stream(self, sink):
nc = netclient.Netclient(self.uut, AcqPorts.STREAM)
finished = False
while not sink(nc.sock.recv(4096*32*2)):
continue
class Acq2106(Acq400):
""" Acq2106 specialization of Acq400
Defines features specific to ACQ2106
"""
def __init__(self, _uut, monitor=True, has_dsp=False):
print("acq400_hapi.Acq2106 %s" % (_uut))
Acq400.__init__(self, _uut, monitor)
self.mb_clk_min = 100000
if has_dsp:
sn_map = (('cA', AcqSites.SITE_CA), ('cB', AcqSites.SITE_CB), ('s14', AcqSites.SITE_DSP))
else:
sn_map = (('cA', AcqSites.SITE_CA), ('cB', AcqSites.SITE_CB))
for ( service_name, site ) in sn_map:
try:
self.svc[service_name] = netclient.Siteclient(self.uut, AcqPorts.SITE0+site)
except socket.error:
print("uut {} site {} not populated".format(_uut, site))
self.mod_count += 1
def set_mb_clk(self, hz=4000000, src="zclk", fin=1000000):
print("set_mb_clk {} {} {}".format(hz, src, fin))
Acq400.set_mb_clk(self, hz, src, fin)
try:
self.s0.SYS_CLK_DIST_CLK_SRC = 'Si5326'
except AttributeError:
print("SYS_CLK_DIST_CLK_SRC, deprecated")
self.s0.SYS_CLK_OE_CLK1_ZYNQ = '1'
def set_sync_routing_slave(self):
Acq400.set_sync_routing_slave(self)
self.s0.SYS_CLK_OE_CLK1_ZYNQ = '1'
def set_master_trg(self, trg, edge = "rising", enabled=True):
if trg == "fp":
self.s0.SIG_SRC_TRG_0 = "EXT" if enabled else "HOSTB"
elif trg == "int":
self.s0.SIG_SRC_TRG_1 = "STRIG"
class Acq2106_Mgtdram8(Acq2106):
MGT_BLOCK_BYTES = 0x400000
MGT_BLOCK_MULTIPLE = 16
def __init__(self, uut, monitor=True):
print("acq400_hapi.Acq2106_MgtDram8 %s" % (uut))
Acq2106.__init__(self, uut, monitor, has_dsp=True)
def run_mgt(self, _filter = null_filter):
pm = ProcessMonitor(self.uut, _filter)
while pm.quit_requested != True:
time.sleep(1)
def create_mgtdram_pull_client(self):
return MgtDramPullClient(self.uut)
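# Hedged usage sketch (not part of the library): a minimal post-trigger
# transient capture, assuming a reachable ACQ400 appliance at the placeholder
# address below and an AI module in site 1.
def _example_transient_capture(addr='acq2106_001'):
    uut = Acq400(addr)
    uut.configure_post(role='master', trigger=[1, 1, 1], post=100000)
    uut.s0.set_arm = 1
    uut.statmon.wait_armed()
    uut.statmon.wait_stopped()
    return uut.read_channels()      # list of np arrays, one per AI channel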
def run_unit_test():
SERVER_ADDRESS = '10.12.132.22'
if len(sys.argv) > 1:
SERVER_ADDRESS = sys.argv[1]
print("create Acq400 %s" %(SERVER_ADDRESS))
uut = Acq400(SERVER_ADDRESS)
print("MODEL %s" %(uut.s0.MODEL))
print("SITELIST %s" %(uut.s0.SITELIST))
print("MODEL %s" %(uut.s1.MODEL))
print("Module count %d" % (uut.mod_count))
print("POST SAMPLES %d" % uut.post_samples())
for sx in sorted(uut.svc):
print("SITE:%s MODEL:%s" % (sx, uut.svc[sx].sr("MODEL")))
if __name__ == '__main__':
run_unit_test()
| python |
import click
import utils
@click.command()
@click.option('--test', '-t', default=None)
def cli(test):
if test is not None:
data = test
else:
data = utils.load('day-5.txt')
if __name__ == '__main__':
cli()
| python |
# chat/routing.py
from django.conf.urls import url
from . import consumers
websocket_urlpatterns = [
url(r'^ws/performance$', consumers.PerformanceConsumer),
url(r'^ws/collect$', consumers.CollectionConsumer),
]
| python |
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
from django.core.files.uploadedfile import SimpleUploadedFile
from s3file.middleware import S3FileMiddleware
class TestS3FileMiddleware:
def test_get_files_from_storage(self):
content = b'test_get_files_from_storage'
default_storage.save('test_get_files_from_storage', ContentFile(content))
files = S3FileMiddleware.get_files_from_storage(['test_get_files_from_storage'])
file = next(files)
assert file.read() == content
def test_process_request(self, rf):
uploaded_file = SimpleUploadedFile('uploaded_file.txt', b'uploaded')
request = rf.post('/', data={'file': uploaded_file})
S3FileMiddleware(lambda x: None)(request)
assert request.FILES.getlist('file')
assert request.FILES.get('file').read() == b'uploaded'
default_storage.save('s3_file.txt', ContentFile(b's3file'))
request = rf.post('/', data={'file': 's3_file.txt', 's3file': 'file'})
S3FileMiddleware(lambda x: None)(request)
assert request.FILES.getlist('file')
assert request.FILES.get('file').read() == b's3file'
| python |
from __future__ import annotations
import uuid
from enum import Enum, auto
from io import StringIO
from threading import Lock
from typing import Dict, List, Optional, Sequence, Union
import networkx as nx
NODE_TYPES = {}
class DuplicateKeyError(Exception):
pass
def register_node(node_subclass):
"""Decorator to register a node subclass"""
NODE_TYPES[node_subclass.__name__] = node_subclass
return node_subclass
def _render_graph(stream, node: Node, indent: str = "", last=True, first=True):
"""Draw a textual representation of the node graph"""
if first:
first_i = ""
second_i = ""
elif last:
first_i = "╰─"
second_i = " "
else:
first_i = "├─"
second_i = "│ "
stream.write(indent + first_i + str(node) + "\n")
indent = indent + second_i
for i, child in enumerate(list(node.children)):
_render_graph(
stream,
child,
indent=indent,
last=(i + 1) == len(node.children),
first=False,
)
class NodeState(Enum):
def _generate_next_value_(name, start, count, last_values): # pylint: disable=E0213
return name
UNCONFIRMED = auto() # Client-side: We expect this but haven't gotten confirmation
CREATED = auto() # Node exists but not confirmed running
RUNNING = auto() # Node is currently in progress
FAILED = auto() # Node ran but failed for some reason
SUCCESS = auto() # Node ran successfully
@register_node
class Node:
"""
Generic object representing a single processing step.
Node can have several parents.
Args:
parents: Either a parent node, or a list of parent nodes
node_id: The tree-ID node, if known. If this is duplicate in the
context of the tree, an exception will be thrown
node_uuid: The UUID for this node. Will be generated if unspecified
tree: The DUI tree object this will belong to
"""
def __init__(
self,
parents: Union[Sequence[Node], Node] = None,
*,
node_id: str = None,
node_uuid: str = None,
):
self.tree: Optional[DUITree] = None
# Handle non-list parents
if parents is not None:
if isinstance(parents, Sequence):
self.parents = list(parents)
else:
self.parents = [parents]
else:
self.parents = []
self.id = str(node_id) if node_id is not None else None
self.uuid = node_uuid or uuid.uuid4().hex
self._children: List[Node] = []
self.state = NodeState.CREATED
@property
def children(self):
return tuple(self._children)
def to_dict(self):
"""Convert this node to a plain literal representation"""
out = {
"type": type(self).__name__,
"id": self.id,
"uuid": self.uuid,
"state": self.state.value,
}
if self.parents:
out["parents"] = [p.id for p in self.parents]
return out
@classmethod
def from_dict(cls, tree: DUITree, data: Dict):
"""Recreate a node from it's dict literal description"""
# Not perfect, as race condition, but checks dev environment
# Problem is: This might be a superclass in which case subclass
# might want to alter, after creation. At the moment we don't
# anticipate loading from dict that often though.
assert tree._lock.locked()
node_id = data["id"]
node_uuid = data["uuid"]
# DAG, so we can assume that parents are made before children
parents = [tree.nodes[parent_id] for parent_id in data.get("parents", [])]
# The constructor recreates all the links
node = cls(parents=parents, node_id=node_id, node_uuid=node_uuid)
        node.state = NodeState(data["state"])
tree.attach(node)
return node
def __str__(self):
return f"Node {self.id}"
class DUITree:
"""Object coordinating the DUI DAG node graph"""
def __init__(self):
self._next_id = 1
self.nodes = {}
self._lock = Lock()
self._roots = []
def attach(self, node: Node) -> Node:
"""Attach a Node to this tree.
If it has an .id, it will be used, if it doesn't already exist,
otherwise it will have one assigned.
Returns the node.
"""
# Validate first before changing anything
if node.id in self.nodes:
raise DuplicateKeyError(f"Node id {node.id} already exists in tree")
if any(x.uuid == node.uuid for x in self.nodes.values()):
raise DuplicateKeyError(f"Duplicate UUID: {node.uuid}")
for parent in node.parents:
if parent.id not in self.nodes:
raise KeyError(f"Parent with ID {parent.id} not a member of tree")
if self.nodes[parent.id] is not parent:
raise ValueError(
f"Parent with ID {parent.id} is different to existing object"
)
if node in parent.children:
raise RuntimeError(
"Node already exists in parent children list... bad tree"
)
with self._lock:
# Check that the UUID doesn't already exist
# Generate or use the node ID
if node.id is None:
node.id = str(self._next_id)
self._next_id += 1
node.tree = self
self.nodes[node.id] = node
# Wire up the parent links
for parent in node.parents:
parent._children.append(node)
# Track roots
if not node.parents:
self._roots.append(node)
return node
def to_dict(self):
return [node.to_dict() for node_id, node in self.nodes.items()]
@classmethod
    def from_dict(cls, data):
all_nodes = {}
# Determine construction order
graph = nx.DiGraph()
for node_data in data:
node_id = node_data["id"]
all_nodes[node_id] = node_data
graph.add_node(node_id)
for parent in node_data.get("parents", []):
graph.add_edge(node_id, parent)
assert nx.is_directed_acyclic_graph(graph), "Node graph non-DAG"
node_order = list(reversed(list(nx.topological_sort(graph))))
# Now sorted, safe to create
tree = DUITree()
for node_id in node_order:
node_type = all_nodes[node_id].get("type", "Node")
assert node_type in NODE_TYPES
            tree.nodes[node_id] = NODE_TYPES[node_type].from_dict(
                tree, all_nodes[node_id]
            )
        return tree
def render_graph(self):
"""Generate an Unicode graph showing the tree structure"""
# Find the root nodes
dest = StringIO()
class FakeRoot:
def __str__(self):
return ""
root = FakeRoot()
root.children = [x for x in self.nodes.values() if not x.parents]
# for root in roots:
_render_graph(dest, root)
return dest.getvalue()
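# Hedged usage sketch: build a tiny DAG with the public API above and print
# its textual rendering. Runs offline; no external services are assumed.
if __name__ == '__main__':
    tree = DUITree()
    root = tree.attach(Node())
    left = tree.attach(Node(root))
    right = tree.attach(Node(root))
    tree.attach(Node([left, right]))    # a node may have several parents
    print(tree.render_graph())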
| python |
from . import CommonViewsTestCase
from .base import BaseAuthInfoViewsTestCase
# Create your tests here.
class AuthInfoViewsTestCase(CommonViewsTestCase):
registered_user = {
'username': 'username_000',
'password': 'password_000',
}
base_action_test_case = BaseAuthInfoViewsTestCase
# ======================================================================
@classmethod
def setUpTestData(cls):
super().setUpTestData()
def setUp(self):
self.create_user(user=self.registered_user)
super().setUp()
def tearDown(self):
super().tearDown()
# ======================================================================
# success
# ======================================================================
# ----- GET -----
def test_get_authenticated_success(self):
success_fail = 'success'
data_expected = {
'is_authenticated': True
}
action = self.base_action_test_case(user=self.registered_user)
client, client_login = action.client_login(client=None, user=self.registered_user)
client, response = action.get(client=client)
action.data_expected['get'][success_fail] = data_expected
action.base_test_get(response=response, success_fail=success_fail, assert_message='views')
def test_get_not_authenticated_success(self):
success_fail = 'success'
data_expected = {
'is_authenticated': False
}
action = self.base_action_test_case(user=None)
client = None
client, response = action.get(client=client)
action.data_expected['get'][success_fail] = data_expected
action.base_test_get(response=response, success_fail=success_fail, assert_message='views')
# ======================================================================
# fail
# ======================================================================
# ----- GET -----
# ----- POST -----
def test_post_fail(self):
method = 'post'
self.base_test_405_fail(method=method)
# ----- PUT -----
def test_put_fail(self):
method = 'put'
self.base_test_405_fail(method=method)
# ----- DELETE -----
def test_delete_fail(self):
method = 'delete'
self.base_test_405_fail(method=method)
# ======================================================================
| python |
from scrapy.exceptions import IgnoreRequest
class TranslationResult(IgnoreRequest):
"""A translation response was received"""
def __init__(self, response, *args, **kwargs):
self.response = response
super(TranslationResult, self).__init__(*args, **kwargs)
class TranslationError(Exception):
def __init__(self):
pass
def error(self):
return "Translation Error"
def warn(self):
return self.error()
def details(self):
return self.error()
class TranslationErrorGeneral(TranslationError):
def __init__(self, message):
self.message = message
super(TranslationErrorGeneral, self).__init__()
def warn(self):
return self.message
class TranslationErrorDueToInvalidResponseCode(TranslationError):
def __init__(self, response):
self.response = response
super(TranslationErrorDueToInvalidResponseCode, self).__init__()
def warn(self):
return "translation failed due to response code = %d"%self.response.status
def details(self):
return "translation failed due to response code = %d, request url = '%s'"%(
self.response.status,
self.response.request.url
)
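# Hedged usage sketch: raising and reporting a failed translation response.
# _FakeResponse is a stand-in for a scrapy Response object and exists only
# for illustration; real middleware would pass the actual response.
if __name__ == '__main__':
    class _FakeRequest:
        url = 'http://example.com/translate'
    class _FakeResponse:
        status = 503
        request = _FakeRequest()
    err = TranslationErrorDueToInvalidResponseCode(_FakeResponse())
    print(err.warn())
    print(err.details())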
| python |
import zutils
class zbrick:
def __init__(self):
self.c = ' '
self.fcolor = zutils.CL_FG
self.bcolor = zutils.CL_BG
self.attr = 0
    def str(self):
        return str(self.c)
def copy_from(self, other):
self.c = other.c
self.fcolor = other.fcolor
self.bcolor = other.bcolor
self.attr = other.attr
def equals(self, other):
return self.c == other.c and self.fcolor == other.fcolor and self.bcolor == other.bcolor and self.attr == other.attr
def __eq__(self, other):
return self.equals(other)
class zwall:
def __init__(self, width, height):
self._width = width
self._height = height
self._wall = [[0]*width for i in range(height)]
self._offset = [0,0]
self.clear()
def get_wall(self):
return self._wall
def get_width(self):
return self._width
def get_height(self):
return self._height
def write_text(self, x, y, text, fg = -1, bg = -1, attr = 0):
x += self._offset[0]
y += self._offset[1]
if fg == -1:
fg = zutils.CL_FG
if bg == -1:
bg = zutils.CL_BG
if(y < 0 or y >= self._height):
return
for _x in range( min(len(text), self._width - x) ):
self._wall[y][x+_x].c = text[_x]
self._wall[y][x+_x].fcolor = fg
self._wall[y][x+_x].bcolor = bg
self._wall[y][x+_x].attr = attr
def scroll_up(self):
old = self._wall.pop(0)
for x in range(self._width):
old[x] = zbrick()
        self._wall.append(old)
def clear(self):
for y in range(self._height):
for x in range(self._width):
self._wall[y][x] = zbrick()
def copy_from(self, otherwall):
for y in range(self._height):
for x in range(self._width):
self._wall[y][x].copy_from(otherwall._wall[y][x])
def __str__(self):
res = ""
for y in range(self._height):
for x in range(self._width):
res += self._wall[y][x].c
res += "\n"
return res
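# Hedged usage sketch: assumes zutils provides the CL_FG/CL_BG colour
# constants used above. Draws one line of text into a small wall and prints
# the resulting character grid.
if __name__ == '__main__':
    wall = zwall(20, 4)
    wall.write_text(2, 1, "hello")
    print(wall)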
| python |
# -*- coding: utf-8 -*-
import base64
import os
import tempfile
from unittest import TestCase
from test_apps import htauth_app
HTPASSWD = 'test_user:$apr1$/W2gsTdJ$J5A3/jiOC/hph1Gcb.0yN/'
class HTAuthAppTestCase(TestCase):
def setUp(self):
_, self.htpasswd_path = tempfile.mkstemp()
f = open(self.htpasswd_path, 'w')
f.write(HTPASSWD)
f.close()
self.actual_app = htauth_app.create_app(
HTAUTH_HTPASSWD_PATH=self.htpasswd_path,
HTAUTH_REALM='Test Realm'
)
self.app = self.actual_app.test_client()
def tearDown(self):
os.unlink(self.htpasswd_path)
def test_no_auth(self):
rsp = self.app.get('/')
assert rsp.status_code == 200
assert rsp.data == 'Hello, World!'
def test_auth_not_ok(self):
rsp = self.app.get('/secret')
assert rsp.status_code == 401
assert rsp.data == 'Unauthorized'
assert rsp.headers['WWW-Authenticate'] == 'Basic realm="Test Realm"'
headers = {
'Authorization': 'Basic %s' % base64.b64encode('spam:eggs')
}
rsp = self.app.get('/secret', headers=headers)
assert rsp.status_code == 401
assert rsp.data == 'Unauthorized'
headers = {
'Authorization': 'Digest meh'
}
try:
rsp = self.app.get('/secret', headers=headers)
except RuntimeError:
pass
def test_auth_ok(self):
headers = {
'Authorization': 'Basic %s' % base64.b64encode('test_user:test_password')
}
rsp = self.app.get('/secret', headers=headers)
assert rsp.status_code == 200
assert rsp.data == 'Hello, test_user!'
| python |
import os
from django.conf.urls import url
from django.utils._os import upath
here = os.path.dirname(upath(__file__))
urlpatterns = [
url(r'^custom_templates/(?P<path>.*)$', 'django.views.static.serve', {
'document_root': os.path.join(here, 'custom_templates')}),
]
| python |
import gym
import numpy as np
import cv2
from collections import deque
class Environment(object):
def __init__(self, env_name, resized_width, resized_height,
agent_history_length, replay_size, alpha, action_repeat=4):
self._env = gym.make(env_name)
self._width = resized_width
self._height = resized_height
self._history_length = agent_history_length
self._replay_size = replay_size
self._state_buffer = deque(maxlen=replay_size)
self._default_priority = 0
self._alpha = alpha
self._action_repeat = action_repeat
@property
def action_size(self):
return self._env.action_space.n
def new_game(self):
frame = self._process_frame(self._env.reset())
self._frames = [frame] * self._history_length
def step(self, action):
reward = 0
for _ in range(self._action_repeat):
frame, reward_action, terminal, info = self._env.step(action)
reward += np.clip(reward_action, -1, 1)
if terminal:
break
frame = self._process_frame(frame)
prev_frames = self._frames
frames = prev_frames[1:] + [frame]
self._frames = frames
if self._replay_size > 0:
self._state_buffer.append({
'frames': frames,
'prev_frames': prev_frames,
'action': action,
'reward': reward,
'terminal': terminal,
'priority': self._default_priority})
return list(frames), reward, terminal, info
def render(self):
self._env.render()
def _process_frame(self, frame):
return cv2.resize(cv2.cvtColor(
frame, cv2.COLOR_RGB2GRAY) / 255., (self._width, self._height))
def _get_sample_probability(self):
priority = np.zeros(len(self._state_buffer))
i = 0
for state in self._state_buffer:
priority[i] = state['priority']
if self._default_priority < priority[i]:
self._default_priority = priority[i]
i += 1
probability = np.power(priority + 1e-7, self._alpha)
return probability / np.sum(probability)
def sample(self, batch_size):
        if self._replay_size <= 0:
            raise Exception('replay_size must be > 0 to sample!')
buffer_size = len(self._state_buffer)
if buffer_size < batch_size:
return [], [], [], [], [], []
else:
prev_frames_batch = []
current_frames_batch = []
action_batch = []
reward_batch = []
terminal_batch = []
if self._alpha == 0:
state_batch = np.random.choice(
self._state_buffer, batch_size)
else:
state_batch = np.random.choice(
self._state_buffer, batch_size,
p=self._get_sample_probability())
for state in state_batch:
prev_frames_batch.append(state['prev_frames'])
current_frames_batch.append(state['frames'])
action_batch.append(state['action'])
reward_batch.append(state['reward'])
terminal_batch.append(state['terminal'])
return prev_frames_batch, action_batch, reward_batch,\
current_frames_batch, terminal_batch, state_batch
def get_frames(self):
return list(self._frames)
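# Hedged usage sketch: requires gym with an Atari environment and OpenCV
# installed; 'Breakout-v0' is a placeholder environment id.
if __name__ == '__main__':
    env = Environment('Breakout-v0', 84, 84,
                      agent_history_length=4, replay_size=10000, alpha=0.6)
    env.new_game()
    frames, reward, terminal, info = env.step(np.random.randint(env.action_size))
    print(len(frames), reward, terminal)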
| python |
from collections import defaultdict
from copy import deepcopy
from geopy.geocoders import Nominatim
import util
import twitter
import json
import time
import string
import stop_words
geolocator = Nominatim()
STOP_WORDS = stop_words.get_stop_words('english')
api = twitter.Api(consumer_key='b170h2arKC4VoITriN5jIjFRN',
consumer_secret='z2npapLunYynvp9E783KsTiTMUR4CE6jgGIFqXOdzmXNkYI7g9',
access_token_key='3842613073-L7vq82QRYRGCbO1kzN9bYfjfbbV7kOpWWLYnBGG',
access_token_secret='FU6AJWG4iDHfzQWhjKB1r3SIwoyzTcgFe0LjyNfq8r6aR')
cached_query_results = {}
cached_user_results = {}
def search_tweets(query, max_searches=5, override_cache=False):
"""Searches for tweets that match query.
Args:
query: The search query string. Can be a phrase or hashtag.
See https://dev.twitter.com/rest/reference/get/search/tweets
max_searches: The maximum number of API searches that will be
executed for the given query. Default value is 5 searches.
100 tweets can be obtained per API search, so by default
a maximum of 500 tweets will be returned.
override_cache: Whether to execute a search even if there is
already a cached result for the query. Defaults to False.
Returns:
A list of tweet objects matching the query with most recent
tweets first.
Raises:
UserWarning: If override_cache is set to False and result for
input query has already been cached.
"""
    if query in cached_query_results and not override_cache:
raise UserWarning('input query {0} is already in '
'cached_query_results'.format(query))
remaining_timeout = api.GetSleepTime('/search/tweets')
if remaining_timeout != 0:
print ('searchTweets() must wait {0} seconds in order to not exceed '
'the Twitter API rate limit.'.format(remaining_timeout + 1))
time.sleep(remaining_timeout + 1)
result = []
search_result = api.GetSearch(term=query, count=100) # could also add lang='en'
result.extend(search_result)
oldest_tweet_id = min([t.GetId() for t in search_result])
num_searches = 1
while len(search_result) == 100 and num_searches < max_searches:
search_result = _search_tweets_aux(query, oldest_tweet_id)
oldest_tweet_id = min([t.GetId() for t in search_result])
result.extend(search_result)
num_searches += 1
global cached_query_results
cached_query_results[query] = result
return result
def _search_tweets_aux(query, max_tweet_id):
"""Auxiliary helper function for search_tweets."""
remaining_timeout = api.GetSleepTime('/search/tweets')
if remaining_timeout != 0:
print ('searchTweets() must wait {0} seconds in order to not exceed '
'the Twitter API rate limit.'.format(remaining_timeout + 1))
time.sleep(remaining_timeout + 1)
search_result = api.GetSearch(term=query, count=100, max_id=max_tweet_id - 1)
return search_result
def get_coordinate_list(tweets):
"""Gets list of (longitude, latitude) tuples for tweets in list.
Args:
tweets: List of tweet objects to extract geo coordinates from.
Will ignore tweets in list for which geo coordinates cannot
be extracted.
Returns:
List of (longitude, latitude) tuples for tweets in list.
"""
coord_list = []
for tweet in tweets:
coords = get_coordinates(tweet)
if coords:
coord_list.append(coords)
return coord_list
def get_coordinates(tweet):
"""Gets longitude and latitude of tweet.
Args:
tweet: The tweet object to extract geo coordinates from.
Returns:
Tuple of (longitude, latitude) for the input tweet. Returns
False if unable to extract geo coordinates for tweet.
"""
# try to get tweet geo coordinates directly if available
coordinates = tweet.GetCoordinates()
if coordinates:
return coordinates
    # otherwise parse geo coordinates from the user location if available
location = tweet.user.location
if location:
coordinates = geolocator.geocode(location)
if coordinates:
return coordinates.longitude, coordinates.latitude
# not able to extract geo coordinates, so return False
return False
def no_duplicate_tweets(tweets):
"""Returns True iff tweets in input list are all unique."""
ids = set()
for tweet in tweets:
tweet_id = tweet.GetId()
if tweet_id in ids:
return False
ids.add(tweet_id)
return True
def tweets_to_text_strings(tweets):
"""Converts list of tweets to list of tweet text strings."""
return [tweet.GetText() for tweet in tweets]
def tweets_to_word_counter(tweets, normalize=False, lowercase=True):
"""Converts list of tweets to dict of word counts.
Args:
tweets: List of tweet objects to process.
normalize: Whether to return frequencies instead of counts.
Default value is False (return counts).
lowercase: Whether to convert all words to lowercase.
            Default value is True.
Returns:
util.Counter object containing counts of words in the tweets.
Words are keys, counts are values. If normalize is set to True,
then function will return word frequencies as values.
"""
word_counter = util.Counter()
for tweet in tweets:
        word_counter += string_to_nonstopword_counter(tweet.GetText(), lowercase)
if normalize:
word_counter.normalize()
return word_counter
def string_to_nonstopword_list(text):
"""Returns list of non-stopwords in string.
Args:
text: The string to process.
Returns:
List of non-stopwords in text string. Punctuation, whitespace,
        and hyperlinks are removed. Hashtag and @USERNAME punctuation
is not removed.
"""
# split strings into words and remove whitespace:
words = text.split()
    # remove non-hashtag and non-username punctuation:
chars_to_remove = list(deepcopy(string.punctuation))
chars_to_remove.remove('#')
chars_to_remove.remove('@')
chars_to_remove = ''.join(chars_to_remove)
words = [word.strip(chars_to_remove) for word in words]
# remove empty strings:
words = [word for word in words if word]
# remove stopwords:
words = filter(lambda w: w.lower() not in STOP_WORDS, words)
# remove hyperlinks:
    words = filter(lambda w: not w.startswith('https://'), words)
# remove non ascii characters:
to_return = []
for word in words:
valid = True
for char in word:
if char not in string.printable:
valid = False
break
if valid:
to_return.append(word)
return to_return
def string_to_nonstopword_counter(text, lowercase=True):
"""Converts string to util.Counter of non-stopwords in text string.
Args:
text: The string to process.
        lowercase: Whether to convert the words in the string to lowercase.
Returns:
util.Counter object containing counts of non-stopwords in string.
Punctuation, whitespace, and hyperlinks are removed. Hashtag
        and @USERNAME punctuation is not removed.
"""
words = string_to_nonstopword_list(text)
word_counter = util.Counter()
for word in words:
if lowercase:
word = word.lower()
word_counter[word] += 1
return word_counter
def get_user_tweets(username, max_searches=5, override_cache=False):
"""Searches for tweets that match query.
Args:
username: The username of the Twitter account that tweets will
be downloaded for.
max_searches: The maximum number of API searches that will be
executed for the given user. Default value is 5 searches.
200 tweets can be obtained per API search, so by default
a maximum of 1000 tweets will be returned.
override_cache: Whether to execute a search even if there is
already a cached result for the specifed Twitter user.
Defaults to False.
Returns:
A list of tweet objects corresponding to the specified users's
public tweets, with their most recent tweets first.
"""
    if username in cached_user_results and not override_cache:
        raise UserWarning('input username {0} is already in '
                          'cached_user_results'.format(username))
remaining_timeout = api.GetSleepTime('/search/tweets') # might need to change this
if remaining_timeout != 0:
print ('searchTweets() must wait {0} seconds in order to not exceed '
'the Twitter API rate limit.'.format(remaining_timeout + 1))
time.sleep(remaining_timeout + 1)
result = []
search_result = api.GetUserTimeline(screen_name=username, count=200) # could also add lang='en'
result.extend(search_result)
oldest_tweet_id = min([t.GetId() for t in search_result])
num_searches = 1
while len(search_result) == 200 and num_searches < max_searches:
search_result = _get_user_tweets_aux(username, oldest_tweet_id)
if not search_result:
break
oldest_tweet_id = min([t.GetId() for t in search_result])
result.extend(search_result)
num_searches += 1
global cached_user_results
cached_user_results[username] = result
return result
def _get_user_tweets_aux(username, max_tweet_id):
"""Auxiliary helper function for search_tweets."""
remaining_timeout = api.GetSleepTime('/search/tweets')
if remaining_timeout != 0:
print ('searchTweets() must wait {0} seconds in order to not exceed '
'the Twitter API rate limit.'.format(remaining_timeout + 1))
time.sleep(remaining_timeout + 1)
search_result = api.GetUserTimeline(screen_name=username, count=200,
max_id=max_tweet_id - 1)
return search_result
def split_words_hashtags_usermentions(word_counter):
"""Splits all words into words, hashtags, and usermentions counters."""
pure_word_counter = util.Counter()
hashtag_counter = util.Counter()
usermentions_counter = util.Counter()
for word in word_counter:
if word[0] == '#':
hashtag_counter[word] = word_counter[word]
elif word[0] == '@':
usermentions_counter[word] = word_counter[word]
else:
pure_word_counter[word] = word_counter[word]
return pure_word_counter, hashtag_counter, usermentions_counter
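# Hedged usage sketch: the text-processing helpers above need no network
# access, only the stop_words package and the local util.Counter class.
if __name__ == '__main__':
    _sample = 'Loving the new #python release from @python_org https://python.org'
    print(string_to_nonstopword_list(_sample))
    _counts = string_to_nonstopword_counter(_sample)
    print(split_words_hashtags_usermentions(_counts))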
| python |
import pytest
from pji.utils import duplicates
@pytest.mark.unittest
class TestUtilsCollection:
def test_duplicates(self):
assert duplicates([1, 2, 3]) == set()
assert duplicates({1, 2, 3}) == set()
assert duplicates((1, 2, 3)) == set()
assert duplicates([1, 2, 3, 2, 3]) == {2, 3}
assert duplicates((1, 2, 3, 2, 3)) == {2, 3}
| python |
# Copyright 2016 Andy Chu. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
"""
cmd_parse.py - Parse high level shell commands.
"""
from __future__ import print_function
from _devbuild.gen import grammar_nt
from _devbuild.gen.id_kind_asdl import Id, Id_t, Kind
from _devbuild.gen.types_asdl import lex_mode_e
from _devbuild.gen.syntax_asdl import (
condition, condition_t,
command, command_t,
command__Simple, command__DoGroup, command__ForExpr, command__ForEach,
command__WhileUntil, command__Case, command__If, command__ShFunction,
command__Subshell, command__DBracket, command__DParen,
command__CommandList, command__Proc,
BraceGroup,
case_arm,
sh_lhs_expr, sh_lhs_expr_t,
redir, redir_param, redir_param__HereDoc,
redir_loc, redir_loc_t,
word, word_e, word_t, compound_word, Token,
word_part_e, word_part_t,
assign_pair, env_pair,
assign_op_e,
source, parse_result, parse_result_t,
speck, name_type,
proc_sig_e, proc_sig__Closed,
)
from _devbuild.gen import syntax_asdl # token, etc.
from asdl import runtime
from core import alloc
from core import error
from core import ui
from core.pyerror import log, p_die
from frontend import consts
from frontend import match
from frontend import reader
from osh import braces
from osh import bool_parse
from osh import word_
from typing import Optional, List, Dict, Any, Tuple, cast, TYPE_CHECKING
if TYPE_CHECKING:
from core.alloc import Arena
from frontend.lexer import Lexer
from frontend.parse_lib import ParseContext, AliasesInFlight
from frontend.reader import _Reader
from osh.word_parse import WordParser
def _KeywordSpid(w):
# type: (word_t) -> int
"""
  TODO: Can we optimize this?
Assume that 'while', 'case', etc. are a specific type of compound_word.
  I tested turning LeftMostSpanForWord into a no-op and couldn't observe a
  difference on a ~500 ms parse of testdata/osh-runtime/abuild. So maybe this
doesn't make sense.
"""
return word_.LeftMostSpanForWord(w)
def _KeywordToken(UP_w):
# type: (word_t) -> Token
"""Given a word that IS A keyword, return the single token at the start.
In C++, this casts without checking, so BE CAREFUL to call it in the right context.
"""
assert UP_w.tag_() == word_e.Compound, UP_w
w = cast(compound_word, UP_w)
part = w.parts[0]
assert part.tag_() == word_part_e.Literal, part
return cast(Token, part)
def _ReadHereLines(line_reader, # type: _Reader
h, # type: redir
delimiter, # type: str
):
# type: (...) -> Tuple[List[Tuple[int, str, int]], Tuple[int, str, int]]
# NOTE: We read all lines at once, instead of parsing line-by-line,
# because of cases like this:
# cat <<EOF
# 1 $(echo 2
# echo 3) 4
# EOF
here_lines = [] # type: List[Tuple[int, str, int]]
last_line = None # type: Tuple[int, str, int]
strip_leading_tabs = (h.op.id == Id.Redir_DLessDash)
while True:
line_id, line, unused_offset = line_reader.GetLine()
if line is None: # EOF
# An unterminated here doc is just a warning in bash. We make it
# fatal because we want to be strict, and because it causes problems
# reporting other errors.
# Attribute it to the << in <<EOF for now.
p_die("Couldn't find terminator for here doc that starts here",
token=h.op)
assert len(line) != 0 # None should be the empty line
# If op is <<-, strip off ALL leading tabs -- not spaces, and not just
# the first tab.
start_offset = 0
if strip_leading_tabs:
n = len(line)
i = 0 # used after loop exit
while i < n:
if line[i] != '\t':
break
i += 1
start_offset = i
if line[start_offset:].rstrip() == delimiter:
last_line = (line_id, line, start_offset)
break
here_lines.append((line_id, line, start_offset))
return here_lines, last_line
def _MakeLiteralHereLines(here_lines, # type: List[Tuple[int, str, int]]
arena, # type: Arena
):
# type: (...) -> List[word_part_t] # less precise because List is invariant type
"""Create a line_span and a token for each line."""
tokens = [] # type: List[Token]
for line_id, line, start_offset in here_lines:
span_id = arena.AddLineSpan(line_id, start_offset, len(line))
t = Token(Id.Lit_Chars, span_id, line[start_offset:])
tokens.append(t)
parts = [cast(word_part_t, t) for t in tokens]
return parts
def _ParseHereDocBody(parse_ctx, r, line_reader, arena):
# type: (ParseContext, redir, _Reader, Arena) -> None
"""Fill in attributes of a pending here doc node."""
h = cast(redir_param__HereDoc, r.arg)
# "If any character in word is quoted, the delimiter shall be formed by
# performing quote removal on word, and the here-document lines shall not
# be expanded. Otherwise, the delimiter shall be the word itself."
# NOTE: \EOF counts, or even E\OF
ok, delimiter, delim_quoted = word_.StaticEval(h.here_begin)
if not ok:
p_die('Invalid here doc delimiter', word=h.here_begin)
here_lines, last_line = _ReadHereLines(line_reader, r, delimiter)
if delim_quoted: # << 'EOF'
# Literal for each line.
h.stdin_parts = _MakeLiteralHereLines(here_lines, arena)
else:
line_reader = reader.VirtualLineReader(here_lines, arena)
w_parser = parse_ctx.MakeWordParserForHereDoc(line_reader)
w_parser.ReadHereDocBody(h.stdin_parts) # fills this in
end_line_id, end_line, end_pos = last_line
# Create a span with the end terminator. Maintains the invariant that
# the spans "add up".
h.here_end_span_id = arena.AddLineSpan(end_line_id, end_pos, len(end_line))
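# Illustrative example of the two paths above (a sketch, not executed here):
#
#   cat <<EOF      # unquoted delimiter -> lines go through the word parser,
#   hi $x          #                       so $x is expanded
#   EOF
#
#   cat <<'EOF'    # quoted delimiter -> _MakeLiteralHereLines, no expansion
#   hi $x
#   EOF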
def _MakeAssignPair(parse_ctx, preparsed, arena):
# type: (ParseContext, PreParsedItem, Arena) -> assign_pair
"""Create an assign_pair from a 4-tuples from DetectShAssignment."""
left_token, close_token, part_offset, w = preparsed
if left_token.id == Id.Lit_VarLike: # s=1
if left_token.val[-2] == '+':
var_name = left_token.val[:-2]
op = assign_op_e.PlusEqual
else:
var_name = left_token.val[:-1]
op = assign_op_e.Equal
tmp = sh_lhs_expr.Name(var_name)
tmp.spids.append(left_token.span_id)
lhs = cast(sh_lhs_expr_t, tmp)
elif left_token.id == Id.Lit_ArrayLhsOpen and parse_ctx.one_pass_parse:
var_name = left_token.val[:-1]
if close_token.val[-2] == '+':
op = assign_op_e.PlusEqual
else:
op = assign_op_e.Equal
left_spid = left_token.span_id + 1
right_spid = close_token.span_id
left_span = parse_ctx.arena.GetLineSpan(left_spid)
right_span = parse_ctx.arena.GetLineSpan(right_spid)
assert left_span.line_id == right_span.line_id, \
'%s and %s not on same line' % (left_span, right_span)
line = parse_ctx.arena.GetLine(left_span.line_id)
index_str = line[left_span.col : right_span.col]
lhs = sh_lhs_expr.UnparsedIndex(var_name, index_str)
elif left_token.id == Id.Lit_ArrayLhsOpen: # a[x++]=1
var_name = left_token.val[:-1]
if close_token.val[-2] == '+':
op = assign_op_e.PlusEqual
else:
op = assign_op_e.Equal
spid1 = left_token.span_id
spid2 = close_token.span_id
span1 = arena.GetLineSpan(spid1)
span2 = arena.GetLineSpan(spid2)
if span1.line_id == span2.line_id:
line = arena.GetLine(span1.line_id)
# extract what's between brackets
code_str = line[span1.col + span1.length : span2.col]
else:
raise NotImplementedError('%d != %d' % (span1.line_id, span2.line_id))
a_parser = parse_ctx.MakeArithParser(code_str)
# a[i+1]= is a place
src = source.Reparsed('array place',
left_token.span_id, close_token.span_id)
with alloc.ctx_Location(arena, src):
index_node = a_parser.Parse() # may raise error.Parse
tmp3 = sh_lhs_expr.IndexedName(var_name, index_node)
tmp3.spids.append(left_token.span_id)
lhs = cast(sh_lhs_expr_t, tmp3)
else:
raise AssertionError()
# TODO: Should we also create a rhs_expr.ArrayLiteral here?
n = len(w.parts)
if part_offset == n:
rhs = word.Empty() # type: word_t
else:
# tmp2 is for intersection of C++/MyPy type systems
tmp2 = compound_word(w.parts[part_offset:])
word_.TildeDetectAssign(tmp2)
rhs = tmp2
pair = syntax_asdl.assign_pair(lhs, op, rhs, [left_token.span_id])
return pair
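# Illustrative sketch of the mapping performed above (shell input -> lhs / op):
#
#   s=1       -> sh_lhs_expr.Name('s'),              assign_op_e.Equal
#   s+=1      -> sh_lhs_expr.Name('s'),              assign_op_e.PlusEqual
#   a[i+1]=x  -> sh_lhs_expr.IndexedName('a', ...),  assign_op_e.Equal
#                (the index text is re-parsed with an arith parser)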
def _AppendMoreEnv(preparsed_list, more_env):
# type: (PreParsedList, List[env_pair]) -> None
"""Helper to modify a SimpleCommand node.
Args:
preparsed: a list of 4-tuples from DetectShAssignment
more_env: a list to append env_pairs to
"""
for left_token, _, part_offset, w in preparsed_list:
if left_token.id != Id.Lit_VarLike: # can't be a[x]=1
p_die("Environment binding shouldn't look like an array assignment",
token=left_token)
if left_token.val[-2] == '+':
p_die('Expected = in environment binding, got +=', token=left_token)
var_name = left_token.val[:-1]
n = len(w.parts)
if part_offset == n:
val = word.Empty() # type: word_t
else:
val = compound_word(w.parts[part_offset:])
pair = syntax_asdl.env_pair(var_name, val, [left_token.span_id])
more_env.append(pair)
if TYPE_CHECKING:
PreParsedItem = Tuple[Token, Optional[Token], int, compound_word]
PreParsedList = List[PreParsedItem]
def _SplitSimpleCommandPrefix(words):
# type: (List[compound_word]) -> Tuple[PreParsedList, List[compound_word]]
"""Second pass of SimpleCommand parsing: look for assignment words."""
preparsed_list = [] # type: PreParsedList
suffix_words = [] # type: List[compound_word]
done_prefix = False
for w in words:
if done_prefix:
suffix_words.append(w)
continue
left_token, close_token, part_offset = word_.DetectShAssignment(w)
if left_token:
preparsed_list.append((left_token, close_token, part_offset, w))
else:
done_prefix = True
suffix_words.append(w)
return preparsed_list, suffix_words
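# Illustrative split (sketch): for the words of 'FOO=1 BAR=2 env ls',
# preparsed_list holds the (token, close_token, offset, word) tuples for FOO=1
# and BAR=2, and suffix_words is [env, ls]; once the first non-assignment word
# is seen, everything after it stays in suffix_words.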
def _MakeSimpleCommand(preparsed_list, suffix_words, redirects, block):
# type: (PreParsedList, List[compound_word], List[redir], Optional[BraceGroup]) -> command__Simple
"""Create an command.Simple node."""
# FOO=(1 2 3) ls is not allowed.
for _, _, _, w in preparsed_list:
if word_.HasArrayPart(w):
p_die("Environment bindings can't contain array literals", word=w)
# NOTE: It would be possible to add this check back. But it already happens
# at runtime in EvalWordSequence2.
# echo FOO=(1 2 3) is not allowed (but we should NOT fail on echo FOO[x]=1).
if 0:
for w in suffix_words:
if word_.HasArrayPart(w):
p_die("Commands can't contain array literals", word=w)
# NOTE: We only do brace DETECTION here, not brace EXPANSION. Therefore we
# can't implement bash's behavior of having say {~bob,~jane}/src work,
# because we only have a BracedTree.
# This is documented in spec/brace-expansion.
# NOTE: Technically we could do expansion outside of 'oshc translate', but it
# doesn't seem worth it.
words2 = braces.BraceDetectAll(suffix_words)
words3 = word_.TildeDetectAll(words2)
more_env = [] # type: List[env_pair]
_AppendMoreEnv(preparsed_list, more_env)
# do_fork by default
node = command.Simple(words3, redirects, more_env, block, True)
return node
class VarChecker(object):
"""Statically check for proc and variable usage errors."""
def __init__(self):
# type: () -> None
"""
Args:
oil_proc: Whether to disallow nested proc/function declarations
"""
# self.tokens for location info: 'proc' or another token
self.tokens = [] # type: List[Token]
self.names = [] # type: List[Dict[str, Id_t]]
def Push(self, blame_tok):
# type: (Token) -> None
"""
Bash allows this, but it's confusing because it's the same as two functions
at the top level.
f() {
g() {
echo 'top level function defined in another one'
}
}
Oil disallows nested procs.
"""
if len(self.tokens) != 0:
if self.tokens[0].id == Id.KW_Proc or blame_tok.id == Id.KW_Proc:
p_die("procs and shell functions can't be nested", token=blame_tok)
self.tokens.append(blame_tok)
entry = {} # type: Dict[str, Id_t]
self.names.append(entry)
def Pop(self):
# type: () -> None
self.names.pop()
self.tokens.pop()
def Check(self, keyword_id, name_tok):
# type: (Id_t, Token) -> None
"""Check for errors in declaration and mutation errors.
var x, const x:
x already declared
setvar x:
x is not declared
x is constant
setglobal x:
      No errors are possible; we would need all of the following conditions to
      statically know the names:
- no 'source'
- shopt -u copy_env.
- AND use lib has to be static
setref x:
Should only mutate out params
Also should p(:out) declare 'out' as well as '__out'? Then you can't have
local variables with the same name.
"""
# Don't check the global level! Semantics are different here!
if len(self.names) == 0:
return
top = self.names[-1]
name = name_tok.val
if keyword_id in (Id.KW_Const, Id.KW_Var):
if name in top:
p_die('%r was already declared', name, token=name_tok)
else:
top[name] = keyword_id
if keyword_id == Id.KW_SetVar:
if name not in top:
p_die("%r hasn't been declared", name, token=name_tok)
if name in top and top[name] == Id.KW_Const:
p_die("Can't modify constant %r", name, token=name_tok)
# TODO: setref should only mutate out params.
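# Illustrative use of VarChecker (a sketch; the token objects are hypothetical):
#
#   vc = VarChecker()
#   vc.Push(proc_tok)              # entering a proc body
#   vc.Check(Id.KW_Var, x_tok)     # 'var x'    -> records x
#   vc.Check(Id.KW_Var, x_tok)     # 'var x'    -> p_die: already declared
#   vc.Check(Id.KW_SetVar, y_tok)  # 'setvar y' -> p_die: hasn't been declared
#   vc.Pop()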
class ctx_VarChecker(object):
def __init__(self, var_checker, blame_tok):
# type: (VarChecker, Token) -> None
var_checker.Push(blame_tok)
self.var_checker = var_checker
def __enter__(self):
# type: () -> None
pass
def __exit__(self, type, value, traceback):
# type: (Any, Any, Any) -> None
self.var_checker.Pop()
SECONDARY_KEYWORDS = [
Id.KW_Do, Id.KW_Done, Id.KW_Then, Id.KW_Fi, Id.KW_Elif, Id.KW_Else, Id.KW_Esac
]
class CommandParser(object):
"""
Args:
word_parse: to get a stream of words
lexer: for lookahead in function def, PushHint of ()
line_reader: for here doc
"""
def __init__(self, parse_ctx, w_parser, lexer, line_reader):
# type: (ParseContext, WordParser, Lexer, _Reader) -> None
self.parse_ctx = parse_ctx
self.aliases = parse_ctx.aliases # aliases to expand at parse time
self.w_parser = w_parser # type: WordParser # for normal parsing
self.lexer = lexer # for pushing hints, lookahead to (
self.line_reader = line_reader # for here docs
self.arena = parse_ctx.arena # for adding here doc and alias spans
self.eof_id = Id.Eof_Real
self.aliases_in_flight = [] # type: AliasesInFlight
# A hacky boolean to remove 'if cd / {' ambiguity.
self.allow_block = True
self.parse_opts = parse_ctx.parse_opts
# Note: VarChecker is instantiated with each CommandParser, which means
# that two 'proc foo' -- inside a command sub and outside -- don't
# conflict, because they use different CommandParser instances. I think
# this OK but you can imagine different behaviors.
self.var_checker = VarChecker()
self.Reset()
  # These two Init_() functions simulate "keyword args" in C++.
def Init_EofId(self, eof_id):
# type: (Id_t) -> None
self.eof_id = eof_id
def Init_AliasesInFlight(self, aliases_in_flight):
# type: (AliasesInFlight) -> None
self.aliases_in_flight = aliases_in_flight
def Reset(self):
# type: () -> None
"""Reset our own internal state.
Called by the interactive loop.
"""
# Cursor state set by _Peek()
self.next_lex_mode = lex_mode_e.ShCommand
self.cur_word = None # type: word_t # current word
self.c_kind = Kind.Undefined
self.c_id = Id.Undefined_Tok
self.pending_here_docs = [] # type: List[redir] # should have HereLiteral arg
def ResetInputObjects(self):
# type: () -> None
"""Reset the internal state of our inputs.
Called by the interactive loop.
"""
self.w_parser.Reset()
self.lexer.ResetInputObjects()
self.line_reader.Reset()
def _Next(self):
# type: () -> None
"""Called when we don't need to look at the current token anymore.
A subsequent call to _Peek() will read the next token and store its Id and Kind.
"""
self.next_lex_mode = lex_mode_e.ShCommand
def _Peek(self):
# type: () -> None
"""Helper method.
    Reads the next word if needed and updates cur_word, c_kind, and c_id.
    Errors such as a bad command sub word or an unterminated quoted string are
    raised by the word parser.
"""
if self.next_lex_mode != lex_mode_e.Undefined:
w = self.w_parser.ReadWord(self.next_lex_mode)
# Here docs only happen in command mode, so other kinds of newlines don't
# count.
if w.tag_() == word_e.Token:
tok = cast(Token, w)
if tok.id == Id.Op_Newline:
for h in self.pending_here_docs:
_ParseHereDocBody(self.parse_ctx, h, self.line_reader, self.arena)
del self.pending_here_docs[:] # No .clear() until Python 3.3.
self.cur_word = w
self.c_kind = word_.CommandKind(self.cur_word)
self.c_id = word_.CommandId(self.cur_word)
self.next_lex_mode = lex_mode_e.Undefined
def _Eat(self, c_id):
# type: (Id_t) -> None
actual_id = word_.CommandId(self.cur_word)
msg = 'Expected word type %s, got %s' % (
ui.PrettyId(c_id), ui.PrettyId(actual_id)
)
self._Eat2(c_id, msg)
def _Eat2(self, c_id, msg):
# type: (Id_t, str) -> None
"""Consume a word of a type. If it doesn't match, return False.
Args:
c_id: the Id we expected
msg: improved error message
"""
self._Peek()
# TODO: Printing something like KW_Do is not friendly. We can map
# backwards using the _KEYWORDS list in frontend/lexer_def.py.
if self.c_id != c_id:
p_die(msg, word=self.cur_word)
self._Next()
def _NewlineOk(self):
# type: () -> None
"""Check for optional newline and consume it."""
self._Peek()
if self.c_id == Id.Op_Newline:
self._Next()
self._Peek()
def _AtSecondaryKeyword(self):
# type: () -> bool
if self.c_id in SECONDARY_KEYWORDS:
return True
return False
def ParseRedirect(self):
# type: () -> redir
self._Peek()
assert self.c_kind == Kind.Redir, self.cur_word
op_tok = cast(Token, self.cur_word) # for MyPy
op_val = op_tok.val
if op_val[0] == '{':
pos = op_val.find('}')
      assert pos != -1  # lexer ensures this
loc = redir_loc.VarName(op_val[1:pos]) # type: redir_loc_t
elif op_val[0].isdigit():
pos = 1
if op_val[1].isdigit():
pos = 2
loc = redir_loc.Fd(int(op_val[:pos]))
else:
loc = redir_loc.Fd(consts.RedirDefaultFd(op_tok.id))
self._Next()
self._Peek()
# Here doc
if op_tok.id in (Id.Redir_DLess, Id.Redir_DLessDash):
arg = redir_param.HereDoc()
arg.here_begin = self.cur_word
r = redir(op_tok, loc, arg)
self.pending_here_docs.append(r) # will be filled on next newline.
self._Next()
return r
# Other redirect
if self.c_kind != Kind.Word:
p_die('Invalid token after redirect operator', word=self.cur_word)
arg_word = self.cur_word
tilde = word_.TildeDetect(arg_word)
if tilde:
arg_word = tilde
self._Next()
# We should never get Empty, Token, etc.
assert arg_word.tag_() == word_e.Compound, arg_word
return redir(op_tok, loc, cast(compound_word, arg_word))
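  # Illustrative mapping for the redirect location above (sketch):
  #
  #   >out.txt    -> redir_loc.Fd(1)          (the operator's default fd)
  #   2>err.txt   -> redir_loc.Fd(2)
  #   {fd}>f.txt  -> redir_loc.VarName('fd')
  #   cat <<EOF   -> redir_param.HereDoc, filled in at the next newline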
def _ParseRedirectList(self):
# type: () -> List[redir]
"""Try parsing any redirects at the cursor.
This is used for blocks only, not commands.
    Parse errors are raised rather than returned.
"""
redirects = [] # type: List[redir]
while True:
self._Peek()
# This prediction needs to ONLY accept redirect operators. Should we
      # make them a separate Token kind?
if self.c_kind != Kind.Redir:
break
node = self.ParseRedirect()
redirects.append(node)
self._Next()
return redirects
def _ScanSimpleCommand(self):
# type: () -> Tuple[List[redir], List[compound_word], Optional[BraceGroup]]
"""First pass: Split into redirects and words."""
redirects = [] # type: List[redir]
words = [] # type: List[compound_word]
block = None # type: Optional[BraceGroup]
while True:
self._Peek()
if self.c_kind == Kind.Redir:
node = self.ParseRedirect()
redirects.append(node)
elif self.c_kind == Kind.Word:
if self.parse_opts.parse_brace():
# Treat { and } more like operators
if self.c_id == Id.Lit_LBrace:
if self.allow_block: # Disabled for if/while condition, etc.
block = self.ParseBraceGroup()
if 0:
print('--')
block.PrettyPrint()
print('\n--')
break
elif self.c_id == Id.Lit_RBrace:
# Another thing: { echo hi }
# We're DONE!!!
break
w = cast(compound_word, self.cur_word) # Kind.Word ensures this
words.append(w)
elif self.parse_opts.parse_amp() and self.c_id == Id.Op_Amp:
# TODO:
# myprog &2 > &1 should be parsed
p_die('TODO: Parse Redirect', word=self.cur_word)
else:
break
self._Next()
return redirects, words, block
def _MaybeExpandAliases(self, words):
# type: (List[compound_word]) -> Optional[command_t]
"""Try to expand aliases.
Args:
words: A list of Compound
    Returns:
      A new LST node if any aliases were expanded, or None otherwise.
Our implementation of alias has two design choices:
- Where to insert it in parsing. We do it at the end of ParseSimpleCommand.
- What grammar rule to parse the expanded alias buffer with. In our case
it's ParseCommand().
This doesn't quite match what other shells do, but I can't figure out a
    better place.
Most test cases pass, except for ones like:
alias LBRACE='{'
LBRACE echo one; echo two; }
alias MULTILINE='echo 1
echo 2
echo 3'
MULTILINE
    NOTE: dash handles aliases in a totally different way. It has a global
variable checkkwd in parser.c. It assigns it all over the grammar, like
this:
checkkwd = CHKNL | CHKKWD | CHKALIAS;
The readtoken() function checks (checkkwd & CHKALIAS) and then calls
lookupalias(). This seems to provide a consistent behavior among shells,
but it's less modular and testable.
Bash also uses a global 'parser_state & PST_ALEXPNEXT'.
"""
# Start a new list if there aren't any. This will be passed recursively
# through CommandParser instances.
aliases_in_flight = (
self.aliases_in_flight if len(self.aliases_in_flight) else []
)
# for error message
first_word_str = None # type: Optional[str]
argv0_spid = word_.LeftMostSpanForWord(words[0])
expanded = [] # type: List[str]
i = 0
n = len(words)
while i < n:
w = words[i]
ok, word_str, quoted = word_.StaticEval(w)
if not ok or quoted:
break
alias_exp = self.aliases.get(word_str)
if alias_exp is None:
break
# Prevent infinite loops. This is subtle: we want to prevent infinite
# expansion of alias echo='echo x'. But we don't want to prevent
# expansion of the second word in 'echo echo', so we add 'i' to
# "aliases_in_flight".
if (word_str, i) in aliases_in_flight:
break
if i == 0:
first_word_str = word_str # for error message
#log('%r -> %r', word_str, alias_exp)
aliases_in_flight.append((word_str, i))
expanded.append(alias_exp)
i += 1
if not alias_exp.endswith(' '):
# alias e='echo [ ' is the same expansion as
# alias e='echo ['
# The trailing space indicates whether we should continue to expand
# aliases; it's not part of it.
expanded.append(' ')
break # No more expansions
if len(expanded) == 0: # No expansions; caller does parsing.
return None
# We got some expansion. Now copy the rest of the words.
# We need each NON-REDIRECT word separately! For example:
# $ echo one >out two
# dash/mksh/zsh go beyond the first redirect!
while i < n:
w = words[i]
spid1 = word_.LeftMostSpanForWord(w)
spid2 = word_.RightMostSpanForWord(w)
span1 = self.arena.GetLineSpan(spid1)
span2 = self.arena.GetLineSpan(spid2)
if 0:
log('spid1 = %d, spid2 = %d', spid1, spid2)
n1 = self.arena.GetLineNumber(span1.line_id)
n2 = self.arena.GetLineNumber(span2.line_id)
log('span1 %s line %d %r', span1, n1, self.arena.GetLine(span1.line_id))
log('span2 %s line %d %r', span2, n2, self.arena.GetLine(span2.line_id))
if span1.line_id == span2.line_id:
line = self.arena.GetLine(span1.line_id)
piece = line[span1.col : span2.col + span2.length]
expanded.append(piece)
else:
# NOTE: The xrange(left_spid, right_spid) algorithm won't work for
# commands like this:
#
# myalias foo`echo hi`bar
#
# That is why we only support words over 1 or 2 lines.
raise NotImplementedError(
'line IDs %d != %d' % (span1.line_id, span2.line_id))
expanded.append(' ') # Put space back between words.
i += 1
code_str = ''.join(expanded)
# NOTE: self.arena isn't correct here. Breaks line invariant.
line_reader = reader.StringLineReader(code_str, self.arena)
cp = self.parse_ctx.MakeOshParser(line_reader)
cp.Init_AliasesInFlight(aliases_in_flight)
# break circular dep
from frontend import parse_lib
# The interaction between COMPLETION and ALIASES requires special care.
# See docstring of BeginAliasExpansion() in parse_lib.py.
src = source.Alias(first_word_str, argv0_spid)
with alloc.ctx_Location(self.arena, src):
with parse_lib.ctx_Alias(self.parse_ctx.trail):
try:
# _ParseCommandTerm() handles multiline commands, compound commands, etc.
# as opposed to ParseLogicalLine()
node = cp._ParseCommandTerm()
except error.Parse as e:
# Failure to parse alias expansion is a fatal error
          # We don't need more handling here.
raise
if 0:
log('AFTER expansion:')
node.PrettyPrint()
return node
def ParseSimpleCommand(self):
# type: () -> command_t
"""
Fixed transcription of the POSIX grammar (TODO: port to grammar/Shell.g)
io_file : '<' filename
| LESSAND filename
...
io_here : DLESS here_end
| DLESSDASH here_end
redirect : IO_NUMBER (io_redirect | io_here)
prefix_part : ASSIGNMENT_WORD | redirect
cmd_part : WORD | redirect
assign_kw : Declare | Export | Local | Readonly
      # Without any words it is parsed as a command, not an assignment
assign_listing : assign_kw
# Now we have something to do (might be changing assignment flags too)
# NOTE: any prefixes should be a warning, but they are allowed in shell.
assignment : prefix_part* assign_kw (WORD | ASSIGNMENT_WORD)+
# an external command, a function call, or a builtin -- a "word_command"
word_command : prefix_part* cmd_part+
simple_command : assign_listing
| assignment
| proc_command
Simple imperative algorithm:
1) Read a list of words and redirects. Append them to separate lists.
2) Look for the first non-assignment word. If it's declare, etc., then
keep parsing words AND assign words. Otherwise, just parse words.
3) If there are no non-assignment words, then it's a global assignment.
{ redirects, global assignments } OR
{ redirects, prefix_bindings, words } OR
{ redirects, ERROR_prefix_bindings, keyword, assignments, words }
THEN CHECK that prefix bindings don't have any array literal parts!
    global assignments and keyword assignments can have them, of course.
well actually EXPORT shouldn't have them either -- WARNING
3 cases we want to warn: prefix_bindings for assignment, and array literal
in prefix bindings, or export
A command can be an assignment word, word, or redirect on its own.
ls
>out.txt
      >out.txt FOO=bar  # this touches the file, and then does a global assignment
Or any sequence:
ls foo bar
<in.txt ls foo bar >out.txt
<in.txt ls >out.txt foo bar
Or add one or more environment bindings:
VAR=val env
>out.txt VAR=val env
here_end vs filename is a matter of whether we test that it's quoted. e.g.
<<EOF vs <<'EOF'.
"""
redirects, words, block = self._ScanSimpleCommand()
block_spid = block.spids[0] if block else runtime.NO_SPID
if len(words) == 0: # e.g. >out.txt # redirect without words
if block:
p_die("Unexpected block", span_id=block_spid)
simple = command.Simple() # no words, more_env, or block,
simple.redirects = redirects
return simple
# Disallow =a because it's confusing
part0 = words[0].parts[0]
if part0.tag_() == word_part_e.Literal:
tok = cast(Token, part0)
if tok.id == Id.Lit_Equals:
p_die("=word isn't allowed when shopt 'parse_equals' is on.\n"
"Hint: add a space after = to pretty print an expression", token=tok)
preparsed_list, suffix_words = _SplitSimpleCommandPrefix(words)
if self.parse_opts.parse_equals() and len(preparsed_list):
left_token, _, _, _ = preparsed_list[0]
p_die("name=val isn't allowed when shopt 'parse_equals' is on.\n"
"Hint: add 'env' before it, or spaces around =", token=left_token)
# Set a reference to words and redirects for completion. We want to
# inspect this state after a failed parse.
self.parse_ctx.trail.SetLatestWords(suffix_words, redirects)
if len(suffix_words) == 0:
if block:
p_die("Unexpected block", span_id=block_spid)
# ShAssignment: No suffix words like ONE=1 a[x]=1 TWO=2
pairs = [] # type: List[assign_pair]
for preparsed in preparsed_list:
pairs.append(_MakeAssignPair(self.parse_ctx, preparsed, self.arena))
assign = command.ShAssignment(pairs, redirects)
left_spid = word_.LeftMostSpanForWord(words[0])
assign.spids.append(left_spid) # no keyword spid to skip past
return assign
kind, kw_token = word_.KeywordToken(suffix_words[0])
if kind == Kind.ControlFlow:
if block:
p_die("Unexpected block", span_id=block_spid)
if not self.parse_opts.parse_ignored() and len(redirects):
p_die("Control flow shouldn't have redirects", token=kw_token)
if len(preparsed_list): # FOO=bar local spam=eggs not allowed
# TODO: Change location as above
left_token, _, _, _ = preparsed_list[0]
p_die("Control flow shouldn't have environment bindings",
token=left_token)
# Attach the token for errors. (ShAssignment may not need it.)
if len(suffix_words) == 1:
arg_word = None # type: Optional[word_t]
elif len(suffix_words) == 2:
arg_word = suffix_words[1]
else:
p_die('Unexpected argument to %r', kw_token.val, word=suffix_words[2])
return command.ControlFlow(kw_token, arg_word)
# Only expand aliases if we didn't get a block.
if not block and self.parse_opts.expand_aliases():
# If any expansions were detected, then parse again.
expanded_node = self._MaybeExpandAliases(suffix_words)
if expanded_node:
# Attach env bindings and redirects to the expanded node.
more_env = [] # type: List[env_pair]
_AppendMoreEnv(preparsed_list, more_env)
exp = command.ExpandedAlias(expanded_node, redirects, more_env)
return exp
# TODO check that we don't have env1=x x[1]=y env2=z here.
# FOO=bar printenv.py FOO
node = _MakeSimpleCommand(preparsed_list, suffix_words, redirects, block)
return node
def ParseBraceGroup(self):
# type: () -> BraceGroup
"""
Original:
brace_group : LBrace command_list RBrace ;
Oil:
brace_group : LBrace (Op_Newline IgnoredComment?)? command_list RBrace ;
The doc comment can only occur if there's a newline.
"""
left_spid = _KeywordSpid(self.cur_word)
self._Eat(Id.Lit_LBrace)
    doc_token = None  # type: Optional[Token]
self._Peek()
if self.c_id == Id.Op_Newline:
self._Next()
with word_.ctx_EmitDocToken(self.w_parser):
self._Peek()
if self.c_id == Id.Ignored_Comment:
doc_token = cast(Token, self.cur_word)
self._Next()
c_list = self._ParseCommandList()
#right_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Eat(Id.Lit_RBrace)
node = BraceGroup(doc_token, c_list.children, None) # no redirects yet
node.spids.append(left_spid)
return node
def ParseDoGroup(self):
# type: () -> command__DoGroup
"""
Used by ForEach, ForExpr, While, Until. Should this be a Do node?
do_group : Do command_list Done ; /* Apply rule 6 */
"""
self._Eat(Id.KW_Do)
do_spid = _KeywordSpid(self.cur_word) # Must come AFTER _Eat
    c_list = self._ParseCommandList()  # could be anything
self._Eat(Id.KW_Done)
done_spid = _KeywordSpid(self.cur_word) # after _Eat
node = command.DoGroup(c_list.children)
node.spids.append(do_spid)
node.spids.append(done_spid)
return node
def ParseForWords(self):
# type: () -> Tuple[List[compound_word], int]
"""
for_words : WORD* for_sep
;
for_sep : ';' newline_ok
| NEWLINES
;
"""
words = [] # type: List[compound_word]
# The span_id of any semi-colon, so we can remove it.
semi_spid = runtime.NO_SPID
while True:
self._Peek()
if self.c_id == Id.Op_Semi:
tok = cast(Token, self.cur_word)
semi_spid = tok.span_id
self._Next()
self._NewlineOk()
break
elif self.c_id == Id.Op_Newline:
self._Next()
break
elif self.parse_opts.parse_brace() and self.c_id == Id.Lit_LBrace:
break
if self.cur_word.tag_() != word_e.Compound:
# TODO: Can we also show a pointer to the 'for' keyword?
p_die('Invalid word in for loop', word=self.cur_word)
w2 = cast(compound_word, self.cur_word)
words.append(w2)
self._Next()
return words, semi_spid
def _ParseForExprLoop(self):
# type: () -> command__ForExpr
"""
for (( init; cond; update )) for_sep? do_group
"""
node = self.w_parser.ReadForExpression()
self._Next()
self._Peek()
if self.c_id == Id.Op_Semi:
self._Next()
self._NewlineOk()
elif self.c_id == Id.Op_Newline:
self._Next()
elif self.c_id == Id.KW_Do: # missing semicolon/newline allowed
pass
elif self.c_id == Id.Lit_LBrace: # does NOT require parse_brace
pass
else:
p_die('Invalid word after for expression', word=self.cur_word)
if self.c_id == Id.Lit_LBrace:
node.body = self.ParseBraceGroup()
else:
node.body = self.ParseDoGroup()
return node
def _ParseForEachLoop(self, for_spid):
# type: (int) -> command__ForEach
node = command.ForEach()
node.do_arg_iter = False
node.spids.append(for_spid) # for $LINENO and error fallback
ok, iter_name, quoted = word_.StaticEval(self.cur_word)
if not ok or quoted:
p_die("Loop variable name should be a constant", word=self.cur_word)
if not match.IsValidVarName(iter_name):
p_die("Invalid loop variable name", word=self.cur_word)
node.iter_name = iter_name
self._Next() # skip past name
self._NewlineOk()
in_spid = runtime.NO_SPID
semi_spid = runtime.NO_SPID
self._Peek()
if self.c_id == Id.KW_In:
self._Next() # skip in
# TODO: Do _Peek() here?
in_spid = word_.LeftMostSpanForWord(self.cur_word) + 1
iter_words, semi_spid = self.ParseForWords()
words2 = braces.BraceDetectAll(iter_words)
words3 = word_.TildeDetectAll(words2)
node.iter_words = words3
elif self.c_id == Id.Op_Semi: # for x; do
node.do_arg_iter = True # implicit for loop
self._Next()
elif self.c_id == Id.KW_Do:
node.do_arg_iter = True # implicit for loop
# do not advance
else: # for foo BAD
p_die('Unexpected word after for loop variable', word=self.cur_word)
self._Peek()
if self.c_id == Id.Lit_LBrace: # parse_opts.parse_brace() must be on
node.body = self.ParseBraceGroup()
else:
node.body = self.ParseDoGroup()
node.spids.append(in_spid)
node.spids.append(semi_spid)
return node
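  # Illustrative inputs handled above (sketch):
  #
  #   for x in a b; do echo $x; done   -> iter_words [a, b]
  #   for x; do echo $x; done          -> do_arg_iter=True (iterate over "$@")
  #   for x do echo $x; done           -> do_arg_iter=True, no semicolon needed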
def ParseFor(self):
# type: () -> command_t
"""
for_clause : For for_name newline_ok (in for_words? for_sep)? do_group ;
| For '((' ... TODO
"""
for_spid = _KeywordSpid(self.cur_word)
self._Eat(Id.KW_For)
if self.w_parser.LookAhead() == Id.Op_LParen:
# for (x in y) { }
# NOTE: parse_paren NOT required since it would have been a syntax error.
lvalue, iterable, _ = (
self.parse_ctx.ParseOilForExpr(self.lexer, grammar_nt.oil_for)
)
self._Peek()
if self.c_id == Id.Lit_LBrace:
body = self.ParseBraceGroup() # type: command_t
else:
body = self.ParseDoGroup()
return command.OilForIn(lvalue, iterable, body)
else:
self._Peek()
if self.c_id == Id.Op_DLeftParen:
        # for (( i = 0; i < 10; i++ ))
n1 = self._ParseForExprLoop()
n1.redirects = self._ParseRedirectList()
return n1
else:
# for x in a b; do echo hi; done
n2 = self._ParseForEachLoop(for_spid)
n2.redirects = self._ParseRedirectList()
return n2
def ParseWhileUntil(self, keyword):
# type: (Token) -> command__WhileUntil
"""
while_clause : While command_list do_group ;
until_clause : Until command_list do_group ;
"""
self._Next() # skip keyword
if self.parse_opts.parse_paren() and self.w_parser.LookAhead() == Id.Op_LParen:
enode, _ = self.parse_ctx.ParseOilExpr(self.lexer, grammar_nt.oil_expr)
# NOTE: OilCondition could have spids of ( and ) ?
cond = condition.Oil(enode) # type: condition_t
else:
self.allow_block = False
commands = self._ParseCommandList()
self.allow_block = True
cond = condition.Shell(commands.children)
# NOTE: The LSTs will be different for Oil and OSH, but the execution
# should be unchanged. To be sure we should desugar.
self._Peek()
if self.parse_opts.parse_brace() and self.c_id == Id.Lit_LBrace:
# while test -f foo {
body_node = self.ParseBraceGroup() # type: command_t
else:
body_node = self.ParseDoGroup()
node = command.WhileUntil(keyword, cond, body_node, None) # no redirects yet
node.spids.append(keyword.span_id) # e.g. for errexit message
return node
def ParseCaseItem(self):
# type: () -> case_arm
"""
case_item: '('? pattern ('|' pattern)* ')'
newline_ok command_term? trailer? ;
"""
self.lexer.PushHint(Id.Op_RParen, Id.Right_CasePat)
left_spid = word_.LeftMostSpanForWord(self.cur_word)
if self.c_id == Id.Op_LParen:
self._Next()
pat_words = [] # type: List[word_t]
while True:
self._Peek()
pat_words.append(self.cur_word)
self._Next()
self._Peek()
if self.c_id == Id.Op_Pipe:
self._Next()
else:
break
rparen_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Eat(Id.Right_CasePat)
self._NewlineOk()
if self.c_id not in (Id.Op_DSemi, Id.KW_Esac):
c_list = self._ParseCommandTerm()
action_children = c_list.children
else:
action_children = []
dsemi_spid = runtime.NO_SPID
last_spid = runtime.NO_SPID
self._Peek()
if self.c_id == Id.KW_Esac:
last_spid = word_.LeftMostSpanForWord(self.cur_word)
elif self.c_id == Id.Op_DSemi:
dsemi_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next()
else:
# Happens on EOF
p_die('Expected ;; or esac', word=self.cur_word)
self._NewlineOk()
spids = [left_spid, rparen_spid, dsemi_spid, last_spid]
arm = syntax_asdl.case_arm(pat_words, action_children, spids)
return arm
def ParseCaseList(self, arms):
# type: (List[case_arm]) -> None
"""
case_list: case_item (DSEMI newline_ok case_item)* DSEMI? newline_ok;
"""
self._Peek()
while True:
# case item begins with a command word or (
if self.c_id == Id.KW_Esac:
break
if self.parse_opts.parse_brace() and self.c_id == Id.Lit_RBrace:
break
if self.c_kind != Kind.Word and self.c_id != Id.Op_LParen:
break
arm = self.ParseCaseItem()
arms.append(arm)
self._Peek()
# Now look for DSEMI or ESAC
def ParseCase(self):
# type: () -> command__Case
"""
case_clause : Case WORD newline_ok in newline_ok case_list? Esac ;
"""
case_node = command.Case()
case_spid = _KeywordSpid(self.cur_word)
self._Next() # skip case
self._Peek()
case_node.to_match = self.cur_word
self._Next()
self._NewlineOk()
in_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Peek()
if self.parse_opts.parse_brace() and self.c_id == Id.Lit_LBrace:
self._Next()
else:
self._Eat(Id.KW_In)
self._NewlineOk()
if self.c_id != Id.KW_Esac: # empty case list
self.ParseCaseList(case_node.arms)
# TODO: should it return a list of nodes, and extend?
self._Peek()
esac_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Peek()
if self.parse_opts.parse_brace() and self.c_id == Id.Lit_RBrace:
self._Next()
else:
self._Eat(Id.KW_Esac)
self._Next()
case_node.spids.append(case_spid)
case_node.spids.append(in_spid)
case_node.spids.append(esac_spid)
return case_node
def _ParseOilElifElse(self, if_node):
# type: (command__If) -> None
"""
if test -f foo {
echo foo
} elif test -f bar; test -f spam {
^ we parsed up to here
echo bar
} else {
echo none
}
"""
arms = if_node.arms
while self.c_id == Id.KW_Elif:
elif_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next() # skip elif
if (self.parse_opts.parse_paren() and
self.w_parser.LookAhead() == Id.Op_LParen):
enode, _ = self.parse_ctx.ParseOilExpr(self.lexer, grammar_nt.oil_expr)
# NOTE: OilCondition could have spids of ( and ) ?
cond = condition.Oil(enode) # type: condition_t
else:
self.allow_block = False
        commands = self._ParseCommandList()
self.allow_block = True
cond = condition.Shell(commands.children)
body = self.ParseBraceGroup()
self._Peek()
arm = syntax_asdl.if_arm(cond, body.children, [elif_spid])
arms.append(arm)
self._Peek()
if self.c_id == Id.KW_Else:
else_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next()
body = self.ParseBraceGroup()
if_node.else_action = body.children
else:
else_spid = runtime.NO_SPID
if_node.spids.append(else_spid)
def _ParseOilIf(self, if_spid, cond):
# type: (int, condition_t) -> command__If
"""
if test -f foo {
# ^ we parsed up to here
echo foo
} elif test -f bar; test -f spam {
echo bar
} else {
echo none
}
NOTE: If you do something like if test -n foo{, the parser keeps going, and
the error is confusing because it doesn't point to the right place.
I think we might need strict_brace so that foo{ is disallowed. It has to
be foo\{ or foo{a,b}. Or just turn that on with parse_brace? After you
form ANY CompoundWord, make sure it's balanced for Lit_LBrace and
    Lit_RBrace? Maybe this is a pre-parsing step in the WordParser?
"""
if_node = command.If()
body1 = self.ParseBraceGroup()
# Every arm has 1 spid, unlike shell-style
# TODO: We could get the spids from the brace group.
arm = syntax_asdl.if_arm(cond, body1.children, [if_spid])
if_node.arms.append(arm)
self._Peek()
if self.c_id in (Id.KW_Elif, Id.KW_Else):
self._ParseOilElifElse(if_node)
else:
if_node.spids.append(runtime.NO_SPID) # no else spid
# the whole if node has the 'else' spid, unlike shell-style there's no 'fi'
# spid because that's in the BraceGroup.
return if_node
def _ParseElifElse(self, if_node):
# type: (command__If) -> None
"""
else_part: (Elif command_list Then command_list)* Else command_list ;
"""
arms = if_node.arms
self._Peek()
while self.c_id == Id.KW_Elif:
elif_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next() # skip elif
commands = self._ParseCommandList()
cond = condition.Shell(commands.children)
then_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Eat(Id.KW_Then)
body = self._ParseCommandList()
arm = syntax_asdl.if_arm(cond, body.children, [elif_spid, then_spid])
arms.append(arm)
if self.c_id == Id.KW_Else:
else_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next()
body = self._ParseCommandList()
if_node.else_action = body.children
else:
else_spid = runtime.NO_SPID
if_node.spids.append(else_spid)
def ParseIf(self):
# type: () -> command__If
"""
if_clause : If command_list Then command_list else_part? Fi ;
"""
if_spid = _KeywordSpid(self.cur_word)
if_node = command.If()
self._Next() # skip if
# Remove ambiguity with if cd / {
if self.parse_opts.parse_paren() and self.w_parser.LookAhead() == Id.Op_LParen:
enode, _ = self.parse_ctx.ParseOilExpr(self.lexer, grammar_nt.oil_expr)
# NOTE: OilCondition could have spids of ( and ) ?
cond = condition.Oil(enode) # type: condition_t
else:
self.allow_block = False
commands = self._ParseCommandList()
self.allow_block = True
cond = condition.Shell(commands.children)
self._Peek()
if self.parse_opts.parse_brace() and self.c_id == Id.Lit_LBrace:
# if foo {
return self._ParseOilIf(if_spid, cond)
then_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Eat(Id.KW_Then)
body = self._ParseCommandList()
arm = syntax_asdl.if_arm(cond, body.children, [if_spid, then_spid])
if_node.arms.append(arm)
if self.c_id in (Id.KW_Elif, Id.KW_Else):
self._ParseElifElse(if_node)
else:
if_node.spids.append(runtime.NO_SPID) # no else spid
fi_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Eat(Id.KW_Fi)
if_node.spids.append(fi_spid)
return if_node
def ParseTime(self):
# type: () -> command_t
"""
time [-p] pipeline
According to bash help.
"""
time_spid = _KeywordSpid(self.cur_word)
self._Next() # skip time
pipeline = self.ParsePipeline()
node = command.TimeBlock(pipeline)
node.spids.append(time_spid)
return node
def ParseCompoundCommand(self):
# type: () -> command_t
"""
Refactoring: we put io_redirect* here instead of in function_body and
command.
compound_command : brace_group io_redirect*
| subshell io_redirect*
| for_clause io_redirect*
| while_clause io_redirect*
| until_clause io_redirect*
| if_clause io_redirect*
| case_clause io_redirect*
# bash extensions
| time_clause
| [[ BoolExpr ]]
| (( ArithExpr ))
# Oil extensions
| const ...
| var ...
| setglobal ...
| setref ...
| setvar ...
;
"""
if self.c_id == Id.Lit_LBrace:
n1 = self.ParseBraceGroup()
n1.redirects = self._ParseRedirectList()
return n1
if self.c_id == Id.Op_LParen:
n2 = self.ParseSubshell()
n2.redirects = self._ParseRedirectList()
return n2
if self.c_id == Id.KW_For:
# Note: Redirects parsed in this call. POSIX for and bash for (( have
# redirects, but Oil for doesn't.
return self.ParseFor()
if self.c_id in (Id.KW_While, Id.KW_Until):
keyword = _KeywordToken(self.cur_word)
n3 = self.ParseWhileUntil(keyword)
n3.redirects = self._ParseRedirectList()
return n3
if self.c_id == Id.KW_If:
n4 = self.ParseIf()
n4.redirects = self._ParseRedirectList()
return n4
if self.c_id == Id.KW_Case:
n5 = self.ParseCase()
n5.redirects = self._ParseRedirectList()
return n5
if self.c_id == Id.KW_DLeftBracket:
n6 = self.ParseDBracket()
n6.redirects = self._ParseRedirectList()
return n6
if self.c_id == Id.Op_DLeftParen:
n7 = self.ParseDParen()
n7.redirects = self._ParseRedirectList()
return n7
# bash extensions: no redirects
if self.c_id == Id.KW_Time:
return self.ParseTime()
# Oil extensions
if self.c_id in (Id.KW_Var, Id.KW_Const):
keyword_id = self.c_id
kw_token = word_.LiteralToken(self.cur_word)
self._Next()
n8 = self.w_parser.ParseVarDecl(kw_token)
for lhs in n8.lhs:
self.var_checker.Check(keyword_id, lhs.name)
return n8
if self.c_id in (Id.KW_SetVar, Id.KW_SetRef, Id.KW_SetGlobal):
kw_token = word_.LiteralToken(self.cur_word)
self._Next()
n9 = self.w_parser.ParsePlaceMutation(kw_token, self.var_checker)
return n9
# Happens in function body, e.g. myfunc() oops
p_die('Unexpected word while parsing compound command', word=self.cur_word)
assert False # for MyPy
def ParseFunctionDef(self):
# type: () -> command__ShFunction
"""
function_header : fname '(' ')'
function_def : function_header newline_ok function_body ;
Precondition: Looking at the function name.
NOTE: There is an ambiguity with:
function foo ( echo hi ) and
function foo () ( echo hi )
Bash only accepts the latter, though it doesn't really follow a grammar.
"""
left_spid = word_.LeftMostSpanForWord(self.cur_word)
word0 = cast(compound_word, self.cur_word) # caller ensures validity
name = word_.ShFunctionName(word0)
if len(name) == 0: # example: foo$x is invalid
p_die('Invalid function name', word=word0)
part0 = word0.parts[0]
# If we got a non-empty string from ShFunctionName, this should be true.
assert part0.tag_() == word_part_e.Literal
blame_tok = cast(Token, part0) # for ctx_VarChecker
self._Next() # move past function name
# Must be true because of lookahead
self._Peek()
assert self.c_id == Id.Op_LParen, self.cur_word
self.lexer.PushHint(Id.Op_RParen, Id.Right_ShFunction)
self._Next()
self._Peek()
if self.c_id == Id.Right_ShFunction:
# 'f ()' implies a function definition, since invoking it with no args
# would just be 'f'
self._Next()
after_name_spid = word_.LeftMostSpanForWord(self.cur_word) + 1
self._NewlineOk()
func = command.ShFunction()
func.name = name
with ctx_VarChecker(self.var_checker, blame_tok):
func.body = self.ParseCompoundCommand()
# matches ParseKshFunctionDef below
func.spids.append(left_spid)
func.spids.append(left_spid) # name_spid is same as left_spid in this case
func.spids.append(after_name_spid)
return func
else:
p_die('Expected ) in function definition', word=self.cur_word)
return None
def ParseKshFunctionDef(self):
# type: () -> command__ShFunction
"""
ksh_function_def : 'function' fname ( '(' ')' )? newline_ok function_body
"""
keyword_tok = _KeywordToken(self.cur_word)
left_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next() # skip past 'function'
self._Peek()
cur_word = cast(compound_word, self.cur_word) # caller ensures validity
name = word_.ShFunctionName(cur_word)
if len(name) == 0: # example: foo$x is invalid
p_die('Invalid KSH-style function name', word=cur_word)
name_spid = word_.LeftMostSpanForWord(self.cur_word)
after_name_spid = name_spid + 1
    self._Next()  # skip past the function name
self._Peek()
if self.c_id == Id.Op_LParen:
self.lexer.PushHint(Id.Op_RParen, Id.Right_ShFunction)
self._Next()
self._Eat(Id.Right_ShFunction)
# Change it: after )
after_name_spid = word_.LeftMostSpanForWord(self.cur_word) + 1
self._NewlineOk()
func = command.ShFunction()
func.name = name
with ctx_VarChecker(self.var_checker, keyword_tok):
func.body = self.ParseCompoundCommand()
# matches ParseFunctionDef above
func.spids.append(left_spid)
func.spids.append(name_spid)
func.spids.append(after_name_spid)
return func
def ParseOilProc(self):
# type: () -> command__Proc
node = command.Proc()
keyword_tok = _KeywordToken(self.cur_word)
with ctx_VarChecker(self.var_checker, keyword_tok):
self.w_parser.ParseProc(node)
if node.sig.tag_() == proc_sig_e.Closed: # Register params
sig = cast(proc_sig__Closed, node.sig)
for param in sig.params:
# Treat params as variables.
self.var_checker.Check(Id.KW_Var, param.name)
# We COULD register __out here but it would require a different API.
#if param.prefix and param.prefix.id == Id.Arith_Colon:
# self.var_checker.Check(Id.KW_Var, '__' + param.name)
self._Next()
node.body = self.ParseBraceGroup()
# No redirects for Oil procs (only at call site)
return node
def ParseCoproc(self):
# type: () -> command_t
"""
TODO: command__Coproc?
"""
raise NotImplementedError()
def ParseSubshell(self):
# type: () -> command__Subshell
left_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next() # skip past (
# Ensure that something $( (cd / && pwd) ) works. If ) is already on the
# translation stack, we want to delay it.
self.lexer.PushHint(Id.Op_RParen, Id.Right_Subshell)
c_list = self._ParseCommandList()
if len(c_list.children) == 1:
child = c_list.children[0]
else:
child = c_list
node = command.Subshell(child, None) # no redirects yet
right_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Eat(Id.Right_Subshell)
node.spids.append(left_spid)
node.spids.append(right_spid)
return node
def ParseDBracket(self):
# type: () -> command__DBracket
"""
Pass the underlying word parser off to the boolean expression parser.
"""
left_spid = word_.LeftMostSpanForWord(self.cur_word)
# TODO: Test interactive. Without closing ]], you should get > prompt
# (PS2)
self._Next() # skip [[
b_parser = bool_parse.BoolParser(self.w_parser)
bnode = b_parser.Parse() # May raise
self._Peek()
right_spid = word_.LeftMostSpanForWord(self.cur_word)
node = command.DBracket(bnode, None) # no redirects yet
node.spids.append(left_spid)
node.spids.append(right_spid)
return node
def ParseDParen(self):
# type: () -> command__DParen
left_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next() # skip ((
anode = self.w_parser.ReadDParen()
assert anode is not None
self._Peek()
right_spid = word_.LeftMostSpanForWord(self.cur_word)
node = command.DParen(anode, None) # no redirects yet
node.spids.append(left_spid)
node.spids.append(right_spid)
return node
def ParseCommand(self):
# type: () -> command_t
"""
command : simple_command
| compound_command # Oil edit: io_redirect* folded in
| function_def
| ksh_function_def
;
"""
self._Peek()
if self._AtSecondaryKeyword():
p_die('Unexpected word when parsing command', word=self.cur_word)
if self.c_id == Id.KW_Function:
return self.ParseKshFunctionDef()
if self.c_id == Id.KW_Proc:
return self.ParseOilProc()
# Top-level keywords to hide: func, data, enum, class/mod. Not sure about
# 'use'.
if self.parse_opts.parse_tea():
if self.c_id == Id.KW_Func:
out0 = command.Func()
self.parse_ctx.ParseFunc(self.lexer, out0)
self._Next()
return out0
if self.c_id == Id.KW_Data:
out1 = command.Data()
self.parse_ctx.ParseDataType(self.lexer, out1)
self._Next()
return out1
if self.c_id == Id.KW_Enum:
out2 = command.Enum()
self.parse_ctx.ParseEnum(self.lexer, out2)
self._Next()
return out2
if self.c_id == Id.KW_Class:
out3 = command.Class()
self.parse_ctx.ParseClass(self.lexer, out3)
self._Next()
return out3
if self.c_id == Id.KW_Import:
# Needs last_token because it ends with an optional thing?
out4 = command.Import()
self.w_parser.ParseImport(out4)
self._Next()
return out4
if self.c_id in (
Id.KW_DLeftBracket, Id.Op_DLeftParen, Id.Op_LParen, Id.Lit_LBrace,
Id.KW_For, Id.KW_While, Id.KW_Until, Id.KW_If, Id.KW_Case, Id.KW_Time,
Id.KW_Var, Id.KW_Const, Id.KW_SetVar, Id.KW_SetGlobal,
Id.KW_SetRef):
return self.ParseCompoundCommand()
if self.c_id in (Id.Lit_Underscore, Id.Lit_Equals):
keyword = _KeywordToken(self.cur_word)
self._Next()
enode = self.w_parser.ParseCommandExpr()
return command.Expr(speck(keyword.id, keyword.span_id), enode)
    # Syntax error for '}' starting a line, which all shells disallow.
if self.c_id == Id.Lit_RBrace:
p_die('Unexpected right brace', word=self.cur_word)
if self.c_kind == Kind.Redir: # Leading redirect
return self.ParseSimpleCommand()
if self.c_kind == Kind.Word:
cur_word = cast(compound_word, self.cur_word) # ensured by Kind.Word
# NOTE: At the top level, only Token and Compound are possible.
# Can this be modelled better in the type system, removing asserts?
#
# TODO: This can be a proc INVOCATION! (Doesn't even need parse_paren)
# Problem: We have to distinguish f( ) { echo ; } and myproc (x, y)
# That requires 2 tokens of lookahead, which we don't have
#
# Or maybe we don't just have ParseSimpleCommand -- we will have
# ParseOilCommand or something
if (self.w_parser.LookAhead() == Id.Op_LParen and
not word_.IsVarLike(cur_word)):
return self.ParseFunctionDef() # f() { echo; } # function
# Parse x = 1+2*3 when parse_equals is set.
parts = cur_word.parts
if self.parse_opts.parse_equals() and len(parts) == 1:
part0 = parts[0]
if part0.tag_() == word_part_e.Literal:
tok = cast(Token, part0)
# NOTE: tok.id should be Lit_Chars, but that check is redundant
if (match.IsValidVarName(tok.val) and
self.w_parser.LookAhead() == Id.Lit_Equals):
self.var_checker.Check(Id.KW_Const, tok)
enode = self.w_parser.ParseBareDecl()
self._Next() # Somehow this is necessary
# TODO: Use BareDecl here. Well, do that when we treat it as const
# or lazy.
return command.VarDecl(None, [name_type(tok, None)], enode)
# echo foo
# f=(a b c) # array
# array[1+2]+=1
return self.ParseSimpleCommand()
if self.c_kind == Kind.Eof:
p_die("Unexpected EOF while parsing command", word=self.cur_word)
# NOTE: This only happens in batch mode in the second turn of the loop!
# e.g. )
p_die("Invalid word while parsing command", word=self.cur_word)
assert False # for MyPy
def ParsePipeline(self):
# type: () -> command_t
"""
pipeline : Bang? command ( '|' newline_ok command )* ;
"""
negated = False
# For blaming failures
pipeline_spid = runtime.NO_SPID
self._Peek()
if self.c_id == Id.KW_Bang:
pipeline_spid = word_.LeftMostSpanForWord(self.cur_word)
negated = True
self._Next()
child = self.ParseCommand()
assert child is not None
children = [child]
self._Peek()
if self.c_id not in (Id.Op_Pipe, Id.Op_PipeAmp):
if negated:
no_stderrs = [] # type: List[int]
node = command.Pipeline(children, negated, no_stderrs)
node.spids.append(pipeline_spid)
return node
else:
return child
pipe_index = 0
stderr_indices = [] # type: List[int]
if self.c_id == Id.Op_PipeAmp:
stderr_indices.append(pipe_index)
pipe_index += 1
while True:
# Set it to the first | if it isn't already set.
if pipeline_spid == runtime.NO_SPID:
pipeline_spid = word_.LeftMostSpanForWord(self.cur_word)
self._Next() # skip past Id.Op_Pipe or Id.Op_PipeAmp
self._NewlineOk()
child = self.ParseCommand()
children.append(child)
self._Peek()
if self.c_id not in (Id.Op_Pipe, Id.Op_PipeAmp):
break
if self.c_id == Id.Op_PipeAmp:
stderr_indices.append(pipe_index)
pipe_index += 1
node = command.Pipeline(children, negated, stderr_indices)
node.spids.append(pipeline_spid)
return node
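  # Illustrative results (sketch):
  #
  #   ls            -> returned as-is, no Pipeline node
  #   ! ls          -> Pipeline(children=[ls], negated=True)
  #   a | b |& c    -> Pipeline([a, b, c]), stderr_indices=[1], i.e. the |&
  #                    between b and c also pipes b's stderr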
def ParseAndOr(self):
# type: () -> command_t
self._Peek()
if self.c_id == Id.Word_Compound:
first_word_tok = word_.LiteralToken(self.cur_word)
if first_word_tok is not None and first_word_tok.id == Id.Lit_TDot:
# We got '...', so parse in multiline mode
self._Next()
with word_.ctx_Multiline(self.w_parser):
return self._ParseAndOr()
# Parse in normal mode, not multiline
return self._ParseAndOr()
def _ParseAndOr(self):
# type: () -> command_t
"""
and_or : and_or ( AND_IF | OR_IF ) newline_ok pipeline
| pipeline
Note that it is left recursive and left associative. We parse it
iteratively with a token of lookahead.
"""
child = self.ParsePipeline()
assert child is not None
self._Peek()
if self.c_id not in (Id.Op_DPipe, Id.Op_DAmp):
return child
ops = [] # type: List[int]
op_spids = [] # type: List[int]
children = [child]
while True:
ops.append(self.c_id)
op_spids.append(word_.LeftMostSpanForWord(self.cur_word))
self._Next() # skip past || &&
self._NewlineOk()
child = self.ParsePipeline()
children.append(child)
self._Peek()
if self.c_id not in (Id.Op_DPipe, Id.Op_DAmp):
break
node = command.AndOr(ops, children)
node.spids = op_spids
return node
# NOTE: _ParseCommandLine and _ParseCommandTerm are similar, but different.
# At the top level, we execute after every line, e.g. to
# - process alias (a form of dynamic parsing)
# - process 'exit', because invalid syntax might appear after it
# On the other hand, for a while loop body, we parse the whole thing at once,
# and then execute it. We don't want to parse it over and over again!
# COMPARE
# command_line : and_or (sync_op and_or)* trailer? ; # TOP LEVEL
# command_term : and_or (trailer and_or)* ; # CHILDREN
def _ParseCommandLine(self):
# type: () -> command_t
"""
command_line : and_or (sync_op and_or)* trailer? ;
trailer : sync_op newline_ok
| NEWLINES;
sync_op : '&' | ';';
NOTE: This rule causes LL(k > 1) behavior. We would have to peek to see if
there is another command word after the sync op.
But it's easier to express imperatively. Do the following in a loop:
1. ParseAndOr
2. Peek.
a. If there's a newline, then return. (We're only parsing a single
line.)
b. If there's a sync_op, process it. Then look for a newline and
return. Otherwise, parse another AndOr.
"""
# This END_LIST is slightly different than END_LIST in _ParseCommandTerm.
# I don't think we should add anything else here; otherwise it will be
# ignored at the end of ParseInteractiveLine(), e.g. leading to bug #301.
END_LIST = [Id.Op_Newline, Id.Eof_Real]
children = [] # type: List[command_t]
done = False
while not done:
child = self.ParseAndOr()
self._Peek()
if self.c_id in (Id.Op_Semi, Id.Op_Amp):
tok = cast(Token, self.cur_word) # for MyPy
child = command.Sentence(child, tok)
self._Next()
self._Peek()
if self.c_id in END_LIST:
done = True
elif self.c_id in END_LIST:
done = True
else:
# e.g. echo a(b)
p_die('Unexpected word while parsing command line',
word=self.cur_word)
children.append(child)
# Simplify the AST.
if len(children) > 1:
return command.CommandList(children)
else:
return children[0]
def _ParseCommandTerm(self):
# type: () -> command__CommandList
""""
command_term : and_or (trailer and_or)* ;
trailer : sync_op newline_ok
| NEWLINES;
sync_op : '&' | ';';
This is handled in imperative style, like _ParseCommandLine.
Called by _ParseCommandList for all blocks, and also for ParseCaseItem,
which is slightly different. (HOW? Is it the DSEMI?)
Returns:
syntax_asdl.command
"""
# Token types that will end the command term.
END_LIST = [self.eof_id, Id.Right_Subshell, Id.Lit_RBrace, Id.Op_DSemi]
# NOTE: This is similar to _ParseCommandLine.
#
# - Why aren't we doing END_LIST in _ParseCommandLine?
# - Because you will never be inside $() at the top level.
# - We also know it will end in a newline. It can't end in "fi"!
# - example: if true; then { echo hi; } fi
children = [] # type: List[command_t]
done = False
while not done:
self._Peek()
# Most keywords are valid "first words". But do/done/then do not BEGIN
# commands, so they are not valid.
if self._AtSecondaryKeyword():
break
child = self.ParseAndOr()
self._Peek()
if self.c_id == Id.Op_Newline:
self._Next()
self._Peek()
if self.c_id in END_LIST:
done = True
elif self.c_id in (Id.Op_Semi, Id.Op_Amp):
tok = cast(Token, self.cur_word) # for MyPy
child = command.Sentence(child, tok)
self._Next()
self._Peek()
if self.c_id == Id.Op_Newline:
self._Next() # skip over newline
# Test if we should keep going. There might be another command after
# the semi and newline.
self._Peek()
if self.c_id in END_LIST: # \n EOF
done = True
elif self.c_id in END_LIST: # ; EOF
done = True
elif self.c_id in END_LIST: # EOF
done = True
# For if test -f foo; test -f bar {
elif self.parse_opts.parse_brace() and self.c_id == Id.Lit_LBrace:
done = True
else:
#p_die("OOPS", word=self.cur_word)
pass # e.g. "} done", "fi fi", ") fi", etc. is OK
children.append(child)
self._Peek()
return command.CommandList(children)
def _ParseCommandList(self):
# type: () -> command__CommandList
"""
command_list : newline_ok command_term trailer? ;
This one is called by all the compound commands. It's basically a command
block.
NOTE: Rather than translating the CFG directly, the code follows a style
    more like this: (and_or trailer)+. It makes capture
easier.
"""
self._NewlineOk()
node = self._ParseCommandTerm()
return node
def ParseLogicalLine(self):
    # type: () -> command_t
    """Parse a single line for main_loop.

    A wrapper around _ParseCommandLine(). Similar but not identical to
    _ParseCommandList() and ParseCommandSub().

    Raises:
      ParseError
    """
    self._NewlineOk()
    self._Peek()
    if self.c_id == Id.Eof_Real:
        return None  # main loop checks for here docs
    node = self._ParseCommandLine()
    return node
def ParseInteractiveLine(self):
    # type: () -> parse_result_t
    """Parse a single line for the interactive main_loop.

    Different from ParseLogicalLine because newlines are handled differently.

    Raises:
      ParseError
    """
    self._Peek()
    if self.c_id == Id.Op_Newline:
        return parse_result.EmptyLine()
    if self.c_id == Id.Eof_Real:
        return parse_result.Eof()
    node = self._ParseCommandLine()
    return parse_result.Node(node)
def ParseCommandSub(self):
    # type: () -> command_t
    """Parse $(echo hi) and `echo hi` for word_parse.py.

    They can have multiple lines, like this:
    echo $(
      echo one
      echo two
    )
    """
    self._NewlineOk()

    if self.c_kind == Kind.Eof:  # e.g. $()
        return command.NoOp()

    c_list = self._ParseCommandTerm()
    if len(c_list.children) == 1:
        return c_list.children[0]
    else:
        return c_list
def CheckForPendingHereDocs(self):
    # type: () -> None
    # NOTE: This happens when there is no newline at the end of a file, like
    # osh -c 'cat <<EOF'
    if len(self.pending_here_docs):
        node = self.pending_here_docs[0]  # Just show the first one?
        h = cast(redir_param__HereDoc, node.arg)
| python |
#import pandas as pd
import plotly.express as px
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import covid
countries = covid.getCountries()
df = covid.getNewData()
#print(df)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div([
    html.H1("International COVID-19 Dashboard", style={'text-align': 'center'}),

    dcc.Dropdown(
        id='select-country',
        options=[
            {'label': c, 'value': c} for c in countries
        ],
        #multi=True,
        placeholder="Select a country",
        style={'width': "30%"}
    ),
    html.Div(id='dd-output-container', children=[]),
    html.Br(),

    dcc.Graph(id='world_map', figure={})
])
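
# Note (comment added for clarity): the callback below ties the 'select-country'
# dropdown to two outputs, the text container and the world map figure. Dash
# re-runs update_output_div whenever the dropdown value changes and pushes the
# returned (container, fig) pair into those two components.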
@app.callback(
    [Output(component_id='dd-output-container', component_property='children'),
     Output(component_id='world_map', component_property='figure')],
    [Input(component_id='select-country', component_property='value')]
)
def update_output_div(input_value):
    container = "The map shows information for: {}".format(input_value)

    # Reset the index in place; without inplace=True the returned frame would
    # be discarded and the call would have no effect.
    df.reset_index(drop=True, inplace=True)

    # Plotly Express
    fig = px.choropleth(
        data_frame=df,
        labels={'cases.new': 'New', 'cases.active': 'Active', 'deaths.total': 'Deaths', 'cases.total': 'Cases', 'tests.total': 'Tests'},
        locations='country',
        locationmode='country names',
        title="Covid Map",
        color='cases.active',
        range_color=[10, 100000],
        hover_data=['cases.new', 'cases.active', 'deaths.total', 'cases.total', 'tests.total'],
        hover_name='country',
        custom_data=['continent'],
        color_continuous_scale=px.colors.sequential.YlOrRd,
        height=960,
        projection='natural earth',
        template='plotly'
    )

    return container, fig
if __name__ == '__main__':
    app.run_server(debug=True) | python
from django.contrib.auth.models import User
from django.contrib.contenttypes.generic import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.utils import timezone
class Comment(models.Model):
    author = models.ForeignKey(User, null=True, related_name="comments")
    name = models.CharField(max_length=100)
    email = models.CharField(max_length=255, blank=True)
    website = models.CharField(max_length=255, blank=True)

    content_type = models.ForeignKey(ContentType)
    object_id = models.IntegerField()
    content_object = GenericForeignKey()

    comment = models.TextField()
    submit_date = models.DateTimeField(default=timezone.now)
    ip_address = models.IPAddressField(null=True)
    public = models.BooleanField(default=True)

    def __unicode__(self):
        return u"<{}: {} submit_date={}>".format(self.__class__.__name__, self.pk, self.submit_date)
| python |
from setuptools import setup, find_packages
classifiers = [
    "Development Status :: 3 - Alpha",
    "Intended Audience :: Developers",
    "Topic :: Software Development :: Build Tools",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3",
]
setup(
    name="koronavirus",
    packages=find_packages(),
    version="0.0.2",
    license="MIT",
    description="Koronavirüs (Covid-19) verilerine erişmenizi sağlayan bir Python modülü.",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    author="Dorukyum",
    author_email="[email protected]",
    url="https://github.com/Dorukyum/koronavirus",
    keywords="API, Türkçe, Covid, Korona, Corona",
    install_requires=["requests", "aiohttp"],
    classifiers=classifiers,
)
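
# To build and publish a release (assuming the standard setuptools/wheel and
# twine tooling is installed):
#   python setup.py sdist bdist_wheel
#   twine upload dist/*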
| python |
import datetime
import json
from django.core.urlresolvers import resolve
from django.test import TestCase
from rest_framework.serializers import ValidationError
from rest_framework.test import APITestCase
from .models import Appointment
from .serializers import DATE_ERROR_MESSAGE, TIME_ERROR_MESSAGE
from .views import main_view
class AppointmentModelTestCase(TestCase):
    """Tests for the Appointment model."""

    appt_dict = {
        'date': datetime.date.today().isoformat(),
        'time_start': "13:30",
        'non_recurring': "Anon",
        'reason': "I broke a leg"
    }

    def test_appointment_saved_with_time_end(self):
        existing = Appointment.objects.create(**self.appt_dict)
        self.assertEqual(existing.time_end, datetime.time(13, 59))
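        # Note (assumption): 13:30 -> 13:59 suggests time_end is derived from
        # time_start by the Appointment model itself (e.g. in save()); that
        # logic lives in models.py, which is not shown here.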
class MainViewTestCase(TestCase):
    """Smoke tests"""

    def test_index_resolve_correct_view(self):
        view = resolve('/')
        self.assertEqual(view.func, main_view)

    def test_index_renders_correct_html(self):
        resp = self.client.get('/')
        self.assertIn(b'Dr. Dre\'s', resp.content)
class AppointmentAPITestCase(APITestCase):
    """Tests for the appointment API endpoint."""

    endpoint = '/api/v1/appointment/'
    appt_dict = {
        'date': datetime.date.today().isoformat(),
        'time_start': "13:30",
        'non_recurring': "Anon",
        'reason': "I broke a leg"
    }

    def test_anonymous_user_can_create_appointment(self):
        resp = self.client.post(self.endpoint, self.appt_dict)
        self.assertEqual(resp.status_code, 201)
        appt = Appointment.objects.first()
        self.assertEqual(appt.reason, self.appt_dict['reason'])
        self.assertEqual(appt.non_recurring, self.appt_dict['non_recurring'])
        self.assertIsNone(appt.visitor)

    def test_appointments_cant_be_in_past(self):
        appt_dict = dict(self.appt_dict)
        yesterday = datetime.date.today() - datetime.timedelta(days=1)
        appt_dict['date'] = yesterday.isoformat()
        resp = self.client.post(self.endpoint, appt_dict)
        self.assertJSONEqual(resp.content.decode('utf-8'),
                             {"date": [DATE_ERROR_MESSAGE]})
        self.assertFalse(Appointment.objects.exists())

    def test_appointments_cant_be_in_wrong_hours(self):
        appt_dict = dict(self.appt_dict)
        appt_dict['time_start'] = "07:00"
        resp = self.client.post(self.endpoint, appt_dict)
        self.assertJSONEqual(resp.content.decode('utf-8'),
                             {"time_start": [TIME_ERROR_MESSAGE]})
        self.assertFalse(Appointment.objects.exists())

    def test_appointments_cant_be_in_same_hours(self):
        Appointment.objects.create(**self.appt_dict)
        resp = self.client.post(self.endpoint, self.appt_dict)
        self.assertEqual(Appointment.objects.count(), 1)
        self.assertContains(resp, 'non_field_errors', status_code=400)

    def test_appointments_cant_be_closer_than_30_mins(self):
        self.appt_dict['time_end'] = '15:30'
        Appointment.objects.create(**self.appt_dict)
        before = dict(self.appt_dict)
        before['time_start'] = "13:20"
        after = dict(self.appt_dict)
        after['time_start'] = "13:59"
        another_after = dict(self.appt_dict)
        another_after['time_start'] = "15:29"
        resp = self.client.post(self.endpoint, before)
        resp = self.client.post(self.endpoint, after)
        resp = self.client.post(self.endpoint, another_after)
        self.assertEqual(Appointment.objects.count(), 1)

    def test_user_cant_edit_appointment(self):
        existing = Appointment.objects.create(**self.appt_dict)
        edit = {'reason': "Malicious edit"}
        resp = self.client.patch(self.endpoint + str(existing.id), edit)
        self.assertEqual(Appointment.objects.first().reason,
                         existing.reason)
        # what's wrong with status?
        # self.assertEqual(resp.status_code, 405)
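        # Possible answer (an assumption, not verified against the view code):
        # the detail URL above has no trailing slash, so the request may be
        # redirected or answered with 404 before DRF returns 405 Method Not
        # Allowed; the status depends on the router/APPEND_SLASH setup rather
        # than on the Appointment logic itself.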
    def test_user_cant_delete_appointment(self):
        existing = Appointment.objects.create(**self.appt_dict)
        before = Appointment.objects.count()
        resp = self.client.delete(self.endpoint + str(existing.id))
        after = Appointment.objects.count()
        self.assertTrue(Appointment.objects.exists())
        self.assertEqual(before, after)
        # what's wrong with status?
        # self.assertEqual(resp.status_code, 405)
| python |
# labvirus.py
# BOJ 14502
# Book p.341
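# Approach (comment added for clarity): brute-force every way to place 3 new
# walls on empty cells (dfs), then spread the virus from every '2' cell on a
# scratch copy of the map (dfs_virus) and count the remaining safe '0' cells
# (safe). The grid is at most 8x8, so plain recursion is fast enough.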
n, m = map(int, input().split())
data = []
tmp = [[0] * m for _ in range(n)]
for _ in range(n):
    data.append(list(map(int, input().split())))
dx = (-1, 0, 1, 0)
dy = (0, 1, 0, -1)
res = 0
def dfs_virus(x, y):
    for i in range(4):
        nx = x + dx[i]
        ny = y + dy[i]
        if 0 <= nx < n and 0 <= ny < m:
            if tmp[nx][ny] == 0:
                tmp[nx][ny] = 2
                dfs_virus(nx, ny)
def safe():
    score = 0
    for i in range(n):
        for j in range(m):
            if tmp[i][j] == 0:
                score += 1
    return score
def dfs(cnt):
    global res
    if cnt == 3:
        for i in range(n):
            for j in range(m):
                tmp[i][j] = data[i][j]
        for i in range(n):
            for j in range(m):
                if tmp[i][j] == 2:
                    dfs_virus(i, j)
        res = max(res, safe())
        return
    for i in range(n):
        for j in range(m):
            if data[i][j] == 0:
                data[i][j] = 1
                cnt += 1
                dfs(cnt)
                data[i][j] = 0
                cnt -= 1


dfs(0)
print(res) | python |
import os
import sys
import click
from modules import az, logging_util
from modules.cli.args import Arguments
from modules.cli.parser import Parser
from modules.cli.validator import Validator
from modules.entities.criteria import Criteria
from modules.exceptions import AzException, NoArgsException
@click.command()
@click.option('--number', '-n', type=click.INT, help='Number of apks to download.')
@click.option('--dexdate', '-d', help='The date on a dex file, format %Y-%m-%d, e.g. 2015-10-03.')
@click.option('--apksize', '-s', help='Apk size, in bytes.')
@click.option('--vtdetection', '-vt', help='Virus total rating, integer.')
@click.option('--pkgname', '-pn', help='Package names.')
@click.option('--markets', '-m', help='Markets, e.g. play.google.com. Possible values (can differ, since repository is updating): 1mobile,angeeks,anzhi,apk_bang,appchina,fdroid,freewarelovers,genome,hiapk,markets,mi.com,play.google.com,proandroid,slideme,torrents.')
@click.option('--sha256', help='SHA256 hashes of apks to download.')
@click.option('--sha1', help='SHA1 hashes of apks to download.')
@click.option('--md5', help='MD5 hashes of apks to download.')
@click.option('--metadata', '-md', help='Metadata. This is a subset of latest.csv column names to keep in metadata.csv. By default sha256,pkg_name,apk_size,dex_date,markets.')
@click.option('--out', '-o', help='Output folder name. By default current directory.')
@click.option('--seed', '-sd', type=click.INT, help='Seed for a random algorithm.')
@click.option('--key', '-k', help='Androzoo api key.')
@click.option('--input-file', '-i', help='Path to input csv.')
@click.option('--threads', '-t', type=click.INT, default=4, help='Number of threads for concurrent download. 4 by default.')
@click.version_option(message='%(version)s')
def run(number, dexdate, apksize, vtdetection, pkgname, markets, metadata, out, seed, sha256, sha1, md5, key, input_file, threads):
"""Downloads specified number of apks satisfying specified criteria from androzoo repository. Saves specified metadata to metadata.csv.
dexdate, apksize and vtdetection require specifying lower and upper bounds in format lower:upper, both inclusive. One of the bounds can be omitted (i.e. you can write :upper or lower:)
pkgname, markets, metadata, sha256, sha1, md5 can be either single values or comma separated lists.
Key and input file can be specified as options or via local or global config file.
Allows downloading in the multiple threads.
Sample usage:
az -n 10 -d 2015-12-11: -s :3000000 -m play.google.com,appchina
This means: download 10 apks with the dexdate starting from the 2015-12-11(inclusive), size up to 3000000 bytes(inclusive) and present on either play.google.com or appchina
"""
try:
args = Arguments(number, dexdate, apksize, vtdetection, markets, pkgname, metadata, sha256, sha1, md5, key, input_file)
Validator(args).validate()
logging_util.setup_logging()
number, *criteria_args, metadata, key, input_file = Parser(args).parse()
criteria = Criteria(*criteria_args)
az.run(input_file, key, number, criteria, out_dir=out if out else os.getcwd(), metadata=metadata, seed=seed, threads=threads)
except NoArgsException:
with click.Context(run) as ctx:
click.echo(run.get_help(ctx))
except AzException as e:
sys.exit(str(e))
else:
sys.exit(0)
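
# Note: click commands accept an explicit argument list, so the call below runs
# the command as if `-vt 0:0` had been typed on the command line. This is handy
# for a quick local test; a normal invocation would call run() with no arguments
# and let click read sys.argv.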
if __name__ == '__main__':
    run(['-vt', '0:0'])
| python |