# robot1
# WEMOS D1 Mini Board GPIO Map: D8 pull_down, D4 pull_down
# D0=16, D1=5, D2=4, D3=0, D4=2, D5=14, D6=12, D7=13, D8=15
import machine, time
from sensor_manager import Sensor_HCSR04
usonic = Sensor_HCSR04(trigger=5, echo=4) # D1=5, D2=4
class Motor():
def __init__(self, EN1, EN2):
if isinstance(EN1, int) and isinstance(EN2, int):
self.EN1 = machine.Pin(EN1, mode=machine.Pin.OUT, value=0, pull=None)
self.EN2 = machine.Pin(EN2, mode=machine.Pin.OUT, value=0, pull=None)
else:
raise TypeError('EN1 and EN2 must be integers')
def forward(self):
self.EN1.value(1)
self.EN2.value(0)
def backward(self):
self.EN1.value(0)
self.EN2.value(1)
def stop(self):
self.EN1.value(0)
self.EN2.value(0)
#End Motor
class Robot():
def __init__(self, M1, M2):
if isinstance(M1, Motor) and isinstance(M2, Motor):
self.M1 = M1 # Motor 1
self.M2 = M2 # Motor 2
else:
raise TypeError('M1 and M2 must be Motor objects')
def stop(self):
self.M1.stop()
self.M2.stop()
def forward(self):
self.M1.forward()
self.M2.forward()
def backward(self):
self.M1.backward()
self.M2.backward()
def turn(self, mode=0):
if mode == 1:
self.M1.forward()
elif mode == 2:
self.M2.forward()
else:
self.M1.forward()
self.M2.backward()
#End class Robot
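# Note on Robot.turn (descriptive comment, not in the original source): mode 1
# sets only M1 forward and leaves M2 in whatever state it already had, mode 2
# does the same for M2, and any other mode drives M1 forward while M2 runs
# backward, spinning the robot in place.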
motor1 = Motor(14, 16) # D0 = 16, D5 = 14
motor2 = Motor(13, 12) # D6 = 12, D7 = 13
robot = Robot(motor1, motor2)
stop = (
(robot.stop, 1),
(robot.backward, 0.25),
(robot.stop, 1)
)
avoid = (
(robot.stop, 1),
(robot.backward, 0.25),
(robot.turn, 1),
(robot.stop, 1)
)
move = (
(robot.forward, 0.1),
(robot.forward, 0.1),
)
actions = (move, stop, avoid)
ACTION_MOVE = 0
ACTION_STOP = 1
ACTION_AVOID = 2
ACTION = 0
try:
while 1:
usonic.read()
d = usonic.values[0]
if d < 5:
ACTION = ACTION_STOP
elif d < 10:  # close, but not critically so: steer away
ACTION = ACTION_AVOID
else:
ACTION = ACTION_MOVE
for action, delay in actions[ACTION]:
action()
time.sleep(delay)
except:  # stop the motors on any error or Ctrl-C
robot.stop()
| python |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for IWSLT17 zero-shot translation task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import re
from absl import flags
from absl import logging
from language.labs.consistent_zero_shot_nmt.data_generators import translate_multilingual
from tensor2tensor.data_generators import problem
from tensor2tensor.utils import registry
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
flags.DEFINE_string("iwslt17_orig_data_path", "", "Data directory for IWSLT17.")
flags.DEFINE_string("iwslt17_overlap_data_path", "",
"Overlap data directory for IWSLT17.")
__all__ = [
"TranslateIwslt17",
"TranslateIwslt17Nonoverlap",
"TranslateIwslt17Autoenc",
"TranslateIwslt17NonoverlapAutoenc",
]
# 20 pairs total.
_IWSLT17_ALL_LANG_PAIRS = [
# en <> {de, it, nl, ro} (8 pairs).
("en", "de"),
("de", "en"),
("en", "nl"),
("nl", "en"),
("en", "it"),
("it", "en"),
("en", "ro"),
("ro", "en"),
# de <> {it, ro} (4 pairs).
("de", "it"),
("it", "de"),
("de", "ro"),
("ro", "de"),
# nl <> {it, ro} (4 pairs).
("nl", "it"),
("it", "nl"),
("nl", "ro"),
("ro", "nl"),
# de <> nl and it <> ro (4 zero-shot pairs).
("de", "nl"),
("nl", "de"),
("it", "ro"),
("ro", "it"),
]
# 8 training pairs that contain en as source or target.
_IWSLT17_TRAIN_LANG_PAIRS = _IWSLT17_ALL_LANG_PAIRS[:8]
# 20 testing pairs (all directions).
_IWSLT17_TEST_LANG_PAIRS = _IWSLT17_ALL_LANG_PAIRS[:]
# 4 pairs used for autoencoding (en is excluded).
_IWSLT17_AUTOENC_LANG_PAIRS = [
("en", "de"),
("en", "nl"),
("en", "it"),
("en", "ro"),
]
_IWSLT17_TRAIN_DATASETS = [
{
"src_lang": "<" + src_lang + ">",
"tgt_lang": "<" + tgt_lang + ">",
"src_fname": "train.tags.{src_lang}-{tgt_lang}.{src_lang}".format(
src_lang=src_lang, tgt_lang=tgt_lang),
"tgt_fname": "train.tags.{src_lang}-{tgt_lang}.{tgt_lang}".format(
src_lang=src_lang, tgt_lang=tgt_lang),
}
for src_lang, tgt_lang in _IWSLT17_TRAIN_LANG_PAIRS
]
_IWSLT17_TRAIN_REMOVE_SETS = [
{
"src_remove": "remove.{src_lang}-{tgt_lang}.{src_lang}".format(
src_lang=src_lang, tgt_lang=tgt_lang),
"tgt_remove": "remove.{src_lang}-{tgt_lang}.{tgt_lang}".format(
src_lang=src_lang, tgt_lang=tgt_lang),
}
for src_lang, tgt_lang in _IWSLT17_TRAIN_LANG_PAIRS
]
_IWSLT17_AUTOENC_DATASETS = [
{
"src_lang": "<" + tgt_lang + ">",
"tgt_lang": "<" + tgt_lang + ">",
"src_fname": "train.tags.{src_lang}-{tgt_lang}.{tgt_lang}".format(
src_lang=src_lang, tgt_lang=tgt_lang),
"tgt_fname": "train.tags.{src_lang}-{tgt_lang}.{tgt_lang}".format(
src_lang=src_lang, tgt_lang=tgt_lang),
}
for src_lang, tgt_lang in _IWSLT17_AUTOENC_LANG_PAIRS
]
_IWSLT17_TEST_DATASETS = [
{
"src_lang": "<" + src_lang + ">",
"tgt_lang": "<" + tgt_lang + ">",
"src_fname": "IWSLT17.TED.dev2010.{src_lang}-{tgt_lang}.{src_lang}.xml".format( # pylint: disable=line-too-long
src_lang=src_lang, tgt_lang=tgt_lang),
"tgt_fname": "IWSLT17.TED.dev2010.{src_lang}-{tgt_lang}.{tgt_lang}.xml".format( # pylint: disable=line-too-long
src_lang=src_lang, tgt_lang=tgt_lang),
}
for src_lang, tgt_lang in _IWSLT17_TEST_LANG_PAIRS
]
_ALLOWED_TAGS = {"description", "seg", "title"}
_FLAT_HTML_REGEX = re.compile(r"<([^ ]*).*>(.*)</(.*)>")
_WHOLE_TAG_REGEX = re.compile(r"<[^<>]*>\Z")
def _parse_lines(path):
"""Parses lines from IWSLT17 dataset."""
lines = []
if tf.gfile.Exists(path):
with tf.gfile.GFile(path) as fp:
for line in fp:
line = line.strip()
# Skip lines that are tags entirely.
if _WHOLE_TAG_REGEX.match(line):
continue
# Try to parse as content between an opening and closing tags.
match = _FLAT_HTML_REGEX.match(line)
# Always append text not contained between the tags.
if match is None:
lines.append(line)
elif (match.group(1) == match.group(3) and
match.group(1).lower() in _ALLOWED_TAGS):
lines.append(match.group(2).strip())
return lines
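# Illustrative behaviour of _parse_lines (a sketch, not part of the original
# file), given the regexes defined above:
#   '<transcript>'                    -> dropped (the whole line is a tag)
#   '<url>http://example.org</url>'   -> dropped (tag not in _ALLOWED_TAGS)
#   '<seg id="1"> Hello world </seg>' -> 'Hello world'
#   'A plain text line'               -> 'A plain text line'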
def _compile_data(tmp_dir, datasets, filename):
"""Concatenate all `datasets` and save to `filename`."""
filename = os.path.join(tmp_dir, filename)
src_fname = filename + ".src"
tgt_fname = filename + ".tgt"
if tf.gfile.Exists(src_fname) and tf.gfile.Exists(tgt_fname):
tf.logging.info("Skipping compile data, found files:\n%s\n%s",
src_fname, tgt_fname)
return filename
with tf.gfile.GFile(src_fname, mode="w") as src_resfile:
with tf.gfile.GFile(tgt_fname, mode="w") as tgt_resfile:
for d in datasets:
logging.info("Loading %s-%s...", d["src_lang"], d["tgt_lang"])
# Load source and target lines.
src_fpath = os.path.join(FLAGS.iwslt17_orig_data_path, d["src_fname"])
tgt_fpath = os.path.join(FLAGS.iwslt17_orig_data_path, d["tgt_fname"])
src_lines = _parse_lines(src_fpath)
tgt_lines = _parse_lines(tgt_fpath)
assert len(src_lines) == len(tgt_lines)
logging.info("...loaded %d parallel sentences", len(src_lines))
# Filter overlap, if necessary.
if "src_remove" in d:
logging.info("...filtering src overlap")
src_remove_path = os.path.join(FLAGS.iwslt17_overlap_data_path,
d["src_remove"])
src_remove = set(_parse_lines(src_remove_path))
logging.info("...total overlapping lines: %d", len(src_remove))
logging.info("...lines before filtering: %d", len(src_lines))
src_tgt_lines = [
(src_line, tgt_line)
for src_line, tgt_line in zip(src_lines, tgt_lines)
if src_line not in src_remove]
src_lines, tgt_lines = map(list, zip(*src_tgt_lines))
logging.info("...lines after filtering: %d", len(src_lines))
if "tgt_remove" in d:
logging.info("...filtering tgt overlap")
tgt_remove_path = os.path.join(FLAGS.iwslt17_overlap_data_path,
d["tgt_remove"])
tgt_remove = set(_parse_lines(tgt_remove_path))
logging.info("...total overlapping lines: %d", len(tgt_remove))
logging.info("...lines before filtering: %d", len(src_lines))
src_tgt_lines = [
(src_line, tgt_line)
for src_line, tgt_line in zip(src_lines, tgt_lines)
if tgt_line not in tgt_remove]
src_lines, tgt_lines = map(list, zip(*src_tgt_lines))
logging.info("...lines after filtering: %d", len(src_lines))
assert len(src_lines) == len(tgt_lines)
# Prepend tags to each source and target line.
src_lines = [d["src_lang"] + l for l in src_lines]
tgt_lines = [d["tgt_lang"] + l for l in tgt_lines]
# Write preprocessed source and target lines.
logging.info("...writing preprocessed files")
for src_line, tgt_line in zip(src_lines, tgt_lines):
src_resfile.write(src_line)
src_resfile.write("\n")
tgt_resfile.write(tgt_line)
tgt_resfile.write("\n")
return filename
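# Sketch of the output of _compile_data (descriptive note, not in the original
# source): each dataset contributes aligned lines to <filename>.src and
# <filename>.tgt, with the "src_lang"/"tgt_lang" tags from the dataset dicts
# prepended, e.g.
#   <filename>.src:  <en>Thank you so much, Chris.
#   <filename>.tgt:  <de>Vielen Dank, Chris.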
@registry.register_problem
class TranslateIwslt17(translate_multilingual.TranslateMultilingualProblem):
"""Problem spec for IWSLT17 zeroshot translation."""
def source_data_files(self, dataset_split):
"""Files to be passed to compile_data."""
if dataset_split == problem.DatasetSplit.TRAIN:
return _IWSLT17_TRAIN_DATASETS
return _IWSLT17_TEST_DATASETS
def generate_samples(self, data_dir, tmp_dir, dataset_split):
auxiliary_tags = ["<de>", "<it>", "<nl>", "<ro>"]
return self._generate_samples(data_dir, tmp_dir, dataset_split,
auxiliary_tags=auxiliary_tags,
compile_data_fn=_compile_data)
def generate_text_for_vocab(self, data_dir, tmp_dir):
return self._generate_text_for_vocab(
data_dir,
tmp_dir,
datapath=FLAGS.iwslt17_orig_data_path,
parse_lines_fn=_parse_lines)
@registry.register_problem
class TranslateIwslt17Nonoverlap(TranslateIwslt17):
"""Problem spec for IWSLT17 zeroshot translation without overlap."""
def source_data_files(self, dataset_split):
"""Files to be passed to compile_data."""
if dataset_split == problem.DatasetSplit.TRAIN:
# Include overlap information.
return [
dict(list(d.items()) + list(o.items()))
for d, o in zip(_IWSLT17_TRAIN_DATASETS, _IWSLT17_TRAIN_REMOVE_SETS)]
return _IWSLT17_TEST_DATASETS
@registry.register_problem
class TranslateIwslt17Autoenc(TranslateIwslt17):
"""Problem spec for IWSLT17 zeroshot translation with autoencoding."""
def source_data_files(self, dataset_split):
"""Files to be passed to compile_data."""
if dataset_split == problem.DatasetSplit.TRAIN:
return _IWSLT17_TRAIN_DATASETS + _IWSLT17_AUTOENC_DATASETS
return _IWSLT17_TEST_DATASETS
@registry.register_problem
class TranslateIwslt17NonoverlapAutoenc(TranslateIwslt17Nonoverlap):
"""Problem spec for IWSLT17 zeroshot translation with autoencoding."""
def source_data_files(self, dataset_split):
"""Files to be passed to compile_data."""
if dataset_split == problem.DatasetSplit.TRAIN:
data_files_nonoverlap = [
dict(list(d.items()) + list(o.items()))
for d, o in zip(_IWSLT17_TRAIN_DATASETS, _IWSLT17_TRAIN_REMOVE_SETS)]
return data_files_nonoverlap + _IWSLT17_AUTOENC_DATASETS
return _IWSLT17_TEST_DATASETS
| python |
#!/usr/bin/env python3
"""
* Copyright (c) 2020 dithpri (Racoda) <[email protected]>
* This file is part of RCES: https://github.com/dithpri/RCES and licensed under
* the MIT license. See LICENSE.md or
* https://github.com/dithpri/RCES/blob/master/LICENSE.md for more details.
"""
import os
import sys
import requests
import argparse
from colorama import Fore, Back, Style
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
sys.stderr.flush()
try:
from sans.api import Api
from sans.utils import pretty_string
from sans.errors import NotFound
except ImportError:
eprint(
"""You need `sans` to run this script!
install it by running
python3 -m pip install -U sans
or
py -m pip install -U sans
or from https://pypi.org/project/sans/"""
)
input("Press enter to continue...")
sys.exit(1)
import asyncio
import datetime
import re
from collections import defaultdict
from autodispatch import update
async def ratelimit():
while xra := Api.xra:
xra = xra - datetime.datetime.now().timestamp()
eprint(f"Rate limit reached: sleeping {int(xra)} seconds...")
await asyncio.sleep(xra)
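# Descriptive note (not in the original source): ratelimit() treats Api.xra as
# an absolute epoch timestamp at which the API rate limit resets, and sleeps
# until that moment before the next request goes out.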
async def main(username, password, query_season, posted_query):
version = 7.0
print("Version No. %.1f" % version)
while not username:
username = input("What nation are you collecting from? ")
nation = username.lower().replace(" ", "_")
while not password:
password = input("What is the password of that nation? ")
Api.agent = f"Owner Report (dev. Atlae) (in use by {username})"
while query_season not in [0, 1, 2, 3]:
query_season = input("What season are you looking for? (1 or 2, 0 for both) ")
try:
query_season = int(query_season)
except ValueError:
print("That's not a number!")
if query_season == 3:
print("S3 will never come.")
await asyncio.sleep(0)
sys.exit()
while posted_query is None:
# if len(posted_query) == 0:
posted_query = input("Please enter your query using the Advanced Cards Queries Syntax. Leave blank if you have a list in cards.txt: ")
custom = len(posted_query) > 0
cards = []
if custom:
open("cards.txt", "w")
if query_season != 0:
processed_query = posted_query.replace(":", "%3A").replace("&", "%26").replace("!", "%21").replace("|", "%7C").replace(" ", "+").replace("(", "%28").replace(")", "%29")
query = f'http://azure.nsr3n.info/card_queries/get_daemon_advanced.sh?format=full&query={processed_query}&season={query_season}&format=json&submit=submit'
print('Running...accessing r3n\'s server')
start_time = datetime.datetime.now()
reqs = requests.get(query)
cards = reqs.json()['cards']
print("Finished accessing r3n\'s server")
print(datetime.datetime.now() - start_time)
print("Writing the output of said query into file")
with open('cards.txt', 'a') as f:
for card in cards:
f.write(str(card) + '\n')
else:
while query_season < 2:
query_season += 1
processed_query = posted_query.replace(":", "%3A").replace("&", "%26").replace("!", "%21").replace("|", "%7C").replace(" ", "+").replace("(", "%28").replace(")", "%29")
query = f'http://azure.nsr3n.info/card_queries/get_daemon_advanced.sh?format=full&query={processed_query}&season={query_season}&format=json&submit=submit'
print('Running...accessing r3n\'s server')
reqs = requests.get(query)
cards = reqs.json()['cards']
print("Finished accessing r3n\'s server")
print("Writing the output of said query into file")
with open('cards.txt', 'a') as f:
for card in cards:
f.write(str(card) + '\n')
else:
if not os.path.exists("cards.txt"):
eprint("""
`cards.txt` does not exist in your directory!
If you are listing the address in your command-line interface like this:
C:/Users/NAME > C:/Users/NAME/your/path/here/allinone.py
Please create `cards.txt` in your C:/Users/NAME directory or `cd` to the directory (strongly recommended) like this:
C:/Users/NAME > cd C:/Users/NAME/your/path/here & python allinone.py
""")
input("Press enter to continue...")
await asyncio.sleep(0)
sys.exit(1)
with open("cards.txt", "r") as lines:
linenum = 0
for line in lines.readlines():
linenum += 1
if temp := re.match(r"^https?://(www\.)?nationstates.net/page=deck/card=(?P<id>[0-9]+)/?(/season=(?P<season>[0-9]+))?/?(\s+)(?P<name>\w+)", line):
id, season, name = temp.group("id"), temp.group("season"), temp.group("name")
elif temp := re.match("(?P<id>[0-9]+)\s+(?P<name>\w+)(\s+(?P<season>[0-9]+))?", line):
id, name, season = temp.group("id"), temp.group("name"), temp.group("season")
elif temp := re.match("{'id': '(?P<id>[0-9]+)', 'name': '(?P<name>\w+)', 'season': '(?P<season>[0-9]+)'}", line):
id, name, season = temp.group("id"), temp.group("name"), temp.group("season")
else:
eprint(f"Unable to process line {linenum} because you put in a wrong format")
continue
if season is not None:
cards.append({'id': id, 'name': name, 'season': season})
else:
for s in range(1,3):
cards.append({'id': id, 'name': name, 'season': s})
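# Accepted cards.txt line formats, as implied by the three regexes above
# (a descriptive sketch, not part of the original script):
#   https://www.nationstates.net/page=deck/card=12345/season=1   testlandia
#   12345 testlandia 1    (season optional; both seasons are used if omitted)
#   {'id': '12345', 'name': 'testlandia', 'season': '1'}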
file_name = datetime.datetime.now().strftime(f"{nation} %Y-%m-%d %H-%M-%S.txt")
with open(file_name, "x") as output_file:
if os.path.exists("preamble.txt"):
with open("preamble.txt", 'r') as p:
output_file.write(p.read() + "\n")
else:
eprint("""
`preamble.txt` does not exist in your directory!
If you are listing the address in your command-line interface like this:
C:/Users/NAME > C:/Users/NAME/your/path/here/allinone.py
Please create `preamble.txt` in your C:/Users/NAME directory or `cd` to the directory (strongly recommended) like this:
C:/Users/NAME > cd C:/Users/NAME/your/path/here & python allinone.py
""")
output_file.write("[box][i]This table was generated with the help of [nation]Racoda[/nation]'s RCES owner report, which can be found [url=https://github.com/dithpri/RCES]here.[/url] I coded a way to automate this [url=https://github.com/Atlae/Dispatch-Maker]here[/url]. -[nation]Atlae[/nation] ([nation]The Atlae Isles[/nation])[/i][/box]\n")
output_file.write("[box][table][tr][td][b]NAME[/b][/td][td][b]CARD LINK[/b][/td][td][b]NUMBER OF OWNERS[/b][/td][td][b]NUMBER OF COPIES[/b][/td][td][b]OWNERS[/b][/td][/tr]\n")
for card in cards:
id = card['id']
name = card['name']
season = card['season']
owners_dict = defaultdict(int)
num_owners = 0
num_copies = 0
owners_copies = "[list][*][i]No owners... :([/i][/list]"
await ratelimit()
result = await Api("card owners", cardid=id, season=season)
try:
for owner in result.OWNERS.OWNER:
num_copies += 1
owners_dict[owner.text] += 1
except AttributeError:
if result.find("OWNERS") == None:
eprint(f"Card {id} season {season} does not exist.")
continue
owners = owners_dict.keys()
num_owners = len(owners)
if num_owners > 0:
owners_copies = ",".join(
[
":".join((a, str(b)))
for a, b in sorted(
owners_dict.items(), key=lambda x: x[1], reverse=True
)
]
)
owners_copies = re.sub(r":\d+,", "[/nation][*][nation]", owners_copies)
owners_copies = re.sub(r":\d+", "[/nation]", owners_copies)
owners_copies = "[list][*][nation]" + owners_copies + "[/list]"
output_file.write(
f"[tr][td]{name}[/td][td][url=https://www.nationstates.net/page=deck/card={id}/season={season}]Link to Card[/url][/td][td]{num_owners}[/td][td]{num_copies}[/td][td]{owners_copies}[/td][/tr]\n"
)
print(f"Added {card}")
output_file.write("[/table][/box]")
with open(file_name, "r") as output_file:
update(username, password, output_file.read())
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog='run_me', description="Python program to create a formatted dispatch of cards and owners")
parser.add_argument('--u', dest='USERNAME', type=str, nargs='?', default=None, help="Please enter your username.")
parser.add_argument('--p', dest='PASSWORD', type=str, nargs='?', default=None, help="Please enter your password (only you can see it).")
parser.add_argument('--s', dest='SEASON', type=int, nargs='?', default=-1, help="The season you want to search.")
parser.add_argument('--q', dest='QUERY', type=str, nargs='?', default=None, help="Please enter your query using the Advanced Cards Queries Syntax.")
args = parser.parse_args()
print(args)
asyncio.run(main(args.USERNAME, args.PASSWORD, args.SEASON, args.QUERY), debug=False)
| python |
from utime import sleep
from textbuffer import TextBuffer
from uio import IOBase
from uos import dupterm
class Monitor(IOBase):
def __init__(self, cols=40, rows=4):
self.textbuffer = TextBuffer(cols, rows)
def read(self, size):
return None
def write(self, byteslike):
with open("write.txt", 'wa') as dumpfile:
for byte in byteslike:
dumpfile.write(str(byte) + ' ' + chr(byte) + '\n')
self.textbuffer.write(byteslike)
self.dump_screen()
self.dump_lines()
self.dump_wrapped()
return len(byteslike)
def dump_screen(self):
lines = []
line_dict = self.textbuffer.pop()
for y in range(self.textbuffer.rows):
if y in line_dict:
lines.append(line_dict[y] + '\n')
else:
lines.append('*' * self.textbuffer.cols + '\n')
lines.append('\n')
lines.append(str(self.textbuffer.offset) + '\n')
lines.append(self.textbuffer.previous_char + '\n')
lines.append(str(len(line_dict)) + '\n')
with open("screen.txt", 'w') as dumpfile:
for line in lines:
dumpfile.write(line)
def dump_lines(self):
with open("lines.txt", 'w') as dumpfile:
for line in self.textbuffer.lines:
dumpfile.write(line + '\n')
def dump_wrapped(self):
with open("wrapped.txt", 'w') as dumpfile:
for wrapped_lines in self.textbuffer.wrapped:
for line in wrapped_lines:
dumpfile.write(line + '\n')
monitor = Monitor()
prev = dupterm(monitor, 1)
#print(prev)
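# Descriptive note (an assumption, not in the original source): dupterm()
# returns the stream it replaced, so the REPL could later be restored with
# dupterm(prev, 1), or detached from this Monitor with dupterm(None, 1).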
| python |
from models.builder import CoordSolverBuilder # NOQA
from models.common import CKeyClassifier # NOQA
from models.common import PairScorer # NOQA
from models.encoder import Encoder # NOQA
from models.feature import BaselineExtractor # NOQA
from models.feature import FeatureExtractor1 # NOQA
from models.feature import FeatureExtractor2 # NOQA
from models.teranishi17 import Teranishi17 # NOQA
from models.teranishi19 import Teranishi19 # NOQA
| python |
# Generated by Django 3.1.12 on 2021-09-22 21:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('projects', '0023_delete_projectuser'),
]
operations = [
migrations.AlterField(
model_name='project',
name='sample',
field=models.CharField(blank=True, max_length=240, null=True),
),
]
| python |
#!/usr/bin/python3
import rclpy
from rclpy.node import Node
from std_msgs.msg import String
from chess_msgs.srv import GetNextMove, SetEloRating
from std_srvs.srv import Empty
import stockfish
dft_cfg = {
"Write Debug Log": "false",
"Contempt": 0,
"Min Split Depth": 0,
"Threads": 1,
"Ponder": "false",
"Hash": 16,
"MultiPV": 1,
"Skill Level": 20,
"Move Overhead": 30,
"Minimum Thinking Time": 20,
"Slow Mover": 80,
"UCI_Chess960": "false",
}
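# Note (an assumption, not in the original source): dft_cfg is defined but never
# handed to the engine below; the python `stockfish` package accepts such a dict
# via e.g. stockfish.Stockfish(depth=18, parameters=dft_cfg) if these defaults
# are meant to be applied.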
class StockFishROS(Node):
def __init__(self, node_name="stockfish_node"):
super().__init__(node_name)
self._stockfish = stockfish.Stockfish(depth=18)
self._get_move_played = self.create_subscription(
String, "played_move", self._move_played_cb, 10
)
self._get_next_move_srv = self.create_service(
GetNextMove, "get_next_move", self._get_next_move_cb
)
self._set_skill_level_srv = self.create_service(
SetEloRating, "set_elo_rating", self._set_elo_rating
)
self._reset_game_srv = self.create_service(
Empty, "reset_game", self._reset_game
)
def _move_played_cb(self, msg):
self.get_logger().info("Received move %s" % msg.data)
self._stockfish.make_moves_from_current_position([msg.data])
def _get_next_move_cb(self, _, response):
move = self._stockfish.get_best_move_time(1000)
self.get_logger().info("My next move %s" % move)
response.move = move
type = self._stockfish.will_move_be_a_capture("move")
if type == stockfish.Capture.DIRECT_CAPTURE:
type = "capture"
elif type == stockfish.Capture.EN_PASSANT:
type = "en_passant"
elif type == stockfish.Capture.NO_CAPTURE:
m_P1 = move[0:2]  # source square, e.g. "e8"
m_P2 = move[2:4]  # destination square, e.g. "g8"
p1 = self._stockfish.get_what_is_on_square(m_P1)
p2 = self._stockfish.get_what_is_on_square(m_P2)
if (
p1 is stockfish.Piece.BLACK_KING
and m_P1 == "e8"
and (m_P2 == "g8" or m_P2 == "c8")
):
type = "roque"
else:
type = "no_capture"
response.type = type
return response
def _set_elo_rating(self, request, response):
self.get_logger().info("Elo Rating %s" % request.elo_rating)
self._stockfish.set_elo_rating(request.elo_rating)
response.success = True
return response
def _reset_game(self, _, response):
self.get_logger().info("Reset")
self._stockfish.set_position([""])
return response
def main(args=None):
rclpy.init(args=args)
stockfish_node = StockFishROS()
try:
rclpy.spin(stockfish_node)
finally:
stockfish_node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main()
| python |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.privacy.dlp.v2",
manifest={
"Likelihood",
"FileType",
"InfoType",
"StoredType",
"CustomInfoType",
"FieldId",
"PartitionId",
"KindExpression",
"DatastoreOptions",
"CloudStorageRegexFileSet",
"CloudStorageOptions",
"CloudStorageFileSet",
"CloudStoragePath",
"BigQueryOptions",
"StorageConfig",
"HybridOptions",
"BigQueryKey",
"DatastoreKey",
"Key",
"RecordKey",
"BigQueryTable",
"BigQueryField",
"EntityId",
"TableOptions",
},
)
class Likelihood(proto.Enum):
r"""Categorization of results based on how likely they are to
represent a match, based on the number of elements they contain
which imply a match.
"""
LIKELIHOOD_UNSPECIFIED = 0
VERY_UNLIKELY = 1
UNLIKELY = 2
POSSIBLE = 3
LIKELY = 4
VERY_LIKELY = 5
class FileType(proto.Enum):
r"""Definitions of file type groups to scan. New types will be
added to this list.
"""
FILE_TYPE_UNSPECIFIED = 0
BINARY_FILE = 1
TEXT_FILE = 2
IMAGE = 3
WORD = 5
PDF = 6
AVRO = 7
CSV = 8
TSV = 9
class InfoType(proto.Message):
r"""Type of information detected by the API.
Attributes:
name (str):
Name of the information type. Either a name of your choosing
when creating a CustomInfoType, or one of the names listed
at https://cloud.google.com/dlp/docs/infotypes-reference
when specifying a built-in type. When sending Cloud DLP
results to Data Catalog, infoType names should conform to
the pattern ``[A-Za-z0-9$-_]{1,64}``.
version (str):
Optional version name for this InfoType.
"""
name = proto.Field(proto.STRING, number=1,)
version = proto.Field(proto.STRING, number=2,)
class StoredType(proto.Message):
r"""A reference to a StoredInfoType to use with scanning.
Attributes:
name (str):
Resource name of the requested ``StoredInfoType``, for
example
``organizations/433245324/storedInfoTypes/432452342`` or
``projects/project-id/storedInfoTypes/432452342``.
create_time (google.protobuf.timestamp_pb2.Timestamp):
Timestamp indicating when the version of the
``StoredInfoType`` used for inspection was created.
Output-only field, populated by the system.
"""
name = proto.Field(proto.STRING, number=1,)
create_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
class CustomInfoType(proto.Message):
r"""Custom information type provided by the user. Used to find
domain-specific sensitive information configurable to the data
in question.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
info_type (google.cloud.dlp_v2.types.InfoType):
CustomInfoType can either be a new infoType, or an extension
of built-in infoType, when the name matches one of existing
infoTypes and that infoType is specified in
``InspectContent.info_types`` field. Specifying the latter
adds findings to the one detected by the system. If built-in
info type is not specified in ``InspectContent.info_types``
list then the name is treated as a custom info type.
likelihood (google.cloud.dlp_v2.types.Likelihood):
Likelihood to return for this CustomInfoType. This base
value can be altered by a detection rule if the finding
meets the criteria specified by the rule. Defaults to
``VERY_LIKELY`` if not specified.
dictionary (google.cloud.dlp_v2.types.CustomInfoType.Dictionary):
A list of phrases to detect as a
CustomInfoType.
This field is a member of `oneof`_ ``type``.
regex (google.cloud.dlp_v2.types.CustomInfoType.Regex):
Regular expression based CustomInfoType.
This field is a member of `oneof`_ ``type``.
surrogate_type (google.cloud.dlp_v2.types.CustomInfoType.SurrogateType):
Message for detecting output from
deidentification transformations that support
reversing.
This field is a member of `oneof`_ ``type``.
stored_type (google.cloud.dlp_v2.types.StoredType):
Load an existing ``StoredInfoType`` resource for use in
``InspectDataSource``. Not currently supported in
``InspectContent``.
This field is a member of `oneof`_ ``type``.
detection_rules (Sequence[google.cloud.dlp_v2.types.CustomInfoType.DetectionRule]):
Set of detection rules to apply to all findings of this
CustomInfoType. Rules are applied in order that they are
specified. Not supported for the ``surrogate_type``
CustomInfoType.
exclusion_type (google.cloud.dlp_v2.types.CustomInfoType.ExclusionType):
If set to EXCLUSION_TYPE_EXCLUDE this infoType will not
cause a finding to be returned. It still can be used for
rules matching.
"""
class ExclusionType(proto.Enum):
r""""""
EXCLUSION_TYPE_UNSPECIFIED = 0
EXCLUSION_TYPE_EXCLUDE = 1
class Dictionary(proto.Message):
r"""Custom information type based on a dictionary of words or phrases.
This can be used to match sensitive information specific to the
data, such as a list of employee IDs or job titles.
Dictionary words are case-insensitive and all characters other than
letters and digits in the unicode `Basic Multilingual
Plane <https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane>`__
will be replaced with whitespace when scanning for matches, so the
dictionary phrase "Sam Johnson" will match all three phrases "sam
johnson",
Plane](https://en.wikipedia.org/wiki/Plane_%28Unicode%29#Basic_Multilingual_Plane)
surrounding any match must be of a different type than the adjacent
characters within the word, so letters must be next to non-letters
and digits next to non-digits. For example, the dictionary word
"jen" will match the first three letters of the text "jen123" but
will return no matches for "jennifer".
Dictionary words containing a large number of characters that are
not letters or digits may result in unexpected findings because such
characters are treated as whitespace. The
`limits <https://cloud.google.com/dlp/limits>`__ page contains
details about the size limits of dictionaries. For dictionaries that
do not fit within these constraints, consider using
``LargeCustomDictionaryConfig`` in the ``StoredInfoType`` API.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
word_list (google.cloud.dlp_v2.types.CustomInfoType.Dictionary.WordList):
List of words or phrases to search for.
This field is a member of `oneof`_ ``source``.
cloud_storage_path (google.cloud.dlp_v2.types.CloudStoragePath):
Newline-delimited file of words in Cloud
Storage. Only a single file is accepted.
This field is a member of `oneof`_ ``source``.
"""
class WordList(proto.Message):
r"""Message defining a list of words or phrases to search for in
the data.
Attributes:
words (Sequence[str]):
Words or phrases defining the dictionary. The dictionary
must contain at least one phrase and every phrase must
contain at least 2 characters that are letters or digits.
[required]
"""
words = proto.RepeatedField(proto.STRING, number=1,)
word_list = proto.Field(
proto.MESSAGE,
number=1,
oneof="source",
message="CustomInfoType.Dictionary.WordList",
)
cloud_storage_path = proto.Field(
proto.MESSAGE, number=3, oneof="source", message="CloudStoragePath",
)
class Regex(proto.Message):
r"""Message defining a custom regular expression.
Attributes:
pattern (str):
Pattern defining the regular expression. Its
syntax
(https://github.com/google/re2/wiki/Syntax) can
be found under the google/re2 repository on
GitHub.
group_indexes (Sequence[int]):
The index of the submatch
to extract as findings. When not specified, the
entire match is returned. No more than 3 may be
included.
"""
pattern = proto.Field(proto.STRING, number=1,)
group_indexes = proto.RepeatedField(proto.INT32, number=2,)
class SurrogateType(proto.Message):
r"""Message for detecting output from deidentification transformations
such as
```CryptoReplaceFfxFpeConfig`` <https://cloud.google.com/dlp/docs/reference/rest/v2/organizations.deidentifyTemplates#cryptoreplaceffxfpeconfig>`__.
These types of transformations are those that perform
pseudonymization, thereby producing a "surrogate" as
output. This should be used in conjunction with a field on the
transformation such as ``surrogate_info_type``. This CustomInfoType
does not support the use of ``detection_rules``.
"""
class DetectionRule(proto.Message):
r"""Deprecated; use ``InspectionRuleSet`` instead. Rule for modifying a
``CustomInfoType`` to alter behavior under certain circumstances,
depending on the specific details of the rule. Not supported for the
``surrogate_type`` custom infoType.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
hotword_rule (google.cloud.dlp_v2.types.CustomInfoType.DetectionRule.HotwordRule):
Hotword-based detection rule.
This field is a member of `oneof`_ ``type``.
"""
class Proximity(proto.Message):
r"""Message for specifying a window around a finding to apply a
detection rule.
Attributes:
window_before (int):
Number of characters before the finding to
consider.
window_after (int):
Number of characters after the finding to
consider.
"""
window_before = proto.Field(proto.INT32, number=1,)
window_after = proto.Field(proto.INT32, number=2,)
class LikelihoodAdjustment(proto.Message):
r"""Message for specifying an adjustment to the likelihood of a
finding as part of a detection rule.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
fixed_likelihood (google.cloud.dlp_v2.types.Likelihood):
Set the likelihood of a finding to a fixed
value.
This field is a member of `oneof`_ ``adjustment``.
relative_likelihood (int):
Increase or decrease the likelihood by the specified number
of levels. For example, if a finding would be ``POSSIBLE``
without the detection rule and ``relative_likelihood`` is 1,
then it is upgraded to ``LIKELY``, while a value of -1 would
downgrade it to ``UNLIKELY``. Likelihood may never drop
below ``VERY_UNLIKELY`` or exceed ``VERY_LIKELY``, so
applying an adjustment of 1 followed by an adjustment of -1
when base likelihood is ``VERY_LIKELY`` will result in a
final likelihood of ``LIKELY``.
This field is a member of `oneof`_ ``adjustment``.
"""
fixed_likelihood = proto.Field(
proto.ENUM, number=1, oneof="adjustment", enum="Likelihood",
)
relative_likelihood = proto.Field(
proto.INT32, number=2, oneof="adjustment",
)
class HotwordRule(proto.Message):
r"""The rule that adjusts the likelihood of findings within a
certain proximity of hotwords.
Attributes:
hotword_regex (google.cloud.dlp_v2.types.CustomInfoType.Regex):
Regular expression pattern defining what
qualifies as a hotword.
proximity (google.cloud.dlp_v2.types.CustomInfoType.DetectionRule.Proximity):
Proximity of the finding within which the
entire hotword must reside. The total length of
the window cannot exceed 1000 characters. Note
that the finding itself will be included in the
window, so that hotwords may be used to match
substrings of the finding itself. For example,
the certainty of a phone number regex "\(\d{3}\)
\d{3}-\d{4}" could be adjusted upwards if the
area code is known to be the local area code of
a company office using the hotword regex
"\(xxx\)", where "xxx" is the area code in
question.
likelihood_adjustment (google.cloud.dlp_v2.types.CustomInfoType.DetectionRule.LikelihoodAdjustment):
Likelihood adjustment to apply to all
matching findings.
"""
hotword_regex = proto.Field(
proto.MESSAGE, number=1, message="CustomInfoType.Regex",
)
proximity = proto.Field(
proto.MESSAGE,
number=2,
message="CustomInfoType.DetectionRule.Proximity",
)
likelihood_adjustment = proto.Field(
proto.MESSAGE,
number=3,
message="CustomInfoType.DetectionRule.LikelihoodAdjustment",
)
hotword_rule = proto.Field(
proto.MESSAGE,
number=1,
oneof="type",
message="CustomInfoType.DetectionRule.HotwordRule",
)
info_type = proto.Field(proto.MESSAGE, number=1, message="InfoType",)
likelihood = proto.Field(proto.ENUM, number=6, enum="Likelihood",)
dictionary = proto.Field(proto.MESSAGE, number=2, oneof="type", message=Dictionary,)
regex = proto.Field(proto.MESSAGE, number=3, oneof="type", message=Regex,)
surrogate_type = proto.Field(
proto.MESSAGE, number=4, oneof="type", message=SurrogateType,
)
stored_type = proto.Field(
proto.MESSAGE, number=5, oneof="type", message="StoredType",
)
detection_rules = proto.RepeatedField(
proto.MESSAGE, number=7, message=DetectionRule,
)
exclusion_type = proto.Field(proto.ENUM, number=8, enum=ExclusionType,)
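# Illustrative sketch (not part of the generated file): building a dictionary
# based CustomInfoType with the proto-plus wrappers defined above, assuming the
# public google-cloud-dlp package layout:
#
#   from google.cloud import dlp_v2
#   employee_ids = dlp_v2.CustomInfoType(
#       info_type=dlp_v2.InfoType(name="EMPLOYEE_ID"),
#       likelihood=dlp_v2.Likelihood.LIKELY,
#       dictionary=dlp_v2.CustomInfoType.Dictionary(
#           word_list=dlp_v2.CustomInfoType.Dictionary.WordList(
#               words=["E12345", "E67890"],
#           ),
#       ),
#   )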
class FieldId(proto.Message):
r"""General identifier of a data field in a storage service.
Attributes:
name (str):
Name describing the field.
"""
name = proto.Field(proto.STRING, number=1,)
class PartitionId(proto.Message):
r"""Datastore partition ID.
A partition ID identifies a grouping of entities. The grouping
is always by project and namespace, however the namespace ID may
be empty.
A partition ID contains several dimensions:
project ID and namespace ID.
Attributes:
project_id (str):
The ID of the project to which the entities
belong.
namespace_id (str):
If not empty, the ID of the namespace to
which the entities belong.
"""
project_id = proto.Field(proto.STRING, number=2,)
namespace_id = proto.Field(proto.STRING, number=4,)
class KindExpression(proto.Message):
r"""A representation of a Datastore kind.
Attributes:
name (str):
The name of the kind.
"""
name = proto.Field(proto.STRING, number=1,)
class DatastoreOptions(proto.Message):
r"""Options defining a data set within Google Cloud Datastore.
Attributes:
partition_id (google.cloud.dlp_v2.types.PartitionId):
A partition ID identifies a grouping of
entities. The grouping is always by project and
namespace, however the namespace ID may be
empty.
kind (google.cloud.dlp_v2.types.KindExpression):
The kind to process.
"""
partition_id = proto.Field(proto.MESSAGE, number=1, message="PartitionId",)
kind = proto.Field(proto.MESSAGE, number=2, message="KindExpression",)
class CloudStorageRegexFileSet(proto.Message):
r"""Message representing a set of files in a Cloud Storage bucket.
Regular expressions are used to allow fine-grained control over
which files in the bucket to include.
Included files are those that match at least one item in
``include_regex`` and do not match any items in ``exclude_regex``.
Note that a file that matches items from both lists will *not* be
included. For a match to occur, the entire file path (i.e.,
everything in the url after the bucket name) must match the regular
expression.
For example, given the input
``{bucket_name: "mybucket", include_regex: ["directory1/.*"], exclude_regex: ["directory1/excluded.*"]}``:
- ``gs://mybucket/directory1/myfile`` will be included
- ``gs://mybucket/directory1/directory2/myfile`` will be included
(``.*`` matches across ``/``)
- ``gs://mybucket/directory0/directory1/myfile`` will *not* be
included (the full path doesn't match any items in
``include_regex``)
- ``gs://mybucket/directory1/excludedfile`` will *not* be included
(the path matches an item in ``exclude_regex``)
If ``include_regex`` is left empty, it will match all files by
default (this is equivalent to setting ``include_regex: [".*"]``).
Some other common use cases:
- ``{bucket_name: "mybucket", exclude_regex: [".*\.pdf"]}`` will
include all files in ``mybucket`` except for .pdf files
- ``{bucket_name: "mybucket", include_regex: ["directory/[^/]+"]}``
will include all files directly under
``gs://mybucket/directory/``, without matching across ``/``
Attributes:
bucket_name (str):
The name of a Cloud Storage bucket. Required.
include_regex (Sequence[str]):
A list of regular expressions matching file paths to
include. All files in the bucket that match at least one of
these regular expressions will be included in the set of
files, except for those that also match an item in
``exclude_regex``. Leaving this field empty will match all
files by default (this is equivalent to including ``.*`` in
the list).
Regular expressions use RE2
`syntax <https://github.com/google/re2/wiki/Syntax>`__; a
guide can be found under the google/re2 repository on
GitHub.
exclude_regex (Sequence[str]):
A list of regular expressions matching
file paths to exclude. All files in the bucket that match at
least one of these regular expressions will be excluded from
the scan.
Regular expressions use RE2
`syntax <https://github.com/google/re2/wiki/Syntax>`__; a
guide can be found under the google/re2 repository on
GitHub.
"""
bucket_name = proto.Field(proto.STRING, number=1,)
include_regex = proto.RepeatedField(proto.STRING, number=2,)
exclude_regex = proto.RepeatedField(proto.STRING, number=3,)
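# Illustrative sketch (not part of the generated file), mirroring the example
# given in the docstring above:
#
#   from google.cloud import dlp_v2
#   file_set = dlp_v2.CloudStorageRegexFileSet(
#       bucket_name="mybucket",
#       include_regex=["directory1/.*"],
#       exclude_regex=["directory1/excluded.*"],
#   )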
class CloudStorageOptions(proto.Message):
r"""Options defining a file or a set of files within a Google
Cloud Storage bucket.
Attributes:
file_set (google.cloud.dlp_v2.types.CloudStorageOptions.FileSet):
The set of one or more files to scan.
bytes_limit_per_file (int):
Max number of bytes to scan from a file. If a scanned file's
size is bigger than this value then the rest of the bytes
are omitted. Only one of bytes_limit_per_file and
bytes_limit_per_file_percent can be specified. Cannot be set
if de-identification is requested.
bytes_limit_per_file_percent (int):
Max percentage of bytes to scan from a file. The rest are
omitted. The number of bytes scanned is rounded down. Must
be between 0 and 100, inclusively. Both 0 and 100 means no
limit. Defaults to 0. Only one of bytes_limit_per_file and
bytes_limit_per_file_percent can be specified. Cannot be set
if de-identification is requested.
file_types (Sequence[google.cloud.dlp_v2.types.FileType]):
List of file type groups to include in the scan. If empty,
all files are scanned and available data format processors
are applied. In addition, the binary content of the selected
files is always scanned as well. Images are scanned only as
binary if the specified region does not support image
inspection and no file_types were specified. Image
inspection is restricted to 'global', 'us', 'asia', and
'europe'.
sample_method (google.cloud.dlp_v2.types.CloudStorageOptions.SampleMethod):
files_limit_percent (int):
Limits the number of files to scan to this
percentage of the input FileSet. Number of files
scanned is rounded down. Must be between 0 and
100, inclusively. Both 0 and 100 means no limit.
Defaults to 0.
"""
class SampleMethod(proto.Enum):
r"""How to sample bytes if not all bytes are scanned. Meaningful only
when used in conjunction with bytes_limit_per_file. If not
specified, scanning would start from the top.
"""
SAMPLE_METHOD_UNSPECIFIED = 0
TOP = 1
RANDOM_START = 2
class FileSet(proto.Message):
r"""Set of files to scan.
Attributes:
url (str):
The Cloud Storage url of the file(s) to scan, in the format
``gs://<bucket>/<path>``. Trailing wildcard in the path is
allowed.
If the url ends in a trailing slash, the bucket or directory
represented by the url will be scanned non-recursively
(content in sub-directories will not be scanned). This means
that ``gs://mybucket/`` is equivalent to
``gs://mybucket/*``, and ``gs://mybucket/directory/`` is
equivalent to ``gs://mybucket/directory/*``.
Exactly one of ``url`` or ``regex_file_set`` must be set.
regex_file_set (google.cloud.dlp_v2.types.CloudStorageRegexFileSet):
The regex-filtered set of files to scan. Exactly one of
``url`` or ``regex_file_set`` must be set.
"""
url = proto.Field(proto.STRING, number=1,)
regex_file_set = proto.Field(
proto.MESSAGE, number=2, message="CloudStorageRegexFileSet",
)
file_set = proto.Field(proto.MESSAGE, number=1, message=FileSet,)
bytes_limit_per_file = proto.Field(proto.INT64, number=4,)
bytes_limit_per_file_percent = proto.Field(proto.INT32, number=8,)
file_types = proto.RepeatedField(proto.ENUM, number=5, enum="FileType",)
sample_method = proto.Field(proto.ENUM, number=6, enum=SampleMethod,)
files_limit_percent = proto.Field(proto.INT32, number=7,)
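# Illustrative sketch (not part of the generated file): limiting a Cloud Storage
# scan with these options, assuming the public google-cloud-dlp package layout:
#
#   from google.cloud import dlp_v2
#   options = dlp_v2.CloudStorageOptions(
#       file_set=dlp_v2.CloudStorageOptions.FileSet(url="gs://mybucket/directory/"),
#       bytes_limit_per_file=1048576,  # scan at most ~1 MiB of each file
#       file_types=[dlp_v2.FileType.TEXT_FILE, dlp_v2.FileType.CSV],
#   )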
class CloudStorageFileSet(proto.Message):
r"""Message representing a set of files in Cloud Storage.
Attributes:
url (str):
The url, in the format ``gs://<bucket>/<path>``. Trailing
wildcard in the path is allowed.
"""
url = proto.Field(proto.STRING, number=1,)
class CloudStoragePath(proto.Message):
r"""Message representing a single file or path in Cloud Storage.
Attributes:
path (str):
A url representing a file or path (no wildcards) in Cloud
Storage. Example: gs://[BUCKET_NAME]/dictionary.txt
"""
path = proto.Field(proto.STRING, number=1,)
class BigQueryOptions(proto.Message):
r"""Options defining BigQuery table and row identifiers.
Attributes:
table_reference (google.cloud.dlp_v2.types.BigQueryTable):
Complete BigQuery table reference.
identifying_fields (Sequence[google.cloud.dlp_v2.types.FieldId]):
Table fields that may uniquely identify a row within the
table. When ``actions.saveFindings.outputConfig.table`` is
specified, the values of columns specified here are
available in the output table under
``location.content_locations.record_location.record_key.id_values``.
Nested fields such as ``person.birthdate.year`` are allowed.
rows_limit (int):
Max number of rows to scan. If the table has more rows than
this value, the rest of the rows are omitted. If not set, or
if set to 0, all rows will be scanned. Only one of
rows_limit and rows_limit_percent can be specified. Cannot
be used in conjunction with TimespanConfig.
rows_limit_percent (int):
Max percentage of rows to scan. The rest are omitted. The
number of rows scanned is rounded down. Must be between 0
and 100, inclusively. Both 0 and 100 means no limit.
Defaults to 0. Only one of rows_limit and rows_limit_percent
can be specified. Cannot be used in conjunction with
TimespanConfig.
sample_method (google.cloud.dlp_v2.types.BigQueryOptions.SampleMethod):
excluded_fields (Sequence[google.cloud.dlp_v2.types.FieldId]):
References to fields excluded from scanning.
This allows you to skip inspection of entire
columns which you know have no findings.
included_fields (Sequence[google.cloud.dlp_v2.types.FieldId]):
Limit scanning only to these fields.
"""
class SampleMethod(proto.Enum):
r"""How to sample rows if not all rows are scanned. Meaningful only when
used in conjunction with either rows_limit or rows_limit_percent. If
not specified, rows are scanned in the order BigQuery reads them.
"""
SAMPLE_METHOD_UNSPECIFIED = 0
TOP = 1
RANDOM_START = 2
table_reference = proto.Field(proto.MESSAGE, number=1, message="BigQueryTable",)
identifying_fields = proto.RepeatedField(
proto.MESSAGE, number=2, message="FieldId",
)
rows_limit = proto.Field(proto.INT64, number=3,)
rows_limit_percent = proto.Field(proto.INT32, number=6,)
sample_method = proto.Field(proto.ENUM, number=4, enum=SampleMethod,)
excluded_fields = proto.RepeatedField(proto.MESSAGE, number=5, message="FieldId",)
included_fields = proto.RepeatedField(proto.MESSAGE, number=7, message="FieldId",)
class StorageConfig(proto.Message):
r"""Shared message indicating Cloud storage type.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
datastore_options (google.cloud.dlp_v2.types.DatastoreOptions):
Google Cloud Datastore options.
This field is a member of `oneof`_ ``type``.
cloud_storage_options (google.cloud.dlp_v2.types.CloudStorageOptions):
Google Cloud Storage options.
This field is a member of `oneof`_ ``type``.
big_query_options (google.cloud.dlp_v2.types.BigQueryOptions):
BigQuery options.
This field is a member of `oneof`_ ``type``.
hybrid_options (google.cloud.dlp_v2.types.HybridOptions):
Hybrid inspection options.
This field is a member of `oneof`_ ``type``.
timespan_config (google.cloud.dlp_v2.types.StorageConfig.TimespanConfig):
"""
class TimespanConfig(proto.Message):
r"""Configuration of the timespan of the items to include in
scanning. Currently only supported when inspecting Google Cloud
Storage and BigQuery.
Attributes:
start_time (google.protobuf.timestamp_pb2.Timestamp):
Exclude files, tables, or rows older than
this value. If not set, no lower time limit is
applied.
end_time (google.protobuf.timestamp_pb2.Timestamp):
Exclude files, tables, or rows newer than
this value. If not set, no upper time limit is
applied.
timestamp_field (google.cloud.dlp_v2.types.FieldId):
Specification of the field containing the timestamp of
scanned items. Used for data sources like Datastore and
BigQuery.
For BigQuery: If this value is not specified and the table
was modified between the given start and end times, the
entire table will be scanned. If this value is specified,
then rows are filtered based on the given start and end
times. Rows with a ``NULL`` value in the provided BigQuery
column are skipped. Valid data types of the provided
BigQuery column are: ``INTEGER``, ``DATE``, ``TIMESTAMP``,
and ``DATETIME``.
For Datastore: If this value is specified, then entities are
filtered based on the given start and end times. If an
entity does not contain the provided timestamp property or
contains empty or invalid values, then it is included. Valid
data types of the provided timestamp property are:
``TIMESTAMP``.
enable_auto_population_of_timespan_config (bool):
When the job is started by a JobTrigger we will
automatically figure out a valid start_time to avoid
scanning files that have not been modified since the last
time the JobTrigger executed. This will be based on the time
of the execution of the last run of the JobTrigger.
"""
start_time = proto.Field(
proto.MESSAGE, number=1, message=timestamp_pb2.Timestamp,
)
end_time = proto.Field(
proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,
)
timestamp_field = proto.Field(proto.MESSAGE, number=3, message="FieldId",)
enable_auto_population_of_timespan_config = proto.Field(proto.BOOL, number=4,)
datastore_options = proto.Field(
proto.MESSAGE, number=2, oneof="type", message="DatastoreOptions",
)
cloud_storage_options = proto.Field(
proto.MESSAGE, number=3, oneof="type", message="CloudStorageOptions",
)
big_query_options = proto.Field(
proto.MESSAGE, number=4, oneof="type", message="BigQueryOptions",
)
hybrid_options = proto.Field(
proto.MESSAGE, number=9, oneof="type", message="HybridOptions",
)
timespan_config = proto.Field(proto.MESSAGE, number=6, message=TimespanConfig,)
class HybridOptions(proto.Message):
r"""Configuration to control jobs where the content being
inspected is outside of Google Cloud Platform.
Attributes:
description (str):
A short description of where the data is
coming from. Will be stored once in the job. 256
max length.
required_finding_label_keys (Sequence[str]):
These are labels that each inspection request must include
within their 'finding_labels' map. Request may contain
others, but any missing one of these will be rejected.
Label keys must be between 1 and 63 characters long and must
conform to the following regular expression:
``[a-z]([-a-z0-9]*[a-z0-9])?``.
No more than 10 keys can be required.
labels (Sequence[google.cloud.dlp_v2.types.HybridOptions.LabelsEntry]):
To organize findings, these labels will be added to each
finding.
Label keys must be between 1 and 63 characters long and must
conform to the following regular expression:
``[a-z]([-a-z0-9]*[a-z0-9])?``.
Label values must be between 0 and 63 characters long and
must conform to the regular expression
``([a-z]([-a-z0-9]*[a-z0-9])?)?``.
No more than 10 labels can be associated with a given
finding.
Examples:
- ``"environment" : "production"``
- ``"pipeline" : "etl"``
table_options (google.cloud.dlp_v2.types.TableOptions):
If the container is a table, additional
information to make findings meaningful such as
the columns that are primary keys.
"""
description = proto.Field(proto.STRING, number=1,)
required_finding_label_keys = proto.RepeatedField(proto.STRING, number=2,)
labels = proto.MapField(proto.STRING, proto.STRING, number=3,)
table_options = proto.Field(proto.MESSAGE, number=4, message="TableOptions",)
class BigQueryKey(proto.Message):
r"""Row key for identifying a record in BigQuery table.
Attributes:
table_reference (google.cloud.dlp_v2.types.BigQueryTable):
Complete BigQuery table reference.
row_number (int):
Row number inferred at the time the table was scanned. This
value is nondeterministic, cannot be queried, and may be
null for inspection jobs. To locate findings within a table,
specify
``inspect_job.storage_config.big_query_options.identifying_fields``
in ``CreateDlpJobRequest``.
"""
table_reference = proto.Field(proto.MESSAGE, number=1, message="BigQueryTable",)
row_number = proto.Field(proto.INT64, number=2,)
class DatastoreKey(proto.Message):
r"""Record key for a finding in Cloud Datastore.
Attributes:
entity_key (google.cloud.dlp_v2.types.Key):
Datastore entity key.
"""
entity_key = proto.Field(proto.MESSAGE, number=1, message="Key",)
class Key(proto.Message):
r"""A unique identifier for a Datastore entity.
If a key's partition ID or any of its path kinds or names are
reserved/read-only, the key is reserved/read-only.
A reserved/read-only key is forbidden in certain documented
contexts.
Attributes:
partition_id (google.cloud.dlp_v2.types.PartitionId):
Entities are partitioned into subsets,
currently identified by a project ID and
namespace ID. Queries are scoped to a single
partition.
path (Sequence[google.cloud.dlp_v2.types.Key.PathElement]):
The entity path. An entity path consists of one or more
elements composed of a kind and a string or numerical
identifier, which identify entities. The first element
identifies a *root entity*, the second element identifies a
*child* of the root entity, the third element identifies a
child of the second entity, and so forth. The entities
identified by all prefixes of the path are called the
element's *ancestors*.
A path can never be empty, and a path can have at most 100
elements.
"""
class PathElement(proto.Message):
r"""A (kind, ID/name) pair used to construct a key path.
If either name or ID is set, the element is complete. If neither
is set, the element is incomplete.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
kind (str):
The kind of the entity. A kind matching regex ``__.*__`` is
reserved/read-only. A kind must not contain more than 1500
bytes when UTF-8 encoded. Cannot be ``""``.
id (int):
The auto-allocated ID of the entity.
Never equal to zero. Values less than zero are
discouraged and may not be supported in the
future.
This field is a member of `oneof`_ ``id_type``.
name (str):
The name of the entity. A name matching regex ``__.*__`` is
reserved/read-only. A name must not be more than 1500 bytes
when UTF-8 encoded. Cannot be ``""``.
This field is a member of `oneof`_ ``id_type``.
"""
kind = proto.Field(proto.STRING, number=1,)
id = proto.Field(proto.INT64, number=2, oneof="id_type",)
name = proto.Field(proto.STRING, number=3, oneof="id_type",)
partition_id = proto.Field(proto.MESSAGE, number=1, message="PartitionId",)
path = proto.RepeatedField(proto.MESSAGE, number=2, message=PathElement,)
class RecordKey(proto.Message):
r"""Message for a unique key indicating a record that contains a
finding.
This message has `oneof`_ fields (mutually exclusive fields).
For each oneof, at most one member field can be set at the same time.
Setting any member of the oneof automatically clears all other
members.
.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
Attributes:
datastore_key (google.cloud.dlp_v2.types.DatastoreKey):
This field is a member of `oneof`_ ``type``.
big_query_key (google.cloud.dlp_v2.types.BigQueryKey):
This field is a member of `oneof`_ ``type``.
id_values (Sequence[str]):
Values of identifying columns in the given row. Order of
values matches the order of ``identifying_fields`` specified
in the scanning request.
"""
datastore_key = proto.Field(
proto.MESSAGE, number=2, oneof="type", message="DatastoreKey",
)
big_query_key = proto.Field(
proto.MESSAGE, number=3, oneof="type", message="BigQueryKey",
)
id_values = proto.RepeatedField(proto.STRING, number=5,)
class BigQueryTable(proto.Message):
r"""Message defining the location of a BigQuery table. A table is
uniquely identified by its project_id, dataset_id, and table_name.
Within a query a table is often referenced with a string in the
format of: ``<project_id>:<dataset_id>.<table_id>`` or
``<project_id>.<dataset_id>.<table_id>``.
Attributes:
project_id (str):
The Google Cloud Platform project ID of the
project containing the table. If omitted,
project ID is inferred from the API call.
dataset_id (str):
Dataset ID of the table.
table_id (str):
Name of the table.
"""
project_id = proto.Field(proto.STRING, number=1,)
dataset_id = proto.Field(proto.STRING, number=2,)
table_id = proto.Field(proto.STRING, number=3,)
class BigQueryField(proto.Message):
r"""Message defining a field of a BigQuery table.
Attributes:
table (google.cloud.dlp_v2.types.BigQueryTable):
Source table of the field.
field (google.cloud.dlp_v2.types.FieldId):
Designated field in the BigQuery table.
"""
table = proto.Field(proto.MESSAGE, number=1, message="BigQueryTable",)
field = proto.Field(proto.MESSAGE, number=2, message="FieldId",)
class EntityId(proto.Message):
r"""An entity in a dataset is a field or set of fields that correspond
to a single person. For example, in medical records the ``EntityId``
might be a patient identifier, or for financial records it might be
an account identifier. This message is used when generalizations or
analysis must take into account that multiple rows correspond to the
same entity.
Attributes:
field (google.cloud.dlp_v2.types.FieldId):
Composite key indicating which field contains
the entity identifier.
"""
field = proto.Field(proto.MESSAGE, number=1, message="FieldId",)
class TableOptions(proto.Message):
r"""Instructions regarding the table content being inspected.
Attributes:
identifying_fields (Sequence[google.cloud.dlp_v2.types.FieldId]):
The columns that are the primary keys for
table objects included in ContentItem. A copy of
            this cell's value will be stored alongside
            each finding so that the finding can
be traced to the specific row it came from. No
more than 3 may be provided.
"""
identifying_fields = proto.RepeatedField(
proto.MESSAGE, number=1, message="FieldId",
)
__all__ = tuple(sorted(__protobuf__.manifest))
| python |
from wxpy import Bot, FEMALE
if __name__ == "__main__":
bot = Bot()
my_friend = bot.friends().search('胜男酱', sex=FEMALE)[0]
message = input()
while message != "exit":
my_friend.send(message)
message = input()
| python |
"""
Data structures to represent relations (i.e., data sets).
"""
import conclave.utils as utils
class Column:
"""
Column data structure.
"""
def __init__(self, rel_name: str, name: str, idx: int, type_str: str, trust_set: set):
"""
Initialize object.
:param rel_name: name of corresponding relation
:param name: name of column
:param idx: integer index of the column in the relation
:param type_str: describes type of values in column (currently only "INTEGER" supported)
:param trust_set: parties trusted to learn this column in the clear
"""
if type_str not in {"INTEGER"}:
raise Exception("Type not supported {}".format(type_str))
self.rel_name = rel_name
self.name = name
self.idx = idx
self.type_str = type_str
self.trust_set = trust_set
def get_name(self):
"""Return column name."""
return self.name
def get_idx(self):
"""Return column index."""
return self.idx
def dbg_str(self):
"""Return column name and trust set as string."""
coll_set_str = " ".join(sorted([str(party) for party in self.trust_set]))
return self.get_name() + " " + "{" + coll_set_str + "}"
def merge_coll_sets_in(self, other_coll_sets: set):
"""Merge collusion sets into column."""
self.trust_set = utils.merge_coll_sets(self.trust_set, other_coll_sets)
def __str__(self):
"""Return string representation of column object."""
return self.get_name()
class Relation:
"""
Relation data structure.
"""
def __init__(self, name: str, columns: list, stored_with: set):
"""Initialize object."""
self.name = name
self.columns = columns
self.stored_with = stored_with # Ownership of this data set. Does this refer to secret shares or open data?
def rename(self, new_name):
"""Rename relation."""
self.name = new_name
for col in self.columns:
col.rel_name = new_name
def is_shared(self):
"""Determine if this relation is shared."""
return len(self.stored_with) > 1
def update_column_indexes(self):
"""
Makes sure column indexes are same as the columns' positions
in the list. Call this after inserting new columns or otherwise
changing their order.
"""
for idx, col in enumerate(self.columns):
col.idx = idx
def update_columns(self):
"""Update relation name in relation column objects."""
self.update_column_indexes()
for col in self.columns:
col.rel_name = self.name
def dbg_str(self):
"""Return extended string representation for debugging."""
col_str = ", ".join([col.dbg_str() for col in self.columns])
return "{}([{}]) {}".format(self.name, col_str, self.stored_with)
def __str__(self):
"""Return string representation of relation."""
col_str = ", ".join([str(col) for col in self.columns])
return "{}([{}])".format(self.name, col_str)
| python |
import rsa
m = '00f0d1b6305ea6256c768f30b6a94ef6c9fa2ee0b8eea2ea5634f821925de774ac60e7cfe9d238489be12551b460ef7943fb0fc132fdfba35fd11a71e0b13d9fe4fed9af90eb69da8627fab28f9700ceb6747ef1e09d6b360553f5385bb8f6315a3c7f71fa0e491920fd18c8119e8ab97d96a06d618e945483d39d83e3a2cf2567'
e = '10001'
message = 'wxz2015111zc@srb'
import urllib.parse
class Rsa:
def __init__(self,e,m):
self.e = e
self.m = m
def encrypt(self,message):
message = urllib.parse.quote(message)
mm = int(self.m, 16)
ee = int(self.e, 16)
rsa_pubkey = rsa.PublicKey(mm, ee)
crypto = self._encrypt(message.encode(), rsa_pubkey)
return crypto.hex()
def _pad_for_encryption(self, message, target_length):
message = message[::-1]
msglength = len(message)
padding = b''
padding_length = target_length - msglength
for i in range(padding_length):
padding += b'\x00'
return b''.join([b'\x00\x00',padding,b'\x00',message])
def _encrypt(self, message, pub_key):
keylength = rsa.common.byte_size(pub_key.n)
padded = self._pad_for_encryption(message, keylength)
payload = rsa.transform.bytes2int(padded)
encrypted = rsa.core.encrypt_int(payload, pub_key.e, pub_key.n)
block = rsa.transform.int2bytes(encrypted, keylength)
return block
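# Example usage (a minimal sketch using the module-level key material and message defined
# above; requires the `rsa` package to be installed):
if __name__ == '__main__':
    encryptor = Rsa(e, m)
    # Prints the hex-encoded ciphertext produced by the custom zero-padding scheme above.
    print(encryptor.encrypt(message))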
| python |
import FWCore.ParameterSet.Config as cms
import TrackingTools.MaterialEffects.MaterialPropagator_cfi
# "forward" propagator for electrons
fwdGsfElectronPropagator = TrackingTools.MaterialEffects.MaterialPropagator_cfi.MaterialPropagator.clone(
Mass = 0.000511,
ComponentName = 'fwdGsfElectronPropagator'
)
| python |
import os
from cs50 import SQL
from flask import Flask, flash, redirect, render_template, request, session
from flask_session import Session
from tempfile import mkdtemp
from werkzeug.exceptions import default_exceptions
from werkzeug.security import check_password_hash, generate_password_hash
from helpers import apology, login_required, lookup, usd
# Configure application
application = app = Flask(__name__)
# Ensure templates are auto-reloaded
app.config["TEMPLATES_AUTO_RELOAD"] = True
# Set the secret key to some random bytes. Keep this really secret!
app.secret_key = os.getenv("secret")
# Ensure responses aren't cached
@app.after_request
def after_request(response):
response.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
response.headers["Expires"] = 0
response.headers["Pragma"] = "no-cache"
return response
# Custom filter
app.jinja_env.filters["usd"] = usd
# Configure session to use filesystem (instead of signed cookies)
app.config["SESSION_FILE_DIR"] = mkdtemp()
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Configure CS50 Library to use SQLite database
db = SQL("sqlite:///finance.db")
@app.route("/",methods=["GET"])
@login_required
def index():
total=0
data=[]
user_id = session["user_id"]
cash= db.execute("SELECT cash from users where id = :id",id=user_id)[0]['cash']
select_user=db.execute('SELECT "symbol",SUM("share") as sum FROM "transactions" WHERE "user_id" = :user_id GROUP BY "symbol"', user_id=user_id)
if len(select_user) > 0:
for i in select_user:
if i['sum'] >0 :
quote=lookup(i['symbol'])
temp={
'symbol': quote['symbol'],
'name': quote['name'],
'shares': i['sum'],
'price': usd(quote['price']),
'total': i['sum']*quote['price']
}
total+=temp['total']
temp['total']=usd(temp['total'])
data.append(temp)
return render_template("index.html",data=data,total=usd(total+cash),cash=usd(cash))
return render_template("index.html",total=cash,cash=cash)
@app.route("/buy", methods=["GET", "POST"])
@login_required
def buy():
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("symbol"):
return apology("must provide symbol", 403)
# Ensure password was submitted
elif not request.form.get("shares"):
return apology("must provide shares", 403)
quote=lookup(request.form.get("symbol"))
if not quote:
return apology("Invalid symbol", 403)
cash= db.execute("SELECT cash from users where id = :id",id=session["user_id"])[0]['cash']
if ( float(request.form.get("shares")) * quote['price'] > cash):
return apology("CANT AFFORD, you are poor", 403)
db.execute("INSERT into transactions (user_id,company,price,share,symbol,cost) values ( :user_id , :company , :price , :share,:symbol,:cost)" , \
user_id=session["user_id"],company=quote['name'],price=quote['price'],share=float(request.form.get("shares")),symbol=quote["symbol"],\
cost=float(request.form.get("shares")) * quote['price'])
db.execute("UPDATE users SET cash = :rem_cash WHERE id = :id ", rem_cash= cash - (float(request.form.get("shares")) * quote['price']) ,\
id=session["user_id"])
flash('Bought!')
return redirect("/")
return render_template("buy.html")
@app.route("/history")
@login_required
def history():
history= db.execute("SELECT symbol,share,price,timestamp,action FROM transactions where user_id = :user_id",user_id=session["user_id"] )
return render_template("history.html",data=history)
@app.route("/login", methods=["GET", "POST"])
def login():
"""Log user in"""
# Forget any user_id
session.clear()
# User reached route via POST (as by submitting a form via POST)
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
# Ensure username exists and password is correct
if len(rows) != 1 or not check_password_hash(rows[0]["hash"], request.form.get("password")):
return apology("invalid username and/or password", 403)
# Remember which user has logged in
session["user_id"] = rows[0]["id"]
# Redirect user to home page
return redirect("/")
# User reached route via GET (as by clicking a link or via redirect)
else:
return render_template("login.html")
@app.route("/logout")
def logout():
"""Log user out"""
# Forget any user_id
session.clear()
# Redirect user to login form
return redirect("/")
@app.route("/quote", methods=["GET", "POST"])
@login_required
def quote():
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("symbol"):
return apology("Must give symbol", 403)
quote=lookup(request.form.get("symbol"))
if not quote:
return apology("Invalid symbol", 403)
quote['price']=usd(quote['price'])
return render_template("quote.html",quote=quote)
return render_template("quote.html")
@app.route("/register", methods=["GET", "POST"])
def register():
if request.method == "POST":
# Ensure username was submitted
if not request.form.get("username"):
return apology("must provide username", 403)
# Ensure password was submitted
elif not request.form.get("password"):
return apology("must provide password", 403)
elif not request.form.get("password") == request.form.get("confirmation"):
return apology("passwords must match",403)
# Query database for username
rows = db.execute("SELECT * FROM users WHERE username = :username",
username=request.form.get("username"))
if(len(rows) > 0):
flash('User already exists! Try a different user name.')
return render_template("register.html")
db.execute("INSERT into users ( username,hash) values (:username,:hash)",\
username=request.form.get("username"),hash=generate_password_hash(request.form.get("password")))
return render_template("login.html")
return render_template("register.html")
@app.route("/sell", methods=["GET", "POST"])
@login_required
def sell():
user_id = session["user_id"]
select_user=db.execute('SELECT "symbol",SUM("share") as sum FROM "transactions" WHERE "user_id" = :user_id GROUP BY "symbol"', user_id=user_id)
symbols=[]
for x in select_user:
if x['sum']>0:
symbols.append(x['symbol'])
if request.method == "POST":
if not request.form.get("symbol"):
return apology("must provide symbol", 403)
elif not request.form.get("shares"):
return apology("must provide shares", 403)
num_shares=int(request.form.get("shares"))
symbol=request.form.get("symbol")
for x in select_user:
if x['symbol'] == symbol:
if x['sum'] < num_shares:
return apology("you no have much shares", 403)
quote=lookup(symbol)
price_curr=quote['price']*float(num_shares)
db.execute("INSERT into transactions (user_id,company,price,share,symbol,cost,action) values ( :user_id , :company , :price , :share,:symbol,:cost,:action)" , \
user_id=user_id,company=quote['name'],price=quote['price'],share=int(num_shares)*-1,symbol=quote["symbol"],\
cost=price_curr,action='s')
db.execute("UPDATE users SET cash = cash + :price_curr WHERE id = :id ", price_curr= price_curr ,\
id=user_id)
flash('Sold!')
return redirect("/")
return render_template("sell.html",symbols=symbols)
def errorhandler(e):
"""Handle error"""
return apology(e.name, e.code)
# listen for errors
for code in default_exceptions:
app.errorhandler(code)(errorhandler)
if __name__ == "__main__":
app.run()
| python |
import os
import sys
from collections import OrderedDict
try:
# Python 2
import ConfigParser as configparser
except:
# Python 3
import configparser
class RDMConfig(object):
def __init__(self):
""" Config container. """
self.station_code = None
self.channel = None
self.latitude = None
self.longitude = None
self.elevation = None
self.instrument_string = None
self.raw = None
self.zipped = None
self.mode = None
self.gain = None
self.upload_enabled = None
self.hostname = None
self.rsa_private_key = None
self.upload_queue_file = None
self.remote_dir = None
self.read_from_server = False
self.mains_frequency = 60
def readConfig(config_file_path):
""" Generates two plots of the nights data.
Arguments:
config_file_path: [str] The path to the directory that stores the configuration file.
E.g.: /home/pi/RadiometerData/config.txt
Return:
rdm_config: [object] The configuration object.
"""
# Create the configuration object
rdm_config = RDMConfig()
# Create a config object
config = configparser.ConfigParser()
# Read the config file into the object
config.read(config_file_path)
# Gather configuration data for the station
rdm_config.station_code = config['Station']['StationCode']
rdm_config.channel = config['Station']['Channel']
rdm_config.latitude = float(config['Station']['Latitude'])
rdm_config.longitude = float(config['Station']['Longitude'])
rdm_config.elevation = float(config['Station']['Elevation'])
rdm_config.instrument_string = config['Station']['InstrumentString']
rdm_config.raw = config['Station']['RawData']
rdm_config.zipped = config['Station']['StoredData']
rdm_config.mode = int(config['Station']['DifferentialMode'])
rdm_config.gain = int(config['Station']['Gain'])
# Gather configuration data for the upload manager
rdm_config.upload_enabled = (config['Upload']['EnableUpload'].lower().strip() == "true")
rdm_config.hostname = config['Upload']['HostName']
rdm_config.rsa_private_key = config['Upload']['RSAPrivateKey']
rdm_config.upload_queue_file = config['Upload']['QueueFilename']
rdm_config.remote_dir = config['Upload']['RemoteDirectory']
# If True, it means that this instance of the code is running on the server
rdm_config.read_from_server = (config['Server']['ReadFromServer'].lower().strip() == "true")
# Filtering parameters
rdm_config.mains_frequency = float(config['Filtering']['MainsFrequency'])
# Return the configuration object
return rdm_config
def makeConfig(config_file_path):
""" Generates two plots of the nights data.
Input Arguments:
-config_file_path (string): The path to the directory that will store the configuration file. Ex: /home/pi/RadiometerData/config.txt
Outputs:
- One config.txt file saved in config_file_path
"""
# There was no detected config file so one will be created
# An error message explaining the issue
print("No config file detected in /home/pi/RadiometerData")
print("A default config file has been created and can be changed in RadiometerData")
# Create a config object
config = configparser.ConfigParser()
# optionxform prevents it from naming all config parameters with lower case letters
config.optionxform = str
# Creates the station data inside the config file using default values
config['Station'] = OrderedDict((
('StationCode', 'AA0000'),
('Channel', 'A'),
('Latitude', '0.0'),
('Longitude', '0.0'),
('Elevation', '0.0'),
('InstrumentString', 'Your description'),
('RawData','CapturedData'),
('StoredData','ArchivedData'),
('DifferentialMode','1'),
('Gain','1')
))
# Creates the upload manager configuration section using default settings
config['Upload'] = OrderedDict((
('EnableUpload', 'True'),
('HostName', ''),
('RSAPrivateKey', '~/.ssh/id_rsa'),
('QueueFilename','FILES_TO_UPLOAD.inf'),
('RemoteDirectory','.')
))
# Creates the upload manager configuration section using default settings
config['Server'] = OrderedDict((
('ReadFromServer', 'False'),
))
# Creates the upload manager configuration section using default settings
config['Filtering'] = OrderedDict((
('MainsFrequency', '60'),
))
# Generate the file in the desired directory and close it
    with open(config_file_path, 'w') as configfile:
        config.write(configfile)
# Allow the user to configure the config file
os.chmod(config_file_path, 0o777)
# Exit allowing the user to configure their settings
sys.exit()
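# Example usage (a minimal sketch; the path is the default location mentioned above).
# Note that makeConfig() exits after writing the default file so it can be edited first.
if __name__ == '__main__':
    config_path = '/home/pi/RadiometerData/config.txt'
    if not os.path.exists(config_path):
        makeConfig(config_path)
    rdm_config = readConfig(config_path)
    print(rdm_config.station_code, rdm_config.mains_frequency)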
| python |
import jax
molecular_profiles = [
"APC inact mut KRAS G12D",
"APC mutant BRAF mutant PIK3CA mutant SMAD4 mutant TP53 mutant",
"BRAF V600E EGFR amp",
"BRAF V600E MAP2K1 L115P",
"BRAF V600E NRAS Q61K NRAS A146T MAP2K1 P387S",
"BRAF amp BRAF V600X NRAS Q61K",
"CDKN2A mut MET del exon14 PDGFRA mut SMAD4 Q249H",
"DNMT3A R882H FLT3 Y599_D600insSTDNEYFYVDFREYEY NPM1 W288fs",
"EGFR E746_A750del EGFR T790M EGFR L718Q",
"EGFR exon 19 del MET amp MET D1228V",
"ERBB2 over exp PIK3CA H1047R SRC over exp",
"ETV6 - JAK2 JAK2 G831R",
"FGFR2 E565A FGFR2 K659M FGFR2 N549H FGFR2 N549K FGFR2 V564F FGFR2-ZMYM4",
"FGFR2 N550K PIK3CA I20M PIK3CA P539R PTEN R130Q PTEN T321fs*23",
"FGFR3 wild-type FGFR3 dec exp HRAS G12V",
"FLT3 exon 14 ins FLT3 D835N",
"FLT3 exon 14 ins FLT3 F691L FLT3 D698N",
"FLT3 exon 14 ins FLT3 M837G FLT3 S838R FLT3 D839H",
"JAK2 over exp MPL over exp",
"KRAS G12D PIK3CA E545K PIK3CA H1047L TP53 wild-type",
"KRAS G12D PTEN dec exp TP53 R306*",
"KRAS G13C PIK3CA H1047Y PTEN G143fs*4 PTEN K267fs*9",
"KRAS mut + TP53 wild-type",
"MET del exon14 TP53 N30fs*14",
"NPM1-ALK ALK L1196M ALK D1203N",
]
def _parse(molecular_profile):
""" dispatch to jax harvester """
return jax._parse(molecular_profile)
def test_parse_all():
""" just loop through all test profiles, ensure no exceptions """
genes = []
for molecular_profile in molecular_profiles:
genes.append(_parse(molecular_profile)[0])
def test_parse_fusion():
""" make sure we handle fusion format """
genes, tuples = _parse("ETV6 - JAK2")
assert ['ETV6', 'JAK2'] == genes
assert tuples == [['ETV6', 'ETV6-JAK2'], ['JAK2', 'ETV6-JAK2']]
def test_parse_simple():
""" make sure we handle fusion format """
genes, tuples = _parse("BRAF V600E")
assert ["BRAF"] == genes
assert tuples == [["BRAF", "V600E"]]
def test_parse_simple_annotated():
""" make sure we 'annotations' on gene """
genes, tuples = _parse("MET del exon14")
assert ["MET"] == genes
assert tuples == [["MET", "del", "exon14"]]
def test_parse_compound_annotated():
""" make sure we 'annotations' on gene and others """
genes, tuples = _parse("MET del exon14 TP53 N30fs*14")
assert ['MET', 'TP53'] == genes
assert tuples == [["MET", "del", "exon14"], ["TP53", "N30fs*14"]]
def test_parse_mixed_annotated_compound():
""" make sure we handle fusion format """
genes, tuples = _parse("CDKN2A mut MET del exon14 PDGFRA mut SMAD4 Q249H")
assert ['CDKN2A', 'MET', 'PDGFRA', 'SMAD4'] == genes
assert tuples == [["CDKN2A", "mut"],
["MET", "del", "exon14"],
["PDGFRA", "mut"],
["SMAD4", "Q249H"]]
def test_parse_terminate_with_fusion():
""" make sure we handle fusion format in last tuple"""
genes, tuples = _parse("FGFR2 E565A FGFR2 K659M FGFR2 N549H FGFR2 N549K FGFR2 V564F FGFR2-ZMYM4") # NOQA
assert ['FGFR2', 'ZMYM4'] == genes
assert tuples == [["FGFR2", "E565A"],
["FGFR2", "K659M"],
["FGFR2", "N549H"],
["FGFR2", "N549K"],
["FGFR2", "V564F"],
['FGFR2', "FGFR2-ZMYM4"],
['ZMYM4', "FGFR2-ZMYM4"],
]
def test_plus_sign():
""" make sure we handle fusion format in last tuple"""
genes, tuples = _parse("KRAS mut + TP53 wild-type") # NOQA
assert ['KRAS', 'TP53'] == genes
assert tuples == [["KRAS", "mut"],
["TP53", "wild-type"]]
def test_odd_number():
""" make sure we handle odd number"""
genes, tuples = _parse("EML4-ALK ALK C1156Y ALK L1198F")
assert ['ALK', 'EML4'] == genes
assert tuples == [["ALK", "C1156Y"],
["ALK", "L1198F"],
["EML4", "EML4-ALK"],
["ALK", "EML4-ALK"],
]
def test_act_mut_fusion():
genes, tuples = _parse("EML4 - ALK SRC act mut")
assert ['ALK', 'EML4', 'SRC'] == genes
assert tuples == [["SRC", "act", "mut"],
["EML4", "EML4-ALK"],
["ALK", "EML4-ALK"],
]
def test_act_amp_fusion():
genes, tuples = _parse("NPM1-ALK amp")
    print(genes, tuples)
assert ['ALK', 'NPM1'] == genes
assert tuples == [["NPM1", "NPM1-ALK amp"],
["ALK", "NPM1-ALK amp"],
]
| python |
# pylint: disable=attribute-defined-outside-init
# Copyright 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""ChromeOS Firmware Utilities
This modules provides easy access to ChromeOS firmware.
To access the contents of a firmware image, use FirmwareImage().
To access the flash chipset containing firmware, use Flashrom().
To get the content of (cacheable) firmware, use LoadMainFirmware() or
LoadEcFirmware().
"""
import collections
import logging
import os
import re
import tempfile
from cros.factory.gooftool import common
from cros.factory.utils import fmap
# Names to select target bus.
TARGET_MAIN = 'main'
TARGET_EC = 'ec'
TARGET_PD = 'pd'
CROS_PD_PATH = '/dev/cros_pd'
# Types of named tuples
WpStatus = collections.namedtuple('WpStatus', 'enabled offset size')
# All Chrome OS images are FMAP based.
FirmwareImage = fmap.FirmwareImage
class Flashrom:
"""Wrapper for calling system command flashrom(8)."""
# flashrom(8) command line parameters
_VALID_TARGETS = (TARGET_MAIN, TARGET_EC, TARGET_PD)
_TARGET_MAP = {
TARGET_MAIN: '-p host',
TARGET_EC: '-p ec',
TARGET_PD: '-p ec:type=pd',
}
_WRITE_FLAGS = '--noverify-all'
_READ_FLAGS = ''
def __init__(self, target=None):
self._target = target or TARGET_MAIN
def _InvokeCommand(self, param, ignore_status=False):
command = ' '.join(['flashrom', self._TARGET_MAP[self._target], param])
if self._target == TARGET_PD and not os.path.exists(CROS_PD_PATH):
# crbug.com/p/691901: 'flashrom' does not return PD information reliably
# using programmer "-p ec:type=pd". As a result, we want to only read PD
# information if /dev/cros_pd exists.
logging.debug('%s._InvokeCommand: Ignore command because %s does not '
'exist: [%s]', self.__class__, CROS_PD_PATH, command)
command = 'false'
else:
logging.debug('%s._InvokeCommand: %s', self.__class__, command)
result = common.Shell(command)
if not (ignore_status or result.success):
raise IOError('Failed in command: %s\n%s' % (command, result.stderr))
return result
def GetTarget(self):
"""Gets current target (bus) to access."""
return self._target
def SetTarget(self, target):
"""Sets current target (bus) to access."""
assert target in self._VALID_TARGETS, 'Unknown target: %s' % target
self._target = target
def GetSize(self):
return int(self._InvokeCommand('--flash-size').stdout.splitlines()[-1], 0)
def GetName(self):
"""Returns a key-value dict for chipset info, or None for any failure."""
results = self._InvokeCommand('--flash-name', ignore_status=True).stdout
match_list = re.findall(r'\b(\w+)="([^"]*)"', results)
return dict(match_list) if match_list else None
def Read(self, filename=None, sections=None):
"""Reads whole image from selected flash chipset.
Args:
filename: File name to receive image. None to use temporary file.
sections: List of sections to read. None to read whole image.
Returns:
Image data read from flash chipset.
"""
if filename is None:
with tempfile.NamedTemporaryFile(prefix='fw_%s_' % self._target) as f:
return self.Read(f.name)
sections_param = ['-i %s' % name for name in sections or []]
self._InvokeCommand("-r '%s' %s %s" % (filename, ' '.join(sections_param),
self._READ_FLAGS))
with open(filename, 'rb') as file_handle:
return file_handle.read()
def Write(self, data=None, filename=None, sections=None):
"""Writes image into selected flash chipset.
Args:
data: Image data to write. None to write given file.
filename: File name of image to write if data is None.
sections: List of sections to write. None to write whole image.
"""
assert ((data is None) ^ (filename is None)), (
'Either data or filename should be None.')
if data is not None:
with tempfile.NamedTemporaryFile(prefix='fw_%s_' % self._target) as f:
f.write(data)
f.flush()
self.Write(None, f.name)
return
sections_param = [('-i %s' % name) for name in sections or []]
self._InvokeCommand("-w '%s' %s %s" % (filename, ' '.join(sections_param),
self._WRITE_FLAGS))
def GetWriteProtectionStatus(self):
"""Gets write protection status from selected flash chipset.
Returns: A named tuple with (enabled, offset, size).
"""
# flashrom(8) output: WP: status: 0x80
# WP: status.srp0: 1
# WP: write protect is %s. (disabled/enabled)
# WP: write protect range: start=0x%8x, len=0x%08x
results = self._InvokeCommand('--wp-status').stdout
status = re.findall(r'WP: write protect is (\w+)\.', results)
if len(status) != 1:
raise IOError('Failed getting write protection status')
status = status[0]
if status not in ('enabled', 'disabled'):
raise ValueError('Unknown write protection status: %s' % status)
wp_range = re.findall(r'WP: write protect range: start=(\w+), len=(\w+)',
results)
if len(wp_range) != 1:
raise IOError('Failed getting write protection range')
wp_range = wp_range[0]
return WpStatus(status == 'enabled',
int(wp_range[0], 0),
int(wp_range[1], 0))
def EnableWriteProtection(self, offset, size):
"""Enables write protection by specified range."""
self._InvokeCommand('--wp-range 0x%06X,0x%06X --wp-enable' % (offset, size))
result = self.GetWriteProtectionStatus()
if ((not result.enabled) or (result.offset != offset) or
(result.size != size)):
raise IOError('Failed to enabled write protection.')
# Try to verify write protection by attempting to disable it.
self._InvokeCommand('--wp-disable --wp-range 0,0', ignore_status=True)
# Verify the results
result = self.GetWriteProtectionStatus()
if ((not result.enabled) or (result.offset != offset) or
(result.size != size)):
raise IOError('Software write protection can be disabled. Please make '
'sure hardware write protection is enabled.')
def DisableWriteProtection(self):
"""Tries to Disable whole write protection range and status."""
self._InvokeCommand('--wp-disable --wp-range 0,0')
result = self.GetWriteProtectionStatus()
if result.enabled or (result.offset != 0) or (result.size != 0):
raise IOError('Failed to disable write protection.')
class FirmwareContent:
"""Wrapper around flashrom for a specific firmware target.
This class keeps track of all the instances of itself that exist.
The goal being that only one instance ever gets created for each
target. This mapping of targets to instances is tracked by the
_target_cache class data member.
"""
# Cache of target:instance pairs.
_target_cache = {}
@classmethod
def Load(cls, target):
"""Create class instance for target, using cached copy if available."""
if target in cls._target_cache:
return cls._target_cache[target]
obj = cls()
obj.target = target
obj.flashrom = Flashrom(target)
obj.cached_files = []
cls._target_cache[target] = obj
return obj
def GetChipId(self):
"""Caching get of flashrom chip identifier. None if no chip is present."""
if not hasattr(self, 'chip_id'):
info = self.flashrom.GetName()
self.chip_id = ' '.join([info['vendor'], info['name']]) if info else None
return self.chip_id
def GetFileName(self, sections=None):
"""Filename containing firmware data. None if no chip is present.
Args:
sections: Restrict the sections of firmware data to be stored in the file.
Returns:
Name of the file which contains the firmware data.
"""
if self.GetChipId() is None:
return None
sections = set(sections) if sections else None
for (fileref, sections_in_file) in self.cached_files:
if sections_in_file is None or (
sections is not None and sections.issubset(sections_in_file)):
return fileref.name
fileref = tempfile.NamedTemporaryFile(prefix='fw_%s_' % self.target)
self.flashrom.Read(filename=fileref.name, sections=sections)
self.cached_files.append((fileref, sections))
return fileref.name
def Write(self, filename):
"""Call flashrom write for specific sections."""
for (fileref, sections_in_file) in self.cached_files:
if fileref.name == filename:
self.flashrom.Write(filename=filename, sections=sections_in_file)
return
raise ValueError('%r is not found in the cached files' % (filename,))
def GetFirmwareImage(self, sections=None):
"""Returns a fmap.FirmwareImage instance.
Args:
sections: Restrict the sections of firmware data to be stored in the file.
Returns:
      An instance of FirmwareImage.
"""
with open(self.GetFileName(sections=sections), 'rb') as image:
return fmap.FirmwareImage(image.read())
def LoadEcFirmware():
"""Returns flashrom data from Embedded Controller chipset."""
return FirmwareContent.Load(TARGET_EC)
def LoadPDFirmware():
"""Returns flashrom data from Power Delivery chipset."""
return FirmwareContent.Load(TARGET_PD)
def LoadMainFirmware():
"""Returns flashrom data from main firmware (also known as BIOS)."""
return FirmwareContent.Load(TARGET_MAIN)
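# Example usage (a minimal sketch; requires the flashrom(8) tool and sufficient privileges,
# so it is left as commented-out code rather than executable module-level statements):
#   main_fw = LoadMainFirmware()
#   print(main_fw.GetChipId())
#   image = main_fw.GetFirmwareImage()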
| python |
#
# This file is part of GreatFET
#
from __future__ import absolute_import
import usb
import time
import codecs
from .base import GlitchKitModule
from ..protocol import vendor_requests
# Quirk constant that helps us identify libusb's pipe errors, which bubble
# up as generic USBErrors with errno 60 on affected platforms.
LIBUSB_TIMEOUT = 60
LIBUSB_IO_ERROR = 5
class GlitchKitUSB(GlitchKitModule):
"""
"""
SHORT_NAME = 'usb'
HOST_TRANSFER_QUEUED = 0x002
HOST_SETUP_TRANSFER_QUEUED = 0x004
HOST_IN_TRANSFER_QUEUED = 0x008
HOST_OUT_TRANSFER_QUEUED = 0x010
HOST_TRANSFER_COMPLETE = 0x020
HOST_SETUP_TRANSFER_COMPLETE = 0x040
HOST_IN_TRANSFER_COMPLETE = 0x100
HOST_OUT_TRANSFER_COMPLETE = 0x080
DEVICE_TRANSFER_COMPLETE = 0x200
VBUS_ENABLED = 0x400
READ_INCOMPLETE = 0xFFFFFFFF
PRE_RESPONSE_DELAY = 0.01
# TODO: Figure out what should be in here vs in FaceDancer.
GET_DESCRIPTOR = 0x6
GET_DEVICE_DESCRIPTOR = 1 << 8
def __init__(self, board):
"""
Create a new GlitchKit module allowing inducing or waiting for USB
events, and then glitching.
Args:
board -- A representation of the GreatFET that will perform the actual
triggering.
"""
# Store a reference to the parent board.
self.board = board
self.api = board.apis.glitchkit_usb
@staticmethod
def supports_board(board):
""" Determines if this GreatFET supports GlitchKit via USB. """
return board.supports_api("glitchkit_usb")
def configure_future_requests(self, continue_despite_errors, disable_vbus_afterwards):
""" Configure future requests made by this GlitchKit module.
Arguments:
continue_despite_errors -- True iff stimuli should continue even
if errors occur.
disable_vbus_afterwards -- If set, VBUS will be disconnected after
a given USB request.
"""
self.api.configure_requests(continue_despite_errors, disable_vbus_afterwards)
@staticmethod
def _split(value):
# TODO: get rid of this
value_high = value >> 8
value_low = value & 0xFF
return [value_low, value_high]
@staticmethod
def build_request_type(is_in, type, recipient):
# TODO: FIXME: clean up consts
request_type = 0
if is_in:
request_type |= (1 << 7)
request_type |= (type << 5)
request_type |= (recipient)
return request_type
def build_setup_request(self, is_in=True, request_type=0, recipient=0, request=0, value=0, index=0, length=0):
# uint8_t request_type;
# uint8_t request;
# uint16_t value;
# uint16_t index;
# uint16_t length;
# TODO: replace me with a call to struct.pack?
setup_request = [self.build_request_type(is_in, request_type, recipient), request]
setup_request.extend(self._split(value))
setup_request.extend(self._split(index))
setup_request.extend(self._split(length))
return setup_request
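    # Note: the manual byte list above could likely be replaced with struct.pack, e.g.
    # (a sketch; '<' gives the little-endian, low-byte-first layout that _split produces,
    # and `rt` stands for the byte returned by build_request_type):
    #   import struct
    #   list(struct.pack('<BBHHH', rt, request, value, index, length))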
def capture_control_in(self, request_type=0, recipient=0, request=0, value=0, index=0, length=0, timeout=30, ui_event_call=False):
# Build a setup packet...
setup_packet = bytes(self.build_setup_request(True, request_type, recipient, request, value, index, length))
# ... and issue the request.
return self.api.control_in(setup_packet, timeout=timeout * 1024)
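# Example (a minimal sketch; assumes `board` is a connected GreatFET exposing the
# glitchkit_usb API; this requests a standard 18-byte USB device descriptor):
#   usb_mod = GlitchKitUSB(board)
#   descriptor = usb_mod.capture_control_in(
#       request=GlitchKitUSB.GET_DESCRIPTOR, value=GlitchKitUSB.GET_DEVICE_DESCRIPTOR, length=18)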
| python |
"""配置文件"""
# 使用线程数量
THREAD_NUM = 4
# MySQL configuration
MYSQL_CONFIG = {
'host': 'localhost',
'port': 3306,
'user': 'root',
'pwd': 'password',
'db': 'glasses',
}
# Table in which the data is saved
MYSQL_TABLE_SAVE_EBD = 'ebd'
| python |
# The MIT License (MIT)
# Copyright (c) 2021 Tom J. Sun
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from urtypes import RegistryItem
from urtypes.cbor import DataItem
from .hd_key import HDKey, CRYPTO_HDKEY
from .ec_key import ECKey, CRYPTO_ECKEY
class MultiKey(RegistryItem):
def __init__(self, threshold, ec_keys, hd_keys):
super().__init__()
self.threshold = threshold
self.ec_keys = ec_keys
self.hd_keys = hd_keys
def __eq__(self, o):
return self.threshold == o.threshold and self.ec_keys == o.ec_keys and self.hd_keys == o.hd_keys
@classmethod
def registry_type(cls):
return None
def to_data_item(self):
map = {}
map[1] = self.threshold
combined_keys = self.ec_keys[:] + self.hd_keys[:]
keys = []
for key in combined_keys:
keys.append(DataItem(key.registry_type().tag, key.to_data_item()))
map[2] = keys
return map
@classmethod
def from_data_item(cls, item):
map = item.map
threshold = map[1]
keys = map[2]
ec_keys = []
hd_keys = []
for key in keys:
if key.tag == CRYPTO_HDKEY.tag:
hd_keys.append(HDKey.from_data_item(key))
elif key.tag == CRYPTO_ECKEY.tag:
ec_keys.append(ECKey.from_data_item(key))
return cls(threshold, ec_keys, hd_keys) | python |
import irc3, json
from alveos.asgi import channel_layer
from django.contrib.sessions.models import Session
from channels import Channel
@irc3.plugin
class Plugin(object):
"""Alveos IRC3 class"""
requires = [
'irc3.plugins.core',
'irc3.plugins.userlist',
'irc3.plugins.command',
'irc3.plugins.human',
]
def __init__(self, bot):
self.bot = bot
self.log = self.bot.log
def server_ready(self, **kwargs):
"""triggered after the server sent the MOTD (require core plugin)"""
print("inside server_ready")
print(kwargs)
self.bot.sysmsg_to_browser('Done connecting to IRC server!')
self.bot.loop.call_later(1, self.bot.get_messages)
def connection_lost(self, **kwargs):
"""triggered when connection is lost"""
print("inside connection_lost")
print(kwargs)
self.bot.sysmsg_to_browser('Lost connection to IRC server!')
def connection_made(self, **kwargs):
"""triggered when connection is up"""
print("inside connection_made")
print(kwargs)
self.bot.sysmsg_to_browser('Connection to IRC server established...')
@irc3.event(irc3.rfc.JOIN_PART_QUIT)
def on_join_part_quit(self, **kwargs):
print("inside on_join_part_quit()")
print(kwargs)
self.bot.ircmsg_to_browser(kwargs)
@irc3.event(irc3.rfc.PRIVMSG)
def on_privmsg(self, **kwargs):
print("inside on_privmsg")
print(kwargs)
self.bot.ircmsg_to_browser(kwargs)
@irc3.extend
def get_messages(self):
channel, message = channel_layer.receive_many(['to-ircbot-%s' % self.bot.config.django_session_key])
if message and channel:
print("got message from channel: %s" % message['text'])
if message['text']['type'] == 'irc-message':
self.bot.privmsg(message['text']['target'], message['text']['message'])
elif message['text']['type'] == 'command':
if message['text']['command'] == 'die':
self.bot.quit(reason=message['text']['reason'])
else:
print("unsupported command received: %s" % message['text']['command'])
else:
print("message with unsupported type '%s' received, not processing" % message['text']['type'])
# call this function again in 1 second
self.bot.loop.call_later(1, self.bot.get_messages)
@irc3.extend
def sysmsg_to_browser(self, message):
self.bot.send_to_browser({"alveos_version": "alveos-v1", "type": 'system_message', 'payload': {'message': message}})
@irc3.extend
def ircmsg_to_browser(self, message):
self.bot.send_to_browser({"alveos_version": "alveos-v1", 'type': 'irc_message', 'payload': message})
@irc3.extend
def send_to_browser(self, payload):
print("send to channel %s: %s" % (self.bot.config.reply_channel, payload))
Channel(self.bot.config.reply_channel).send({'text': json.dumps(payload)})
@irc3.extend
def get_django_session(self):
# get django session using session_key from commandline
try:
return Session.objects.get(session_key=self.bot.config.django_session_key)
except Session.DoesNotExist:
print("Session with session_key %s not found" % self.bot.config.django_session_key)
return False
| python |
#Create a program that reads two of a student's grades and calculates the average, showing at the end,
#according to the average achieved:
#- Average below 5.0: FAILED
#- Average between 5.0 and 6.9: MAKE-UP EXAM (recuperação)
#- Average 7.0 or higher: APPROVED
n1 = float(input('Digite sua primeira nota!'))
n2 = float(input('Digite sua segunda nota!'))
media = (n1+n2)/2
if media <= 5:
print('\033[31mVocê está REPROVADO com média {}!!!\033[31m'.format(media))
elif 7 > media >= 5:
print('\033[33mVocê está em recuperação gafanhoto com média {:.1f}!!!\033[33m'.format(media))
elif media >= 7:
print('\033[34mParabéns gafanhoto você esta aprovado com média {}!!!\033[34m'.format(media))
| python |
#!C:\Users\willi\AppData\Local\Programs\Python\Python38-32\python.exe
#!/usr/bin/python
import numpy as np # Unused
import pandas as pd # For dataframe
import matplotlib.pyplot as plt # For ploting graph (unused)
import psycopg2 # For database control
import time # For time record
from statsmodels.tsa.statespace.sarimax import SARIMAX # SARIMAX
from statsmodels.tsa.holtwinters import ExponentialSmoothing as HWES # HWES
import psutil # To monitor CPU usage
import threading # To monitor CPU usage
import warnings
warnings.filterwarnings("ignore")
initialRam = float(psutil.virtual_memory().percent)
initialCpu = float(psutil.Process().cpu_percent(interval=1))
SarimaxCpuUsage = []
SarimaxRamUsage = []
HwesCpuUsage = []
HwesRamUsage = []
#=============================================#
# Function Def #
#=============================================#
def display_cpu(lst,lst2):
global running
running = True
currentProcess = psutil.Process()
# start loop
while running:
lst.append(float(currentProcess.cpu_percent(interval=1)))
lst2.append(float(psutil.virtual_memory().percent))
def start(lst,lst2):
global t
# create thread and start it
t = threading.Thread(target=display_cpu, args=(lst,lst2,))
t.start()
def stop():
global running
global t
# use `running` to stop loop in thread so thread will end
running = False
# wait for thread's end
t.join()
def Average(lst):
if len(lst) == 0:
return 0
else:
return round(sum(lst) / len(lst),2)
#=============================================#
# Database Conn #
#=============================================#
conn = psycopg2.connect(database = "machine_learning", user = "postgres", password = "postgres", host = "localhost", port = "5432")
cur = conn.cursor()
cur.execute("delete from arima")
cur.execute("delete from hwes")
cur.execute("delete from analysis where analysis = 'time';");
cur.execute("delete from analysis where analysis = 'cpuUsage';");
cur.execute("delete from analysis where analysis = 'cpuMax';");
cur.execute("delete from analysis where analysis = 'ram';");
cur.execute("delete from analysis where analysis = 'error';");
cur.execute("SELECT * from dummy")
rows = cur.fetchall()
dataToPredict = pd.DataFrame(rows,columns = ['Month','Passengers'])
dataToPredict.set_index('Month',inplace = True)
dataToPredict.index = pd.to_datetime(dataToPredict.index)
cur.execute("SELECT * from datarange")
dataRange = int(cur.fetchall()[0][0])
years = 2 #in years
period = years * 12
#cur.execute("update flag set progress = 'Arima Done', id = 2 where id = 1;")
#conn.commit()
#=============================================#
# Arima Algorithm #
#=============================================#
# Split data into train sets
if (dataRange == 100):
train = dataToPredict.iloc[:len(dataToPredict)]
else:
print(len(dataToPredict))
trainLength = int(len(dataToPredict)*dataRange/100)
print(trainLength)
train = dataToPredict.iloc[:trainLength]
period = int(len(dataToPredict)*(100-dataRange)/100)
print(period)
#=======#
# Arima #
#=======#
start(SarimaxCpuUsage,SarimaxRamUsage)
startTime = time.time()
modelSarimax = SARIMAX(train['Passengers'],
order = (0, 1, 1),
seasonal_order =(2, 1, 1, 12))
resultSarimax = modelSarimax.fit()
forecastSarimax = resultSarimax.predict(start = len(train),
end = (len(train)-1) + period + 2,
typ = 'levels').rename('Forecast')
endTime = time.time()
arimaTime = endTime - startTime
stop()
#=======#
# HWES #
#=======#
start(HwesCpuUsage,HwesRamUsage)
startTime = time.time()
modelHwes = HWES(train, seasonal_periods=(period + 2), trend='add', seasonal='mul')
fittedHwes = modelHwes.fit(optimized=True, use_brute=True)
forecastHwes = fittedHwes.forecast(period + 2)
endTime = time.time()
hwesTime = endTime - startTime
stop()
#=============================================#
# Data Pushing #
#=============================================#
ArimaDate = []
ArimaValue = []
for i in forecastSarimax.values:
ArimaValue.append(i)
for i in forecastSarimax.index:
ArimaDate.append(str(i)[:10])
for i in range(0,len(ArimaDate)-1):
cur.execute("insert into arima (month,value) values (\'"+str(ArimaDate[i])+"\',"+str(round(ArimaValue[i]))+");");
HwesDate = []
HwesValue = []
for i in forecastHwes.values:
HwesValue.append(i)
for i in forecastHwes.index:
HwesDate.append(str(i)[:10])
for i in range(0,len(HwesDate)-1):
cur.execute("insert into hwes (month,value) values (\'"+str(HwesDate[i])+"\',"+str(round(HwesValue[i]))+");");
# If part of the data was held out for validation (dataRange < 100), measure the accuracy
# of both forecasts against the held-out real values.
if (dataRange != 100):
cur.execute("delete from accuracy;");
dataReal = dataToPredict.iloc[trainLength:]
dataReal_date = []
dataReal_value = []
for i in dataReal.values:
dataReal_value.append(int(i))
for i in dataReal.index:
dataReal_date.append(str(i)[:10])
arimaErrors = [abs(dataReal_value[i]-ArimaValue[i])/dataReal_value[i] for i in range(len(dataReal_value))]
arimaErrorsBias = sum(arimaErrors) * 1.0/len(dataReal_value) * 100
cur.execute("insert into analysis (algo,analysis,value) values (\'SARIMAX\', \'error\',"+str(arimaErrorsBias)+");");
hwesErrors = [abs(dataReal_value[i]-HwesValue[i])/dataReal_value[i] for i in range(len(dataReal_value))]
hwesErrorsBias = sum(hwesErrors) * 1.0/len(dataReal_value) * 100
cur.execute("insert into analysis (algo,analysis,value) values (\'HWES\', \'error\',"+str(hwesErrorsBias)+");");
for i in range(0,len(dataReal_date)-1):
accuracySarimax = (dataReal_value[i]-abs(ArimaValue[i]-dataReal_value[i]))/dataReal_value[i]*100
accuracyHwes = (dataReal_value[i]-abs(HwesValue[i]-dataReal_value[i]))/dataReal_value[i]*100
cur.execute("insert into accuracy (month,value,algo) values (\'"+str(dataReal_date[i])+"\',"+str(round(accuracySarimax,2))+","+"\'Sarimax\'"+");");
cur.execute("insert into accuracy (month,value,algo) values (\'"+str(dataReal_date[i])+"\',"+str(round(accuracyHwes,2))+","+"\'Hwes\'"+");");
cur.execute("insert into analysis (algo,analysis,value) values (\'SARIMAX\', \'time\',"+str(arimaTime)+");");
cur.execute("insert into analysis (algo,analysis,value) values (\'HWES\', \'time\',"+str(hwesTime)+");");
cur.execute("insert into analysis (algo,analysis,value) values (\'SARIMAX\', \'cpuUsage\',"+str(Average(SarimaxCpuUsage))+");");
cur.execute("insert into analysis (algo,analysis,value) values (\'HWES\', \'cpuUsage\',"+str(Average(HwesCpuUsage))+");");
cur.execute("insert into analysis (algo,analysis,value) values (\'SARIMAX\', \'cpuMax\',"+ str(max(SarimaxCpuUsage))+");");
cur.execute("insert into analysis (algo,analysis,value) values (\'HWES\', \'cpuMax\',"+ str(max(HwesCpuUsage))+");");
cur.execute("insert into analysis (algo,analysis,value) values (\'SARIMAX\', \'ram\',"+str(Average(SarimaxRamUsage))+");");
cur.execute("insert into analysis (algo,analysis,value) values (\'HWES\', \'ram\',"+str(Average(HwesRamUsage))+");");
conn.commit()
| python |
from util.lambda_constants import MIN_COST, MIN_MEMORY_SIZE, STATIC_INVOCATION_COST
def compute_cost(memory_size, billed_duration):
return MIN_COST * (memory_size / MIN_MEMORY_SIZE) * billed_duration + STATIC_INVOCATION_COST
class ExecutionLog:
"""
Class representing the execution log of a AWS Lambda function
"""
def __init__(self, duration, billed_duration, memory_size, init_duration=0):
self.duration = duration
self.billed_duration = billed_duration
self.memory_size = memory_size
self.init_duration = init_duration
self.cost = compute_cost(memory_size, billed_duration)
def to_string(self):
return f"MemorySize: {self.memory_size} MB, Duration: {self.duration}, Billed Duration: {self.billed_duration}, Init Duration: {self.init_duration}, Cost: {'{0:.12f}'.format(self.cost)}"
| python |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import event_store_pb2 as event__store__pb2
class EventStoreStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.publish = channel.unary_unary(
'/eventstore.EventStore/publish',
request_serializer=event__store__pb2.PublishRequest.SerializeToString,
response_deserializer=event__store__pb2.PublishResponse.FromString,
)
self.subscribe = channel.unary_stream(
'/eventstore.EventStore/subscribe',
request_serializer=event__store__pb2.SubscribeRequest.SerializeToString,
response_deserializer=event__store__pb2.Notification.FromString,
)
self.unsubscribe = channel.unary_unary(
'/eventstore.EventStore/unsubscribe',
request_serializer=event__store__pb2.UnsubscribeRequest.SerializeToString,
response_deserializer=event__store__pb2.UnsubscribeResponse.FromString,
)
self.get = channel.unary_unary(
'/eventstore.EventStore/get',
request_serializer=event__store__pb2.GetRequest.SerializeToString,
response_deserializer=event__store__pb2.GetResponse.FromString,
)
class EventStoreServicer(object):
# missing associated documentation comment in .proto file
pass
def publish(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def subscribe(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def unsubscribe(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def get(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_EventStoreServicer_to_server(servicer, server):
rpc_method_handlers = {
'publish': grpc.unary_unary_rpc_method_handler(
servicer.publish,
request_deserializer=event__store__pb2.PublishRequest.FromString,
response_serializer=event__store__pb2.PublishResponse.SerializeToString,
),
'subscribe': grpc.unary_stream_rpc_method_handler(
servicer.subscribe,
request_deserializer=event__store__pb2.SubscribeRequest.FromString,
response_serializer=event__store__pb2.Notification.SerializeToString,
),
'unsubscribe': grpc.unary_unary_rpc_method_handler(
servicer.unsubscribe,
request_deserializer=event__store__pb2.UnsubscribeRequest.FromString,
response_serializer=event__store__pb2.UnsubscribeResponse.SerializeToString,
),
'get': grpc.unary_unary_rpc_method_handler(
servicer.get,
request_deserializer=event__store__pb2.GetRequest.FromString,
response_serializer=event__store__pb2.GetResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'eventstore.EventStore', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
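# Example server wiring (a minimal sketch; the executor size and port are illustrative
# assumptions, and the process must be kept alive after start()):
#   from concurrent import futures
#   server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
#   add_EventStoreServicer_to_server(EventStoreServicer(), server)
#   server.add_insecure_port('[::]:50051')
#   server.start()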
| python |
from django import forms
from service_catalog.models import SupportMessage
from Squest.utils.squest_model_form import SquestModelForm
class SupportMessageForm(SquestModelForm):
class Meta:
model = SupportMessage
fields = ["content"]
content = forms.CharField(label="Add a comment",
required=False,
help_text="Markdown supported",
widget=forms.Textarea())
def __init__(self, *args, **kwargs):
self.sender = kwargs.pop('sender')
self.support = kwargs.pop('support')
super(SupportMessageForm, self).__init__(*args, **kwargs)
    def save(self, commit=True):
        message = super(SupportMessageForm, self).save(commit=False)
        message.support = self.support
        message.sender = self.sender
        if commit:
            message.save()
        # Return the instance (not the result of save()), per the usual ModelForm contract.
        return message
| python |
import os
import atexit
from typing import Union
from tempfile import TemporaryDirectory
from httpwatcher import HttpWatcherServer
from tornado.ioloop import IOLoop
from pavo.cli import handle_message
from ._build import Builder
def main() -> None:
"""Starts a local server that shows you your website in development.
"""
with TemporaryDirectory() as tmp_dir:
server = DevelopmentServer(tmp_dir)
handle_message('info', 'Starting local development server. Awaiting build.', header=True)
server.run()
class DevelopmentServer:
"""Containing class for the development server used in Pavo projects.
Args:
build_directory (str): The directory to temporarily keep the build in.
Attributes:
builder (Builder): The builder that is used to build the website that will be served to the user.
project_directory (str): The project directory to monitor for changes.
directory (str): The location of the temporary directory of the builder, used to serve files from.
paths_to_watch (list): The paths to watch for any changes in files.
server_settings (dict): Configuration settings that run the httpwatcher server.
server (HttpWatcherServer): The actual server that does the heavy work, serving content to the user.
"""
def __init__(self, build_directory: str) -> None:
self.builder: Builder = Builder(build_directory)
self.project_directory: str = os.getcwd()
self.directory: str = self.builder.tmp_dir
self.paths_to_watch: list[str] = [
f'{self.project_directory}/_data/',
f'{self.project_directory}/_pages/',
f'{self.project_directory}/_posts/',
f'{self.project_directory}/_static/templates',
f'{self.project_directory}/_static/styles'
]
self.server_settings: dict[str, Union[str, int]] = {
'ip': '127.0.0.1',
'port': 5556
}
atexit.register(handle_message, 'success', 'Shut down development server.')
self.server: HttpWatcherServer = HttpWatcherServer(
self.directory,
watch_paths=self.paths_to_watch,
on_reload=self._build_temporary_directory,
host=self.server_settings['ip'],
port=self.server_settings['port'],
watcher_interval=1.0,
recursive=True,
open_browser=True
)
def run(self) -> None:
"""Starts a development server and initiates the first build."""
self.builder.build(False)
self.server.listen()
handle_message('success',
f'Local development server opened in browser on {self.server.host}:{self.server.port}.')
try:
IOLoop.current().start()
except KeyboardInterrupt:
handle_message('debug', '', disable_logging=True)
handle_message('warn', 'Detected request to stop server. Please wait.')
self.server.shutdown()
def _build_temporary_directory(self) -> None:
"""Triggers a build to the temporary directory on detection of changes to the project."""
handle_message('info', 'Detected changes, rebuilding project.', header=True)
self.builder.build(False)
| python |
#!/usr/bin/env python3
import matplotlib
matplotlib.use('agg')
import os
import sys
import yt
import matplotlib.pyplot as plt
import numpy as np
from functools import reduce
from mpl_toolkits.axes_grid1 import ImageGrid
# assume that our data is in CGS
from yt.units import cm, amu
from yt.frontends.boxlib.api import CastroDataset
def make_plot(plotfile, fields, prefix="plot"):
ds = CastroDataset(plotfile)
xmin = ds.domain_left_edge[0]
xmax = ds.domain_right_edge[0]
ymin = ds.domain_left_edge[1]
ymax = ds.domain_right_edge[1]
xctr = 0.0 * xmin
L_x = xmax - xmin
yctr = 0.5 * (ymin + ymax)
L_y = ymax - ymin
fig = plt.figure()
fig.set_size_inches(12.0, 9.0)
width_frac = 0.1
grid = ImageGrid(fig, 111, nrows_ncols=(1, len(fields)),
axes_pad=1.0, label_mode="L", cbar_mode="each", cbar_pad=0)
for i, f in enumerate(fields):
sp = yt.SlicePlot(ds, "theta", f,
center=[xmin + 0.5*width_frac*L_x, yctr, 0.0*cm],
width=[width_frac*L_x, width_frac*L_y, 0.0*cm], fontsize="12")
sp.set_buff_size((2400,2400))
if f == "Ye":
sp.set_zlim(f, 0.46, 0.5)
sp.set_log(f, False)
sp.set_cmap(f, "magma_r")
elif f == "abar":
sp.set_log(f, False)
sp.set_cmap(f, "viridis")
elif f == "enuc":
sp.set_log(f, True, linthresh=1.e12)
sp.set_zlim(f, -1.e20, 1.e20)
sp.set_cmap(f, "bwr")
elif f == "MachNumber":
sp.set_zlim(f, 1.e-4, 0.3)
sp.set_cmap(f, "plasma")
elif f == "magvel":
sp.set_zlim(f, 100.0, 2.e7)
sp.set_cmap(f, "viridis")
elif f == "magvort":
sp.set_cmap(f, "magma")
sp.set_zlim(f, 1.e-2, 5)
if f == "enuc":
# now do a contour of density
sp.annotate_contour("in_nse", ncont=1, clim=(0.5, 0.5), take_log=False,
plot_args={"colors": "k", "linewidths": 2})
sp.set_axes_unit("cm")
plot = sp.plots[f]
plot.figure = fig
plot.axes = grid[i].axes
plot.cax = grid.cbar_axes[i]
sp._setup_plots()
sp.plots[f].axes.xaxis.set_major_locator(plt.MaxNLocator(4))
sp.plots[f].axes.ticklabel_format(axis="both", style="scientific", scilimits=(0,0))
fig.text(0.02, 0.02, "time = {:8.5f} s".format(float(ds.current_time)), transform=fig.transFigure)
fig.set_size_inches(19.2, 10.8)
fig.tight_layout()
fig.savefig(f"{prefix}_{os.path.basename(plotfile)}_slice.png")
if __name__ == "__main__":
plotfile = sys.argv[1]
fields = ["Ye", "abar", "enuc"]
make_plot(plotfile, fields, prefix="comp")
fields = ["MachNumber", "magvel", "magvort"]
make_plot(plotfile, fields, prefix="vel")
| python |
#!/usr/bin/python3
# This is not really an example but rather some code to test
# the behaviour of the pruss interconnect with regard to
# concurrent requests to the same local memory.
import sys
sys.path.insert( 0, '../src' )
from ti.icss import Icss
import ctypes
pruss = Icss( "/dev/uio/pruss/module" )
pruss.initialize()
( core0, core1 ) = pruss.cores
# setup trigger to start cores simultaneously
EVENT = 16
IRQ = 0
intc = pruss.intc
intc.ev_ch[ EVENT ] = IRQ
intc.ev_clear_one( EVENT )
intc.ev_enable_one( EVENT )
intc.out_enable_one( IRQ )
def start():
intc.ev_set_one( EVENT )
for core in pruss.cores:
core.load( 'fw/memspam.bin' )
core.wake_en = 1 << ( 30 + IRQ )
del core
iterations = 1000
def prepare( core, pc, addr, length ):
assert addr in range( 2**32 )
assert length in range( 1, 117 )
core.halt()
core.r0 = length | iterations << 16
core.r1 = addr
core.run( pc=pc, profiling=True )
prepare( core0, 1, 0x00000, 2 * 4 )
prepare( core1, 1, 0x02000, 2 * 4 )
m = core0.dram.map( ctypes.c_uint32 )
import time
def latency():
t0 = time.perf_counter()
m.value
t1 = time.perf_counter()
return t1 - t0
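# Measure read latency three times: the first call warms up the mapping, the second gives
# the idle baseline, and the third is taken right after both cores start hammering their local RAM.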
t0 = latency()
t0 = latency()
start()
t1 = latency()
print( "latency while idle: %.1f us" % ( t0 * 1e6 ) )
print( "latency while ram kept busy: %.1f us" % ( t1 * 1e6 ) )
print( "latency increase: %d pru cycles" % round( ( t1 - t0 ) * 200e6 ) )
while not ( core0.halted and core1.halted ):
pass
for core in pruss.cores:
( cycles, instrs ) = core.profiling_sample()
instrs -= 3 # jmp, slp 1, loop
cycles -= 4 # slp 1 is counted as two cycles
if instrs <= 0:
continue
if instrs % iterations:
sys.exit( "%d cycles, %d instrs, %s" % ( cycles, instrs, core.state ) )
ii = instrs // iterations
cc = round( cycles / iterations )
cycles -= cc * iterations
ss = cc - ii
msg = "%d cycles = %d instructions + %d stalls per iteration" % ( cc, ii, ss )
if cycles:
msg += " %+d stalls" % cycles
print( msg )
| python |
#!/usr/bin/env python
import os
from django.contrib.auth.management.commands import createsuperuser
from django.core.management import CommandError
class Command(createsuperuser.Command):
help = 'Create a superuser'
def handle(self, *args, **options):
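        # Reads credentials from the db_user / db_pass environment variables and skips
        # creation when a superuser with that username already exists.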
password = os.getenv('db_pass')
username = os.getenv('db_user')
database = 'django_db'
        if createsuperuser.get_user_model().objects.filter(username=username).exists():
print('Superuser already exists. SKIPPING...')
else:
super(Command, self).handle(*args, **options)
print('Creating superuser for this app...')
user = self.UserModel._default_manager.db_manager(database).get(username=username)
user.set_password(password)
user.save()
print('Superuser created!')
| python |
class Solution:
def XXX(self, l1: ListNode, l2: ListNode) -> ListNode:
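        # Adds two non-negative integers stored as reversed linked lists (one digit per node),
        # reusing l1's nodes in place; lv holds the next l2 digit plus the running carry.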
out = l1
lv = l2.val
while True:
val = l1.val + lv
l1.val = val % 10
lv = val > 9 # carry
if l1.next:
if l2.next:
l2 = l2.next
lv += l2.val
else:
if l2.next:
l1.next = l2.next
l2.next = None
else:
l1.next = ListNode(1) if lv else None
break
l1 = l1.next
return out
| python |
# -*- coding: utf-8 -*-
"""
The ProductInfo elements.
"""
from typing import Union, List
import numpy
# noinspection PyProtectedMember
from ..sicd_elements.base import Serializable, DEFAULT_STRICT, _StringDescriptor, \
_DateTimeDescriptor, _ParametersDescriptor, ParametersCollection, \
_SerializableListDescriptor
__classification__ = "UNCLASSIFIED"
__author__ = "Thomas McCullough"
class CreationInfoType(Serializable):
"""
Parameters that provide general information about the CPHD product generation.
"""
_fields = ('Application', 'DateTime', 'Site', 'Parameters')
_required = ('DateTime', )
_collections_tags = {'Parameters': {'array': False, 'child_tag': 'Parameter'}}
# descriptors
Application = _StringDescriptor(
'Application', _required, strict=DEFAULT_STRICT,
docstring='Name and version of the application used to create the CPHD.') # type: str
DateTime = _DateTimeDescriptor(
'DateTime', _required, strict=DEFAULT_STRICT, numpy_datetime_units='us',
docstring='Date and time the image creation application processed the image (UTC).') # type: numpy.datetime64
Site = _StringDescriptor(
'Site', _required, strict=DEFAULT_STRICT,
docstring='The creation site of this CPHD product.') # type: str
Parameters = _ParametersDescriptor(
'Parameters', _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Additional parameters.') # type: Union[None, ParametersCollection]
def __init__(self, Application=None, DateTime=None, Site=None, Parameters=None, **kwargs):
"""
Parameters
----------
Application : str
DateTime : numpy.datetime64|datetime|date|str
Site : str
Parameters : None|ParametersCollection|dict
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Application = Application
self.DateTime = DateTime
self.Site = Site
self.Parameters = Parameters
super(CreationInfoType, self).__init__(**kwargs)
class ProductInfoType(Serializable):
"""
Parameters that provide general information about the CPHD product and/or the
derived products that may be created from it.
"""
_fields = ('Profile', 'CreationInfos', 'Parameters')
_required = ()
_collections_tags = {
'CreationInfos': {'array': False, 'child_tag': 'CreationInfo'},
'Parameters': {'array': False, 'child_tag': 'Parameter'}}
# descriptors
Profile = _StringDescriptor(
'Profile', _required, strict=DEFAULT_STRICT,
docstring='Identifies what profile was used to create this CPHD product.') # type: str
CreationInfos = _SerializableListDescriptor(
'CreationInfos', CreationInfoType, _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Parameters that provide general information about the CPHD '
'product generation.') # type: Union[None, List[CreationInfoType]]
Parameters = _ParametersDescriptor(
'Parameters', _collections_tags, _required, strict=DEFAULT_STRICT,
docstring='Additional parameters.') # type: Union[None, ParametersCollection]
def __init__(self, Profile=None, CreationInfos=None, Parameters=None, **kwargs):
"""
Parameters
----------
Profile : str
CreationInfos : None|List[CreationInfoType]
Parameters : None|ParametersCollection|dict
kwargs
"""
if '_xml_ns' in kwargs:
self._xml_ns = kwargs['_xml_ns']
if '_xml_ns_key' in kwargs:
self._xml_ns_key = kwargs['_xml_ns_key']
self.Profile = Profile
self.CreationInfos = CreationInfos
self.Parameters = Parameters
super(ProductInfoType, self).__init__(**kwargs)
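# A minimal usage sketch (assumed, not part of the original module): the descriptors above
# validate and normalize plain Python values on assignment, so construction looks like:
#
#     info = CreationInfoType(Application='ExampleProcessor 1.0',
#                             DateTime=numpy.datetime64('2021-01-01T00:00:00'),
#                             Site='Example Site')
#     product_info = ProductInfoType(Profile='example profile', CreationInfos=[info])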
| python |
# -*- coding: utf-8 -*-
"""
Trains a ResNeXt Model on Cifar10 and Cifar 100. Implementation as defined in:
Xie, S., Girshick, R., Dollár, P., Tu, Z., & He, K. (2016).
Aggregated residual transformations for deep neural networks.
arXiv preprint arXiv:1611.05431.
"""
from __future__ import division
__author__ = "Pau Rodríguez López, ISELAB, CVC-UAB"
__email__ = "[email protected]"
import argparse
import os
import json
import torch
import torch.nn.functional as F
import torchvision.datasets as dset
import torchvision.transforms as transforms
from isonet.models.isonext import CifarISONext
from isonet.utils.config import C
import datetime
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Trains ResNeXt on CIFAR',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
# Positional arguments
parser.add_argument('data_path', type=str,
help='Root for the Cifar dataset.')
parser.add_argument('dataset', type=str, choices=[
'cifar10', 'cifar100'], help='Choose between Cifar10/100.')
# Optimization options
parser.add_argument('--epochs', '-e', type=int,
default=300, help='Number of epochs to train.')
parser.add_argument('--batch_size', '-b', type=int,
default=128, help='Batch size.')
parser.add_argument('--learning_rate', '-lr', type=float,
default=0.1, help='The Learning Rate.')
parser.add_argument('--momentum', '-m', type=float,
default=0.9, help='Momentum.')
parser.add_argument('--decay', '-d', type=float,
default=0.0005, help='Weight decay (L2 penalty).')
parser.add_argument('--test_bs', type=int, default=10)
parser.add_argument('--schedule', type=int, nargs='+', default=[150, 225],
help='Decrease learning rate at these epochs.')
parser.add_argument('--gamma', type=float, default=0.1,
help='LR is multiplied by gamma on schedule.')
# Checkpoints
parser.add_argument('--save', '-s', type=str, default='./',
help='Folder to save checkpoints.')
parser.add_argument('--load', '-l', type=str,
help='Checkpoint path to resume / test.')
parser.add_argument('--test', '-t', action='store_true',
help='Test only flag.')
# Architecture
parser.add_argument('--depth', type=int, default=29, help='Model depth.')
parser.add_argument('--cardinality', type=int, default=8,
help='Model cardinality (group).')
parser.add_argument('--base_width', type=int, default=64,
help='Number of channels in each group.')
parser.add_argument('--widen_factor', type=int, default=4,
help='Widen factor. 4 -> 64, 8 -> 128, ...')
# Acceleration
parser.add_argument('--ngpu', type=int, default=1, help='0 = CPU.')
parser.add_argument('--gpu_id_list', type=str, default='', help="gpu id")
parser.add_argument('--prefetch', type=int, default=2,
help='Pre-fetching threads.')
# i/o
parser.add_argument('--log', type=str, default='./', help='Log folder.')
parser.add_argument('--cfg', required=True,
help='path to config file', type=str)
args = parser.parse_args()
# ---- setup configs ----
C.merge_from_file(args.cfg)
# C.SOLVER.TRAIN_BATCH_SIZE *= num_gpus
# C.SOLVER.TEST_BATCH_SIZE *= num_gpus
# C.SOLVER.BASE_LR *= num_gpus
C.freeze()
nextline = '\n'
starttime = datetime.datetime.now()
ttuple = starttime.timetuple()
startt = '_'.join(list(map(str, ttuple[1:6])))
modeloutput = f'{args.dataset}_isonext_{startt}_model.pytorch'
# Init logger
if not os.path.isdir(args.log):
os.makedirs(args.log)
log = open(os.path.join(
args.log, f'isonext_train_{args.dataset}_{args.ngpu}gpu_{startt}.txt'), 'w')
# log.write(starttime)
log.write(f'{starttime}{nextline}')
state = {k: v for k, v in args._get_kwargs()}
log.write(json.dumps(state) + '\n')
# Calculate number of epochs wrt batch size
args.epochs = args.epochs * 128 // args.batch_size
args.schedule = [x * 128 // args.batch_size for x in args.schedule]
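    # (Scales the epoch count and LR-drop schedule relative to the reference batch size of 128.)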
# Init dataset
if not os.path.isdir(args.data_path):
os.makedirs(args.data_path)
mean = [x / 255 for x in [125.3, 123.0, 113.9]]
std = [x / 255 for x in [63.0, 62.1, 66.7]]
train_transform = transforms.Compose(
[transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4), transforms.ToTensor(),
transforms.Normalize(mean, std)])
test_transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize(mean, std)])
if args.dataset == 'cifar10':
train_data = dset.CIFAR10(
args.data_path, train=True, transform=train_transform, download=True)
test_data = dset.CIFAR10(
args.data_path, train=False, transform=test_transform, download=True)
nlabels = 10
else:
train_data = dset.CIFAR100(
args.data_path, train=True, transform=train_transform, download=True)
test_data = dset.CIFAR100(
args.data_path, train=False, transform=test_transform, download=True)
nlabels = 100
train_loader = torch.utils.data.DataLoader(train_data, batch_size=args.batch_size, shuffle=True,
num_workers=args.prefetch, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_data, batch_size=args.test_bs, shuffle=False,
num_workers=args.prefetch, pin_memory=True)
# Init checkpoints
if not os.path.isdir(args.save):
os.makedirs(args.save)
# Init model, criterion, and optimizer
# 8,64,4
net = CifarISONext(args.cardinality, args.base_width, args.widen_factor)
log.write(f'{net}{nextline}')
log.flush()
device_ids = list(range(args.ngpu))
if args.ngpu > 1:
if args.gpu_id_list:
# device_ids = list(map(int, args.gpu_id_list.split(',')))
            # os.environ['CUDA_VISIBLE_DEVICES'] restricts the process to the GPUs listed in gpu_id_list (e.g. '3,5');
            # nn.DataParallel(model, device_ids=[0, 1]) then indexes into those visible GPUs by position.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id_list
net = torch.nn.DataParallel(net, device_ids=device_ids)
if args.ngpu > 0:
        # choose a GPU to load the model on; defaults to cuda:0
net.cuda()
        # To avoid the warning "module must have its parameters and buffers on device cuda:3 (device_ids[0]) but found one of them on device: cuda:0":
        # option 1 (not validated):
        # net.cuda(device=device_ids[0])
        # option 2:
        # device = torch.device(f'cuda:{device_ids[0]}')
        # net.to(device)
optimizer = torch.optim.SGD(net.parameters(), state['learning_rate'], momentum=state['momentum'],
weight_decay=state['decay'], nesterov=True)
# train function (forward, backward, update)
def train():
net.train()
loss_avg = 0.0
for batch_idx, (data, target) in enumerate(train_loader):
data, target = torch.autograd.Variable(
data.cuda()), torch.autograd.Variable(target.cuda())
# forward
output = net(data)
# backward
optimizer.zero_grad()
loss = F.cross_entropy(output, target)
loss.backward()
optimizer.step()
# exponential moving average
loss_avg = loss_avg * 0.2 + float(loss) * 0.8
state['train_loss'] = loss_avg
# test function (forward only)
def test():
net.eval()
loss_avg = 0.0
correct = 0
for batch_idx, (data, target) in enumerate(test_loader):
data, target = torch.autograd.Variable(
data.cuda()), torch.autograd.Variable(target.cuda())
# forward
output = net(data)
loss = F.cross_entropy(output, target)
# accuracy
pred = output.data.max(1)[1]
correct += float(pred.eq(target.data).sum())
# test loss average
loss_avg += float(loss)
state['test_loss'] = loss_avg / len(test_loader)
state['test_accuracy'] = correct / len(test_loader.dataset)
# Main loop
best_accuracy = 0.0
for epoch in range(args.epochs):
epochstarttime = datetime.datetime.now()
if epoch in args.schedule:
state['learning_rate'] *= args.gamma
for param_group in optimizer.param_groups:
param_group['lr'] = state['learning_rate']
state['epoch'] = epoch
train()
test()
if state['test_accuracy'] > best_accuracy:
best_accuracy = state['test_accuracy']
torch.save(net.state_dict(), os.path.join(
args.save, modeloutput))
log.write('%s\n' % json.dumps(state))
# print(state)
log.write("Best accuracy: %f\n" % best_accuracy)
epochendtime = datetime.datetime.now()
log.write(
f'end: {epochendtime}; len: {epochendtime - epochstarttime}{nextline}')
log.flush()
endtime = datetime.datetime.now()
log.write(
f'end: {endtime}; len: {endtime - starttime}{nextline}')
log.flush()
log.close()
| python |
from pathlib import Path
from os import path
from enum import Enum
import numbers
from collections.abc import Iterable
import io
import base64
from pptx import Presentation
from pptx.enum.chart import XL_CHART_TYPE, XL_LEGEND_POSITION
from pptx.chart.data import CategoryChartData, XyChartData, BubbleChartData
from pptx.enum.shapes import PP_PLACEHOLDER
from pptx.util import Pt
from itertools import islice
import pandas as pd
import numpy as np
class CHART_TYPE(Enum):
AREA = 'Area'
AREA_STACKED = 'Area-Stacked'
AREA_STACKED_100 = 'Area-Stacked-100'
BAR = 'Bar'
BAR_STACKED = 'Bar-Stacked'
BAR_STACKED_100 = 'Bar-Stacked-100'
COLUMN = 'Column'
COLUMN_STACKED = 'Column-Stacked'
COLUMN_STACKED_100 = 'Column-Stacked-100'
LINE = 'Line'
LINE_STACKED = 'Line-Stacked'
LINE_STACKED_100 = 'Line-Stacked-100'
LINE_MARKED = 'Line-Marked'
LINE_MARKED_STACKED = 'Line-Marked-Stacked'
LINE_MARKED_STACKED_100 = 'Line-Marked-Stacked-100'
DOUGHNUT = 'Doughnut'
DOUGHNUT_EXPLODED = 'Doughnut-Exploded'
PIE = 'Pie'
PIE_EXPLODED = 'Pie-Exploded'
RADAR = 'Radar'
RADAR_FILLED = 'Radar-Filled'
RADAR_MARKED = 'Radar-Marked'
XY_SCATTER = 'XY-Scatter'
XY_SCATTER_LINES = 'XY-Scatter-Lines'
XY_SCATTER_LINES_SMOOTHED = 'XY-Scatter-Lines-Smoothed'
XY_SCATTER_LINES_MARKED = 'XY-Scatter-Lines-Marked'
XY_SCATTER_LINES_MARKED_SMOOTHED = 'XY-Scatter-Lines-Marked-Smoothed'
BUBBLE = 'Bubble'
TABLE = 'Table'
class LEGEND_POSITION(Enum):
BOTTOM = 'Bottom'
CORNER = 'Corner'
LEFT = 'Left'
NONE = 'None'
RIGHT = 'Right'
TOP = 'Top'
def toPPT(presentation):
ppt = __create_presentation(presentation)
if ppt is None or isinstance(ppt, str):
        return 'Couldn\'t create PPT'
slide_count = 0
body_font = presentation.get('body_font')
if body_font is None:
body_font = dict(
name='Verdana',
size=10
)
for slide in presentation.get('slides'):
if (slide.get('body_font') is None):
slide['body_font'] = body_font
slide_body_font = slide.get('body_font')
slide_count += 1
new_slide = __create_slide(ppt, slide)
if new_slide is None or isinstance(new_slide, str):
return 'Failed to create slide {}: {}'.format(slide_count, new_slide)
chart_count = 0
for chart in slide.get('charts'):
if (chart.get('body_font') is None):
chart['body_font'] = body_font
chart_count += 1
placeholder_num = chart.get('placeholder_num')
if placeholder_num is not None and placeholder_num > 0:
placeholder = __get_placeholder(new_slide, placeholder_num)
else:
chart_num = slide.get('chart_num', 1)
placeholder = __get_chart(new_slide, chart_num)
if placeholder is None or isinstance(placeholder, str):
return 'Failed to create placeholder for chart {} in slide {}: {}'.format(chart_count, slide_count, placeholder)
new_chart = __insert_object(new_slide, placeholder, chart)
if isinstance(new_chart, str):
return 'Failed to create chart {} in slide {}: {}'.format(chart_count, slide_count, new_chart)
return ppt
def toBase64URL(pres):
# Create string shell to insert the base64-encoded data
output_str = "<a href='data:application/vnd.openxmlformats-officedocument.presentationml.presentation;base64,{}'>Download here</a>"
# Create a new byte stream to save to
stream = io.BytesIO()
# Save the presentation content to the byte stream
pres.save(stream)
# Base64 encode the stream and convert to base64 ascii
encoded = base64.b64encode(stream.getvalue()).decode()
return output_str.format(encoded)
def __create_presentation(slideInfo):
template = slideInfo.get('template')
if (template is not None):
if (not isinstance(template, str)):
template = None
else:
if (not path.isfile(template)):
template = None
return Presentation(template)
def __create_slide(ppt, slide):
slide_num = slide.get('slide_num', 0)
layout_num = slide.get('layout_num', 1)
title = slide.get('title')
if (len(ppt.slide_layouts) <= layout_num):
return 'Layout number {} is outside the number of layouts found in this PPT [{}]'.format(layout_num, len(ppt.slide_layouts))
if slide_num == 0:
new_slide = ppt.slides.add_slide(ppt.slide_layouts[layout_num])
else:
if len(ppt.slides) >= slide_num:
new_slide = ppt.slides[slide_num-1]
else:
return 'Slide number {} is outside the number of slides found in this PPT [{}]'.format(slide_num, len(ppt.slides))
if new_slide.shapes.title is not None:
new_slide.shapes.title.text = title
return new_slide
def __get_placeholder(slide, placeholder_num):
if len(slide.placeholders) < placeholder_num or placeholder_num <= 0:
return 'Placeholder number {} outside the number of placeholders found in this slide [{}]'.format(placeholder_num, len(slide.placeholders))
placeholderIdx = []
for shape in slide.placeholders:
placeholderIdx.append(shape.placeholder_format.idx)
placeholder = slide.placeholders[placeholderIdx[placeholder_num-1]]
# Remove empty placeholder
sp = placeholder._sp
sp.getparent().remove(sp)
return placeholder
def __get_chart(slide, chart_num):
if chart_num == 0:
return 'Neither placeholder_number, nor chart_number were specified for this slide'
charts_found = 0
for shape in slide.shapes:
if shape.has_chart:
charts_found += 1
if charts_found == chart_num:
shape.element.getparent().remove(shape.element)
return shape
return 'Chart number {} is outside the number of charts found in this slide [{}]'.format(chart_num, charts_found)
def __infer_category_labels(data):
for dataframe in data:
firstCol = dataframe.iloc[:, 0]
for cell in firstCol:
if not isinstance(cell, numbers.Number):
return True
return False
def __infer_series_labels(data):
for dataframe in data:
for col in dataframe.columns:
if not isinstance(col, numbers.Number):
return True
return False
def __transpose_data(chartInfo):
transposed_data = []
for dataframe in chartInfo['data']:
if not isinstance(dataframe, pd.DataFrame):
return chartInfo
if chartInfo['first_column_as_labels'] and chartInfo['column_names_as_labels']:
indexColName = dataframe.columns[0]
df = dataframe.set_index(
dataframe.columns[0]).transpose().reset_index()
df.rename(columns={'index': indexColName}, inplace=True)
elif chartInfo['column_names_as_labels']:
df = dataframe.transpose().reset_index()
elif chartInfo['first_column_as_labels']:
df = dataframe.set_index(dataframe.columns[0]).transpose()
else:
df = dataframe.transpose()
transposed_data.append(df)
chartInfo['data'] = transposed_data
temp = chartInfo['column_names_as_labels']
chartInfo['column_names_as_labels'] = chartInfo['first_column_as_labels']
chartInfo['first_column_as_labels'] = temp
return chartInfo
def __get_dataframes(data):
if (not isinstance(data, pd.DataFrame) and not __iterable(data)):
return None
if (isinstance(data, pd.DataFrame)):
dfs = [data]
else:
for dataframe in data:
if not isinstance(dataframe, pd.DataFrame):
return None
dfs = data
return dfs
def __insert_object(slide, placeholder, chart):
data = chart.get('data')
if (data is None):
return 'No data was supplied for chart'
if (isinstance(data, pd.DataFrame)):
chart['data'] = [data]
for dataframe in chart['data']:
if not isinstance(dataframe, pd.DataFrame):
return 'Data supplied was neither a Pandas DataFrame, nor an array of Pandas DataFrames'
if not isinstance(chart.get('column_names_as_labels'), bool):
chart['column_names_as_labels'] = __infer_series_labels(
chart['data'])
if not isinstance(chart.get('first_column_as_labels'), bool):
chart['first_column_as_labels'] = __infer_category_labels(
chart['data'])
transpose = chart.get('transpose', False)
if transpose:
chart = __transpose_data(chart)
data = __get_dataframes(chart.get('data'))
dataframe = data[0]
chart_type = chart.get('chart_type', 'Table')
if chart_type == CHART_TYPE.AREA.value:
return __insert_chart(XL_CHART_TYPE.AREA, slide, placeholder, chart)
elif chart_type == CHART_TYPE.AREA_STACKED.value:
return __insert_chart(XL_CHART_TYPE.AREA_STACKED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.AREA_STACKED_100.value:
return __insert_chart(XL_CHART_TYPE.AREA_STACKED_100,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.BAR.value:
return __insert_chart(XL_CHART_TYPE.BAR_CLUSTERED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.BAR_STACKED.value:
return __insert_chart(XL_CHART_TYPE.BAR_STACKED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.BAR_STACKED_100.value:
return __insert_chart(XL_CHART_TYPE.BAR_STACKED_100,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.COLUMN.value:
return __insert_chart(XL_CHART_TYPE.COLUMN_CLUSTERED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.COLUMN_STACKED.value:
return __insert_chart(XL_CHART_TYPE.COLUMN_STACKED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.COLUMN_STACKED_100.value:
return __insert_chart(XL_CHART_TYPE.COLUMN_STACKED_100,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.LINE.value:
return __insert_chart(XL_CHART_TYPE.LINE, slide, placeholder, chart)
elif chart_type == CHART_TYPE.LINE_STACKED.value:
return __insert_chart(XL_CHART_TYPE.LINE_STACKED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.LINE_STACKED_100.value:
return __insert_chart(XL_CHART_TYPE.LINE_STACKED_100,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.LINE_MARKED.value:
return __insert_chart(XL_CHART_TYPE.LINE_MARKERS,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.LINE_MARKED_STACKED.value:
return __insert_chart(XL_CHART_TYPE.LINE_MARKERS_STACKED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.LINE_MARKED_STACKED_100.value:
return __insert_chart(XL_CHART_TYPE.LINE_MARKERS_STACKED_100,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.DOUGHNUT.value:
return __insert_chart(XL_CHART_TYPE.DOUGHNUT, slide, placeholder, chart)
elif chart_type == CHART_TYPE.DOUGHNUT_EXPLODED.value:
return __insert_chart(XL_CHART_TYPE.DOUGHNUT_EXPLODED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.PIE.value:
return __insert_chart(XL_CHART_TYPE.PIE, slide, placeholder, chart)
elif chart_type == CHART_TYPE.PIE_EXPLODED.value:
return __insert_chart(XL_CHART_TYPE.PIE_EXPLODED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.RADAR.value:
return __insert_chart(XL_CHART_TYPE.RADAR, slide, placeholder, chart)
elif chart_type == CHART_TYPE.RADAR_FILLED.value:
return __insert_chart(XL_CHART_TYPE.RADAR_FILLED,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.RADAR_MARKED.value:
return __insert_chart(XL_CHART_TYPE.RADAR_MARKERS,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.XY_SCATTER.value:
return __insert_xyzchart(XL_CHART_TYPE.XY_SCATTER,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.XY_SCATTER_LINES.value:
return __insert_xyzchart(XL_CHART_TYPE.XY_SCATTER_LINES_NO_MARKERS,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.XY_SCATTER_LINES_SMOOTHED.value:
return __insert_xyzchart(XL_CHART_TYPE.XY_SCATTER_SMOOTH_NO_MARKERS,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.XY_SCATTER_LINES_MARKED.value:
return __insert_chart(XL_CHART_TYPE.XY_SCATTER_LINES,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.XY_SCATTER_LINES_MARKED_SMOOTHED.value:
return __insert_xyzchart(XL_CHART_TYPE.XY_SCATTER_SMOOTH,
slide, placeholder, chart)
elif chart_type == CHART_TYPE.BUBBLE.value:
return __insert_xyzchart(XL_CHART_TYPE.BUBBLE, slide, placeholder, chart)
else:
return __insert_table(slide, placeholder, chart)
def __insert_table(slide, placeholder, chartInfo):
df = chartInfo['data'][0]
columns = df.shape[1]
rows = df.shape[0]
if chartInfo['column_names_as_labels']:
rows += 1
# Create new element with same shape and position as placeholder
table = slide.shapes.add_table(
rows, columns, placeholder.left, placeholder.top, placeholder.width, placeholder.height).table
table.first_row = chartInfo['column_names_as_labels']
table.first_col = chartInfo['first_column_as_labels']
# Populate table
colNames = df.columns.tolist()
rowNum = 0
if chartInfo['column_names_as_labels']:
col = 0
for colName in colNames:
table.cell(0, col).text = str(colName)
col += 1
rowNum += 1
for index, row in df.iterrows():
col = 0
for colName in colNames:
table.cell(rowNum, col).text = str(row.iloc[col])
col += 1
rowNum += 1
return table
def __iterable(obj):
return isinstance(obj, Iterable)
def __create_chartdata(chart):
chart_data = CategoryChartData()
# TODO: Deal with First Row as Labels and Column Names as Labels
colNames = chart['data'][0].columns.tolist()
offset = 0
if (chart['first_column_as_labels']):
offset = 1
if len(colNames) > offset:
colNum = 1
for colName in colNames[offset:]:
if (chart['column_names_as_labels']):
chart_data.categories.add_category(colName)
else:
                chart_data.categories.add_category('Category '+str(colNum))
            colNum += 1
rowNum = 1
for index, row in chart['data'][0].iterrows():
data = []
for colName in colNames[offset:]:
data.append(row[colName])
if chart['first_column_as_labels']:
chart_data.add_series(str(row[0]), data)
else:
            chart_data.add_series('Series ' + str(rowNum), data)
        rowNum += 1
return chart_data
def __create_xyzdata(dfs):
chart_data = None
seriesNum = 1
for df in dfs:
colNames = df.columns.tolist()
name = 'Series ' + str(seriesNum)
if hasattr(df, 'name') and df.name != "":
name = df.name
if len(colNames) > 1 and len(colNames) < 4:
if len(colNames) == 2 and chart_data is None:
chart_data = XyChartData()
elif len(colNames) == 3 and chart_data is None:
chart_data = BubbleChartData()
series = chart_data.add_series(name)
for index, row in df.iterrows():
data = []
for colName in colNames:
data.append(row[colName])
if len(colNames) == 2:
series.add_data_point(data[0], data[1])
else:
series.add_data_point(data[0], data[1], data[2])
seriesNum += 1
return chart_data
def __insert_chart(chart_type, slide, placeholder, chart):
chart_data = __create_chartdata(chart)
if chart_data is None:
return 'Could not create chart data'
# Create new element with same shape and position as placeholder
new_chart = slide.shapes.add_chart(chart_type, placeholder.left,
placeholder.top, placeholder.width, placeholder.height, chart_data).chart
__set_font_object(new_chart.font, chart.get('body_font'))
__set_chart_title(new_chart, chart)
__set_axis_object(new_chart.value_axis, chart.get('y_axis'))
__set_chart_legend(new_chart, chart)
return new_chart
def __set_chart_title(new_chart, chart):
title = chart.get('title')
if title is not None:
title_tf = new_chart.chart_title.text_frame
title_tf.clear()
title_p = title_tf.paragraphs[0]
title_p.add_run().text = title
def __set_chart_legend(new_chart, chart):
legend_position = chart.get('legend_position')
if legend_position is not None and legend_position != LEGEND_POSITION.NONE.value:
new_chart.has_legend = True
if legend_position == LEGEND_POSITION.BOTTOM.value:
new_chart.legend.position = XL_LEGEND_POSITION.BOTTOM
elif legend_position == LEGEND_POSITION.CORNER.value:
new_chart.legend.position = XL_LEGEND_POSITION.CORNER
elif legend_position == LEGEND_POSITION.LEFT.value:
new_chart.legend.position = XL_LEGEND_POSITION.LEFT
elif legend_position == LEGEND_POSITION.RIGHT.value:
new_chart.legend.position = XL_LEGEND_POSITION.RIGHT
elif legend_position == LEGEND_POSITION.TOP.value:
new_chart.legend.position = XL_LEGEND_POSITION.TOP
if chart.get('overlay_legend', False):
new_chart.legend.include_in_layout = True
else:
new_chart.legend.include_in_layout = False
def __set_font_object(font_object, font):
font_object.name = font['name']
font_object.size = Pt(font['size'])
def __set_axis_object(axis_object, axis):
if axis is None:
axis = dict()
axis_object.visible = axis.get('visible', True) == True
axis_object.minimum_scale = axis.get('minimum_scale')
axis_object.maximum_scale = axis.get('maximum_scale')
has_major_gridlines = axis.get('has_major_grid_lines', False)
axis_object.has_major_gridlines = has_major_gridlines
has_minor_gridlines = axis.get('has_minor_grid_lines', False)
axis_object.has_minor_gridlines = has_minor_gridlines
has_title = axis.get('title', False) != False
if has_title:
axis_object.has_title = True
axis_object.axis_title = axis.get('title')
axis_object.tick_labels.number_format = axis.get(
'number_format', '$#0.0,,"M";[Red]($#0.0,,"M")')
def __insert_xyzchart(chart_type, slide, placeholder, chart):
chart_data = __create_xyzdata(chart['data'])
if chart_data is None:
return 'Could not create chart data'
# Create new element with same shape and position as placeholder
new_chart = slide.shapes.add_chart(chart_type, placeholder.left,
placeholder.top, placeholder.width, placeholder.height, chart_data).chart
__set_font_object(new_chart.font, chart.get('body_font'))
__set_chart_title(new_chart, chart)
__set_axis_object(new_chart.value_axis, chart.get('x_axis'))
__set_axis_object(new_chart.value_axis, chart.get('y_axis'))
__set_chart_legend(new_chart, chart)
return new_chart
def __get_datafile_name(filename):
"""
return the default template file that comes with the package
"""
return Path(__file__).parent / "data/" + filename
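# A minimal usage sketch (assumed, not part of the module): 'template.pptx' is a hypothetical
# deck whose layout 1 contains a placeholder to drop the chart into; the dict keys are the
# ones read by toPPT above.
#
#     import pandas as pd
#     df = pd.DataFrame({'Quarter': ['Q1', 'Q2', 'Q3'], 'Sales': [10, 12, 9]})
#     presentation = {
#         'template': 'template.pptx',
#         'slides': [{
#             'title': 'Sales by quarter',
#             'layout_num': 1,
#             'charts': [{'placeholder_num': 1, 'chart_type': 'Column', 'data': df}],
#         }],
#     }
#     ppt = toPPT(presentation)       # returns an error string on failure
#     if not isinstance(ppt, str):
#         ppt.save('output.pptx')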
| python |
# Generated by Django 2.0.7 on 2018-08-06 11:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('principal', '0004_tienda_uuid'),
]
operations = [
migrations.AddField(
model_name='ciudad',
name='url_pagina',
field=models.URLField(default='https://www.ubereats.com/es-NZ/stores/'),
preserve_default=False,
),
]
| python |
import warnings
from contextlib import contextmanager
from veros import runtime_settings, runtime_state, veros_kernel
class Index:
__slots__ = ()
@staticmethod
def __getitem__(key):
return key
def noop(*args, **kwargs):
pass
@contextmanager
def make_writeable(*arrs):
orig_writeable = [arr.flags.writeable for arr in arrs]
writeable_arrs = []
try:
for arr in arrs:
arr = arr.copy()
arr.flags.writeable = True
writeable_arrs.append(arr)
if len(writeable_arrs) == 1:
yield writeable_arrs[0]
else:
yield writeable_arrs
finally:
for arr, orig_val in zip(writeable_arrs, orig_writeable):
try:
arr.flags.writeable = orig_val
except ValueError:
pass
def update_numpy(arr, at, to):
with make_writeable(arr) as warr:
warr[at] = to
return warr
def update_add_numpy(arr, at, to):
with make_writeable(arr) as warr:
warr[at] += to
return warr
def update_multiply_numpy(arr, at, to):
with make_writeable(arr) as warr:
warr[at] *= to
return warr
def solve_tridiagonal_numpy(a, b, c, d, water_mask, edge_mask):
import numpy as np
from scipy.linalg import lapack
out = np.zeros(a.shape, dtype=a.dtype)
if not np.any(water_mask):
return out
# remove couplings between slices
with make_writeable(a, c) as warr:
a, c = warr
a[edge_mask] = 0
c[..., -1] = 0
sol = lapack.dgtsv(a[water_mask][1:], b[water_mask], c[water_mask][:-1], d[water_mask])[3]
out[water_mask] = sol
return out
def fori_numpy(lower, upper, body_fun, init_val):
val = init_val
for i in range(lower, upper):
val = body_fun(i, val)
return val
def scan_numpy(f, init, xs, length=None):
import numpy as np
if xs is None:
xs = [None] * length
carry = init
ys = []
for x in xs:
carry, y = f(carry, x)
ys.append(y)
return carry, np.stack(ys)
@veros_kernel(static_args=("use_ext",))
def solve_tridiagonal_jax(a, b, c, d, water_mask, edge_mask, use_ext=None):
import jax.lax
import jax.numpy as jnp
from veros.core.special.tdma_ import tdma, HAS_CPU_EXT, HAS_GPU_EXT
if use_ext is None:
use_ext = (HAS_CPU_EXT and runtime_settings.device == "cpu") or (
HAS_GPU_EXT and runtime_settings.device == "gpu"
)
if use_ext:
return tdma(a, b, c, d, water_mask, edge_mask)
warnings.warn("Could not use custom TDMA implementation, falling back to pure JAX")
a = water_mask * a * jnp.logical_not(edge_mask)
b = jnp.where(water_mask, b, 1.0)
c = water_mask * c
d = water_mask * d
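    # Pure-JAX fallback: the Thomas algorithm written as two jax.lax.scan passes, a forward
    # sweep computing the modified coefficients and a reverse sweep for back-substitution.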
def compute_primes(last_primes, x):
last_cp, last_dp = last_primes
a, b, c, d = x
cp = c / (b - a * last_cp)
dp = (d - a * last_dp) / (b - a * last_cp)
new_primes = (cp, dp)
return new_primes, new_primes
diags_transposed = [jnp.moveaxis(arr, 2, 0) for arr in (a, b, c, d)]
init = jnp.zeros(a.shape[:-1], dtype=a.dtype)
_, primes = jax.lax.scan(compute_primes, (init, init), diags_transposed)
def backsubstitution(last_x, x):
cp, dp = x
new_x = dp - cp * last_x
return new_x, new_x
_, sol = jax.lax.scan(backsubstitution, init, primes, reverse=True)
return jnp.moveaxis(sol, 0, 2)
def update_jax(arr, at, to):
return arr.at[at].set(to)
def update_add_jax(arr, at, to):
return arr.at[at].add(to)
def update_multiply_jax(arr, at, to):
return arr.at[at].multiply(to)
def flush_jax():
import jax
dummy = jax.device_put(0.0) + 0.0
try:
dummy.block_until_ready()
except AttributeError:
# if we are jitting, dummy is not a DeviceArray that we can wait for
pass
numpy = runtime_state.backend_module
if runtime_settings.backend == "numpy":
update = update_numpy
update_add = update_add_numpy
update_multiply = update_multiply_numpy
at = Index()
solve_tridiagonal = solve_tridiagonal_numpy
for_loop = fori_numpy
scan = scan_numpy
flush = noop
elif runtime_settings.backend == "jax":
import jax.lax
update = update_jax
update_add = update_add_jax
update_multiply = update_multiply_jax
at = Index()
solve_tridiagonal = solve_tridiagonal_jax
for_loop = jax.lax.fori_loop
scan = jax.lax.scan
flush = flush_jax
else:
raise ValueError(f"Unrecognized backend {runtime_settings.backend}")
| python |
# coding: utf-8
import logging
from typing import Dict, List, Iterable
from overrides import overrides
from allennlp.common import Params
from allennlp.common.checks import ConfigurationError
from allennlp.common.file_utils import cached_path
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.dataset_readers.dataset_utils import iob1_to_bioul
from allennlp.data.fields import Field, TextField, SequenceLabelField
from allennlp.data.instance import Instance
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.dataset_readers.dataset_utils import Ontonotes, OntonotesSentence
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
@DatasetReader.register("ner_ontonotes")
class NerOntonotesReader(DatasetReader):
"""
An ``allennlp.data.dataset_readers.dataset_reader.DatasetReader`` for reading
NER annotations in CoNll-formatted OntoNotes dataset.
    NB: This DatasetReader was implemented before the current implementation of
    ``OntonotesNamedEntityRecognition`` in AllenNLP. It is thought to do pretty much the same thing.
Parameters
----------
token_indexers : ``Dict[str, TokenIndexer]``, optional (default=``{"tokens": SingleIdTokenIndexer()}``)
We use this to define the input representation for the text. See :class:`TokenIndexer`.
Map a token to an id.
domain_identifier : ``str``, optional (default = None)
The subdomain to load. If None is specified, the whole dataset is loaded.
label_namespace : ``str``, optional (default = "ontonotes_ner_labels")
The tag/label namespace for the task/dataset considered.
lazy : ``bool``, optional (default = False)
        Whether or not the dataset should be loaded lazily.
        Refer to https://github.com/allenai/allennlp/blob/master/tutorials/getting_started/laziness.md
        for more details about laziness.
coding_scheme: ``str``, optional (default=``IOB1``)
Specifies the coding scheme for ``ner_labels`` and ``chunk_labels``.
Valid options are ``IOB1`` and ``BIOUL``. The ``IOB1`` default maintains
the original IOB1 scheme in the CoNLL data.
In the IOB1 scheme, I is a token inside a span, O is a token outside
a span and B is the beginning of span immediately following another
span of the same type.
"""
def __init__(
self,
token_indexers: Dict[str, TokenIndexer] = None,
domain_identifier: str = None,
label_namespace: str = "ontonotes_ner_labels",
lazy: bool = False,
coding_scheme: str = "IOB1",
) -> None:
super().__init__(lazy)
self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
self._domain_identifier = domain_identifier
self._label_namespace = label_namespace
self._coding_scheme = coding_scheme
if coding_scheme not in ("IOB1", "BIOUL"):
raise ConfigurationError("unknown coding_scheme: {}".format(coding_scheme))
@overrides
def _read(self, file_path: str):
file_path = cached_path(file_path) # if `file_path` is a URL, redirect to the cache
ontonotes_reader = Ontonotes()
logger.info("Reading NER instances from dataset files at: %s", file_path)
if self._domain_identifier is not None:
logger.info("Filtering to only include file paths containing the %s domain", self._domain_identifier)
for sentence in self._ontonotes_subset(ontonotes_reader, file_path, self._domain_identifier):
tokens = [Token(t) for t in sentence.words]
if not sentence.named_entities:
tags = ["O" for _ in tokens]
else:
tags = sentence.named_entities
if self._coding_scheme == "BIOUL":
tags = iob1_to_bioul(tags)
yield self.text_to_instance(tokens, tags)
@staticmethod
def _ontonotes_subset(
ontonotes_reader: Ontonotes, file_path: str, domain_identifier: str
) -> Iterable[OntonotesSentence]:
        for conll_file in ontonotes_reader.dataset_path_iterator(file_path):
            if domain_identifier is None or "/{}/".format(domain_identifier) in conll_file:
                yield from ontonotes_reader.sentence_iterator(conll_file)
def text_to_instance(self, tokens: List[Token], tags: List[str] = None) -> Instance:
# pylint: disable=arguments-differ
fields: Dict[str, Field] = {}
text_field = TextField(tokens, token_indexers=self._token_indexers)
fields["tokens"] = text_field
if tags:
fields["tags"] = SequenceLabelField(
labels=tags, sequence_field=text_field, label_namespace=self._label_namespace
)
return Instance(fields)
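# A minimal usage sketch (assumed; the path below is a placeholder for a CoNLL-formatted
# OntoNotes directory):
#
#     reader = NerOntonotesReader(coding_scheme="BIOUL")
#     instances = reader.read("/path/to/conll-formatted-ontonotes")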
| python |
from Core.GlobalExceptions import Exceptions
from Services.NetworkRequests import requests
from Services.Utils.Utils import Utils
from Download.Downloader.Engine.Config import Config
from Download.Downloader.Task.PrioritizedTask import PrioritizedTask
class SegmentDownloader(PrioritizedTask):
def __init__(self, url, segment, unmute, saveAs, priority=0):
super().__init__(target=self.download, priority=priority)
self.urls = self.getFileUrls(url, segment, unmute)
self.saveAs = saveAs
def download(self):
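        # Try each candidate URL in turn, repeating the whole list up to the configured retry
        # count; a FileSystemError aborts immediately, otherwise NetworkError is raised at the end.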
for i in range(Config.SEGMENT_DOWNLOAD_MAX_RETRY_COUNT):
for url in self.urls:
try:
self.downloadFile(url)
return
except Exceptions.FileSystemError:
raise Exceptions.FileSystemError
except:
pass
raise Exceptions.NetworkError
def getFileUrls(self, url, segment, unmute):
original = Utils.joinUrl(url, segment.fileName)
unmuted = Utils.joinUrl(url, segment.getUnmutedFileName())
muted = Utils.joinUrl(url, segment.getMutedFileName())
if segment.muted:
if not unmute:
return [unmuted, muted, original]
return [original, unmuted, muted]
def downloadFile(self, url):
try:
response = requests.get(url)
if response.status_code != 200:
                raise Exceptions.NetworkError
except:
raise Exceptions.NetworkError
try:
with open(self.saveAs, "wb") as file:
file.write(response.content)
return
except:
            raise Exceptions.FileSystemError
 | python
import json
from optimism.JaxConfig import *
from optimism import Mesh
def read_json_mesh(meshFileName):
with open(meshFileName, 'r', encoding='utf-8') as jsonFile:
meshData = json.load(jsonFile)
coordinates = np.array(meshData['coordinates'])
connectivity = np.array(meshData['connectivity'], dtype=int)
nodeSets = {}
for key in meshData['nodeSets']:
nodeSets[key] = np.array(meshData['nodeSets'][key])
sideSets = {}
exodusSideSets = meshData['sideSets']
for key in exodusSideSets:
elements = np.array(exodusSideSets[key][0], dtype=int)
sides = np.array(exodusSideSets[key][1], dtype=int)
sideSets[key] = np.column_stack((elements, sides))
blocks=None
return Mesh.construct_mesh_from_basic_data(coordinates, connectivity,
blocks, nodeSets, sideSets)
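# Expected JSON layout, inferred from the keys read above (a sketch, not a formal schema):
#
#     {
#       "coordinates":  [[x0, y0], [x1, y1], ...],
#       "connectivity": [[n0, n1, n2], ...],
#       "nodeSets":     {"left": [0, 1, ...]},
#       "sideSets":     {"top": [[elem0, elem1, ...], [side0, side1, ...]]}
#     }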
| python |
#Benjamin Ramirez August 9, 2016
#making class to keep track of encoder ticks on wheels
import RPi.GPIO as GPIO
class Encoder(object):
def __init__ (self, a_pin_num, b_pin_num):
self.a_pin = a_pin_num
self.b_pin = b_pin_num
GPIO.setmode(GPIO.BCM)
GPIO.setup(self.a_pin, GPIO.IN)
GPIO.setup(self.b_pin, GPIO.IN)
self.a_ticks, self.b_ticks, self.tot_ticks = 0, 0, 0
#setting up the edge detection interrupts
#because of the hall effect the encoders use
# we want to detect both rising and falling edges
GPIO.add_event_detect(self.a_pin, GPIO.BOTH, callback=self.a_call)
GPIO.add_event_detect(self.b_pin, GPIO.BOTH, callback=self.b_call)
def tot_call(self):
self.tot_ticks += 1
def a_call(self,channel):
#print "edge on A \n"
self.a_ticks += 1
self.tot_call()
def b_call(self,channel):
#print "edge on B \n"
self.b_ticks += 1
self.tot_call()
def get_ticks(self):
return self.tot_ticks
def get_a_ticks(self):
return self.a_ticks
def get_b_ticks(self):
return self.b_ticks
def reset(self):
self.a_ticks, self.b_ticks, self.tot_ticks = 0, 0, 0
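# A minimal usage sketch (assumed; BCM pins 17 and 27 are placeholders for whatever the
# encoder channels are actually wired to):
#
#     import time
#     encoder = Encoder(17, 27)
#     time.sleep(1.0)                      # let the wheel spin for a bit
#     print(encoder.get_ticks(), encoder.get_a_ticks(), encoder.get_b_ticks())
#     encoder.reset()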
| python |
from flask_wtf import FlaskForm
from wtforms import IntegerField, SelectField, SelectMultipleField, SubmitField, \
StringField
from wtforms.validators import DataRequired, Optional, NumberRange
class Search(FlaskForm):
min_age = IntegerField('From-years', validators=[Optional(), NumberRange(0, 1000, 'Too big or too small number')])
max_age = IntegerField('To-years', validators=[Optional(), NumberRange(0, 1000, 'Too big or too small number')])
sort_age = SelectField('Sort by...', choices=[
('False', 'Ascending age'),
('True', 'Descending age')
])
min_rating = IntegerField('From-rating', validators=[Optional()])
max_rating = IntegerField('To-rating', validators=[Optional()])
sort_rating = SelectField('Sort by...', choices=[
('True', 'Descending rating'),
('False', 'Ascending rating')
])
city = StringField('City', validators=[Optional()])
region = StringField('Region', validators=[Optional()])
country = StringField('Country', validators=[Optional()])
sex_pref = SelectField('Sexual Preference', choices=[
('Bisexual', 'Bisexual'),
('Male', 'Male'),
('Female', 'Female')
], validators=[Optional()])
tags = SelectMultipleField('Tags', choices=[
('Hunting', 'Hunting'),
('Fishing', 'Fishing'),
('Singing', 'Singing'),
('Fuck porcupine', 'Fuck porcupine'),
('Watching "Разведопрос"', 'Watching "Разведопрос"')
], validators=[Optional()])
submit = SubmitField('Submit', [DataRequired()])
| python |
# SPDX-FileCopyrightText: 2022 Stephan Lachnit <[email protected]>
#
# SPDX-License-Identifier: EUPL-1.2
"""
This module contains tools to manipulate DEP5 documents.
"""
from .classes import DEP5Document, DEP5FilesParagraph, DEP5HeaderParagraph, DEP5LicenseParagraph, DEP5Metadata
from .convert_calir import convert_calir_to_dep5, convert_dep5_to_calir
from .convert_text import parse_dep5, write_dep5
__all__ = [
'convert_calir_to_dep5',
'convert_dep5_to_calir',
'DEP5Document',
'DEP5FilesParagraph',
'DEP5HeaderParagraph',
'DEP5LicenseParagraph',
'DEP5Metadata',
'parse_dep5',
'write_dep5',
]
| python |
from rest_framework.exceptions import NotFound, PermissionDenied
from users.models import User, Role
from events.models import Event
from events.logic.event import get_events
def check_user_event_same_organization(view_method):
def _arguments_wrapper(
instance, request, requester: User, event_id: int, *args, **kwargs
):
try:
event = (
get_events(id=event_id).select_related("creator__organization").get()
)
if event.creator.organization != requester.organization:
raise PermissionDenied(
"User and event are in different organization.",
code="wrong_organization",
)
except (
Event.DoesNotExist,
Event.MultipleObjectsReturned,
PermissionDenied,
) as e:
raise NotFound("No event found.", code="no_event_found")
return view_method(
instance, request, requester=requester, event=event, *args, **kwargs
)
return _arguments_wrapper
def check_event_viewer(view_method):
def _arguments_wrapper(
instance, request, requester: User, event: Event, *args, **kwargs
):
is_admin = requester.role == Role.ADMIN
is_event_creator = requester == event.creator
has_view_event_permission = event.is_published or is_admin or is_event_creator
if not has_view_event_permission:
raise PermissionDenied(
"No permission to view event.",
code="no_view_event_permission",
)
return view_method(
instance, request, requester=requester, event=event, *args, **kwargs
)
return _arguments_wrapper
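# A minimal usage sketch (assumed; EventView and its get method are hypothetical). The
# decorators are meant to be stacked with check_user_event_same_organization outermost,
# since it resolves event_id into the Event object the inner checks receive:
#
#     class EventView:
#         @check_user_event_same_organization
#         @check_event_viewer
#         def get(self, request, requester: User, event: Event):
#             ...
#
#     # called as: view.get(request, requester=some_user, event_id=42)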
def check_event_modifier(view_method):
def _arguments_wrapper(
instance, request, requester: User, event: Event, *args, **kwargs
):
is_admin = requester.role == Role.ADMIN
is_event_creator = requester == event.creator
has_modify_event_permission = is_admin or is_event_creator
if not has_modify_event_permission:
raise PermissionDenied(
"No permission to modify event.",
code="no_modify_event_permission",
)
return view_method(
instance, request, requester=requester, event=event, *args, **kwargs
)
    return _arguments_wrapper
 | python
from taskplus.core.actions import ListTasksRequest
def test_list_tasks_request_without_parameters():
request = ListTasksRequest()
assert request.is_valid() is True
assert request.filters is None
def test_list_tasks_request_with_filters():
filters = dict(name='task')
request = ListTasksRequest(filters=filters)
assert request.is_valid() is True
assert request.filters == filters
def test_list_tasks_request_with_empty_filters():
filters = {}
request = ListTasksRequest(filters=filters)
assert request.is_valid() is True
assert request.filters is None
def test_list_tasks_request_invalid_filters():
filters = 5
request = ListTasksRequest(filters=filters)
assert request.is_valid() is False
assert request.filters == filters
assert len(request.errors) == 1
error = request.errors[0]
assert error.parameter == 'filters'
assert error.message == 'is not iterable'
| python |
from flask import Flask, redirect, url_for, render_template, current_app
from api import Refran
app = Flask(__name__)
@app.route('/')
def home():
refran = Refran()
return render_template('index.html', linea=refran.generate_refran())
if __name__ == '__main__':
app.run(debug=True)
| python |
import subprocess
from common.mapr_logger.log import Log
class OSCommand(object):
@staticmethod
def run(statements):
response, status = OSCommand.run2(statements)
return response
@staticmethod
def run3(statements, username=None, use_nohup=False, out_file=None, in_background=False, users_env=False, truncate_response=-1):
responses, status = OSCommand.run2(statements, username, use_nohup, out_file, in_background, users_env, truncate_response)
return responses, status, statements
@staticmethod
def run2(statements, username=None, use_nohup=False, out_file=None, in_background=False, users_env=False, truncate_response=-1):
if isinstance(statements, str):
statements = [statements]
responses = ''
status = 0
for statement in statements:
new_statement = ''
if use_nohup:
new_statement += 'nohup '
if username is not None:
new_statement += 'sudo '
if users_env:
new_statement += '-E '
new_statement += '-u ' + username + ' ' + statement
else:
new_statement += statement
if in_background:
if use_nohup and out_file is not None:
new_statement += ' > ' + out_file + ' 2>&1'
else:
new_statement += ' &>/dev/null'
new_statement += ' &'
Log.debug('RUN: %s' % new_statement)
process = subprocess.Popen('%s 2>&1' % new_statement, shell=True, stdout=subprocess.PIPE)
response = process.stdout.read()
# process.wait will only return None if the process hasn't terminated. We don't
# need to check for None here
status = process.wait()
if len(response) == 0:
response = '<no response>'
else:
                # Python 3 returns bytes or bytearray from the read() above
if not isinstance(response, str) and isinstance(response, (bytes, bytearray)):
response = response.decode("UTF-8")
Log.debug('STATUS: %s' % str(status))
if truncate_response > -1:
info = (response[:truncate_response] + '...(TEXT TRUNCATED)...') if len(response) > truncate_response else response
Log.debug('RESPONSE: %s' % info)
else:
Log.debug('RESPONSE: %s' % response)
responses += response
if status != 0:
break
return responses, status
@staticmethod
def run2_nolog(statements):
if isinstance(statements, str):
statements = [statements]
responses = ""
status = 0
for statement in statements:
process = subprocess.Popen("%s 2>&1" % statement, shell=True, stdout=subprocess.PIPE)
response = process.stdout.read()
# process.wait will only return None if the process hasn't terminated. We don't
# need to check for None here
status = process.wait()
            # Python 3 returns bytes from the read() above; decode before concatenating
            if not isinstance(response, str) and isinstance(response, (bytes, bytearray)):
                response = response.decode("UTF-8")
            responses += response
if status != 0:
break
return responses, status
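# A minimal usage sketch (assumed, not part of the module):
#
#     output = OSCommand.run("hostname")
#     responses, status = OSCommand.run2(["mkdir -p /tmp/example", "ls -l /tmp/example"])
#     if status != 0:
#         print("command failed:", responses)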
| python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ObsPy implementation for parsing the sc3ml format to an Inventory object.
This is a modified version of obspy.io.stationxml.
:author:
Mathijs Koymans ([email protected]), 11.2015 - [Jollyfant@GitHub]
:copyright:
The ObsPy Development Team ([email protected])
:license:
GNU Lesser General Public License, Version 3
(http://www.gnu.org/copyleft/lesser.html)
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA
import math
import re
import warnings
from lxml import etree
import obspy
from obspy.core.util.obspy_types import (ComplexWithUncertainties,
FloatWithUncertaintiesAndUnit)
from obspy.core.inventory import (Azimuth, ClockDrift, Dip,
Distance, Frequency, Latitude,
Longitude, SampleRate)
from obspy.core.inventory import (CoefficientsTypeResponseStage,
FilterCoefficient, FIRResponseStage,
PolesZerosResponseStage,
PolynomialResponseStage)
from obspy.io.stationxml.core import _read_floattype
SOFTWARE_MODULE = "ObsPy %s" % obspy.__version__
SOFTWARE_URI = "http://www.obspy.org"
SCHEMA_VERSION = ['0.5', '0.6', '0.7', '0.8', '0.9']
def _count_complex(complex_string):
"""
Returns number of complex numbers in string (formatted according to
SeisComp3 XML schema type "ComplexArray"). Raises an Exception if string
seems invalid.
"""
counts = set()
for char in '(,)':
counts.add(complex_string.count(char))
if len(counts) != 1:
msg = ("Invalid string for list of complex numbers:"
"\n'%s'") % complex_string
raise ValueError(msg)
return counts.pop()
def _parse_list_of_complex_string(complex_string):
"""
Returns a list of complex numbers, parsed from a string (formatted
according to SeisComp3 XML schema type "ComplexArray").
"""
count = _count_complex(complex_string)
numbers = re.findall(r'\(\s*([^,\s]+)\s*,\s*([^)\s]+)\s*\)',
complex_string)
if len(numbers) != count:
msg = ("Unexpected count of complex numbers parsed from string:"
"\n Raw string: '%s'\n Expected count of complex numbers: %s"
"\n Parsed complex numbers: %s") % (complex_string, count,
numbers)
raise ValueError(msg)
return numbers
def _read_sc3ml(path_or_file_object):
"""
    Function for reading an SC3ML inventory file.
:param path_or_file_object: File name or file like object.
"""
root = etree.parse(path_or_file_object).getroot()
    # Code can be used for schema versions 0.5 through 0.9
basespace = "http://geofon.gfz-potsdam.de/ns/seiscomp3-schema"
for version in SCHEMA_VERSION:
namespace = "%s/%s" % (basespace, version)
if root.find("{%s}%s" % (namespace, "Inventory")) is not None:
break
else:
raise ValueError("Schema version not supported.")
def _ns(tagname):
return "{%s}%s" % (namespace, tagname)
# This needs to be tested, did not find an inventory
# with the journal entry.
    journal = root.find(_ns("Journaling"))
    created = None
    sender = "ObsPy Inventory"
    if journal is not None:
        entry = journal.find(_ns("entry"))
        if entry is not None:
            created = _tag2obj(entry, _ns("created"), obspy.UTCDateTime)
            sender = _tag2obj(entry, _ns("sender"), str)
# Set source to this script
source = "sc3ml import"
module = None
module_uri = None
# Collect all networks from the sc3ml inventory
networks = []
inv_element = root.find(_ns("Inventory"))
for net_element in inv_element.findall(_ns("network")):
networks.append(_read_network(inv_element, net_element, _ns))
return obspy.core.inventory.Inventory(networks=networks, source=source,
sender=sender, created=created,
module=module, module_uri=module_uri)
def _tag2obj(element, tag, convert):
"""
Reads text from tag in element
:param element: etree element
:param tag: name of tag to be read
:param convert: intrinsic function (e.g. int, str, float)
"""
try:
# Single closing tags e.g. <analogueFilterChain/>.text return None
# and will be converted to a string 'None' when convert is str
if element.find(tag).text is None:
return None
return convert(element.find(tag).text)
except Exception:
        return None
def _read_network(inventory_root, net_element, _ns):
"""
Reads the network structure
:param inventory_root: base inventory element of sc3ml
:param net_element: network element to be read
:param _ns: namespace
"""
# Get the network code as attribute (e.g. <network code="GB">)
network = obspy.core.inventory.Network(net_element.get("code"))
# There is no further information in the attributes of <network>
# Start and end date are included as tags
network.start_date = _tag2obj(net_element, _ns("start"), obspy.UTCDateTime)
network.end_date = _tag2obj(net_element, _ns("end"), obspy.UTCDateTime)
network.description = _tag2obj(net_element, _ns("description"), str)
# get the restricted_status (boolean)
# true is evaluated to 'open'; false to 'closed'
# to match stationXML format
network.restricted_status = _get_restricted_status(net_element, _ns)
# Collect the stations
stations = []
for sta_element in net_element.findall(_ns("station")):
stations.append(_read_station(inventory_root, sta_element, _ns))
network.stations = stations
return network
def _get_restricted_status(element, _ns):
"""
get the restricted_status (boolean)
true is evaluated to 'open' and false to 'closed'
to match stationXML formatting
"""
restricted_status = _tag2obj(element, _ns("restricted"), str)
if(restricted_status == 'false'):
return 'open'
else:
return 'closed'
def _read_station(inventory_root, sta_element, _ns):
"""
Reads the station structure
:param inventory_root: base inventory element of sc3ml
:param sta_element: station element to be read
:param _ns: name space
"""
# Read location tags
longitude = _read_floattype(sta_element, _ns("longitude"), Longitude,
datum=True)
latitude = _read_floattype(sta_element, _ns("latitude"), Latitude,
datum=True)
elevation = _read_floattype(sta_element, _ns("elevation"), Distance,
unit=True)
station = obspy.core.inventory.Station(code=sta_element.get("code"),
latitude=latitude,
longitude=longitude,
elevation=elevation)
station.site = _read_site(sta_element, _ns)
# There is no relevant info in the base node
# Read the start and end date (creation, termination) from tags
# "Vault" and "Geology" are not defined in sc3ml ?
station.start_date = _tag2obj(sta_element, _ns("start"), obspy.UTCDateTime)
station.end_date = _tag2obj(sta_element, _ns("end"), obspy.UTCDateTime)
station.creation_date = _tag2obj(sta_element, _ns("start"),
obspy.UTCDateTime)
station.termination_date = _tag2obj(sta_element, _ns("end"),
obspy.UTCDateTime)
# get the restricted_status (boolean)
# true is evaluated to 'open'; false to 'closed'
station.restricted_status = _get_restricted_status(sta_element, _ns)
# Get all the channels, sc3ml keeps these in <sensorLocation> tags in the
# station element. Individual channels are contained within <stream> tags
channels = []
for sen_loc_element in sta_element.findall(_ns("sensorLocation")):
for channel in sen_loc_element.findall(_ns("stream")):
channels.append(_read_channel(inventory_root, channel, _ns))
station.channels = channels
return station
def _read_site(sta_element, _ns):
"""
Reads site information from the station element tags
and region from network element
In sc3ml, site information are included as
tags in the station_element
:param sta_element: station element
:param _ns: namespace
"""
# The region is defined in the parent network element
net_element = sta_element.getparent()
region = _tag2obj(net_element, _ns("region"), str)
# The country, place, description are given in the
# station element
country = _tag2obj(sta_element, _ns("country"), str)
place = _tag2obj(sta_element, _ns("place"), str)
description = _tag2obj(sta_element, _ns("description"), str)
# The name is usually the description
name = description
return obspy.core.inventory.Site(name=name, description=None,
town=place, county=None, region=region,
country=country)
def _read_datalogger(equip_element, _ns):
"""
    Reads equipment information from a datalogger element.
    Information that is not present in sc3ml is set to None.
    :param equip_element: element to be parsed
:param _ns: name space
"""
resource_id = equip_element.get("publicID")
description = _tag2obj(equip_element, _ns("description"), str)
manufacturer = _tag2obj(equip_element, _ns("digitizerManufacturer"), str)
model = _tag2obj(equip_element, _ns("digitizerModel"), str)
return obspy.core.inventory.Equipment(
resource_id=resource_id, type=model, description=description,
manufacturer=manufacturer, vendor=None, model=model,
serial_number=None, installation_date=None,
removal_date=None, calibration_dates=None)
def _read_sensor(equip_element, _ns):
"""
    Reads equipment information from a sensor element.
    Information that is not present in sc3ml is set to None.
:param equip_element: element to be parsed
:param _ns: name space
"""
# try to read some element tags, most are missing anyway
resource_id = equip_element.get("publicID")
equipment_type = _tag2obj(equip_element, _ns("type"), str)
description = _tag2obj(equip_element, _ns("description"), str)
manufacturer = _tag2obj(equip_element, _ns("manufacturer"), str)
model = _tag2obj(equip_element, _ns("model"), str)
return obspy.core.inventory.Equipment(
resource_id=resource_id, type=equipment_type, description=description,
manufacturer=manufacturer, vendor=None, model=model,
serial_number=None, installation_date=None,
removal_date=None, calibration_dates=None)
def _read_channel(inventory_root, cha_element, _ns):
"""
    Reads a channel element from sc3ml format
    :param inventory_root: base inventory element of sc3ml
    :param cha_element: channel element to be read
:param _ns: namespace
"""
code = cha_element.get("code")
# Information is also kept within the parent <sensorLocation> element
sen_loc_element = cha_element.getparent()
location_code = sen_loc_element.get("code")
# get site info from the <sensorLocation> element
longitude = _read_floattype(sen_loc_element, _ns("longitude"), Longitude,
datum=True)
latitude = _read_floattype(sen_loc_element, _ns("latitude"), Latitude,
datum=True)
elevation = _read_floattype(sen_loc_element, _ns("elevation"), Distance,
unit=True)
depth = _read_floattype(cha_element, _ns("depth"), Distance,
unit=True)
    # Set values to 0 if they are missing (see #1816)
if longitude is None:
msg = "Sensor is missing longitude information, using 0.0"
warnings.warn(msg)
longitude = 0
if latitude is None:
msg = "Sensor is missing latitude information, using 0.0"
warnings.warn(msg)
latitude = 0
if elevation is None:
msg = "Sensor is missing elevation information, using 0.0"
warnings.warn(msg)
elevation = 0
if depth is None:
msg = "Channel is missing depth information, using 0.0"
warnings.warn(msg)
depth = 0
channel = obspy.core.inventory.Channel(
code=code, location_code=location_code, latitude=latitude,
longitude=longitude, elevation=elevation, depth=depth)
# obtain the sensorID and link to particular publicID <sensor> element
# in the inventory base node
sensor_id = cha_element.get("sensor")
sensor_element = inventory_root.find(_ns("sensor[@publicID='" + sensor_id +
"']"))
# obtain the poles and zeros responseID and link to particular
# <responsePAZ> publicID element in the inventory base node
if (sensor_element is not None and
sensor_element.get("response") is not None):
response_id = sensor_element.get("response")
response_elements = []
for resp_type in ['responsePAZ', 'responsePolynomial']:
search = "{}[@publicID='{}']".format(resp_type, response_id)
response_elements += inventory_root.findall(_ns(search))
if len(response_elements) == 0:
msg = ("Could not find response tag with public ID "
"'{}'.".format(response_id))
raise obspy.ObsPyException(msg)
elif len(response_elements) > 1:
msg = ("Found multiple matching response tags with the same "
"public ID '{}'.".format(response_id))
raise obspy.ObsPyException(msg)
response_element = response_elements[0]
else:
response_element = None
# obtain the dataloggerID and link to particular <responsePAZ> publicID
# element in the inventory base node
datalogger_id = cha_element.get("datalogger")
search = "datalogger[@publicID='" + datalogger_id + "']"
data_log_element = inventory_root.find(_ns(search))
channel.restricted_status = _get_restricted_status(cha_element, _ns)
# There is no further information in the attributes of <stream>
# Start and end date are included as tags instead
channel.start_date = _tag2obj(cha_element, _ns("start"), obspy.UTCDateTime)
channel.end_date = _tag2obj(cha_element, _ns("end"), obspy.UTCDateTime)
# Determine sample rate (given is a numerator, denominator)
# Assuming numerator is # samples and denominator is # seconds
numerator = _tag2obj(cha_element, _ns("sampleRateNumerator"), int)
denominator = _tag2obj(cha_element, _ns("sampleRateDenominator"), int)
# If numerator is zero, set rate to zero irrespective of the denominator.
# If numerator is non-zero and denominator zero, will raise
# ZeroDivisionError.
rate = numerator / denominator if numerator != 0 else 0
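    # Illustrative values (not from a real file): numerator=100, denominator=1
    # gives 100.0 Hz; numerator=1, denominator=10 gives 0.1 Hz (one sample
    # every 10 s).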
channel.sample_rate_ratio_number_samples = numerator
channel.sample_rate_ratio_number_seconds = denominator
channel.sample_rate = _read_float_var(rate, SampleRate)
if sensor_element is not None:
channel.sensor = _read_sensor(sensor_element, _ns)
if data_log_element is not None:
channel.data_logger = _read_datalogger(data_log_element, _ns)
temp = _read_floattype(data_log_element, _ns("maxClockDrift"),
ClockDrift)
if temp is not None:
if channel.sample_rate != 0.0:
channel.clock_drift_in_seconds_per_sample = \
_read_float_var(temp / channel.sample_rate, ClockDrift)
else:
msg = "Clock drift division by sample rate of 0: " \
"using sec/sample"
warnings.warn(msg)
channel.sample_rate = temp
channel.azimuth = _read_floattype(cha_element, _ns("azimuth"), Azimuth)
channel.dip = _read_floattype(cha_element, _ns("dip"), Dip)
channel.storage_format = _tag2obj(cha_element, _ns("format"), str)
if channel.sample_rate == 0.0:
msg = "Something went hopelessly wrong, found sampling-rate of 0!"
warnings.warn(msg)
# Begin to collect digital/analogue filter chains
# This information is stored as an array in the datalogger element
response_fir_id = []
response_paz_id = []
if data_log_element is not None:
# Find the decimation element with a particular num/denom
decim_element = data_log_element.find(_ns(
"decimation[@sampleRateDenominator='" +
str(int(denominator)) + "'][@sampleRateNumerator='" +
str(int(numerator)) + "']"))
analogue_filter_chain = _tag2obj(decim_element,
_ns("analogueFilterChain"), str)
if analogue_filter_chain is not None:
response_paz_id = analogue_filter_chain.split(" ")
digital_filter_chain = _tag2obj(decim_element,
_ns("digitalFilterChain"), str)
if digital_filter_chain is not None:
response_fir_id = digital_filter_chain.split(" ")
channel.response = _read_response(inventory_root, sensor_element,
response_element, cha_element,
data_log_element, _ns,
channel.sample_rate,
response_fir_id, response_paz_id)
return channel
def _read_instrument_sensitivity(sen_element, cha_element, _ns):
"""
reads the instrument sensitivity (gain) from the sensor and channel element
"""
gain = _tag2obj(cha_element, _ns("gain"), float)
frequency = _tag2obj(cha_element, _ns("gainFrequency"), float)
input_units_name = _tag2obj(sen_element, _ns("unit"), str)
output_units_name = str(None)
sensitivity = obspy.core.inventory.response.InstrumentSensitivity(
value=gain, frequency=frequency,
input_units=input_units_name,
output_units=output_units_name)
# assuming these are equal to frequencyStart/frequencyEnd
sensitivity.frequency_range_start = \
_tag2obj(sen_element, _ns("lowFrequency"), float)
sensitivity.frequency_range_end = \
_tag2obj(sen_element, _ns("highFrequency"), float)
return sensitivity
def _read_response(root, sen_element, resp_element, cha_element,
data_log_element, _ns, samp_rate, fir, analogue):
"""
    Reads the response from sc3ml format
    :param root: base inventory element of sc3ml
:param _ns: namespace
"""
response = obspy.core.inventory.response.Response()
response.instrument_sensitivity = _read_instrument_sensitivity(
sen_element, cha_element, _ns)
if resp_element is None:
return response
"""
uncomment to include resource id for response (not shown in stationXML)
response.resource_id = resp_element.attrib.get('publicID')
if response.resource_id is not None:
response.resource_id = str(response.resource_id)
"""
# The sampling rate is not given per fir filter as in stationXML
# We are only given a decimation factor per stage, therefore we are
# required to reconstruct the sampling rates at a given stage from
# this chain of factors
# start with the final sampling_rate after all stages are applied
# invert the fir stages to reverse engineer (backwards) the sample rate
# during any fir stage
samp_rate = float(samp_rate)
fir_stage_rates = []
if len(fir):
fir = fir[::-1]
for fir_id in fir:
# get the particular fir stage decimation factor
# multiply the decimated sample rate by this factor
search = "responseFIR[@publicID='" + fir_id + "']"
fir_element = root.find(_ns(search))
if fir_element is None:
continue
dec_fac = _tag2obj(fir_element, _ns("decimationFactor"), int)
if dec_fac is not None and int(dec_fac) != 0:
samp_rate *= dec_fac
fir_stage_rates.append(float(samp_rate))
# Return filter chain to original and also revert the rates
fir = fir[::-1]
fir_stage_rates = fir_stage_rates[::-1]
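    # Worked example (illustrative, not from the original code): if the final
    # channel rate is 100 Hz and the digital filter chain has two FIR stages
    # with decimation factors [2, 5] (input-to-output order), the reversed walk
    # gives 100 * 5 = 500 Hz and 500 * 2 = 1000 Hz, so after re-reversing,
    # fir_stage_rates = [1000.0, 500.0] holds the input sample rate of each
    # FIR stage in its original order.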
# Attempt to read stages in the proper order
# sc3ml does not group stages by an ID
# We are required to do stage counting ourselves
stage = 1
# Get the sensor units, default to M/S
sensor_units = _tag2obj(sen_element, _ns("unit"), str)
if sensor_units is None:
msg = "Sensor unit not set, assuming M/S"
warnings.warn(msg)
sensor_units = "M/S"
# Get the first PAZ stage
# Input unit: M/S or M/S**2
# Output unit: V
if resp_element is not None:
paz_response = _read_response_stage(resp_element, _ns, samp_rate,
stage, sensor_units, 'V')
if paz_response is not None:
response.response_stages.append(paz_response)
stage += 1
# Apply analogue filter stages (if any)
# Input unit: V
# Output unit: V
if len(analogue):
for analogue_id in analogue:
search = "responsePAZ[@publicID='" + analogue_id + "']"
analogue_element = root.find(_ns(search))
if analogue_element is None:
msg = ('Analogue responsePAZ not in inventory:'
'%s, stopping before stage %i') % (analogue_id, stage)
warnings.warn(msg)
return response
analogue_response = _read_response_stage(analogue_element, _ns,
samp_rate, stage, 'V',
'V')
if analogue_response is not None:
response.response_stages.append(analogue_response)
stage += 1
# Apply datalogger (digitizer)
# Input unit: V
# Output unit: COUNTS
if data_log_element is not None:
coeff_response = _read_response_stage(data_log_element, _ns,
samp_rate, stage, 'V',
'COUNTS')
if coeff_response is not None:
response.response_stages.append(coeff_response)
stage += 1
# Apply final digital filter stages
# Input unit: COUNTS
# Output unit: COUNTS
for fir_id, rate in zip(fir, fir_stage_rates):
search = "responseFIR[@publicID='" + fir_id + "']"
stage_element = root.find(_ns(search))
if stage_element is None:
msg = ("fir response not in inventory: %s, stopping correction"
"before stage %i") % (fir_id, stage)
warnings.warn(msg)
return response
fir_response = _read_response_stage(stage_element, _ns, rate, stage,
'COUNTS', 'COUNTS')
if fir_response is not None:
response.response_stages.append(fir_response)
stage += 1
return response
def _read_response_stage(stage, _ns, rate, stage_number, input_units,
output_units):
elem_type = stage.tag.split("}")[1]
stage_sequence_number = stage_number
# Obtain the stage gain and frequency
# Default to a gain of 0 and frequency of 0 if missing
stage_gain = _tag2obj(stage, _ns("gain"), float) or 0
stage_gain_frequency = _tag2obj(stage, _ns("gainFrequency"),
float) or float(0.00)
name = stage.get("name")
if name is not None:
name = str(name)
resource_id = stage.get("publicID")
if resource_id is not None:
resource_id = str(resource_id)
# Determine the decimation parameters
# This is dependent on the type of stage
# Decimation delay/correction need to be normalized
if(elem_type == "responseFIR"):
decimation_factor = _tag2obj(stage, _ns("decimationFactor"), int)
if rate != 0.0:
temp = _tag2obj(stage, _ns("delay"), float) / rate
decimation_delay = _read_float_var(temp,
FloatWithUncertaintiesAndUnit,
unit=True)
temp = _tag2obj(stage, _ns("correction"), float) / rate
decimation_corr = _read_float_var(temp,
FloatWithUncertaintiesAndUnit,
unit=True)
else:
decimation_delay = _read_float_var("inf",
FloatWithUncertaintiesAndUnit,
unit=True)
decimation_corr = _read_float_var("inf",
FloatWithUncertaintiesAndUnit,
unit=True)
decimation_input_sample_rate = \
_read_float_var(rate, Frequency)
decimation_offset = int(0)
elif(elem_type == "datalogger"):
decimation_factor = int(1)
decimation_delay = _read_float_var(0.00,
FloatWithUncertaintiesAndUnit,
unit=True)
decimation_corr = _read_float_var(0.00,
FloatWithUncertaintiesAndUnit,
unit=True)
decimation_input_sample_rate = \
_read_float_var(rate, Frequency)
decimation_offset = int(0)
elif(elem_type == "responsePAZ" or elem_type == "responsePolynomial"):
decimation_factor = None
decimation_delay = None
decimation_corr = None
decimation_input_sample_rate = None
decimation_offset = None
else:
raise ValueError("Unknown type of response: " + str(elem_type))
    # Set up the keyword arguments for this stage
kwargs = {
"stage_sequence_number": stage_sequence_number,
"input_units": str(input_units),
"output_units": str(output_units),
"input_units_description": None,
"output_units_description": None,
"resource_id": None,
"resource_id2": resource_id,
"stage_gain": stage_gain,
"stage_gain_frequency": stage_gain_frequency,
"name": name,
"description": None,
"decimation_input_sample_rate": decimation_input_sample_rate,
"decimation_factor": decimation_factor,
"decimation_offset": decimation_offset,
"decimation_delay": decimation_delay,
"decimation_correction": decimation_corr
}
# Different processing for different types of responses
# currently supported:
# PAZ
# COEFF
# FIR
# Polynomial response is not supported, could not find example
if(elem_type == 'responsePAZ'):
# read normalization params
normalization_freq = _read_floattype(stage,
_ns("normalizationFrequency"),
Frequency)
normalization_factor = _tag2obj(stage, _ns("normalizationFactor"),
float)
# Parse the type of the transfer function
# A: Laplace (rad)
# B: Laplace (Hz)
# D: digital (z-transform)
pz_transfer_function_type = _tag2obj(stage, _ns("type"), str)
if pz_transfer_function_type == 'A':
pz_transfer_function_type = 'LAPLACE (RADIANS/SECOND)'
elif pz_transfer_function_type == 'B':
pz_transfer_function_type = 'LAPLACE (HERTZ)'
elif pz_transfer_function_type == 'D':
pz_transfer_function_type = 'DIGITAL (Z-TRANSFORM)'
else:
msg = ("Unknown transfer function code %s. Defaulting to Laplace"
"(rad)") % pz_transfer_function_type
warnings.warn(msg)
pz_transfer_function_type = 'LAPLACE (RADIANS/SECOND)'
# Parse string of poles and zeros
# paz are stored as a string in sc3ml
# e.g. (-0.01234,0.01234) (-0.01234,-0.01234)
zeros_array = stage.find(_ns("zeros")).text
poles_array = stage.find(_ns("poles")).text
if zeros_array is not None:
zeros_array = _parse_list_of_complex_string(zeros_array)
else:
zeros_array = []
if poles_array is not None:
poles_array = _parse_list_of_complex_string(poles_array)
else:
poles_array = []
# Keep counter for pole/zero number
cnt = 0
poles = []
zeros = []
for el in poles_array:
poles.append(_tag2pole_or_zero(el, cnt))
cnt += 1
for el in zeros_array:
zeros.append(_tag2pole_or_zero(el, cnt))
cnt += 1
# Return the paz response
return PolesZerosResponseStage(
pz_transfer_function_type=pz_transfer_function_type,
normalization_frequency=normalization_freq,
normalization_factor=normalization_factor, zeros=zeros,
poles=poles, **kwargs)
elif(elem_type == 'datalogger'):
cf_transfer_function_type = "DIGITAL"
numerator = []
denominator = []
return CoefficientsTypeResponseStage(
cf_transfer_function_type=cf_transfer_function_type,
numerator=numerator, denominator=denominator, **kwargs)
elif(elem_type == 'responsePolynomial'):
# Polynomial response (UNTESTED)
# Currently not implemented in ObsPy (20-11-2015)
f_low = None
f_high = None
max_err = None
appr_type = _tag2obj(stage, _ns("approximationType"), str)
appr_low = _tag2obj(stage, _ns("approximationLowerBound"), float)
appr_high = _tag2obj(stage, _ns("approximationUpperBound"), float)
coeffs_str = _tag2obj(stage, _ns("coefficients"), str)
if coeffs_str is not None:
coeffs = coeffs_str.split(" ")
coeffs_float = []
i = 0
# pass additional mapping of coefficient counter
# so that a proper stationXML can be formatted
for c in coeffs:
temp = _read_float_var(c, FilterCoefficient,
additional_mapping={str("number"): i})
coeffs_float.append(temp)
i += 1
return PolynomialResponseStage(
approximation_type=appr_type, frequency_lower_bound=f_low,
frequency_upper_bound=f_high, approximation_lower_bound=appr_low,
approximation_upper_bound=appr_high, maximum_error=max_err,
coefficients=coeffs, **kwargs)
elif(elem_type == 'responseFIR'):
# For the responseFIR obtain the symmetry and
# list of coefficients
coeffs_str = _tag2obj(stage, _ns("coefficients"), str)
coeffs_float = []
if coeffs_str is not None and coeffs_str != 'None':
coeffs = coeffs_str.split()
i = 0
# pass additional mapping of coefficient counter
# so that a proper stationXML can be formatted
for c in coeffs:
temp = _read_float_var(c, FilterCoefficient,
additional_mapping={str("number"): i})
coeffs_float.append(temp)
i += 1
# Write the FIR symmetry to what ObsPy expects
# A: NONE,
# B: ODD,
# C: EVEN
symmetry = _tag2obj(stage, _ns("symmetry"), str)
if(symmetry == 'A'):
symmetry = 'NONE'
elif(symmetry == 'B'):
symmetry = 'ODD'
elif(symmetry == 'C'):
symmetry = 'EVEN'
else:
raise ValueError('Unknown symmetry metric; expected A, B, or C')
return FIRResponseStage(
coefficients=coeffs_float, symmetry=symmetry, **kwargs)
def _tag2pole_or_zero(paz_element, count):
"""
    Parses sc3ml poles and zeros.
    Uncertainties on poles are not present in sc3ml.xsd and are always
    set to None, so there is no internal conflict.
    The sanitization removes the first/last parenthesis and splits by
    comma; the real part comes first, the imaginary part second.
    :param paz_element: tuple for one pole or zero, e.g. ('12320', '23020')
"""
real, imag = map(float, paz_element)
if real is not None or imag is not None:
real = real or 0
imag = imag or 0
x = ComplexWithUncertainties(real, imag)
        x.lower_uncertainty = None
        x.upper_uncertainty = None
x.number = count
return x
def _read_float_var(elem, cls, unit=False, datum=False, additional_mapping={}):
"""
function to read floattype to cls object (based on _read_floattype)
normally ObsPy would read this directly from a tag, but with different
tag names this is no longer possible; instead we just pass the value
and not the tag name. We always set the unit/datum/uncertainties to None
because they are not provided by sc3ml ?
:param elem: float value to be converted
:param cls: obspy.core.inventory class
"""
try:
convert = float(elem)
except Exception:
warnings.warn(
"Encountered a value '%s' which could not be converted to a "
"float. Will be skipped. Please contact to report this "
"issue." % elem,
UserWarning)
return None
if math.isnan(convert):
warnings.warn("'%s' has a value of NaN. It will be skipped." %
elem, UserWarning)
return None
obj = cls(convert)
if unit:
obj.unit = None
if datum:
obj.datum = None
obj.lower_uncertainty = None
obj.upper_uncertainty = None
for key1, key2 in additional_mapping.items():
setattr(obj, key1, key2)
return obj
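# Minimal usage sketch (assumption: this module is wired up as ObsPy's SC3ML
# inventory plugin; the file name is a placeholder):
#
#     import obspy
#     inv = obspy.read_inventory("example.sc3ml", format="SC3ML")
#     print(inv)  # networks/stations/channels parsed by the readers above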
| python |
"""
Code taken from:
https://uvadlc-notebooks.readthedocs.io/en/latest/tutorial_notebooks/tutorial11/NF_image_modeling.html#Normalizing-Flows-as-generative-model
https://github.com/didriknielsen/survae_flows/blob/master/survae/transforms/surjections/slice.py
"""
from typing import Iterable, List
from FrEIA.modules import InvertibleModule
import torch
from nflows.utils import sum_except_batch
import numpy as np
class SplitPrior(InvertibleModule):
"""
A simple slice layer which factors out some elements and returns
the remaining elements for further transformation.
This is useful for constructing multi-scale architectures [1].
References:
[1] Density estimation using Real NVP,
Dinh et al., 2017, https://arxiv.org/abs/1605.08803
"""
def __init__(self, dims_in: Iterable[List[int]], prior):
super().__init__(dims_in)
self.prior = prior
# self.num_keep = num_keep
# def split_input(self, input):
# split_proportions = (self.num_keep, input.shape[self.dim] - self.num_keep)
# return torch.split(input, split_proportions, dim=self.dim)
def forward(self, x, c=[], rev=False, jac=True):
x = x[0]
if rev:
x_split = self.prior.sample(x.shape)
z = torch.cat([x, x_split], dim=1)
ldj = self.prior.log_prob(x_split)
else:
# split inputs
# z, z_split = self.split_input(x)
z, z_split = torch.chunk(x, 2, dim=1)
ldj = self.prior.log_prob(z_split)
ldj = sum_except_batch(ldj)
return (z,), ldj
def output_dims(self, input_dims):
if len(input_dims[0]) == 1:
            d = input_dims[0][0]
            new_dims = (d // 2,)
elif len(input_dims[0]) == 3:
c, h, w = input_dims[0]
new_dims = (c // 2, h, w)
else:
raise ValueError("Errrr")
return [
new_dims,
]
class GeneralizedSplitPrior(InvertibleModule):
"""
A simple slice layer which factors out some elements and returns
the remaining elements for further transformation.
This is useful for constructing multi-scale architectures [1].
References:
[1] Density estimation using Real NVP,
Dinh et al., 2017, https://arxiv.org/abs/1605.08803
"""
def __init__(
self, dims_in: Iterable[List[int]], prior, split: int, split_dim: int = 0
):
super().__init__(dims_in)
self.prior = prior
if isinstance(split, int):
# get number of dimensions in sliced dimension
slice_dim = dims_in[0][split_dim]
# number of dimensions to keep
self.num_keep = split
# number of dimensions to remove
self.num_split = slice_dim - split
# the dimension for the split
self.split_dim = split_dim
elif isinstance(split, list) or isinstance(split, tuple):
# get number of dimensions in sliced dimension
slice_dim = dims_in[0][split_dim]
msg = f"splits ({split}) are not equal to total dims ({slice_dim})"
assert slice_dim == sum(list(split)), msg
# number of dimensions to keep
self.num_keep = split[0]
# number of dimensions to remove
self.num_split = split[1]
# the dimension for the split
self.split_dim = split_dim
else:
raise ValueError(f"Unrecognized split type: {split}")
# self.keep_dim
def split_input(self, input):
# split_proportions = (self.num_keep, input.shape[self.split_dim] - self.num_keep)
return torch.split(
input, (self.num_keep, self.num_split), dim=self.split_dim + 1
)
def forward(self, x, c=[], rev=False, jac=True):
x = x[0]
if rev:
# get dimensions
input_shape = list(x.shape)
# replace input shape with correct one (include batch dim)
input_shape[self.split_dim + 1] = self.num_split
# sample from latent dim
x_split = self.prior.sample(tuple(input_shape))
z = torch.cat([x, x_split], dim=1)
            # print(z.shape)  # left-over debug output
ldj = self.prior.log_prob(x_split)
else:
# split inputs
# z, z_split = self.split_input(x)
z, z_split = self.split_input(x)
ldj = self.prior.log_prob(z_split)
ldj = sum_except_batch(ldj)
return (z,), ldj
def output_dims(self, input_dims):
if len(input_dims[0]) == 1:
new_dims = list(input_dims[0])
new_dims[self.split_dim] = self.num_keep
elif len(input_dims[0]) == 3:
new_dims = list(input_dims[0])
new_dims[self.split_dim] = self.num_keep
else:
raise ValueError("Errrr")
return [
tuple(new_dims),
]
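# Minimal usage sketch (assumptions: a torch.distributions prior exposing
# .sample()/.log_prob(), e.g. a standard Normal; shapes are illustrative):
#
#     prior = torch.distributions.Normal(0.0, 1.0)
#     layer = SplitPrior(dims_in=[(16, 8, 8)], prior=prior)
#     x = torch.randn(4, 16, 8, 8)
#     (z,), ldj = layer([x])                 # z: (4, 8, 8, 8), ldj: (4,)
#     (x_rec,), _ = layer([z], rev=True)     # re-attaches sampled channels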
| python |
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 12:39:40 2021
@author: Clau
Paper: Energy sufficiency (SDEWES LA 2022)
User: Public lighting - LOWLANDS
"""
from core import User, np
User_list = []
#Defining users
PL = User("Public lighting ", 1)
User_list.append(PL)
#Appliances
PL_lamp_post = PL.Appliance(PL,1,40,2,310,0,300, 'yes', flat = 'yes')
PL_lamp_post.windows([0,362],[1082,1440],0.1) | python |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df_train = pd.read_csv('train.csv')
country = df_train['Country/Region']
country_set = list(set(country))
country_set = sorted(country_set)
province = df_train['Province/State']
for i in range(len(province)):
if(pd.isnull(province[i])):
province[i] = country[i]
province_set = list(set(province))
date = df_train['Date']
for i in range(len(date)):
dt = date[i]
mm = dt[5:7]
dd = dt[8:10]
mm= int(mm)
dd = int(dd)
if(mm==1):
day = dd
elif(mm==2):
day = 31+dd
elif(mm==3):
day = 31+29+dd
date[i] = day
date_set = sorted(list(set(date)))
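# Illustrative check (not in the original script): with the mapping above,
# '2020-02-15' becomes day 31 + 15 = 46 and '2020-03-10' becomes
# 31 + 29 + 10 = 70 (2020 is a leap year, hence the 29).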
confirm = df_train['ConfirmedCases']
fatal = df_train['Fatalities']
# In[3]:
k=0
key = province[0]
i = 0
l = len(province)
prov_confirm = []
prov_fatal = []
while(i < l):
if(key==province[i]):
prov_confirm.append(confirm[i])
prov_fatal.append(fatal[i])
i+=1
else:
plt.figure(k+1)
        plt.plot(date_set, prov_confirm, label='Confirmed cases', markerfacecolor = 'blue')
plt.plot(date_set, prov_fatal, label='Fatalities', markerfacecolor = 'red')
plt.xlabel('Day')
plt.ylabel('count')
plt.legend(loc='upper left')
plt.grid(True,linewidth=0.5,color='g', linestyle='--')
if(key == country[i-1]):
plt.title(key)
figname = '/home/pinaki/Desktop/covid19_jan_to_mar/' + key + '.png'
else:
plt.title(key+' / '+country[i-1])
figname = '/home/pinaki/Desktop/covid19_jan_to_mar/' + key + '-' + country[i-1] + '.png'
plt.savefig(figname, dpi=80)
plt.show()
k+=1
key = province[i]
prov_confirm = []
prov_fatal = []
# In[ ]:
| python |
#!/usr/bin/env python3
import Bio
from Bio.Seq import Seq
my_seq = Seq("ATGAGTACACTAGGGTAA")
print(my_seq)
rc = my_seq.reverse_complement()
pep = my_seq.translate()
print("revcom is", rc)
print("re-revcom is", rc.reverse_complement())
print(pep)
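# Expected output for the sequence above (hand-computed sanity check):
#   revcom is TTACCCTAGTGTACTCAT
#   re-revcom is ATGAGTACACTAGGGTAA
#   MSTLG*  ('*' marks the stop codon TAA)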
| python |
import loadgenome as lg
import parse as prs
import makepdf as mpdf
import sys, getopt
#print lg.loadgen("sample genomes/23andme_sample.txt")
#
def main(argv):
input_file = ''
output_file = ''
usage = 'Usage: python main.py -i <input_file> -o <output_file>'
try:
opts, args = getopt.getopt(argv,"hi:o:",["ifile=","ofile="])
except getopt.GetoptError:
print(usage)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print(usage)
sys.exit()
elif opt in ("-i", "--ifile"):
input_file = arg
elif opt in ("-o", "--ofile"):
output_file = arg
if not input_file:
print(usage)
sys.exit(2)
elif not output_file:
output_file = 'my_results'
mpdf.go(input_file, output_file)
if __name__ =='__main__':
main(sys.argv[1:])
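# Example invocation (the input path is the placeholder sample file referenced
# in the comment near the top of this script):
#
#     python main.py -i "sample genomes/23andme_sample.txt" -o my_results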
| python |
import cv2
import numpy as np
import os
from glob import glob
#imagefiles = sorted(glob('./inputs/*.jpg'))
imagefiles = glob('./inputs/*.jpg')
images = []
for filename in imagefiles:
img = cv2.imread(filename)
images.append(img)
stitcher = cv2.Stitcher.create()
status, res = stitcher.stitch(images)
if status != cv2.Stitcher_OK:
    raise RuntimeError('Stitching failed with status %d' % status)
cv2.imshow('Panorama', res[100:-100, 50:-50])
cv2.waitKey()
| python |
import streamlit as st
import time
from helpers import *
from streamlit import caching
from streamlit.script_runner import RerunException
@st.cache(suppress_st_warning=True) # 👈 Changed this
def expensive_computation(a, b):
# 👇 Added this
st.write("Cache miss: expensive_computation(", a, ",", b, ") ran")
time.sleep(2) # This makes the function take 2s to run
return a * b
a = 2
b = 21
res = expensive_computation(a, b)
#select_period_input_cache()
st.write("Result:", res)
# Streamlit widgets automatically run the script from top to bottom. Since
# this button is not connected to any other logic, it just causes a plain
# rerun.
st.button("Re-run")
my_slot0 = st.sidebar.empty()
my_slot1 = st.sidebar.empty()
my_slot0.info("Clear cache")
if my_slot1.button("Clear"):
my_slot0.error("Do you really, really, wanna do this?")
if my_slot1.button("Yes I'm ready to rumble"):
caching.clear_cache()
st.balloons()
my_slot0.error("Cache is cleared, please reload to scrape new values")
time.sleep(10)
if my_slot1.button("reload"):
raise RerunException
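# To try this script, save it and launch it through the Streamlit CLI
# (the file name is a placeholder):
#
#     streamlit run cache_demo.py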
| python |
{
"targets": [
{
"target_name": "equihashverify",
"dependencies": [
],
"sources": [
"src/blake/blake2-config.h",
"src/blake/blake2-impl.h",
"src/blake/blake2-round.h",
"src/blake/blake2.h",
"src/blake/blake2b-load-sse2.h",
"src/blake/blake2b-load-sse41.h",
"src/blake/blake2b-round.h",
"src/blake/blake2b.cpp",
"src/equi/equihash.cpp",
"src/equi/endian.c",
"equihashverify.cc",
],
"include_dirs": [
"<!(node -e \"require('nan')\")",
],
"defines": [
],
"cflags_cc": [
"-std=c++11",
"-Wl,--whole-archive",
"-fPIC",
"-fexceptions",
],
"link_settings": {
"libraries": [
"-Wl,-rpath,./build/Release/",
]
},
"conditions": [
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES'
}
}]
]
}
]
}
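# To build this addon, the usual node-gyp workflow applies (assumes node-gyp
# and the 'nan' dependency referenced above are installed):
#
#     node-gyp configure
#     node-gyp build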
| python |
#Load libraries.
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.metrics.cluster import adjusted_rand_score
import phenograph
import matplotlib.pyplot as plt
from pylab import *
#Write function.
#Accept a dictionary of normalized matrices where the keys are downsample levels (0.1 to 1).
#Would run this method once per normalization method.
#Returns a single list.
def adjusted_rand_score_vector(normalized_matrices):
PCA_model = PCA(n_components=1000,svd_solver='randomized')
PC_column_names = ['PC' + str(i) for i in list(range(1,1001))]
components_normed_data_full = pd.DataFrame(data = PCA_model.fit_transform(normalized_matrices[1]),columns = PC_column_names)
full_communities, full_graph, full_Q = phenograph.cluster(components_normed_data_full)
adj_rand_scores = []
for split in list(np.array(range(1,10))/10):
components_normed_data_downsample = pd.DataFrame(data = PCA_model.fit_transform(normalized_matrices[split]),columns = PC_column_names)
downsample_communities,downsample_graph,downsample_Q = phenograph.cluster(components_normed_data_downsample)
adj_rand_scores.append(adjusted_rand_score(full_communities,downsample_communities))
return adj_rand_scores
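# Hypothetical usage sketch (the load-and-normalize helper and the key list are
# placeholders, not part of this script): build a dict keyed by downsample
# level (0.1 ... 0.9 plus 1 for the full data) and plot the resulting scores.
#
#     normalized = {lvl: my_normalize(lvl) for lvl in
#                   [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]}
#     scores = adjusted_rand_score_vector(normalized)
#     plt.plot([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], scores)
#     plt.xlabel('Downsample level'); plt.ylabel('Adjusted Rand index')
#     plt.show()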
| python |
#
# RegistrationManager.py
#
# (c) 2020 by Andreas Kraft
# License: BSD 3-Clause License. See the LICENSE file for further details.
#
# Managing resource / AE registrations
#
from Logging import Logging
from typing import Tuple, List
from Constants import Constants as C
from Configuration import Configuration
from resources.Resource import Resource
import CSE, Utils
from resources import ACP
class RegistrationManager(object):
def __init__(self) -> None:
Logging.log('RegistrationManager initialized')
def shutdown(self) -> None:
Logging.log('RegistrationManager shut down')
#########################################################################
#
# Handle new resources in general
#
def checkResourceCreation(self, resource: Resource, originator: str, parentResource: Resource = None) -> Tuple[str, int, str]:
if resource.ty == C.tAE:
if (originator := self.handleAERegistration(resource, originator, parentResource)) is None: # assigns new originator
return None, C.rcBadRequest, 'cannot register AE'
if resource.ty == C.tCSR:
if not self.handleCSRRegistration(resource, originator):
return None, C.rcBadRequest, 'cannot register CSR'
# Test and set creator attribute.
rc, msg = self.handleCreator(resource, originator)
if rc != C.rcOK:
return None, rc, msg
# ACPI assignments
if resource.ty != C.tAE: # Don't handle AE's, this was already done already in the AE registration
if resource.inheritACP:
del resource['acpi']
elif resource.acpi is None:
# If no ACPI is given, then inherit it from the parent,
				# except when the parent is the CSE or the parent acpi is empty, then use the default
if parentResource.ty != C.tCSEBase and parentResource.acpi is not None:
resource['acpi'] = parentResource.acpi
elif parentResource.ty == C.tACP:
pass # Don't assign any ACPI when the parent is an ACP
else:
resource['acpi'] = [ Configuration.get('cse.security.defaultACPI') ] # Set default ACPIRIs
return originator, C.rcOK, None
# Check for (wrongly) set creator attribute as well as assign it to allowed resources.
def handleCreator(self, resource: Resource, originator: str) -> Tuple[int, str]:
# Check whether cr is set. This is wrong
if resource.cr is not None:
Logging.logWarn('Setting "creator" attribute is not allowed.')
return C.rcBadRequest, 'setting "creator" attribute is not allowed'
# Set cr for some of the resource types
if resource.ty in C.tCreatorAllowed:
resource['cr'] = Configuration.get('cse.originator') if originator in ['C', 'S', '', None ] else originator
return C.rcOK, None
def checkResourceDeletion(self, resource: Resource, originator: str) -> Tuple[bool, str, str]:
if resource.ty == C.tAE:
if not self.handleAEDeRegistration(resource):
return False, originator, 'cannot deregister AE'
if resource.ty == C.tCSR:
if not self.handleCSRDeRegistration(resource):
return False, originator, 'cannot deregister CSR'
return True, originator, None
#########################################################################
#
# Handle AE registration
#
def handleAERegistration(self, ae: Resource, originator: str, parentResource: Resource) -> str:
""" This method creates a new originator for the AE registration, depending on the method choosen."""
# check for empty originator and assign something
if originator is None or len(originator) == 0:
originator = 'C'
		# Check for allowed originator
# TODO also allow when there is an ACP?
if not Utils.isAllowedOriginator(originator, Configuration.get('cse.registration.allowedAEOriginators')):
Logging.logDebug('Originator not allowed')
return None
# Assign originator for the AE
if originator == 'C':
originator = Utils.uniqueAEI('C')
elif originator == 'S':
originator = Utils.uniqueAEI('S')
elif originator is not None:
originator = Utils.getIdFromOriginator(originator)
# elif originator is None or len(originator) == 0:
# originator = Utils.uniqueAEI('S')
Logging.logDebug('Registering AE. aei: %s ' % originator)
ae['aei'] = originator # set the aei to the originator
ae['ri'] = Utils.getIdFromOriginator(originator, idOnly=True) # set the ri of the ae to the aei (TS-0001, 10.2.2.2)
# Verify that parent is the CSEBase, else this is an error
if parentResource is None or parentResource.ty != C.tCSEBase:
return None
# Create an ACP for this AE-ID if there is none set
if ae.acpi is None or len(ae.acpi) == 0:
Logging.logDebug('Adding ACP for AE')
cseOriginator = Configuration.get('cse.originator')
# Add ACP for remote CSE to access the own CSE
acpRes = self._createACP(parentResource=parentResource,
rn=C.acpPrefix + ae.rn,
createdByResource=ae.ri,
originators=[ originator, cseOriginator ],
permission=Configuration.get('cse.acp.pv.acop'))
if acpRes[0] is None:
return None
ae['acpi'] = [ acpRes[0].ri ] # Set ACPI (anew)
# Add the AE to the accessCSEBase ACP so that it can at least retrieve the CSEBase
self._addToAccessCSBaseACP(ae.aei)
return originator
#
# Handle AE deregistration
#
def handleAEDeRegistration(self, resource: Resource) -> bool:
# remove the before created ACP, if it exist
		Logging.logDebug('Deregistering AE. aei: %s ' % resource.aei)
Logging.logDebug('Removing ACP for AE')
acpSrn = '%s/%s%s' % (Configuration.get('cse.rn'), C.acpPrefix, resource.rn)
self._removeACP(srn=acpSrn, resource=resource)
# Remove from accessCSEBaseACP
self._removeFromAccessCSEBaseACP(resource.aei)
return True
#########################################################################
#
# Handle CSR registration
#
def handleCSRRegistration(self, csr: Resource, originator: str) -> bool:
Logging.logDebug('Registering CSR. csi: %s ' % csr['csi'])
# Create an ACP for this CSR if there is none set
Logging.logDebug('Adding ACP for CSR')
cseOriginator = Configuration.get('cse.originator')
localCSE, _, _ = Utils.getCSE()
# Add ACP for remote CSE to access the own CSE
if csr.acpi is None or len(csr.acpi) == 0:
acp = self._createACP(parentResource=localCSE,
rn='%s%s' % (C.acpPrefix, csr.rn),
createdByResource=csr.ri,
originators=[ originator, cseOriginator ],
permission=C.permALL)
if acp[0] is None:
return False
csr['acpi'] = [ acp[0].ri ] # Set ACPI (anew)
# Allow remote CSE to access the CSE, at least to read
self._addToAccessCSBaseACP(originator)
return True
#
# Handle CSR deregistration
#
def handleCSRDeRegistration(self, csr: Resource) -> bool:
		Logging.logDebug('Deregistering CSR. csi: %s ' % csr['csi'])
# remove the before created ACP, if it exist
Logging.logDebug('Removing ACPs for CSR')
localCSE, _, _ = Utils.getCSE()
# Retrieve CSR ACP
# This might fail (which is okay!), because the ACP was not created during
# the registration of the CSR (identified by the rn that includes the
# name of the CSR)
acpi = '%s/%s%s' % (localCSE.rn, C.acpPrefix, csr.rn)
self._removeACP(srn=acpi, resource=csr)
# Remove from accessCSEBaseACP
self._removeFromAccessCSEBaseACP(csr.csi)
return CSE.dispatcher.updateResource(localCSE, doUpdateCheck=False)[0] is not None
#########################################################################
def _createACP(self, parentResource: Resource = None, rn: str = None, createdByResource: str = None, originators: List[str] = None, permission: int = None) -> Tuple[Resource, int, str]:
""" Create an ACP with some given defaults. """
if parentResource is None or rn is None or originators is None or permission is None:
return None, C.rcBadRequest, 'missing attribute(s)'
# Remove existing ACP with that name first
acpSrn = '%s/%s' % (Configuration.get('cse.rn'), rn)
if (acpRes := CSE.dispatcher.retrieveResource(id=acpSrn))[1] == C.rcOK:
CSE.dispatcher.deleteResource(acpRes[0]) # ignore errors
# Create the ACP
cseOriginator = Configuration.get('cse.originator')
selfPermission = Configuration.get('cse.acp.pvs.acop')
origs = originators.copy()
origs.append(cseOriginator) # always append cse originator
acp = ACP.ACP(pi=parentResource.ri, rn=rn, createdInternally=createdByResource)
acp.addPermission(origs, permission)
acp.addSelfPermission([ cseOriginator ], selfPermission)
if (res := self.checkResourceCreation(acp, cseOriginator, parentResource))[0] is None:
return None, res[1], res[2]
return CSE.dispatcher.createResource(acp, parentResource=parentResource, originator=cseOriginator)
def _removeACP(self, srn: str, resource: Resource) -> Tuple[Resource, int, str]:
""" Remove an ACP created during registration before. """
if (acpRes := CSE.dispatcher.retrieveResource(id=srn))[1] != C.rcOK:
Logging.logWarn('Could not find ACP: %s' % srn) # ACP not found, either not created or already deleted
else:
# only delete the ACP when it was created in the course of AE registration
if (ri := acpRes[0].createdInternally()) is not None and resource.ri == ri:
return CSE.dispatcher.deleteResource(acpRes[0])
return None, C.rcOK, None
def _addToAccessCSBaseACP(self, originator: str) -> None:
if (accessACP := CSE.dispatcher.retrieveResource(Configuration.get('cse.security.csebaseAccessACPI')))[0] is not None:
accessACP[0].addPermission([originator], C.permRETRIEVE)
accessACP[0].dbUpdate()
def _removeFromAccessCSEBaseACP(self, originator: str) -> None:
if (accessACP := CSE.dispatcher.retrieveResource(Configuration.get('cse.security.csebaseAccessACPI')))[0] is not None:
accessACP[0].removePermissionForOriginator(originator)
accessACP[0].dbUpdate()
| python |
#!/bin/python3
import sys
t = int(input().strip())
for a0 in range(t):
n, k = input().strip().split(' ')
n, k = [int(n),int(k)]
a = [int(a_temp) for a_temp in input().strip().split(' ')]
arrived_on_time = 0
for student_arrival in a:
if student_arrival <= 0:
arrived_on_time += 1
if arrived_on_time >= k:
print("NO")
else:
print("YES")
| python |
"""Tests for clover.data_ingest.parsing.parsers.table_structures"""
# pylint: disable=too-many-lines
import copy
import pytest
import sqlalchemy as sa
import sqlalchemy.dialects.postgresql as sa_pg
import sqlalchemy.sql.elements as sa_elements
import sqlalchemy.sql.functions as sa_func
from yalchemy import table_structures
# COLUMN DEFAULTS
def test_column_default_from_dict():
""" Test that we build this ColumnDefault object correctly from a dict """
col_default = table_structures.ColumnDefault.from_dict(
{'type': 'expression', 'value': 'foobar'}
)
assert col_default.type == table_structures.ColumnDefaultType.expression
assert col_default.value == 'foobar'
col_default = table_structures.ColumnDefault.from_dict(
{'type': 'expression', 'value': 'NOW()'},
)
assert col_default.type == table_structures.ColumnDefaultType.expression
assert col_default.value == 'NOW()'
col_default = table_structures.ColumnDefault.from_dict(
{'type': 'sequence', 'value': 'schema.id_seq'},
)
assert col_default.type == table_structures.ColumnDefaultType.sequence
assert col_default.value == 'schema.id_seq'
with pytest.raises(table_structures.InvalidColumnDefault) as exc:
table_structures.ColumnDefault.from_dict(
{'type': 'sequence', 'value': 'unqualified_seq'},
)
assert 'Schema must be specified for sequence types' in str(exc.value)
@pytest.mark.parametrize('sqla_server_default, expected_default_type, expected_value', [
# unquoted string
('foobar',
table_structures.ColumnDefaultType.expression, 'foobar'),
# quoted strings
('"foobar"',
table_structures.ColumnDefaultType.expression, 'foobar'),
("'foobar'",
table_structures.ColumnDefaultType.expression, 'foobar'),
# standard expression
(sa.text('NOW()'),
table_structures.ColumnDefaultType.expression, 'NOW()'),
# plain Sequence
(sa.Sequence('id_seq', schema='schema'),
table_structures.ColumnDefaultType.sequence, 'schema.id_seq'),
# sequences resulting from SQLAlchemy internals or table reflection
(sa.text('nextval(\'"schema.primary_key_seq"\'::regclass)'),
table_structures.ColumnDefaultType.sequence, 'schema.primary_key_seq'),
(sa.text("nextval('schema.primary_key_seq'::regclass)"),
table_structures.ColumnDefaultType.sequence, 'schema.primary_key_seq'),
(sa.text("nextval('schema.primary_key_seq')"),
table_structures.ColumnDefaultType.sequence, 'schema.primary_key_seq'),
(sa_func.next_value(sa.Sequence('primary_key_seq', schema='schema')),
table_structures.ColumnDefaultType.sequence, 'schema.primary_key_seq'),
])
def test_column_default_from_sqla(sqla_server_default, expected_default_type,
expected_value):
col_default = table_structures.ColumnDefault.from_sqla(sqla_server_default)
assert col_default.type == expected_default_type
assert col_default.value == expected_value
def test_column_default_from_invalid_sqla():
with pytest.raises(TypeError) as exc:
table_structures.ColumnDefault.from_sqla(1)
assert 'must be a sequence, string or SQLAlchemy TextClause' in str(exc.value)
with pytest.raises(table_structures.InvalidColumnDefault) as exc:
table_structures.ColumnDefault.from_sqla(sa.text("nextval('primary_key_seq')"))
assert 'Schema must be specified for sequence types' in str(exc.value)
@pytest.mark.parametrize('column ,expected_metadata', [
(table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression, '1'),
{'type': 'expression', 'value': '1'}),
(table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression, "'foobar'"),
{'type': 'expression', 'value': 'foobar'}),
(table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression, 'NOW()'),
{'type': 'expression', 'value': 'NOW()'}),
(table_structures.ColumnDefault(table_structures.ColumnDefaultType.sequence,
'schema.my_col_seq'),
{'type': 'sequence', 'value': 'schema.my_col_seq'}),
])
def test_column_default_to_dict(column, expected_metadata):
assert column.to_dict() == expected_metadata
def test_column_default_to_sqla():
col_default = table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression,
'foobar')
sa_obj = col_default.to_sqla()
assert isinstance(sa_obj, sa_elements.TextClause)
assert str(sa_obj) == 'foobar'
col_default = table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression,
'NOW()')
sa_obj = col_default.to_sqla()
assert isinstance(sa_obj, sa_elements.TextClause)
assert str(sa_obj) == 'NOW()'
col_default = table_structures.ColumnDefault(
table_structures.ColumnDefaultType.sequence,
'schema.id_seq'
)
sa_obj = col_default.to_sqla()
assert isinstance(sa_obj, sa.Sequence)
assert sa_obj.name == 'id_seq'
assert sa_obj.schema == 'schema'
@pytest.mark.parametrize('left, right, expected', [
# same parameters
(table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression, 'NOW()'),
table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression, 'NOW()'),
table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression, 'NOW()')),
# different default types
pytest.mark.xfail(
(table_structures.ColumnDefault(table_structures.ColumnDefaultType.sequence,
'myschema.my_seq'),
table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression,
'myschema.my_seq'),
None),
raises=table_structures.MergeError, strict=True),
# different default expressions
pytest.mark.xfail(
(table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression, 'foo'),
table_structures.ColumnDefault(table_structures.ColumnDefaultType.expression, 'bar'),
None),
raises=table_structures.MergeError, strict=True),
])
def test_column_default_or(left, right, expected):
assert (left | right) == expected
def test_column_default_copy():
column_default = table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression, 'foobar'
)
copy1 = copy.copy(column_default)
assert copy1 == column_default
assert copy1 is not column_default
# COLUMNS
def test_column_from_dict():
""" Test that we build this Column object correctly from a dict """
col = table_structures.Column.from_dict(
{'name': 'col1', 'datatype': 'text', 'format': 'abc', 'required': True})
assert col.name == 'col1'
assert col.datatype == 'text'
assert col.format == 'abc'
assert col.required is True
assert col.default is None
col = table_structures.Column.from_dict(
{'name': 'col1', 'datatype': 'text', 'required': False})
assert col.name == 'col1'
assert col.datatype == 'text'
assert col.format is None
assert col.required is False
assert col.default is None
col = table_structures.Column.from_dict(
{'name': 'col1', 'datatype': 'text', 'required': True,
'default': {'value': 'barfoo', 'type': 'expression'}},
)
assert col.name == 'col1'
assert col.datatype == 'text'
assert col.format is None
assert col.required is True
assert col.default == table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression, 'barfoo')
col = table_structures.Column.from_dict(
{'name': 'col1', 'datatype': 'timestamptz', 'required': True,
'default': {'value': 'NOW()', 'type': 'expression'}},
)
assert col.name == 'col1'
assert col.datatype == 'timestamp_with_time_zone'
assert col.format is None
assert col.required is True
assert isinstance(col.default, table_structures.ColumnDefault)
assert col.default.type == table_structures.ColumnDefaultType.expression
assert col.default.value == 'NOW()'
col = table_structures.Column.from_dict(
{'name': 'col1', 'datatype': 'bigint', 'required': True,
'default': {'value': 'schema.primary_key_seq', 'type': 'sequence'}},
)
assert col.name == 'col1'
assert col.datatype == 'bigint'
assert col.format is None
assert col.required is True
assert isinstance(col.default, table_structures.ColumnDefault)
assert col.default.type == table_structures.ColumnDefaultType.sequence
assert col.default.value == 'schema.primary_key_seq'
def test_column_from_sqla():
""" Test that we take a sqlalchemy.Column and make the yalchemy column """
col = table_structures.Column.from_sqla(
sa.Column('col1', sa.VARCHAR(255), primary_key=True, nullable=False))
assert col.name == 'col1'
assert col.datatype == 'varchar'
assert col.format == [255]
assert col.required is True
assert col.default is None
col = table_structures.Column.from_sqla(
sa.Column('col1', sa.Integer, nullable=True))
assert col.name == 'col1'
assert col.datatype == 'integer'
assert col.format is None
assert col.required is False
assert col.default is None
col = table_structures.Column.from_sqla(
sa.Column('col1', sa.Integer, nullable=False,
# SQLAlchemy requires all server-side default primitives to be strings
server_default='1'))
assert col.name == 'col1'
assert col.datatype == 'integer'
assert col.format is None
assert col.required is True
assert col.default == table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression, '1')
col = table_structures.Column.from_sqla(
sa.Column('col1', sa_pg.UUID(), nullable=False,
server_default=sa.text('uuid_generate_v4()')))
assert col.name == 'col1'
assert col.datatype == 'uuid'
assert col.format is None
assert col.required is True
assert col.default == table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression, 'uuid_generate_v4()')
# pylint: disable=no-value-for-parameter
test_sequence = sa.Sequence('primary_key_seq', schema='schema')
col = table_structures.Column.from_sqla(
sa.Column('col1', sa.BigInteger(),
test_sequence,
server_default=test_sequence.next_value(),
nullable=False))
# pylint: enable=no-value-for-parameter
assert col.name == 'col1'
assert col.datatype == 'bigint'
assert col.format is None
assert col.required is True
assert col.default == table_structures.ColumnDefault(
table_structures.ColumnDefaultType.sequence, 'schema.primary_key_seq')
# should fail without schema
test_sequence = sa.Sequence('primary_key_seq')
with pytest.raises(table_structures.InvalidColumnDefault) as exc:
# pylint: disable=no-value-for-parameter
table_structures.Column.from_sqla(
sa.Column('col1', sa.BigInteger(),
test_sequence,
server_default=test_sequence.next_value(),
nullable=False))
# pylint: enable=no-value-for-parameter
assert 'must be qualified with a schema' in str(exc.value)
@pytest.mark.parametrize('column, expected_metadata', [
(table_structures.Column(name='foo', datatype='string', doc='foo doc'),
{'name': 'foo', 'datatype': 'string', 'required': False, 'doc': 'foo doc'}),
(table_structures.Column(name='foo', datatype='string', required=True),
{'name': 'foo', 'datatype': 'string', 'required': True}),
(table_structures.Column(name='bar', datatype='boolean', format_='t|f'),
{'name': 'bar', 'datatype': 'boolean', 'format': 't|f', 'required': False}),
(table_structures.Column(name='zing', datatype='string', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression,
'zang')),
{'name': 'zing', 'datatype': 'string', 'required': True,
'default': {'value': 'zang', 'type': 'expression'}}),
(table_structures.Column(name='zing', datatype='timetz', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression,
'current_time')),
{'name': 'zing', 'datatype': 'time_with_time_zone', 'required': True,
'default': {'value': 'current_time', 'type': 'expression'}}),
(table_structures.Column(name='zang', datatype='bigint', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.sequence,
'schema.primary_key_seq')),
{'name': 'zang', 'datatype': 'bigint', 'required': True,
'default': {'value': 'schema.primary_key_seq', 'type': 'sequence'}}),
])
def test_column_to_dict(column, expected_metadata):
assert column.to_dict() == expected_metadata
@pytest.mark.parametrize(
'yalchemy_col,sa_col,default_sa_obj_cls,default_sa_expression', [
(table_structures.Column(name='my_col', datatype='integer', required=False),
sa.Column('my_col', sa.Integer, nullable=True),
None,
None),
(table_structures.Column(name='my_col', datatype='varchar', format_=[123], required=True),
sa.Column('my_col', sa.VARCHAR(123), nullable=False),
None,
None),
(table_structures.Column(name='my_col', datatype='uuid', format_=[True], required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression,
'uuid_generate_v4()')),
sa.Column('my_col', sa_pg.UUID(as_uuid=True), nullable=False,
server_default=sa.text('uuid_generate_v4()')),
sa_elements.TextClause,
'uuid_generate_v4()'),
(table_structures.Column(name='my_col', datatype='integer', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression,
'1')),
sa.Column('my_col', sa.Integer, nullable=False, server_default='1'),
sa_elements.TextClause,
'1'),
(table_structures.Column(name='my_col', datatype='integer', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.sequence,
'schema.my_col_seq')),
# pylint: disable=no-value-for-parameter
sa.Column('my_col', sa.Integer, sa.Sequence('my_col_seq', schema='schema'),
nullable=False,
server_default=sa.Sequence('my_col_seq', schema='schema').next_value()),
# pylint: enable=no-value-for-parameter
sa_func.next_value,
'schema.my_col_seq'),
]
)
def test_column_to_sqla(yalchemy_col, sa_col, default_sa_obj_cls, default_sa_expression):
""" Test that we turn a yalchemy column into a sqlalchemy column """
generated_col = yalchemy_col.to_sqla()
assert generated_col.name == sa_col.name
assert generated_col.type.compile(sa_pg.dialect()) == \
sa_col.type.compile(sa_pg.dialect())
assert generated_col.nullable == sa_col.nullable
if default_sa_obj_cls is not None:
assert isinstance(generated_col.server_default, sa.DefaultClause)
wrapped_server_default = generated_col.server_default.arg
assert isinstance(wrapped_server_default, default_sa_obj_cls)
if default_sa_obj_cls == sa_elements.TextClause:
value = wrapped_server_default.text
else:
assert default_sa_obj_cls == sa_func.next_value
value = '{}.{}'.format(
wrapped_server_default.sequence.schema,
wrapped_server_default.sequence.name,
)
assert value == default_sa_expression
@pytest.mark.parametrize('left, right, expected', [
(table_structures.Column('foo', 'string'),
table_structures.Column('foo', 'string'),
table_structures.Column('foo', 'string')),
(table_structures.Column('foo', 'string'),
None,
table_structures.Column('foo', 'string')),
(None,
table_structures.Column('foo', 'string'),
table_structures.Column('foo', 'string')),
pytest.mark.xfail(
(table_structures.Column('foo', 'string'),
table_structures.Column('bar', 'string'),
table_structures.Column('foo', 'string')),
raises=table_structures.MergeError, strict=True),
pytest.mark.xfail(
(table_structures.Column('foo', 'string'),
table_structures.Column('foo', 'integer'),
table_structures.Column('foo', 'string')),
raises=table_structures.MergeError, strict=True),
(table_structures.Column('foo', 'boolean', format_='t|f'),
table_structures.Column('foo', 'boolean', format_='t|f'),
table_structures.Column('foo', 'boolean', format_='t|f')),
pytest.mark.xfail(
(table_structures.Column('foo', 'boolean', format_='t|f'),
table_structures.Column('foo', 'boolean', format_='1|0'),
table_structures.Column('foo', 'boolean', format_='t|f')),
raises=table_structures.MergeError, strict=True),
(table_structures.Column(name='foo', datatype='string',
default={'value': 'foo', 'type': 'expression'}),
table_structures.Column(name='foo', datatype='string',
default={'value': 'foo', 'type': 'expression'}),
table_structures.Column(name='foo', datatype='string',
default={'value': 'foo', 'type': 'expression'})),
# different default expressions
pytest.mark.xfail(
(table_structures.Column(name='foo', datatype='string',
default={'value': 'foo', 'type': 'expression'}),
table_structures.Column(name='foo', datatype='string',
default={'value': 'bar', 'type': 'expression'}),
table_structures.Column(name='foo', datatype='string',
default={'value': 'NOTPOSSIBLE', 'type': 'expression'})),
raises=table_structures.MergeError, strict=True),
    # left - no default, right - has default
pytest.mark.xfail(
(table_structures.Column(name='foo', datatype='string'),
table_structures.Column(name='foo', datatype='string',
default={'value': 'foo', 'type': 'expression'}),
table_structures.Column(name='foo', datatype='string',
default={'value': 'NOTPOSSIBLE', 'type': 'expression'})),
raises=table_structures.MergeError, strict=True),
], ids=str)
def test_column_or(left, right, expected):
assert (left | right) == expected
def test_column_copy():
column = table_structures.Column('foo', 'boolean', [], False)
copy1 = copy.copy(column)
assert copy1 == column
assert copy1 is not column
copy2 = copy.deepcopy(column)
assert copy2 == column
assert copy2 is not column
assert copy2.format is not column.format
# FOREIGN KEYS
def test_foreign_key_from_dict():
""" Test that we get a yalchemy foreign key from a dict correctly """
fkey = table_structures.ForeignKey.from_dict(
{'column': 'user_id', 'remote_table': 'user', 'remote_column': 'users'})
assert fkey.column == 'user_id'
assert fkey.remote_table == 'user'
assert fkey.remote_column == 'users'
def test_foreign_key_from_sqla():
""" Test that we take a sqlalchemy.ForeignKeyConstraints and
make the yalchemy foreign key """
fkey = table_structures.ForeignKey.from_sqla(
sa.ForeignKeyConstraint(['test_col'], ['other_table.other_col']))
assert fkey.column == 'test_col'
assert fkey.remote_table == 'other_table'
assert fkey.remote_column == 'other_col'
@pytest.mark.parametrize('fkey, expected_metadata', [
(table_structures.ForeignKey('other_foo', 'other', 'foo'),
{'column': 'other_foo',
'remote_table': 'other', 'remote_column': 'foo'}),
(table_structures.ForeignKey(['other_foo', 'other_bar'], 'other', ['foo', 'bar']),
{'column': ['other_foo', 'other_bar'],
'remote_table': 'other', 'remote_column': ['foo', 'bar']}),
])
def test_foreign_key_to_dict(fkey, expected_metadata):
assert fkey.to_dict() == expected_metadata
def test_foreign_key_to_sqla():
""" Test that we make the ForeignKeyConstraint correctly in sqlalchemy """
fkey_obj = table_structures.ForeignKey('test_col', 'other_table', 'other_col')
fkey = fkey_obj.to_sqla()
assert isinstance(fkey, sa.ForeignKeyConstraint)
assert len(fkey.elements) == 1
assert fkey.column_keys == ['test_col']
assert fkey.elements[0].target_fullname == 'other_table.other_col'
def test_foreign_key_hashing():
fkey1 = table_structures.ForeignKey('foo', 'bar', 'baz')
fkey2 = table_structures.ForeignKey('foo', 'bar', 'baz')
assert {fkey1: 1}[fkey2] == 1
assert {fkey1} == {fkey2}
assert {fkey1, fkey2} == {fkey1}
# INDEXES
@pytest.mark.parametrize('idx, expected_metadata', [
(table_structures.Index(['col_1']),
{'columns': ['col_1']}),
(table_structures.Index(['col_1', 'col_2']),
{'columns': ['col_1', 'col_2']}),
])
def test_index_to_dict(idx, expected_metadata):
assert idx.to_dict() == expected_metadata
def test_index_from_dict():
""" Test that we get a yalchemy Index from a dict correctly """
index = table_structures.Index.from_dict(
{'columns': ['col1', 'col2']})
assert index.columns == ['col1', 'col2']
def test_index_from_sqla():
""" Test that we take a sqlalchemy.Index and make the yalchemy Index """
index = table_structures.Index.from_sqla(
sa.Index('some_index', 'a_col', 'another_col'))
assert index.columns == ['a_col', 'another_col']
def test_index_to_sqla_unnamed():
""" Test that we make the sa.Index correctly from a yalchemy Index """
index_obj = table_structures.Index(columns=['col1', 'col2'])
index = index_obj.to_sqla(table_name='123')
assert isinstance(index, sa.Index)
# this is the correct hash for this table + column names
assert index.name == 'ix__18122589__123'
assert set(index.expressions) == {'col1', 'col2'}
def test_index_to_sqla_named():
""" Test that we make the sa.Index correctly from a yalchemy Index """
index_obj = table_structures.Index(columns=['col1', 'col2'], name='my_index')
index = index_obj.to_sqla(table_name='123')
assert isinstance(index, sa.Index)
assert index.name == 'my_index'
assert set(index.expressions) == {'col1', 'col2'}
def test_index_hashing():
idx1 = table_structures.Index(['col_1', 'col_2'])
idx2 = table_structures.Index(['col_1', 'col_2'])
assert {idx1: 1}[idx2] == 1
assert {idx1} == {idx2}
assert {idx1, idx2} == {idx1}
def test_index_str_repr():
idx1 = table_structures.Index(['col_1', 'col_2'])
assert str(idx1) == "Index(columns=['col_1', 'col_2'])"
idx2 = table_structures.Index(['col_1', 'col_2'], name='my_fixed_name')
assert str(idx2) == "Index(columns=['col_1', 'col_2'], name='my_fixed_name')"
# UNIQUE CONSTRAINTS
@pytest.mark.parametrize('constraint, expected_metadata', [
(table_structures.UniqueConstraint(['col_1']),
{'columns': ['col_1']}),
(table_structures.UniqueConstraint(['col_2', 'col_1']),
{'columns': ['col_2', 'col_1']}),
])
def test_unique_to_dict(constraint, expected_metadata):
assert constraint.to_dict() == expected_metadata
def test_unique_from_dict():
""" Test that we get a yalchemy UniqueConstraint from a dict correctly """
constraint = table_structures.UniqueConstraint.from_dict(
{'columns': ['col1', 'col2']})
assert constraint.columns == ['col1', 'col2']
def test_unique_from_sqla():
""" Test that we take a sqlalchemy.UniqueConstraint and make the yalchemy UniqueConstraint """
# unique constraint needs to be bound to a table
sa_table = sa.Table(
'test_table',
sa.MetaData(),
sa.Column('a_col', sa.Integer, primary_key=True),
sa.Column('another_col', sa.Text),
sa.UniqueConstraint('a_col', 'another_col', name='some_constraint'),
schema='test_schema')
unique_constraint = next(
c for c in sa_table.constraints if isinstance(c, sa.UniqueConstraint)
) # pragma: no cover
constraint = table_structures.UniqueConstraint.from_sqla(unique_constraint)
assert constraint.columns == ['a_col', 'another_col']
def test_unique_to_sqla_unnamed():
""" Test that we make the sa.UniqueConstraint correctly from a yalchemy UniqueConstraint """
constraint_obj = table_structures.UniqueConstraint(columns=['col1', 'col2'])
constraint = constraint_obj.to_sqla(table_name='123')
assert isinstance(constraint, sa.UniqueConstraint)
# this is the correct hash for this table + column names
assert constraint.name == 'uq__18122589__123'
# must be bound to a table to verify the resulting columns
sa_table = sa.Table( # noqa: F841 # pylint: disable=unused-variable
'test_table',
sa.MetaData(),
sa.Column('col1', sa.Integer),
sa.Column('col2', sa.Integer),
constraint,
schema='test_schema')
assert {c.name for c in constraint.columns} == {'col1', 'col2'}
def test_unique_to_sqla_named():
""" Test that we make the sa.UniqueConstraint correctly from a yalchemy UniqueConstraint """
constraint_obj = table_structures.UniqueConstraint(
columns=['col1', 'col2'],
name='my_constraint'
)
constraint = constraint_obj.to_sqla(table_name='123')
assert isinstance(constraint, sa.UniqueConstraint)
assert constraint.name == 'my_constraint'
# must be bound to a table to verify the resulting columns
sa_table = sa.Table( # noqa: F841 # pylint: disable=unused-variable
'test_table',
sa.MetaData(),
sa.Column('col1', sa.Integer),
sa.Column('col2', sa.Integer),
constraint,
schema='test_schema')
assert {c.name for c in constraint.columns} == {'col1', 'col2'}
def test_unique_hashing():
unique1 = table_structures.UniqueConstraint(['col_1', 'col_2'])
unique2 = table_structures.UniqueConstraint(['col_1', 'col_2'])
assert {unique1: 1}[unique2] == 1
assert {unique1} == {unique2}
assert {unique1, unique2} == {unique1}
def test_unique_str_repr():
idx1 = table_structures.UniqueConstraint(['col_1', 'col_2'])
assert str(idx1) == "UniqueConstraint(columns=['col_1', 'col_2'])"
idx2 = table_structures.UniqueConstraint(['col_1', 'col_2'], name='my_fixed_name')
assert str(idx2) == "UniqueConstraint(columns=['col_1', 'col_2'], name='my_fixed_name')"
# CheckConstraint
@pytest.mark.parametrize('constraint, expected_metadata', [
(table_structures.CheckConstraint('check1', 'col1 > col2'),
{'name': 'check1', 'check': 'col1 > col2'}),
(table_structures.CheckConstraint('check2', '((col1 == col2))'),
{'name': 'check2', 'check': '((col1 == col2))'}),
])
def test_constraint_to_dict(constraint, expected_metadata):
""" Test that we convert a yalchemy CheckConstraint
into the proper dict format """
assert constraint.to_dict() == expected_metadata
def test_constraint_from_dict():
""" Test that we get a yalchemy CheckConstraint from a dict correctly """
constraint = table_structures.CheckConstraint.from_dict(
{'name': 'check1', 'check': 'col1 > col2'})
assert constraint.name == 'check1'
assert constraint.check == 'col1 > col2'
def test_constraint_from_sqla(transacted_postgresql_db):
""" Test that we take a sqlalchemy.Column and make the yalchemy CheckConstraint """
constraint = table_structures.CheckConstraint.from_sqla(
sa.CheckConstraint('col1 > col2', name='a_check'))
assert constraint.name == 'a_check'
assert constraint.check == 'col1 > col2'
# test from one without a name
constraint = table_structures.CheckConstraint.from_sqla(
sa.CheckConstraint('col1 > col2'))
assert constraint.name is None
assert constraint.check == 'col1 > col2'
# test when sql-alchemy reflection calls the name '_unnamed_'
constraint = table_structures.CheckConstraint.from_sqla(
sa.CheckConstraint(
'col1 > col2',
name=sa.sql.elements._defer_none_name(value='_unnamed_'),
)
)
assert constraint.name is None
assert constraint.check == 'col1 > col2'
def test_constraint_to_sqla():
""" Test that we make the sa.CheckConstraint correctly from a yalchemy CheckConstraint """
constraint_obj = table_structures.CheckConstraint(name='check1', check='col1 < col2')
constraint = constraint_obj.to_sqla()
assert isinstance(constraint, sa.CheckConstraint)
assert constraint.name == 'check1'
assert str(constraint.sqltext) == 'col1 < col2'
def test_constraint_hashing():
con1 = table_structures.CheckConstraint('check1', 'col1 > col2')
con2 = table_structures.CheckConstraint('check1', 'col1 > col2')
assert {con1: 1}[con2] == 1
assert {con1} == {con2}
assert {con1, con2} == {con1}
# TABLES
def test_table_from_dict():
""" Test that we build a yalchemy Table from a dict """
table_dict = {
'name': 'my_table',
'schema': 'schema',
'columns': [
{'name': 'col1', 'datatype': 'varchar', 'format': [123], 'required': False},
{'name': 'col2', 'datatype': 'integer', 'required': True},
{'name': 'col3', 'datatype': 'integer', 'required': True,
'default': {'value': '-1', 'type': 'expression'}},
],
'foreign_keys': [
{'column': 'col2', 'remote_table': 'other_table', 'remote_column': 'other_col'}
],
'indexes': [{'columns': ['col1', 'col2']}],
'unique_constraints': [{'columns': ['col3']}],
'primary_keys': ['col2', 'col1']
}
table = table_structures.Table.from_dict(table_dict)
assert table.name == 'my_table'
assert table.schema == 'schema'
assert table.columns == [
table_structures.Column(name='col1', datatype='varchar', format_=[123], required=False),
table_structures.Column(name='col2', datatype='integer', required=True),
table_structures.Column(name='col3', datatype='integer', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression,
'-1')),
]
assert table.foreign_keys == {
table_structures.ForeignKey(
column='col2', remote_table='other_table', remote_column='other_col'),
}
assert table.indexes == {
table_structures.Index(columns=['col1', 'col2'])
}
assert table.unique_constraints == {
table_structures.UniqueConstraint(['col3']),
}
assert table.primary_keys == ['col2', 'col1']
def test_table_from_sqla():
""" Test that we take a SQL Alchemy table and make a Table structure """
sa_table = sa.Table(
'test_table',
sa.MetaData(),
sa.Column('col1', sa.Integer, primary_key=True),
sa.Column('col2', sa.Text),
sa.Index('my_index', 'col1', 'col2'),
sa.ForeignKeyConstraint(['col1'], ['other_table.other_col']),
sa.UniqueConstraint('col2'),
sa.CheckConstraint('col1::text != col2', name='check1'),
schema='test_schema')
table = table_structures.Table.from_sqla(sa_table)
assert table.name == 'test_table'
assert table.schema == 'test_schema'
assert table.columns == [
table_structures.Column(name='col1', datatype='integer', format_=None, required=True),
table_structures.Column(name='col2', datatype='text', format_=None, required=False),
]
assert table.foreign_keys == {
table_structures.ForeignKey(
column='col1', remote_table='other_table', remote_column='other_col'),
}
assert table.indexes == {
table_structures.Index(columns=['col1', 'col2'])
}
assert table.primary_keys == ['col1']
assert table.unique_constraints == {
table_structures.UniqueConstraint(['col2'])
}
assert table.check_constraints == {
table_structures.CheckConstraint(
'check1', 'col1::text != col2')
}
def test_table_from_sqla_equality_from_to_yaml(transacted_postgresql_db):
"""
Test that a Table structure made from a reflected SQLAlchemy table is equal to the original.
Converts the table from and to yaml to ensure it can be serialized properly
"""
metadata = sa.MetaData(bind=transacted_postgresql_db.connection)
transacted_postgresql_db.connection.execute('''
create schema schema;
create table schema.other_table (
other_col integer unique
);
create sequence schema.my_table_col6_seq increment by 1 no minvalue no maxvalue;
create table schema.my_table (
col1 varchar(123),
col2 integer not null primary key references schema.other_table (other_col),
col3 integer not null unique,
col4 timestamp with time zone not null default now(),
col5 varchar(1) not null default 'Y',
-- fully defined default sequence
col6 integer not null default nextval('schema.my_table_col6_seq'::regclass),
-- default sequence shorthand
col7 serial
constraint check1 check ((col1 != 'value')),
constraint check2 check ((col1 != 'value') and (col2 != 0))
);
create index idx ON schema.my_table (col1, col2);
''')
table_dict = {
'name': 'my_table',
'schema': 'schema',
'columns': [
{'name': 'col1', 'datatype': 'varchar', 'format': [123], 'required': False},
{'name': 'col2', 'datatype': 'integer', 'required': True},
{'name': 'col3', 'datatype': 'integer', 'required': True},
{'name': 'col4', 'datatype': 'timestamptz', 'required': True,
'default': {'value': 'now()', 'type': 'expression'}},
{'name': 'col5', 'datatype': 'varchar', 'format': [1], 'required': True,
# SQLAlchemy includes explicit cast when reflecting plain string defaults
'default': {'value': "'Y'::character varying", 'type': 'expression'}},
{'name': 'col6', 'datatype': 'integer', 'required': True,
'default': {'value': 'schema.my_table_col6_seq', 'type': 'sequence'}},
{'name': 'col7', 'datatype': 'integer', 'required': True,
'default': {'value': 'schema.my_table_col7_seq', 'type': 'sequence'}},
],
'foreign_keys': [
{'column': 'col2', 'remote_table': 'schema.other_table', 'remote_column': 'other_col'}
],
'check_constraints': [
{'name': 'check1', 'check': "(col1 != 'value')"},
{'name': 'check2', 'check': "(col1 != 'value') and (col2 != 0)"}
],
'indexes': [{'columns': ['col1', 'col2']}],
'unique_constraints': [{'columns': ['col3']}],
'primary_keys': ['col2']
}
orig_table = table_structures.Table.from_dict(table_dict)
reflected_sa = sa.Table('my_table', metadata,
schema='schema',
autoload=True, autoload_with=transacted_postgresql_db.connection)
reflected_table_yaml = table_structures.Table.from_sqla(reflected_sa).to_yaml()
reflected_table = table_structures.Table.from_yaml(reflected_table_yaml)
assert reflected_table == orig_table
def test_geography_reflection(transacted_postgresql_db):
"""
Test that a geography column can be properly reflected
"""
metadata = sa.MetaData(bind=transacted_postgresql_db.connection)
transacted_postgresql_db.connection.execute('''
create schema schema;
create table schema.my_table (
zip_geocode geography(Point,4326)
);
''')
table_dict = {
'name': 'my_table',
'schema': 'schema',
'columns': [
{'name': 'zip_geocode', 'datatype': 'geography', 'format': ['point', 4326]}
]
}
orig_table = table_structures.Table.from_dict(table_dict)
reflected_sa = sa.Table('my_table', metadata,
schema='schema',
autoload=True, autoload_with=transacted_postgresql_db.connection)
reflected_table_yaml = table_structures.Table.from_sqla(reflected_sa).to_yaml()
reflected_table = table_structures.Table.from_yaml(reflected_table_yaml)
assert reflected_table == orig_table
def test_create_geography_column(transacted_postgresql_db):
"""
Test that a geograph column can be created in a table when converting a dict to sqlalchemy
"""
metadata = sa.MetaData(bind=transacted_postgresql_db.connection)
table_dict = {
'name': 'my_table',
'schema': 'schema',
'columns': [
{'name': 'zip_geocode', 'datatype': 'geography', 'format': ['point', 4326]}
]
}
table = table_structures.Table.from_dict(table_dict)
sqla_table = table.to_sqla(metadata)
transacted_postgresql_db.connection.execute('CREATE SCHEMA schema;')
sqla_table.create()
def test_table_to_dict():
""" Test that the whole table dict structure gets
created successfully """
table = table_structures.Table(
name='foo',
schema='test',
doc='my doc',
columns=[
table_structures.Column(name='id', datatype='integer', required=True, doc='id doc'),
table_structures.Column(name='other_id', datatype='integer'),
table_structures.Column(name='another_id', datatype='integer'),
table_structures.Column(name='source', datatype='uuid', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression,
'uuid_generate_v4()')),
],
primary_keys=['id', 'another_id', 'other_id'],
foreign_keys=[
table_structures.ForeignKey('other_id', 'other', 'id'),
],
indexes=[
table_structures.Index(['other_id']),
table_structures.Index(['other_id', 'id']),
],
unique_constraints=[
table_structures.UniqueConstraint(['another_id'], name='unique1'),
],
check_constraints=[
table_structures.CheckConstraint('check1', 'id != other_id')
])
assert table.to_dict() == {
'name': 'foo',
'schema': 'test',
'doc': 'my doc',
'columns': [
{'name': 'id', 'datatype': 'integer', 'required': True, 'doc': 'id doc'},
{'name': 'other_id', 'datatype': 'integer', 'required': False},
{'name': 'another_id', 'datatype': 'integer', 'required': False},
{'name': 'source', 'datatype': 'uuid', 'required': True,
'default': {'value': 'uuid_generate_v4()', 'type': 'expression'}},
],
'foreign_keys': [
{'column': 'other_id', 'remote_table': 'other', 'remote_column': 'id'},
],
'indexes': [
{'columns': ['other_id']},
{'columns': ['other_id', 'id']},
],
'unique_constraints': [
{'columns': ['another_id'], 'name': 'unique1'}
],
'primary_keys': ['id', 'another_id', 'other_id'],
'check_constraints': [{'name': 'check1', 'check': 'id != other_id'}]
}
def test_table_to_sqla():
""" Test that we tae a full table_structures.Table """
table_obj = table_structures.Table(
name='a_table',
schema='a_schema',
columns=[
table_structures.Column(name='col1', datatype='varchar', format_=[123],
required=False),
table_structures.Column(name='col2', datatype='integer', required=True),
table_structures.Column(name='col3', datatype='integer'),
table_structures.Column(name='col4', datatype='timestamptz', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.expression,
'now()')),
table_structures.Column(name='col5', datatype='bigint', required=True,
default=table_structures.ColumnDefault(
table_structures.ColumnDefaultType.sequence,
'a_schema.a_table_col5_seq')),
],
foreign_keys=[table_structures.ForeignKey('col2', 'other_table', 'other_col')],
indexes=[table_structures.Index(['col1'])],
primary_keys=['col2'],
unique_constraints=[table_structures.UniqueConstraint(['col3'], name='uq_col3')],
check_constraints=[table_structures.CheckConstraint('check1', 'col1::text != col2')])
meta = sa.MetaData()
sa_table = table_obj.to_sqla(metadata=meta)
assert sa_table.name == 'a_table'
assert sa_table.schema == 'a_schema'
assert {col.name for col in sa_table.c} == {'col1', 'col2', 'col3', 'col4', 'col5'}
assert [i.constraint.columns.keys() for i in sa_table.foreign_keys] == [['col2']]
assert [j.name for i in sa_table.indexes for j in i.expressions] == ['col1']
assert [(c.name, c.sqltext) for c in sa_table.constraints if isinstance(c, sa.CheckConstraint)]
assert [(c.name, [col.name for col in c.columns])
for c in sa_table.constraints if isinstance(c, sa.UniqueConstraint)] == \
[('uq_col3', ['col3'])]
assert sa_table.c.col2.primary_key
assert sa_table.c.col4.server_default
assert str(sa_table.c.col4.server_default.arg) == 'now()'
assert sa_table.c.col5.server_default
assert isinstance(sa_table.c.col5.server_default, sa.DefaultClause)
assert isinstance(sa_table.c.col5.server_default.arg, sa_func.next_value)
assert sa_table.c.col5.server_default.arg.sequence.name == 'a_table_col5_seq'
assert sa_table.c.col5.server_default.arg.sequence.schema == 'a_schema'
assert sa_table.metadata == meta # assert it's the meta I gave it
# test without indexes
sa_table_no_indexes = table_obj.to_sqla(metadata=sa.MetaData(), include_indexes=False)
assert [j.name for i in sa_table_no_indexes.indexes for j in i.expressions] == []
# pylint: disable=pointless-statement,expression-not-assigned
def test_table_or():
table = table_structures.Table(
name='foo',
schema='test',
columns=[
table_structures.Column(name='id', datatype='integer', required=True),
table_structures.Column(name='other_id', datatype='integer'),
table_structures.Column(name='another_id', datatype='integer'),
],
primary_keys=['id'],
foreign_keys=[
table_structures.ForeignKey('other_id', 'other', 'id'),
],
indexes=[
table_structures.Index(['other_id']),
],
unique_constraints=[
table_structures.UniqueConstraint('other_id'),
],
check_constraints=[
table_structures.CheckConstraint('check1', 'id != other_id'),
])
assert (table | None) == table
assert (None | table) == table
other = copy.deepcopy(table)
other.columns = other.columns[:-1] + [
table_structures.Column(name='creation_date', datatype='date')]
merged = table | other
merged_columns = {col.name for col in merged.columns}
assert merged_columns == {'id', 'other_id', 'another_id', 'creation_date'}
other = copy.deepcopy(table)
other.indexes = set()
merged = table | other
assert merged.indexes == table.indexes
other = copy.deepcopy(table)
other.foreign_keys = set()
merged = table | other
assert not merged.foreign_keys
other = copy.deepcopy(table)
other.foreign_keys = {table_structures.ForeignKey('other_id', 'some_other_table', 'id')}
merged = table | other
assert not merged.foreign_keys
other = copy.deepcopy(table)
other.unique_constraints = {table_structures.UniqueConstraint('another_id')}
merged = table | other
assert not merged.unique_constraints
other = copy.deepcopy(table)
other.check_constraints = {table_structures.CheckConstraint('check2', 'other_id != 1')}
merged = table | other
assert not merged.check_constraints
other = copy.deepcopy(table)
other.columns[0].datatype = 'float'
with pytest.raises(table_structures.MergeError):
table | other
other = copy.deepcopy(table)
other.primary_keys = ['other_id']
with pytest.raises(table_structures.MergeError):
table | other
other = copy.deepcopy(table)
other.name = 'bar'
with pytest.raises(table_structures.MergeError):
table | other
with pytest.raises(TypeError):
table | object()
def test_no_column_given():
""" Test that we raise a NoDataType when no data type
is given in the spec. """
table_metadata = {
'name': 'pktable',
'schema': 'test_schema',
'columns': [
{'name': 'id'}, # missing datatype
{'name': 'somestr', 'datatype': 'text'},
],
'primary_keys': 'id',
}
with pytest.raises(table_structures.NoDataType):
table_structures.Table.from_dict(table_metadata)
def test_create_table_primary_key(transacted_postgresql_db):
""" Test that we create the primary keys correctly """
table_metadata = {
'name': 'pktable',
'schema': 'test_schema',
'columns': [
{'name': 'id', 'datatype': 'integer'},
{'name': 'somestr', 'datatype': 'text'},
],
'primary_keys': ['id'],
}
sa_meta = sa.MetaData()
table = table_structures.Table.from_dict(table_metadata)
transacted_postgresql_db.connection.execute('CREATE SCHEMA test_schema')
sa_table = table.to_sqla(metadata=sa_meta)
sa_table.create(transacted_postgresql_db.connection)
assert sa_table.c.id.primary_key
def test_create_table_primary_key_multiple(transacted_postgresql_db):
table_metadata = {
'name': 'pktable',
'schema': 'test_schema',
'columns': [
{'name': 'composite_key_1', 'datatype': 'text'},
{'name': 'composite_key_2', 'datatype': 'text'},
],
'primary_keys': ['composite_key_1', 'composite_key_2'],
}
sa_meta = sa.MetaData()
table = table_structures.Table.from_dict(table_metadata)
transacted_postgresql_db.connection.execute('CREATE SCHEMA test_schema')
sa_table = table.to_sqla(metadata=sa_meta)
sa_table.create(transacted_postgresql_db.connection)
assert sa_table.c.composite_key_1.primary_key
assert sa_table.c.composite_key_2.primary_key
def test_create_table_required_column(transacted_postgresql_db):
table_metadata = {
'name': 'reqcoltable',
'schema': 'test_schema',
'columns': [
{'name': 'required', 'datatype': 'text', 'required': True},
{'name': 'optional', 'datatype': 'text', 'required': False},
{'name': 'other', 'datatype': 'text'},
],
}
sa_meta = sa.MetaData()
table = table_structures.Table.from_dict(table_metadata)
transacted_postgresql_db.connection.execute('CREATE SCHEMA test_schema')
sa_table = table.to_sqla(metadata=sa_meta)
sa_table.create(transacted_postgresql_db.connection)
assert not sa_table.c.required.nullable
assert sa_table.c.optional.nullable
assert sa_table.c.other.nullable
# TABLE SET
def test_tableset_from_dict():
""" Test that we pull up a tableset correctly """
test_dict = {
'tables': [
{'name': 'foo', 'schema': 'test', 'columns': []},
{'name': 'bar', 'schema': 'test', 'columns': []},
],
}
table_set = table_structures.TableSet.from_dict(test_dict)
assert len(table_set.tables) == 2
assert {t.name for t in table_set.tables} == {'foo', 'bar'}
def test_tableset_from_sqla():
""" Test that we can take a list of sqlalchemy tables and
return a table set containing table_structures.Table for
each table """
table_1 = sa.Table('my_table1', sa.MetaData(), schema='test')
table_2 = sa.Table('my_table2', sa.MetaData(), schema='test')
table_set = table_structures.TableSet.from_sqla([table_1, table_2])
assert table_set.to_dict() == {
'tables': [
{'name': 'my_table1', 'schema': 'test', 'columns': []},
{'name': 'my_table2', 'schema': 'test', 'columns': []},
],
}
def test_tableset_to_dict():
""" Test that we create the dict for thet tableset """
table_set = table_structures.TableSet(tables=[
table_structures.Table(name='foo', schema='test'),
table_structures.Table(name='bar', schema='test'),
])
assert table_set.to_dict() == {
'tables': [
{'name': 'foo', 'schema': 'test', 'columns': []},
{'name': 'bar', 'schema': 'test', 'columns': []},
],
}
assert (table_set | None) == table_set
assert (None | table_set) == table_set
assert (table_set | table_set) == table_set
def test_tableset_to_sqla():
""" Test that we take a tableset and return a list of sqlalchemy tables """
table_set = table_structures.TableSet(tables=[
table_structures.Table(name='foo', schema='test'),
table_structures.Table(name='bar', schema='test'),
])
sa_tables = table_set.to_sqla()
assert all(isinstance(t, sa.Table) for t in sa_tables)
assert {t.name for t in sa_tables} == {'foo', 'bar'}
def test_tableset_unique_constraints():
""" Test that we properly add unique constraints to foreign
key targets when calling to_sqla() """
table_set = table_structures.TableSet.from_dict(
{
'tables': [{
'name': 'one',
'schema': 'test_schema',
'columns': [
{'name': 'id', 'datatype': 'integer'},
{'name': 'somestr', 'datatype': 'text'},
],
}, {
'name': 'two',
'schema': 'test_schema',
'columns': [
{'name': 'one_id', 'datatype': 'integer'},
{'name': 'otherstr', 'datatype': 'text'},
],
'foreign_keys': [{
'column': 'one_id',
'remote_table': 'test_schema.one',
'remote_column': 'id',
}],
'indexes': [{'columns': ['one_id']}],
}],
}
)
# make sure it's not there if we don't want it
tables = table_set.to_sqla(add_unique_constraints=False)
table_one = [i for i in tables if i.name == 'one'][0]
assert [i for i in table_one.constraints
if isinstance(i, sa.UniqueConstraint)] == []
# make sure unique constraint is there by default
tables = table_set.to_sqla(metadata=sa.MetaData())
table_one = [i for i in tables if i.name == 'one'][0]
table_one_unique_constraint = [
i for i in table_one.constraints if isinstance(i, sa.UniqueConstraint)
][0]
assert table_one_unique_constraint.columns.keys() == ['id']
| python |
import torch
try:
import torch_kdtree # if built with setuptools
except ImportError:
import os, sys; sys.path.append(os.path.join(os.path.dirname(__file__), "../../build")) # if built with cmake
import torch_kdtree
from torch_cluster import radius
from scipy.spatial import cKDTree
from time import time
import numpy as np
if __name__ == "__main__":
NUM = int(2**18)
RADIUS = 0.1
print(f"(python) num = {NUM}, radius = {RADIUS}")
########################################
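    # build the KD-tree from points generated on the GPU, then move the tree (and the data) to the CPU for querying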
data = torch.randn([NUM, 3], device="cuda")
t0 = time()
tree = torch_kdtree.torchBuildCUDAKDTree(data)
tree.cpu()
print(f"(python) time for building kdtree, and moving to cpu = {time() - t0}")
data = data.cpu()
########################################
query = torch.randn(NUM, 3)
t0 = time()
index, batch = tree.search_radius(query, RADIUS)
print(f"(python) time for querying on cpu using multithreads = {time() - t0}")
########################################
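    # compare against torch_cluster's radius search, first on the GPU and then on the CPU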
data_cuda = data.cuda()
data_query = query.cuda()
t0 = time()
index_gt = radius(data_cuda, data_query, r=RADIUS)
print(f"(python) time for querying on gpu using torch_cluster = {time() - t0}")
t0 = time()
index_gt = radius(data, query, r=RADIUS)
print(f"(python) time for querying on cpu using torch_cluster = {time() - t0}")
########################################
t0 = time()
index_gt = cKDTree(data.numpy()).query_ball_point(query.numpy(), r=RADIUS, workers=8)
print(f"(python) time for querying on cpu using cKDTree with 8 threads = {time() - t0}")
########################################
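    # flatten scipy's per-point neighbour lists and count element-wise mismatches against torch_kdtree's result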
index_gt = torch.from_numpy(np.concatenate(index_gt)).long()
wrong_loc = torch.where(index != index_gt)[0]
print(f"(python) there are {len(wrong_loc)} mismatches in total")
| python |
from abc import ABCMeta, abstractmethod
class AlgebraicClass(metaclass=ABCMeta):
"""
    This class adds Frobenius algebra structure to the classes
"""
@abstractmethod
def __repr__(self):
"""
        This method allows a class to be displayed on screen
"""
pass
@abstractmethod
def __eq__(self, other):
"""
        This method allows two classes to be compared for equality
"""
pass
@abstractmethod
def __mul__(self, other):
"""
        This method allows two classes to be multiplied
"""
pass
class Printable(object):
"""
    This class implements methods to display objects that have a set-like structure
"""
def __init__(self):
        # whenever a new element is added to the set,
        # the cached self.string must be rebuilt
self.it_changed = True
# cache for the string representation of the set of elements
self.string = ''
def build_the_string(self):
        # if the semigroup is printed before all of its elements have been
        # generated, only the generators are shown
elements = self.elements
# Se muestran los elementos usando la notacion de conjunto
string = '{' + str(elements[0])
for element in elements[1:]:
string += ', ' + str(element)
string += '}'
return string
def __repr__(self):
        # if a new element has been added, the string
        # must be rebuilt
if self.it_changed:
self.string = self.build_the_string()
self.it_changed = False
return self.string
| python |
from __future__ import print_function, absolute_import
import argparse
import os.path as osp
import random
import numpy as np
import sys
import time
import shutil
import h5py
from tqdm import tqdm
import torch
from torch import nn
from torch.backends import cudnn
from torch.utils.data import DataLoader
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.utils.data.distributed
from visda import datasets
from visda import models
from visda.evaluators import Evaluator, extract_features
from visda.utils.data import transforms as T
from visda.utils.data import IterLoader
from visda.utils.data.sampler import RandomMultipleGallerySampler, ShuffleBatchSampler
from visda.utils.data.preprocessor import Preprocessor
from visda.utils.logging import Logger
from visda.utils.serialization import load_checkpoint, save_checkpoint, copy_state_dict
from visda.utils.osutils import mkdir_if_missing
from visda.sda.options.test_options import TestOptions
from visda.sda.options.train_options import TrainOptions
from visda.sda.models.test_model import TestModel
from visda.sda.util.visualizer import Visualizer
from visda.sda.models import networks
from visda.sda.util.util import tensor2im, save_image
def get_data(name, data_dir):
dataset = datasets.create(name, data_dir)
return dataset
def get_test_loader(dataset, height, width, batch_size, workers):
normalizer = T.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
test_transformer = T.Compose([
T.Resize((height, width), interpolation=3),
T.ToTensor(),
normalizer
])
testset = sorted(dataset.train)
test_loader = DataLoader(
Preprocessor(testset, root=dataset.images_dir, transform=test_transformer),
batch_size=batch_size, num_workers=workers,
shuffle=False, pin_memory=True)
return test_loader
def main():
    args = TrainOptions().parse() # get training options
if args.seed is not None:
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
main_worker(args)
def main_worker(args):
global start_epoch, best_mAP_reid, best_mAP_gan
args.gpu = None
args.rank = 0
total_iters = 0 # the total number of training iterations
cudnn.benchmark = True
log_dir = osp.dirname(args.resume)
print("==========\nArgs:{}\n==========".format(args))
mkdir_if_missing(osp.join(log_dir, 'personX_sda', 'image_train'))
# Create data loaders
dataset_source = get_data('personx', args.data_dir)
data_loader = get_test_loader(dataset_source, args.height, args.width, args.batch_size, args.workers)
# Create model
    model = TestModel(args) # create a model given args.model and other options
model.load_networks('latest',args.resume)
model.eval()
# end = time.time()
with torch.no_grad():
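        # run every source image through the generator and save the translated result under personX_sda/image_train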
for i, (imgs, fnames, pids, _, _) in enumerate(tqdm(data_loader)):
model.set_input({'A':imgs, 'A_paths':fnames})
model.test()
visuals = model.get_current_visuals() # get image results
for fname, img_tensor in zip(fnames, visuals['fake']):
img_np = tensor2im(img_tensor)
save_image(img_np, osp.join(log_dir, 'personX_sda', 'image_train', osp.basename(fname)))
if __name__ == '__main__':
main()
| python |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import time
import os
import sys
from synchronizers.new_base.eventstep import EventStep
from synchronizers.new_base.modelaccessor import VOLTService, RCORDSubscriber, model_accessor
class SubscriberDhcpEventStep(EventStep):
topics = ["dhcp.events"]
technology = "kafka"
def __init__(self, *args, **kwargs):
super(SubscriberDhcpEventStep, self).__init__(*args, **kwargs)
def get_onu_sn(self, event):
olt_service = VOLTService.objects.first()
onu_sn = olt_service.get_onu_sn_from_openflow(event["deviceId"], event["portNumber"])
        if not onu_sn:
self.log.exception("dhcp.events: Cannot find onu serial number for this event", kafka_event=event)
raise Exception("dhcp.events: Cannot find onu serial number for this event")
return onu_sn
def process_event(self, event):
value = json.loads(event.value)
onu_sn = self.get_onu_sn(value)
subscriber = RCORDSubscriber.objects.get(onu_device=onu_sn)
self.log.debug("dhcp.events: Got event for subscriber", subscriber=subscriber, event_value=value, onu_sn=onu_sn)
subscriber.ip_address = value["ipAddress"]
subscriber.mac_address = value["macAddress"]
subscriber.save()
| python |
import json
import requests
from .config import BASE_URL, GIST_URL
class Do:
def __init__(self, gist):
self.gist = gist
def getMyID(self,gist_name):
'''
Getting gistID of a gist in order to make the workflow
easy and uninterrupted.
'''
r = requests.get(
'%s'%BASE_URL+'/users/%s/gists' % self.gist.username,
headers=self.gist.header
)
if (r.status_code == 200):
r_text = json.loads(r.text)
limit = len(r.json())
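            # scan each of the user's gists for a file matching gist_name and return that gist's id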
for g,no in zip(r_text, range(0,limit)):
for ka,va in r.json()[no]['files'].items():
if str(va['filename']) == str(gist_name):
return r.json()[no]['id']
return 0
def star(self, **args):
'''
star any gist by providing gistID or gistname(for authenticated user)
'''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
            raise Exception('Either provide authenticated user\'s unambiguous Gistname or any unique Gistid to be starred')
r = requests.put(
'%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
headers=self.gist.header
)
if (r.status_code == 204):
response = {
'id': self.gist_id
}
return response
raise Exception('Gist can\'t be starred')
def unstar(self, **args):
'''
unstar any gist by providing gistID or gistname(for authenticated user)
'''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
            raise Exception('Either provide authenticated user\'s unambiguous Gistname or any unique Gistid to be unstarred')
r = requests.delete(
'%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
headers=self.gist.header
)
if (r.status_code == 204):
response = {
'id': self.gist_id
}
return response
raise Exception('Gist can\'t be unstarred')
def fork(self, **args):
'''
fork any gist by providing gistID or gistname(for authenticated user)
'''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
            raise Exception('Either provide authenticated user\'s unambiguous Gistname or any unique Gistid to be forked')
r = requests.post(
'%s'%BASE_URL+'/gists/%s/forks' % self.gist_id,
headers=self.gist.header
)
if (r.status_code == 201):
response = {
'id': self.gist_id,
'description': r.json()['description'],
'public': r.json()['public'],
'comments': r.json()['comments']
}
return response
raise Exception('Gist can\'t be forked')
def checkifstar(self, **args):
'''
Check a gist if starred by providing gistID or gistname(for authenticated user)
'''
if 'name' in args:
self.gist_name = args['name']
self.gist_id = self.getMyID(self.gist_name)
elif 'id' in args:
self.gist_id = args['id']
else:
            raise Exception('Either provide authenticated user\'s unambiguous Gistname or any unique Gistid to be checked for star')
r = requests.get(
'%s'%BASE_URL+'/gists/%s/star' % self.gist_id,
headers=self.gist.header
)
if (r.status_code == 204):
response = {
'starred': 'True',
'id': self.gist_id
}
else:
response = {
'starred': 'False'
}
return response
| python |
text = """
Alice was beginning to get very tired of sitting by her
sister on the bank, and of having nothing to do: once
or twice she had peeped into the book her sister was
reading, but it had no pictures or conversations in it,
"and what is the use of a book," thought Alice "without
pictures or conversations?"
So she was considering in her own mind (as well as she
could, for the hot day made her feel very sleepy and
stupid), whether the pleasure of making a daisy-chain
would be worth the trouble of getting up and picking
the daisies, when suddenly a White Rabbit with pink
eyes ran close by her.
There was nothing so very remarkable in that; nor did
Alice think it so very much out of the way to hear the
Rabbit say to itself, "Oh dear! Oh dear! I shall be
late!" (when she thought it over afterwards, it occurred
to her that she ought to have wondered at this, but at
the time it all seemed quite natural); but when the
Rabbit actually took a watch out of its waistcoat-pocket,
and looked at it, and then hurried on, Alice started
to her feet, for it flashed across her mind that she had
never before seen a rabbit with either a waistcoat-pocket,
or a watch to take out of it, and burning with curiosity,
she ran across the field after it, and fortunately was
just in time to see it pop down a large rabbit-hole
under the hedge.
"""
counts = {}
for c in "ABCDEFGHIJKLMNOPQRSTUVWXYZ":
counts[c] = 0
for c in text.upper():
if (c >= "A") and (c <= "Z"):
counts[c] = counts[c] + 1
for letter, count in sorted(counts.items()):
print("%s: %d" % (letter, count))
| python |
import unittest
from omniglot.omni import OmnilingualProcessor
from omnilingual import LanguageCode
class TestOmni(unittest.TestCase):
def setUp(self):
self.omni = OmnilingualProcessor(None)
self.maxDiff = None
if __name__ == "__main__":
unittest.main()
| python |
import pytest
import os
import time
import socket
from urllib.parse import urlparse
def is_port_open(hostname, port):
return socket.socket(socket.AF_INET, socket.SOCK_STREAM).connect_ex((hostname, port)) == 0
@pytest.fixture(scope="session")
def ENDPOINT():
return os.environ.get('URI_SERVER', 'http://localhost:8000')
@pytest.fixture(scope="session", autouse=True)
def wait_for_service(ENDPOINT):
"""
Before starting tests
Wait for service to become available
"""
_endpoint = urlparse(ENDPOINT)
for attempt in range(10):
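        # poll roughly once per second for up to 10 attempts; connection errors are swallowed and retried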
try:
if is_port_open(_endpoint.hostname, _endpoint.port):
return
        except Exception:
pass
time.sleep(1)
raise Exception(f"{ENDPOINT} port is not active") # TODO: This does not seem to stop execution of tests?
#request.addfinalizer(finalizer_function)
| python |
# Generated by Django 3.0.1 on 2019-12-25 21:42
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Circle',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(auto_now_add=True, help_text='Fecha y hora en la cual el objeto fue creado', verbose_name='created at')),
('modified', models.DateTimeField(auto_now=True, help_text='Fecha y hora en la cual el objeto fue ultimamente modificado', verbose_name='modified at')),
('name', models.CharField(max_length=140, verbose_name='Nombre del Circulo')),
('slug_name', models.SlugField(unique=True)),
('about', models.CharField(max_length=255, verbose_name='Descripcion del Circulo')),
('picture', models.ImageField(blank=True, null=True, upload_to='circles/pictures')),
('rides_offered', models.PositiveIntegerField(default=0)),
('rides_taken', models.PositiveIntegerField(default=0)),
('verified', models.BooleanField(default=False, help_text='Los círculos verificados también se conocen como comunidades oficiales.', verbose_name='Verificacion de Circulo')),
('is_public', models.BooleanField(default=True, help_text='Los círculos públicos se enumeran en la página principal para que todos sepan sobre su existencia.')),
('is_limited', models.BooleanField(default=False, help_text='Los círculos limitados pueden crecer hasta un número fijo de miembros.', verbose_name='Limitado')),
('members_limit', models.PositiveIntegerField(default=0, help_text='Si el círculo es limitado, este será el límite en el número de miembros.')),
],
options={
'ordering': ['-rides_taken', '-rides_offered'],
'get_latest_by': 'created',
'abstract': False,
},
),
]
| python |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import torch
import tests.helpers.utils as tutils
from pytorch_lightning import Trainer
from pytorch_lightning.strategies import DDPStrategy
from pytorch_lightning.utilities.seed import seed_everything
from tests.helpers.boring_model import BoringModel
from tests.helpers.datamodules import ClassifDataModule
from tests.helpers.runif import RunIf
from tests.strategies.test_dp import CustomClassificationModelDP
@pytest.mark.parametrize(
"trainer_kwargs",
(
pytest.param(dict(accelerator="gpu", devices=1), marks=RunIf(min_cuda_gpus=1)),
pytest.param(dict(strategy="dp", accelerator="gpu", devices=2), marks=RunIf(min_cuda_gpus=2)),
pytest.param(dict(strategy="ddp_spawn", accelerator="gpu", devices=2), marks=RunIf(min_cuda_gpus=2)),
),
)
def test_evaluate(tmpdir, trainer_kwargs):
tutils.set_random_main_port()
seed_everything(1)
dm = ClassifDataModule()
model = CustomClassificationModelDP()
trainer = Trainer(
default_root_dir=tmpdir, max_epochs=2, limit_train_batches=10, limit_val_batches=10, **trainer_kwargs
)
trainer.fit(model, datamodule=dm)
assert "ckpt" in trainer.checkpoint_callback.best_model_path
old_weights = model.layer_0.weight.clone().detach().cpu()
trainer.validate(datamodule=dm)
trainer.test(datamodule=dm)
# make sure weights didn't change
new_weights = model.layer_0.weight.clone().detach().cpu()
torch.testing.assert_allclose(old_weights, new_weights)
def test_model_parallel_setup_called(tmpdir):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.configure_sharded_model_called = False
self.layer = None
def configure_sharded_model(self):
self.configure_sharded_model_called = True
self.layer = torch.nn.Linear(32, 2)
model = TestModel()
trainer = Trainer(default_root_dir=tmpdir, limit_train_batches=2, limit_val_batches=2, max_epochs=1)
trainer.fit(model)
assert model.configure_sharded_model_called
@pytest.mark.parametrize(
["strategy", "strategy_cls"], [("DDP", DDPStrategy), ("DDP_FIND_UNUSED_PARAMETERS_FALSE", DDPStrategy)]
)
def test_strategy_str_passed_being_case_insensitive(strategy, strategy_cls):
trainer = Trainer(strategy=strategy)
assert isinstance(trainer.strategy, strategy_cls)
| python |
import asyncio
import logging
import signal
import socketio
import urllib
_API_V2_NAMESPACE = '/api/v2/socket_io'
_RECONNECT_ATTEMPTS = 1 # We most commonly get disconnected when the session
# expires, so we don't want to try many times
_LOGGER = logging.getLogger(__name__)
class SmartboxAPIV2Namespace(socketio.AsyncClientNamespace):
def __init__(self, session, namespace, dev_data_callback=None, node_update_callback=None):
super().__init__(namespace)
self._session = session
self._namespace = namespace
self._dev_data_callback = dev_data_callback
self._node_update_callback = node_update_callback
self._namespace_connected = False
self._received_message = False
self._received_dev_data = False
def on_connect(self):
_LOGGER.debug(f"Namespace {self._namespace} connected")
self._namespace_connected = True
async def on_disconnect(self):
_LOGGER.info(f"Namespace {self._namespace} disconnected")
self._namespace_connected = False
self._received_message = False
self._received_dev_data = False
# check if we need to refresh our token
# TODO: public method
if self._session._has_token_expired():
_LOGGER.info("Token expired, disconnecting")
# we need to call disconnect to disconnect all namespaces
await self.disconnect()
@property
def connected(self):
return self._namespace_connected
async def on_dev_data(self, data):
_LOGGER.debug(f"Received dev_data: {data}")
self._received_message = True
self._received_dev_data = True
if self._dev_data_callback is not None:
self._dev_data_callback(data)
async def on_update(self, data):
_LOGGER.debug(f"Received update: {data}")
if not self._received_message:
# The connection is only usable once we've received a message from
# the server (not on the connect event!!!), so we wait to receive
# something before sending our first message
await self.emit('dev_data', namespace=self._namespace)
self._received_message = True
if not self._received_dev_data:
_LOGGER.debug("Dev data not received yet, ignoring update")
return
if self._node_update_callback is not None:
self._node_update_callback(data)
class SocketSession(object):
def __init__(self,
session,
device_id,
dev_data_callback=None,
node_update_callback=None,
verbose=False,
add_sigint_handler=False,
ping_interval=20):
self._session = session
self._device_id = device_id
self._ping_interval = ping_interval
if verbose:
self._sio = socketio.AsyncClient(logger=True,
engineio_logger=True,
reconnection_attempts=_RECONNECT_ATTEMPTS)
else:
logging.getLogger('socketio').setLevel(logging.ERROR)
logging.getLogger('engineio').setLevel(logging.ERROR)
self._sio = socketio.AsyncClient()
self._api_v2_ns = SmartboxAPIV2Namespace(session, _API_V2_NAMESPACE, dev_data_callback, node_update_callback)
self._sio.register_namespace(self._api_v2_ns)
@self._sio.event
async def connect():
_LOGGER.debug("Connected")
if add_sigint_handler:
# engineio sets a signal handler on connect, which means we have to set our
# own in the connect callback if we want to override it
_LOGGER.debug("Adding signal handler")
event_loop = asyncio.get_event_loop()
def sigint_handler():
_LOGGER.debug("Caught SIGINT, cancelling loop")
asyncio.ensure_future(self.cancel())
event_loop.add_signal_handler(signal.SIGINT, sigint_handler)
async def _send_ping(self):
_LOGGER.debug(f"Starting ping task every {self._ping_interval}s")
while True:
await asyncio.sleep(self._ping_interval)
if not self._api_v2_ns.connected:
_LOGGER.debug("Namespace disconnected, not sending ping")
continue
_LOGGER.debug("Sending ping")
await self._sio.send('ping', namespace=_API_V2_NAMESPACE)
async def run(self):
self._ping_task = self._sio.start_background_task(self._send_ping)
# Will loop indefinitely unless our signal handler is set and called
self._loop_should_exit = False
while not self._loop_should_exit:
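            # each disconnect (typically a token expiry) drops us back here to refresh the token and reconnect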
# TODO: accessors in session
encoded_token = urllib.parse.quote(self._session._access_token, safe='~()*!.\'')
url = f"{self._session._api_host}/?token={encoded_token}&dev_id={self._device_id}"
_LOGGER.debug(f"Connecting to {url}")
await self._sio.connect(url,
namespaces=[f"{_API_V2_NAMESPACE}?token={encoded_token}&dev_id={self._device_id}"])
_LOGGER.debug("Connected")
await self._sio.wait()
_LOGGER.debug("Connection loop exited, checking token")
loop = asyncio.get_event_loop()
await loop.run_in_executor(None, self._session._check_refresh)
await self._sio.disconnect()
async def cancel(self):
_LOGGER.debug("Disconnecting and cancelling tasks")
self._loop_should_exit = True
await self._sio.disconnect()
self._ping_task.cancel()
| python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
from hybridsql_const import PRIMITIVE_TYPES, VALID_PARTITION_TYPES, VALID_ORDER_TYPES, BUILTIN_OP_DICT, SQL_PRESERVED_NAMES
from gen_const_data import random_literal_bool, random_literal_int32, random_literal_int64, \
random_literal_float, random_literal_double, random_literal_string, random_literal_int16, \
random_literal_date, random_literal_timestamp
from hybridsql_param import sample_integer_config
from hybridsql_const import LAST_JOIN_SQL, LAST_JOIN_OP
from hybridsql_param import sample_string_config
class ColumnInfo:
def __init__(self, name, dtype, nullable=True):
'''
        ColumnInfo initializer
        :param name: column name
        :param dtype: column data type
        :param nullable: whether the column may be null (defaults to True)
'''
self.name = name
self.dtype = dtype
self.nullable = nullable
class ColumnKey:
def __init__(self,partition_column,order_column):
self.partition_column = partition_column
self.order_column = order_column
def __hash__(self) -> int:
return hash(self.partition_column+":"+self.order_column)
def __eq__(self, other) -> bool:
if isinstance(other, ColumnKey):
return ((self.partition_column == other.partition_column) and (self.order_column == other.order_column))
else:
return False
class ColumnsPool:
def __init__(self, args):
self.args = args
self.name = None
self.id_column = None
self.order_columns = []
self.partition_columns = []
self.normal_columns = []
self.indexs = set()
self.expressions = []
self.output_columns = []
self.join_table = []
self.join_column = []
def get_select_all_sql(self):
return "(SELECT * FROM {}) AS {}".format(self.name, self.name)
def get_select_all_column_sql(self):
return "(SELECT {} FROM {}) AS {}".format(",".join([c.name for c in self.get_all_columns()]), self.name, self.name)
def get_select_column_sql(self, columns:list):
return "(SELECT {} FROM {}) AS {}".format(",".join([c.name for c in columns]), self.name, self.name)
def get_select_sql_by_all_type(self, expect_types:list):
all_columns = self.get_all_columns()
return "(SELECT {} FROM {}) AS {}".format(",".join([c.name for c in all_columns if c.dtype in expect_types]), self.name, self.name)
def get_select_sql_by_type(self, expect_types:list):
all_columns = self.get_all_columns()
columns = []
for c in all_columns:
if c.dtype in expect_types:
columns.append(c)
expect_types.remove(c.dtype)
return "(SELECT {} FROM {}) AS {}".format(",".join([c.name for c in columns]), self.name, self.name)
def get_sub_sql(self):
key = random.randint(1, 2)
select_all_sqls = {
0: self.name,
1: self.get_select_all_sql(),
2: self.get_select_all_column_sql()
}
return select_all_sqls[key]
def get_select_sql_by_type_and_index(self, expect_types:list):
columns = [c for c in self.normal_columns if c.dtype in expect_types]
res = []
if self.id_column is not None:
res.append(self.id_column)
res.extend(self.partition_columns)
res.extend(self.order_columns)
res.extend(columns)
return "SELECT {} FROM {}".format(",".join([c.name for c in res]), self.name)
def get_join_def_string(self, is_sub_select=False):
# LAST_JOIN_SQL = "LAST JOIN ${TABLE_NAME} ORDER BY ${ORDER_COLUMN} ON ${JOIN_EXPR}"
join_expr_op = sample_string_config(self.args.join_expr_op)
join_sqls = []
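        # build one LAST JOIN clause per joined table by filling in the LAST_JOIN_SQL template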
for table in self.join_table:
sql_string = LAST_JOIN_SQL
if is_sub_select:
sql_string = sql_string.replace("${TABLE_NAME}", table.get_sub_sql())
else:
sql_string = sql_string.replace("${TABLE_NAME}", table.name)
order_name = random.choice(table.order_columns).name
sql_string = sql_string.replace("${ORDER_COLUMN}", table.name+"."+order_name)
join_expr_num = sample_integer_config(self.args.join_expr_num)
on_exprs = []
for i in range(join_expr_num):
join_pk = random.choice(self.partition_columns).name
if i == 0:
op = "="
self.indexs.add(ColumnKey(join_pk, order_name))
table.indexs.add(ColumnKey(join_pk, order_name))
else:
op = random.choice(join_expr_op)
on_expr = self.name+"."+join_pk+" "+op+" "+table.name+"."+join_pk
on_exprs.append(on_expr)
sql_string = sql_string.replace("${JOIN_EXPR}", " and ".join(on_exprs))
join_sqls.append(sql_string)
return " ".join(join_sqls)
def get_all_columns(self):
'''
        Get all columns of the table
        :return: list of ColumnInfo
'''
res = []
if self.id_column is not None:
res.append(self.id_column)
res.extend(self.partition_columns)
res.extend(self.order_columns)
res.extend(self.normal_columns)
return res
def set_unique_id(self, name, dtype):
'''
        Set the unique id column
        :param name: "id"
        :param dtype: "int64"
        :return: None
'''
self.id_column = ColumnInfo(name, dtype, nullable=False)
def add_order_column(self, name, dtype, nullable=False):
'''
        Add an order (timestamp) column
        :param name: column name
        :param dtype: column data type
        :param nullable: whether the column may be null (defaults to False)
:return:
'''
column = ColumnInfo(name, dtype, nullable=nullable)
self.order_columns.append(column)
return column
def add_partition_column(self, name, dtype, nullable=False):
column = ColumnInfo(name, dtype, nullable=nullable)
self.partition_columns.append(column)
return column
@staticmethod
def sample_index(p):
'''
        Sample an index according to the (unnormalized) weights in p
:param p:
:return:
'''
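        # normalize the weights, draw a single multinomial sample, and return the index of the chosen bucket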
weight = sum(p)
p = [_ / weight for _ in p]
samples = np.random.multinomial(1, p)
return list(samples).index(1)
@staticmethod
def do_create_new_column(prefix, cands, dtype, nullable):
'''
        Create a new column
        :param prefix: name prefix
        :param cands: list of existing candidate columns
        :param dtype: column data type
        :param nullable: whether the column may be null
:return:
'''
        # if no dtype was given, pick a random primitive type
if dtype is None:
dtype = random.choice(PRIMITIVE_TYPES)
        # if a list or set of types was given, pick one of them
elif isinstance(dtype, list) or isinstance(dtype, set):
dtype = random.choice(dtype)
        # if nullable was not specified, default to True
if nullable is None:
nullable = True
        # build the column name from the prefix, position and dtype
name = prefix + "_" + str(len(cands)) + "_" + str(dtype)
column = ColumnInfo(name, dtype=dtype, nullable=nullable)
        # add the newly created column to the pool
cands.append(column)
return column
def do_sample_column(self, prefix, column_list,
downward=True,
dtype=None,
nullable=None,
allow_const=False,
prob_use_existing=None,
prob_use_new=None,
prob_use_constant=None):
'''
        Sample a column: reuse an existing one, create a new one, or emit a literal constant
:param prefix:
:param column_list:
:param downward:
:param dtype:
:param nullable:
:param allow_const:
:return:
'''
        # probabilities for randomly generating a leaf expression
if prob_use_existing is None:
prob_use_existing = self.args.prob_sample_exist_column
if prob_use_new is None:
prob_use_new = self.args.prob_sample_new_column
if prob_use_constant is None:
prob_use_constant = self.args.prob_sample_const_column
probs = [prob_use_existing]
if downward:
probs.append(prob_use_new)
        # some data types cannot be literal constants
if allow_const and dtype not in ["int16", "date", "timestamp"]:
probs.append(prob_use_constant)
idx = self.sample_index(probs)
        # idx == 0 means "use an existing column"
if idx == 0:
def is_compatible_column(c):
'''
                Check whether a candidate column satisfies the nullable and dtype constraints
:param c:
:return:
'''
if nullable is not None and c.nullable != nullable:
return False
elif dtype is not None:
if isinstance(dtype, list) or isinstance(dtype, set):
if c.dtype not in dtype:
return False
elif c.dtype != dtype:
return False
return True
candidates = list(filter(is_compatible_column, column_list))
            # no compatible candidates: create a new column (downward) or fall back to a literal
if len(candidates) == 0:
if downward:
return self.do_create_new_column(prefix, column_list, dtype, nullable)
else:
return gen_literal_const(dtype, nullable=nullable)
# raise Exception("Candidates is empty, can not create new column in upward mode")
else:
return random.choice(candidates)
elif idx == 1 and downward:
return self.do_create_new_column(prefix, column_list, dtype, nullable)
else:
            # return a literal constant
return gen_literal_const(dtype, nullable=False)
def sample_partition_column(self, downward=True, nullable=False, new_pk=True):
        '''
        Sample a partition (pk) column.
        :param downward:
        :param nullable:
        :return:
        '''
if new_pk:
return self.do_sample_column("pk", self.partition_columns,
downward=downward,
allow_const=False,
dtype=VALID_PARTITION_TYPES,
nullable=nullable,
prob_use_existing=0,
prob_use_new=1,
prob_use_constant=0)
else:
return self.do_sample_column("pk", self.partition_columns,
downward=downward,
allow_const=False,
dtype=VALID_PARTITION_TYPES,
nullable=nullable,
prob_use_existing=1,
prob_use_new=0,
prob_use_constant=0)
def sample_order_column(self, downward=True, nullable=False):
        '''
        Sample an order column.
        :param downward:
        :param nullable:
        :return:
        '''
ts_type = sample_integer_config(self.args.ts_type)
order_type = VALID_ORDER_TYPES[ts_type]
return self.do_sample_column("order", self.order_columns,
downward=downward,
allow_const=False,
dtype=order_type,
nullable=nullable)
def sample_column(self, downward=True, dtype=None, nullable=None, allow_const=False):
        '''
        Sample a normal column.
        :param downward:
        :param dtype:
        :param nullable:
        :param allow_const:
        :return:
        '''
return self.do_sample_column("c", self.normal_columns,
downward=downward,
allow_const=allow_const,
dtype=dtype,
nullable=nullable)
def init_table(self, args, window_defs, udf_defs, downward=True, keep_index=True, new_pk = True):
# sample expressions
expr_num = sample_integer_config(args.expr_num)
expr_depth = sample_integer_config(args.expr_depth)
table_pk_num = sample_integer_config(args.table_pk_num)
table_ts_num = sample_integer_config(args.table_ts_num)
output_names = []
pk_columns = []
order_columns = []
if downward:
if len(self.partition_columns) > 0:
pk_columns = self.partition_columns
else:
for i in range(table_pk_num):
pk_column = self.sample_partition_column(
downward=downward, nullable=args.index_nullable)
pk_columns.append(pk_column)
if len(self.order_columns)>0:
order_columns = self.order_columns
else:
for i in range(table_ts_num):
order_column = self.sample_order_column(
downward=downward, nullable=args.index_nullable)
if order_column not in order_columns:
order_columns.append(order_column)
else:
pk_columns = self.partition_columns
order_columns = self.order_columns
if keep_index:
# unique idx
index_column = self.id_column
if index_column is not None:
self.expressions.append(TypedExpr(index_column.name, index_column.dtype))
output_names.append(index_column.name)
# partition
for pk_column in pk_columns:
self.expressions.append(TypedExpr(pk_column.name, pk_column.dtype))
output_names.append(pk_column.name)
# order
for order_column in order_columns:
self.expressions.append(TypedExpr(order_column.name, order_column.dtype))
output_names.append(order_column.name)
if downward:
for window_def in window_defs:
window_order = random.choice(order_columns)
window_def.order_column.add(window_order.name)
window_pk_num = random.randint(1,table_pk_num)
for i in range(window_pk_num):
window_pk = random.choice(pk_columns)
window_def.pk_columns.add(window_pk.name)
self.indexs.add(ColumnKey(window_pk.name, window_order.name))
else:
for window_def in window_defs:
select_index = random.choice(list(self.indexs))
window_def.order_column.add(select_index.order_column)
window_def.pk_columns.add(select_index.partition_column)
window_pk_num = random.randint(1, len(self.partition_columns))
for _ in range(1, window_pk_num):
window_pk = random.choice(pk_columns)
window_def.pk_columns.add(window_pk.name)
for i in range(expr_num):
window_def = random.choice(window_defs)
alias_name = None
            # build the alias name
if args.use_alias_name:
alias_name = window_def.name + "_out_" + str(i)
            # build a new expression
new_expr = sample_expr(udf_defs, self,
is_udaf=True,
over_window=window_def.name,
alias_name=alias_name,
allow_const=False,
depth=expr_depth,
downward=downward)
if alias_name is not None:
output_names.append(alias_name)
else:
output_names.append(new_expr.text)
self.expressions.append(new_expr)
# output schema
        out_length = len(output_names)  # id/pk/order columns plus the expr_num sampled expressions
for i in range(out_length):
self.output_columns.append(ColumnInfo(output_names[i], self.expressions[i].dtype))
def init_join_table(self, args, window_defs, udf_defs, downward=True, keep_index=True):
# sample expressions
expr_num = sample_integer_config(args.expr_num)
expr_depth = sample_integer_config(args.expr_depth)
table_pk_num = sample_integer_config(args.table_pk_num)
table_ts_num = sample_integer_config(args.table_ts_num)
output_names = []
all_expressions = []
pk_columns = []
order_columns = []
if downward:
if len(self.partition_columns) > 0:
pk_columns = self.partition_columns
else:
for i in range(table_pk_num):
pk_column = self.sample_partition_column(
downward=downward, nullable=args.index_nullable)
pk_columns.append(pk_column)
if len(self.order_columns)>0:
order_columns = self.order_columns
else:
for i in range(table_ts_num):
order_column = self.sample_order_column(
downward=downward, nullable=args.index_nullable)
order_columns.append(order_column)
else:
pk_columns = self.partition_columns
order_columns = self.order_columns
join_tables = self.join_table
tables = [self]
tables.extend(join_tables)
if keep_index:
# unique idx
index_column = self.id_column
if index_column is not None:
self.expressions.append(TypedExpr(self.name+"."+index_column.name, index_column.dtype))
all_expressions.append(TypedExpr(self.name+"."+index_column.name, index_column.dtype))
output_names.append(index_column.name)
# partition
for pk_column in pk_columns:
pk_expr_name = random.choice(tables).name+"."+pk_column.name
self.expressions.append(TypedExpr(pk_expr_name, pk_column.dtype))
all_expressions.append(TypedExpr(pk_expr_name, pk_column.dtype))
output_names.append(pk_column.name)
# order
for order_column in order_columns:
order_expr_name = random.choice(tables).name+"."+order_column.name
self.expressions.append(TypedExpr(order_expr_name, order_column.dtype))
all_expressions.append(TypedExpr(order_expr_name, order_column.dtype))
output_names.append(order_column.name)
if downward:
for window_def in window_defs:
window_order = random.choice(order_columns)
window_def.order_column.add(window_order.name)
window_pk_num = random.randint(1, table_pk_num)
for i in range(window_pk_num):
window_pk = random.choice(pk_columns)
window_def.pk_columns.add(window_pk.name)
self.indexs.add(ColumnKey(window_pk.name, window_order.name))
else:
for window_def in window_defs:
select_index = random.choice(list(self.indexs))
window_def.order_column.add(select_index.order_column)
window_def.pk_columns.add(select_index.partition_column)
window_pk_num = random.randint(1, len(self.partition_columns))
for _ in range(1, window_pk_num):
window_pk = random.choice(pk_columns)
window_def.pk_columns.add(window_pk.name)
for join_table in join_tables:
join_table.partition_columns = self.partition_columns
join_table.order_columns = self.order_columns
join_table.indexs = self.indexs
for i in range(expr_num):
window_def = random.choice(window_defs)
alias_name = None
            # build the alias name
if args.use_alias_name:
alias_name = window_def.name + "_out_" + str(i)
            # build a new expression
table = random.choice(tables)
new_expr = sample_expr(udf_defs, table,
is_udaf=True,
over_window=window_def.name,
alias_name=alias_name,
allow_const=False,
depth=expr_depth,
downward=downward)
if alias_name is not None:
output_names.append(alias_name)
else:
output_names.append(new_expr.text)
table.expressions.append(new_expr)
all_expressions.append(new_expr)
# output schema
        out_length = len(output_names)  # id/pk/order columns plus the expr_num sampled expressions
for i in range(out_length):
self.output_columns.append(ColumnInfo(output_names[i], all_expressions[i].dtype))
class SubTable(ColumnsPool):
def __init__(self, args):
ColumnsPool.__init__(self, args)
self.sql = None
def get_sub_sql(self):
return "({}) AS {}".format(self.sql, self.name)
class TypedExpr:
def __init__(self, text, dtype):
self.text = text
self.dtype = dtype
def gen_literal_const(dtype, nullable):
    '''
    Generate a literal constant expression for the given type.
    :param dtype:
    :param nullable:
    :return:
    '''
if dtype is None:
dtype = random.choice(PRIMITIVE_TYPES)
if dtype == "bool":
res = random_literal_bool(nullable)
res = "bool({})".format(res)
elif dtype == "int16":
res = random_literal_int16()
elif dtype == "int32":
res = random_literal_int32()
elif dtype == "int64":
res = random_literal_int64()
elif dtype == "float":
res = random_literal_float()
elif dtype == "double":
res = random_literal_double()
elif dtype == "date":
res = random_literal_date()
res = "date('{}')".format(res)
elif dtype == "timestamp":
res = random_literal_timestamp()
res = "timestamp({})".format(res)
else:
res = random_literal_string()
return TypedExpr(str(res), dtype)
def sample_expr(udf_pool, column_pool,
is_udaf=None,
expect_dtype=None,
over_window=None,
allow_const=True,
downward=True,
alias_name=None,
depth=1):
    '''
    Sample an expression.
    :param udf_pool:
    :param column_pool:
    :param is_udaf:
    :param expect_dtype:
    :param over_window:
    :param allow_const:
    :param downward:
    :param alias_name:
    :param depth:
    :return:
    '''
# generate leaf expression
if depth <= 0:
column = column_pool.sample_column(
downward=downward, dtype=expect_dtype,
nullable=None, allow_const=allow_const)
if isinstance(column, ColumnInfo):
return TypedExpr(column_pool.name+"."+column.name, column.dtype)
else:
return column
# select a udf function
udf = udf_pool.sample_function(is_udaf=is_udaf, expect_dtype=expect_dtype)
if udf.name == 'at':
depth = 1
# sample child expressions
arg_types = udf.arg_types
arg_exprs = []
for dtype in arg_types:
child_is_udaf = None
child_allow_const = allow_const
child_depth = random.randint(0, depth - 1)
if dtype.startswith("list_"):
prob_find_list_expr = 0.3
find_list_expr = random.random() < prob_find_list_expr
if find_list_expr and child_depth > 0:
try:
child = sample_expr(
udf_pool, column_pool, is_udaf=child_is_udaf,
expect_dtype=dtype, over_window=None, allow_const=False,
downward=downward, alias_name=None, depth=child_depth)
arg_exprs.append(child)
continue
except ValueError:
pass
# uplift primitive typed expr as list
child_is_udaf = False
child_allow_const = False
dtype = dtype[5:]
child = sample_expr(
udf_pool, column_pool, is_udaf=child_is_udaf,
expect_dtype=dtype, over_window=None, allow_const=child_allow_const,
downward=downward, alias_name=None, depth=child_depth)
arg_exprs.append(child)
# add variadic arguments
if udf.is_variadic:
if udf.name == "concat_ws": # concat_ws take at least one argument
variadic_num = random.randint(1, 10)
else:
variadic_num = random.randint(0, 10)
for i in range(variadic_num):
child_depth = random.randint(0, depth - 1)
arg_exprs.append(sample_expr(
udf_pool, column_pool, is_udaf=None,
expect_dtype="string", over_window=None, allow_const=allow_const,
downward=downward, alias_name=None, depth=child_depth))
# do generate
if udf.name in BUILTIN_OP_DICT and 0 < len(arg_exprs) <= 2:
if len(arg_exprs) == 1:
text = "(%s %s)" % (BUILTIN_OP_DICT[udf.name],
arg_exprs[0].text)
else:
text = "(%s %s %s)" % (arg_exprs[0].text,
BUILTIN_OP_DICT[udf.name],
arg_exprs[1].text)
else:
if udf.name in SQL_PRESERVED_NAMES:
udf.name = "`" + udf.name + '`'
text = "%s(%s)" % (udf.name, ", ".join([_.text for _ in arg_exprs]))
if over_window is not None:
text += " OVER " + str(over_window)
if alias_name is not None:
text += " AS " + alias_name
return TypedExpr(text, udf.return_type)
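# For reference (illustrative, with made-up table/column/window names): a depth-1
# aggregate expression over a window may render as "sum(t1.c_3_int64) OVER w0 AS w0_out_1".
# The OVER clause is appended when over_window is given and the alias when alias_name is given.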
| python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import, division, print_function
import re
import logging
import time
import os
import sys
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
from util.webRequest import WebRequest
from agent import Agent
logger = logging.getLogger(__name__)
@Agent.register
class CnProxy(Agent):
def __init__(self):
        self.url = 'http://www.cnproxy.com/proxy{page}.html'  # pages 1 to 10
self.re_ip_pattern = re.compile(r'<tr><td>(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})<SCRIPT', re.I)
self.re_port_encode_pattern = re.compile(r'javascript>document.write\(":"([+\w]{2,10})\)</SCRIPT>')
self.port_dict = {
'v': '3',
'm': '4',
'a': '2',
'l': '9',
'q': '0',
'b': '5',
'i': '7',
'w': '6',
'r': '8',
'c': '1',
'+': ''
}
def extract_proxy(self, pages=10):
for page_num in range(1, pages):
try:
rp = WebRequest().get(self.url.format(page=page_num), timeout=10)
re_ip_result = self.re_ip_pattern.findall(rp.text)
re_port_encode_result = self.re_port_encode_pattern.findall(rp.text)
if not len(re_ip_result) or not len(re_port_encode_result):
raise Exception("empty")
if len(re_ip_result) != len(re_port_encode_result):
raise Exception("len(host) != len(port)")
for index, each_result in enumerate(re_port_encode_result):
each_result = each_result.strip()
host = re_ip_result[index]
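                    # decode the obfuscated port: each letter maps to a digit via
                    # port_dict and '+' separators are dropped (e.g. 'v+m+a' -> 342)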
port = int(''.join(list(map(lambda x: self.port_dict.get(x, ''), each_result))))
yield f'{host}:{port}'
except:
continue
time.sleep(3)
if __name__ == '__main__':
p = Agent.proxies[0]()
for proxy in p.extract_proxy():
print(proxy)
| python |
# Misc
comptrollerAddress = "0xAB1c342C7bf5Ec5F02ADEA1c2270670bCa144CbB"
curveAddressProvider = "0x0000000022D53366457F9d5E68Ec105046FC4383"
ethZapAddress = "0x5A0bade607eaca65A0FE6d1437E0e3EC2144d540"
eurt_namehash = "0xd5aa869323f85cb893514ce48950ba7e84a8d0bf062a7e3058bcc494217da39f"
masterChefAddress = "0xbD17B1ce622d73bD438b9E658acA5996dc394b0d"
oracleAddress = "0x83d95e0D5f402511dB06817Aff3f9eA88224B030"
sushiWhaleAddress = "0x7abE0cE388281d2aCF297Cb089caef3819b13448"
trustedMigratorAddress = "0x1824df8D751704FA10FA371d62A37f9B8772ab90"
unitrollerAddress = "0xAB1c342C7bf5Ec5F02ADEA1c2270670bCa144CbB"
usdcAddress = "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"
userAddress = "0x4C026d5D6A7fe1B2e2B28B916Ef2016f6058F7B4" # sssuttonsss.eth
vestedYfiAddress = "0x34dDFC06ce0c39242Fb380066Ee01e409a4a525e"
wethUsdFeed = "0x5f4ec3df9cbd43714fe2740f5e3616155c5b8419"
whaleAddress = "0x53c286E0AbE87c9e6d4d95ebE62ceaFa4aFCE849"
yCrvAddress = "0x5dbcF33D8c2E976c6b560249878e6F1491Bca25c"
yfiUsdFeed = "0xa027702dbb89fbd58938e4324ac03b58d812b0e1"
yvBOOSTAddress = "0x9d409a0A012CFbA9B15F6D4B36Ac57A46966Ab9a"
yveCRVAddress = "0xc5bDdf9843308380375a611c18B50Fb9341f502A"
zapAddress = "0x5A0bade607eaca65A0FE6d1437E0e3EC2144d540"
# Providers
curveAddressProviderAddress = "0x0000000022D53366457F9d5E68Ec105046FC4383"
yearnAddressesProviderAddress = "0x9be19Ee7Bc4099D62737a7255f5c227fBcd6dB93"
# Registries
v2RegistryAddress = "0x50c1a2eA0a861A967D9d0FFE2AE4012c2E053804"
# --> 0th address in addresses provider
curveRegistryAddress = "0x90E00ACe148ca3b23Ac1bC8C240C2a7Dd9c2d7f5"
# --> 5th address in addresses provider
curveCryptoSwapRegistryAddress = "0x8F942C20D02bEfc377D41445793068908E2250D0"
# Factories
sushiswapFactoryAddress = "0xC0AEe478e3658e2610c5F7A4A2E1777cE9e4f2Ac"
uniswapFactoryAddress = "0x5C69bEe701ef814a2B6a3EDD4B1652CB9cc5aA6f"
uniswapV3FactoryAddress = "0x1F98431c8aD98523631AE4a59f267346ea31F984"
# Routers
sushiswapRouterAddress = "0xd9e1cE17f2641f24aE83637ab66a2cca9C378B9F"
uniswapRouterAddress = "0x7a250d5630B4cF539739dF2C5dAcb4c659F2488D"
# Vaults
yfiVaultAddress = "0xE14d13d8B3b85aF791b2AADD661cDBd5E6097Db1"
v2UsdcVaultV1Address = "0xe2F6b9773BF3A015E2aA70741Bde1498bdB9425b"
v2UsdcVaultV2Address = "0x5f18C75AbDAe578b483E5F43f12a39cF75b973a9"
v2YfiVaultAddress = "0xE14d13d8B3b85aF791b2AADD661cDBd5E6097Db1"
# Tokens
aLinkAddress = "0xA64BD6C70Cb9051F6A9ba1F163Fdc07E0DfB5F84"
cUsdt = "0xf650C3d88D12dB855b8bf7D11Be6C55A4e07dCC9"
cUsdc = "0x39aa39c021dfbae8fac545936693ac917d5e7563"
cDai = "0x5d3a536E4D6DbD6114cc1Ead35777bAB948E3643"
crvAddress = "0xD533a949740bb3306d119CC777fa900bA034cd52"
crvEURSUSDCAddress = "0x3D229E1B4faab62F621eF2F6A610961f7BD7b23B"
crvEURTUSDAddress = "0x3b6831c0077a1e44ED0a21841C3bC4dC11bCE833"
cvxCrvAddress = "0x9D0464996170c6B9e75eED71c68B99dDEDf279e8"
cyDai = "0x8e595470Ed749b85C6F7669de83EAe304C2ec68F"
cyDaiAddress = "0x8e595470Ed749b85C6F7669de83EAe304C2ec68F"
cySusdOldAddress = "0x4e3a36A633f63aee0aB57b5054EC78867CB3C0b8"
cySushiAddress = "0x226F3738238932BA0dB2319a8117D9555446102f"
cyUsdcAddress = "0x76eb2fe28b36b3ee97f3adae0c69606eedb2a37c"
cyWethAddress = "0x41c84c0e2EE0b740Cf0d31F63f3B6F627DC6b393"
cyYfiAddress = "0xFa3472f7319477c9bFEcdD66E4B948569E7621b9"
eCrvAddress = "0xA3D87FffcE63B53E0d54fAa1cc983B7eB0b74A9c"
ethAddress = "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"
eurs = "0xdB25f211AB05b1c97D595516F45794528a807ad8"
eursUsdcPool = "0x98a7F18d4E56Cfe84E3D081B40001B3d5bD3eB8B"
eurt = "0xC581b735A1688071A1746c968e0798D642EDE491"
ibAud = "0xfafdf0c4c1cb09d430bf88c75d88bb46dae09967"
ibEurPoolAddress = "0x19b080FE1ffA0553469D20Ca36219F17Fcf03859"
linkAddress = "0x514910771AF9Ca656af840dff83E8264EcF986CA"
oBtcAddress = "0x8064d9Ae6cDf087b1bcd5BDf3531bD5d8C537a68"
random_token = "0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e"
sAUD = "0xF48e200EAF9906362BB1442fca31e0835773b8B4"
sCHF = "0x0F83287FF768D1c1e17a42F44d644D7F22e8ee1d"
sEUR = "0xD71eCFF9342A5Ced620049e616c5035F1dB98620"
sGBP = "0x97fe22E7341a0Cd8Db6F6C021A24Dc8f4DAD855F"
sJPY = "0xF6b1C627e95BFc3c1b4c9B825a032Ff0fBf3e07d"
sKRW = "0x269895a3dF4D73b077Fc823dD6dA1B95f72Aaf9B"
steCrvAddress = "0x06325440D014e39736583c165C2963BA99fAf14E"
sushiAddress = "0x6B3595068778DD592e39A122f4f5a5cF09C90fE2"
sushiswapLpTokenAddress = "0x397FF1542f962076d0BFE58eA045FfA2d347ACa0" # USDC/WETH
threeCrvAddress = "0x6c3F90f043a72FA612cbac8115EE7e52BDe6E490"
threeCrvPoolAddress = "0xbEbc44782C7dB0a1A60Cb6fe97d0b483032FF1C7"
triCryptoAddress = "0xc4AD29ba4B3c580e6D59105FFf484999997675Ff"
triCryptoPoolAddress = "0xD51a44d3FaE010294C616388b506AcdA1bfAAE46"
uniswapLpTokenAddress = "0xB4e16d0168e52d35CaCD2c6185b44281Ec28C9Dc" # USDC/WETH
usdc = "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"
usdcAddress = "0xA0b86991c6218b36c1d19D4a2e9Eb0cE3606eB48"
usdpAddress = "0x1456688345527bE1f37E9e627DA0837D6f08C925"
wbtcAddress = "0x2260FAC5E5542a773Aa44fBCfeDf7C193bc2C599"
wethAddress = "0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2"
yfiAddress = "0x0bc529c00C6401aEF6D220BE8C6Ea1667F6Ad93e"
yfiEthAddress = "0x29059568bB40344487d62f7450E78b8E6C74e0e5"
yfiEthPoolAddress = "0xC26b89A667578ec7b3f11b2F98d6Fd15C07C54ba"
wethUsdcPoolAddress = "0x8ad599c3A0ff1De082011EFDDc58f1908eb6e6D8"
# Earn v2
yDaiV2Address = "0x16de59092dAE5CcF4A1E6439D611fd0653f0Bd01"
yUsdcV2Address = "0xd6aD7a6750A7593E092a9B218d66C0A814a3436e"
yUsdtV2Address = "0x83f798e925BcD4017Eb265844FDDAbb448f1707D"
ySusdV2Address = "0xF61718057901F84C4eEC4339EF8f0D86D2B45600"
yTusdV2Address = "0x73a052500105205d34daf004eab301916da8190f"
yWbtcV2Address = "0x04Aa51bbcB46541455cCF1B8bef2ebc5d3787EC9"
# Earn v3
yDaiV3Address = "0xC2cB1040220768554cf699b0d863A3cd4324ce32"
yUsdcV3Address = "0x26EA744E5B887E5205727f55dFBE8685e3b21951"
yUsdtV3Address = "0xE6354ed5bC4b393a5Aad09f21c46E101e692d447"
yBusdV3Address = "0x04bC0Ab673d88aE9dbC9DA2380cB6B79C4BCa9aE"
| python |
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Input:
HASH = "hash"
class Output:
FOUND = "found"
REPORTS = "reports"
THREATSCORE = "threatscore"
class LookupHashInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"hash": {
"type": "string",
"title": "Hash",
"description": "MD5/SHA1/SHA256 Hash",
"order": 1
}
},
"required": [
"hash"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class LookupHashOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"found": {
"type": "boolean",
"title": "Found",
"description": "True if found",
"order": 1
},
"reports": {
"type": "array",
"title": "Reports",
"description": "Reports",
"items": {
"$ref": "#/definitions/report"
},
"order": 3
},
"threatscore": {
"type": "integer",
"title": "Threatscore",
"description": "Threat Score (max found)",
"order": 2
}
},
"definitions": {
"report": {
"type": "object",
"title": "report",
"properties": {
"analysis_start_time": {
"type": "string",
"title": "Analysis Start Time",
"order": 14
},
"compromised_hosts": {
"type": "array",
"title": "Compromised Hosts",
"description": "Compromised Hosts",
"items": {
"type": "string"
},
"order": 18
},
"domains": {
"type": "array",
"title": "Domains",
"description": "Domains",
"items": {
"type": "string"
},
"order": 19
},
"environmentDescription": {
"type": "string",
"title": "EnvironmentDescription",
"description": "Environment Description",
"order": 6
},
"environment_id": {
"type": "string",
"title": "Environment Id",
"description": "Environment ID",
"order": 1
},
"hosts": {
"type": "array",
"title": "Hosts",
"description": "Hosts",
"items": {
"type": "string"
},
"order": 20
},
"isinteresting": {
"type": "boolean",
"title": "Isinteresting",
"description": "Is Interesting",
"order": 16
},
"isurlanalysis": {
"type": "boolean",
"title": "Isurlanalysis",
"order": 13
},
"md5": {
"type": "string",
"title": "Md5",
"description": "MD5",
"order": 3
},
"multiscan_detectrate_pcnt": {
"type": "number",
"title": "Multiscan Detectrate Pcnt",
"order": 10
},
"sha1": {
"type": "string",
"title": "Sha1",
"description": "SHA1",
"order": 2
},
"sha256": {
"type": "string",
"title": "Sha256",
"description": "SHA256",
"order": 4
},
"size": {
"type": "integer",
"title": "Size",
"description": "Size",
"order": 7
},
"submitname": {
"type": "string",
"title": "Submitname",
"description": "Submission Name",
"order": 5
},
"targeturl": {
"type": "string",
"title": "Targeturl",
"description": "Target URL",
"order": 9
},
"threatlevel": {
"type": "integer",
"title": "Threatlevel",
"description": "Threat Level",
"order": 17
},
"threatscore": {
"type": "integer",
"title": "Threatscore",
"order": 15
},
"type": {
"type": "string",
"title": "Type",
"description": "ASCII Text",
"order": 8
},
"virustotal_detectrate_pcnt": {
"type": "number",
"title": "Virustotal Detectrate Pcnt",
"description": "VT Detect Rate Percent",
"order": 12
},
"virustotal_familyname": {
"type": "string",
"title": "Virustotal Familyname",
"description": "VT Family Name",
"order": 11
}
}
}
}
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| python |
"""
Created: 2001/08/05
Purpose: Turn components into a sub-package
__version__ = "$Revision: 1.1 $"
__date__ = "$Date: 2001/12/11 23:47:11 $"
"""
| python |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import absolute_import, division, print_function, unicode_literals
BOOTSTRAP_XAR = "bootstrap_xar.sh"
RUN_XAR_MAIN = "__run_xar_main__.py"
BOOTSTRAP_XAR_TEMPLATE = """#!/bin/sh -eu
readlink_e() {{
local path="$1"
readlink -e "$path" 2>/dev/null && return
# macosx / bsd readlink doesn't support -e
# so use pwd -P with some recursive readlinking
# strip trailing /
path="${{path%/}}"
# make path an absolute path
if [[ "${{path:0:1}}" != "/" ]]
then
path="$(pwd -P)/$path"
fi
local slash_basename=""
local counter=0
while [[ -h "$path" ]]
do
if [[ counter -gt 200 ]]
then
echo "ERROR: Cyclical symbolic link detected: $path" 1>&2
return
fi
counter=$(( counter + 1 ))
target="$(readlink "$path")"
if [[ "${{target:0:1}}" == "/" ]]
then
path="$target"
else
slash_basename="/$(basename "$path")"
path="${{path%$slash_basename}}/$target"
fi
done
# determine the target
slash_basename="/$(basename "$path")"
if [[ "$slash_basename" == "/.." || "$slash_basename" == "/." ]]
then
slash_basename=""
fi
local parent_dir="${{path%$slash_basename}}"
# subshell to preserve the cwd (instead of pushd/popd)
(cd "$parent_dir"; echo "$(pwd -P)$slash_basename")
}}
BOOTSTRAP_PATH="$0"
ORIGINAL_EXECUTABLE="$1"; shift
DIR=$(dirname "$BOOTSTRAP_PATH")
# Save any existing LD_LIBRARY_PATH
if [ -n "${{LD_LIBRARY_PATH+SET}}" ]; then
export XAR_SAVED_LD_LIBRARY_PATH=$LD_LIBRARY_PATH
fi
# Don't inherit PYTHONPATH. We set it to be the XAR mountpoint.
if [ -n "${{PYTHONPATH+SET}}" ]; then
export XAR_SAVED_PYTHONPATH=$PYTHONPATH
fi
export XAR_INVOKED_NAME="$ORIGINAL_EXECUTABLE"
export LD_LIBRARY_PATH="$DIR"
export PYTHONPATH="$DIR"
export XAR_RUNTIME_FILES
XAR_RUNTIME_FILES="$(dirname "$(readlink_e "$BOOTSTRAP_PATH")")"
export XAR_PYTHON_COMMAND="{python}"
exec {python} "$DIR/{run_xar_main}" "$@"
"""
def run_xar_main(**kwargs):
"""
Constructs the run_xar_main given the template arguments.
If the {function} template argument is present, then the entry point
{module}.{function}() is executed as main. Otherwise, {module} is run as the
main module.
"""
run_xar_main = """
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Put everything inside an __invoke_main() function.
# This way anything we define won't pollute globals(), since runpy
# will propagate our globals() as to the user's main module.
def __invoke_main():
import fcntl
import os
import shlex
import sys
sys.argv[0] = os.getenv("XAR_INVOKED_NAME")
# Hold a file descriptor open to a file inside our XAR to keep it
# mounted while the xar is running. We simply open the actual
# directory rather than any file (which would also work).
xar_mountpoint = os.getenv('XAR_RUNTIME_FILES')
if xar_mountpoint:
fd = os.open(xar_mountpoint, os.O_RDONLY)
flags = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC)
"""
if "function" in kwargs:
run_xar_main += """
import {module}
{module}.{function}()
"""
else:
run_xar_main += """
import runpy
module = "{module}"
runpy._run_module_as_main(module, False)
"""
run_xar_main += """
__invoke_main()
"""
return run_xar_main.format(**kwargs)
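# Illustrative sketch (assumed module/function names, not part of the original file):
# the two ways the entry point can be templated, per the docstring above.
#
#   run_xar_main(module="mypkg.cli", function="main")  # runs mypkg.cli.main()
#   run_xar_main(module="mypkg.cli")                   # runs mypkg.cli as __main__ via runpy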
| python |
# Copyright (c) 2018 FlashX, LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import graphene
from gtmcore.inventory.inventory import InventoryManager
from gtmcore.logging import LMLogger
from gtmcore.activity import ActivityStore, ActivityDetailRecord, ActivityDetailType, ActivityRecord, ActivityType
from gtmcore.activity.utils import ImmutableDict, TextData, DetailRecordList
from lmsrvcore.auth.user import get_logged_in_username, get_logged_in_author
from lmsrvlabbook.api.objects.activity import ActivityRecordObject
from lmsrvlabbook.api.connections.activity import ActivityConnection
logger = LMLogger.get_logger()
class CreateUserNote(graphene.relay.ClientIDMutation):
"""Mutation to create a new user note entry in the activity feed of lab book
The `linked_commit` is an empty string since there is no linked commit
"""
class Input:
owner = graphene.String(required=True)
labbook_name = graphene.String()
dataset_name = graphene.String()
title = graphene.String(required=True)
body = graphene.String(required=False)
tags = graphene.List(graphene.String, required=False)
# Return the new Activity Record
new_activity_record_edge = graphene.Field(lambda: ActivityConnection.Edge)
@classmethod
def _create_user_note(cls, lb, title, body, tags):
store = ActivityStore(lb)
data = TextData('markdown', body) if body else ImmutableDict()
adr = ActivityDetailRecord(ActivityDetailType.NOTE,
show=True,
importance=255,
data=data)
ar = ActivityRecord(ActivityType.NOTE,
message=title,
linked_commit="no-linked-commit",
importance=255,
tags=tags,
detail_objects=DetailRecordList([adr]))
ar = store.create_activity_record(ar)
return ar
@classmethod
def mutate_and_get_payload(cls, root, info, owner, title, labbook_name=None, dataset_name=None,
body=None, tags=None, client_mutation_id=None):
if labbook_name is not None and dataset_name is not None:
raise ValueError("A note can be created in only 1 repository at a time.")
username = get_logged_in_username()
if labbook_name:
name = labbook_name
repository_type = 'labbook'
r = InventoryManager().load_labbook(username, owner, labbook_name,
author=get_logged_in_author())
elif dataset_name:
name = dataset_name
repository_type = 'dataset'
r = InventoryManager().load_dataset(username, owner, dataset_name,
author=get_logged_in_author())
else:
raise ValueError("You must either set `labbookName` or `datasetName` to create a note.")
with r.lock():
ar = cls._create_user_note(r, title, body, tags)
return CreateUserNote(new_activity_record_edge=ActivityConnection.Edge(
node=ActivityRecordObject(owner=owner,
name=name,
_repository_type=repository_type,
commit=ar.commit),
cursor=ar.commit))
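# Illustrative GraphQL document (an assumption about the exposed schema name, not taken
# from the schema itself), assuming the mutation is registered as `createUserNote`:
#
#   mutation {
#     createUserNote(input: {owner: "demo", labbookName: "my-project",
#                            title: "Ran experiment 3", tags: ["experiment"]}) {
#       newActivityRecordEdge { node { commit } cursor }
#     }
#   }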
| python |
import json
import logging
from pathlib import Path
from typing import Any, Iterable, List, Set, Union
import numpy as np
import pandas as pd
from hyperstyle.src.python.review.application_config import LanguageVersion
from hyperstyle.src.python.review.common.file_system import Extension
from hyperstyle.src.python.review.quality.penalty import PenaltyIssue
from hyperstyle.src.python.review.reviewers.utils.print_review import convert_json_to_issues
from analysis.src.python.evaluation.common.csv_util import ColumnName, write_dataframe_to_csv
from analysis.src.python.evaluation.common.file_util import AnalysisExtension, get_restricted_extension
from analysis.src.python.evaluation.common.xlsx_util import create_workbook, remove_sheet, write_dataframe_to_xlsx_sheet
logger = logging.getLogger(__name__)
def filter_df_by_language(df: pd.DataFrame, languages: Set[LanguageVersion],
column: str = ColumnName.LANG.value) -> pd.DataFrame:
return filter_df_by_iterable_value(df, column, set(map(lambda l: l.value, languages)))
def filter_df_by_iterable_value(df: pd.DataFrame, column: str, value: Iterable) -> pd.DataFrame:
return df.loc[df[column].isin(value)]
def filter_df_by_single_value(df: pd.DataFrame, column: str, value: Any) -> pd.DataFrame:
return df.loc[df[column] == value]
def drop_duplicates(df: pd.DataFrame, column: str = ColumnName.CODE.value) -> pd.DataFrame:
return df.drop_duplicates(column, keep='last').reset_index(drop=True)
# Find all rows and columns where two dataframes are inconsistent.
# For example:
# row | column |
# -------------------------
# 3 | column_1 | True
# | column_2 | True
# -------------------------
# 4 | column_1 | True
# | column_2 | True
# means the first and second dataframes have different values
# in column_1 and column_2 in rows 3 and 4
def get_inconsistent_positions(first: pd.DataFrame, second: pd.DataFrame) -> pd.DataFrame:
ne_stacked = (first != second).stack()
inconsistent_positions = ne_stacked[ne_stacked]
inconsistent_positions.index.names = [ColumnName.ROW.value, ColumnName.COLUMN.value]
return inconsistent_positions
# Create a new dataframe with all items that are different.
# For example:
# | old | new
# ---------------------------------
# row column | |
# 3 grade | EXCELLENT | MODERATE
# 4 grade | EXCELLENT | BAD
def get_diffs(first: pd.DataFrame, second: pd.DataFrame) -> pd.DataFrame:
changed = get_inconsistent_positions(first, second)
difference_locations = np.where(first != second)
changed_from = first.values[difference_locations]
changed_to = second.values[difference_locations]
return pd.DataFrame({
ColumnName.OLD.value: changed_from,
ColumnName.NEW.value: changed_to},
index=changed.index)
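# Illustrative usage of the two helpers above (a sketch with made-up dataframes):
#   first = pd.DataFrame({"grade": ["EXCELLENT", "EXCELLENT"]})
#   second = pd.DataFrame({"grade": ["MODERATE", "BAD"]})
#   get_diffs(first, second)
#   #                    old       new
#   # row column
#   # 0   grade  EXCELLENT  MODERATE
#   # 1   grade  EXCELLENT       BAD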
def get_solutions_df(ext: Union[Extension, AnalysisExtension], file_path: Union[str, Path]) -> pd.DataFrame:
try:
if ext == AnalysisExtension.XLSX:
lang_code_dataframe = pd.read_excel(file_path)
else:
lang_code_dataframe = pd.read_csv(file_path)
except FileNotFoundError as e:
        logger.error('XLSX-file or CSV-file with the specified name does not exist.')
raise e
return lang_code_dataframe
def get_solutions_df_by_file_path(path: Path) -> pd.DataFrame:
ext = get_restricted_extension(path, [AnalysisExtension.XLSX, AnalysisExtension.CSV])
return get_solutions_df(ext, path)
def write_df_to_file(df: pd.DataFrame, output_file_path: Path, extension: Union[AnalysisExtension, Extension]) -> None:
if extension == AnalysisExtension.CSV:
write_dataframe_to_csv(output_file_path, df)
elif extension == AnalysisExtension.XLSX:
create_workbook(output_file_path)
write_dataframe_to_xlsx_sheet(output_file_path, df, 'inspection_results')
# remove empty sheet that was initially created with the workbook
remove_sheet(output_file_path, 'Sheet')
def read_df_from_file(input_file_path: Path) -> pd.DataFrame:
ext = get_restricted_extension(input_file_path, [AnalysisExtension.XLSX, AnalysisExtension.CSV])
if ext == AnalysisExtension.XLSX:
df = pd.read_excel(input_file_path)
else:
df = pd.read_csv(input_file_path)
return df
def get_issues_from_json(str_json: str) -> List[PenaltyIssue]:
parsed_json = json.loads(str_json)['issues']
return convert_json_to_issues(parsed_json)
def get_issues_by_row(df: pd.DataFrame, row: int) -> List[PenaltyIssue]:
return get_issues_from_json(df.iloc[row][ColumnName.TRACEBACK.value])
def equal_df(expected_df: pd.DataFrame, actual_df: pd.DataFrame) -> bool:
return expected_df.reset_index(drop=True).equals(
actual_df.reset_index(drop=True)) or (expected_df.empty and actual_df.empty)
| python |
"""
Extract functions for space time raster, 3d raster and vector datasets
(C) 2012-2013 by the GRASS Development Team
This program is free software under the GNU General Public
License (>=v2). Read the file COPYING that comes with GRASS
for details.
:authors: Soeren Gebbert
"""
from .core import get_tgis_message_interface, get_current_mapset, SQLDatabaseInterfaceConnection
from .abstract_map_dataset import AbstractMapDataset
from .open_stds import open_old_stds, check_new_stds, open_new_stds
from .datetime_math import create_suffix_from_datetime
from .datetime_math import create_time_suffix
from .datetime_math import create_numeric_suffix
from multiprocessing import Process
import grass.script as gscript
from grass.exceptions import CalledModuleError
############################################################################
def extract_dataset(input, output, type, where, expression, base, time_suffix,
nprocs=1, register_null=False, layer=1,
vtype="point,line,boundary,centroid,area,face", ):
"""Extract a subset of a space time raster, raster3d or vector dataset
A mapcalc expression can be provided to process the temporal extracted
maps.
Mapcalc expressions are supported for raster and raster3d maps.
:param input: The name of the input space time raster/raster3d dataset
:param output: The name of the extracted new space time raster/raster3d
dataset
:param type: The type of the dataset: "raster", "raster3d" or vector
:param where: The temporal SQL WHERE statement for subset extraction
:param expression: The r(3).mapcalc expression or the v.extract where
statement
:param base: The base name of the new created maps in case a mapclac
expression is provided
:param time_suffix: string to choose which suffix to use: gran, time, num%*
(where * are digits)
:param nprocs: The number of parallel processes to be used for mapcalc
processing
    :param register_null: Set this to True to register empty maps
(only raster and raster3d maps)
:param layer: The vector layer number to be used when no timestamped
layer is present, default is 1
:param vtype: The feature type to be extracted for vector maps, default
is point,line,boundary,centroid,area and face
"""
# Check the parameters
msgr = get_tgis_message_interface()
if expression and not base:
msgr.fatal(_("You need to specify the base name of new created maps"))
mapset = get_current_mapset()
dbif = SQLDatabaseInterfaceConnection()
dbif.connect()
sp = open_old_stds(input, type, dbif)
# Check the new stds
new_sp = check_new_stds(output, type, dbif, gscript.overwrite())
if type == "vector":
rows = sp.get_registered_maps(
"id,name,mapset,layer", where, "start_time", dbif)
else:
rows = sp.get_registered_maps("id", where, "start_time", dbif)
new_maps = {}
if rows:
num_rows = len(rows)
msgr.percent(0, num_rows, 1)
# Run the mapcalc expression
if expression:
count = 0
proc_count = 0
proc_list = []
for row in rows:
count += 1
if count % 10 == 0:
msgr.percent(count, num_rows, 1)
if sp.get_temporal_type() == 'absolute' and time_suffix == 'gran':
old_map = sp.get_new_map_instance(row["id"])
old_map.select(dbif)
suffix = create_suffix_from_datetime(old_map.temporal_extent.get_start_time(),
sp.get_granularity())
map_name = "{ba}_{su}".format(ba=base, su=suffix)
elif sp.get_temporal_type() == 'absolute' and time_suffix == 'time':
old_map = sp.get_new_map_instance(row["id"])
old_map.select(dbif)
suffix = create_time_suffix(old_map)
map_name = "{ba}_{su}".format(ba=base, su=suffix)
else:
map_name = create_numeric_suffix(base, count, time_suffix)
# We need to modify the r(3).mapcalc expression
if type != "vector":
expr = expression
expr = expr.replace(sp.base.get_map_id(), row["id"])
expr = expr.replace(sp.base.get_name(), row["id"])
expr = "%s = %s" % (map_name, expr)
# We need to build the id
map_id = AbstractMapDataset.build_id(map_name, mapset)
else:
map_id = AbstractMapDataset.build_id(map_name, mapset,
row["layer"])
new_map = sp.get_new_map_instance(map_id)
# Check if new map is in the temporal database
if new_map.is_in_db(dbif):
if gscript.overwrite():
# Remove the existing temporal database entry
new_map.delete(dbif)
new_map = sp.get_new_map_instance(map_id)
else:
msgr.error(_("Map <%s> is already in temporal database"
", use overwrite flag to overwrite") %
(new_map.get_map_id()))
continue
# Add process to the process list
if type == "raster":
msgr.verbose(_("Applying r.mapcalc expression: \"%s\"")
% expr)
proc_list.append(Process(target=run_mapcalc2d,
args=(expr,)))
elif type == "raster3d":
msgr.verbose(_("Applying r3.mapcalc expression: \"%s\"")
% expr)
proc_list.append(Process(target=run_mapcalc3d,
args=(expr,)))
elif type == "vector":
msgr.verbose(_("Applying v.extract where statement: \"%s\"")
% expression)
if row["layer"]:
proc_list.append(Process(target=run_vector_extraction,
args=(row["name"] + "@" +
row["mapset"], map_name,
row["layer"], vtype,
expression)))
else:
proc_list.append(Process(target=run_vector_extraction,
args=(row["name"] + "@" +
row["mapset"], map_name,
layer, vtype,
expression)))
proc_list[proc_count].start()
proc_count += 1
                # Join processes if the maximum number of processes is
                # reached or the end of the loop is reached
if proc_count == nprocs or count == num_rows:
proc_count = 0
exitcodes = 0
for proc in proc_list:
proc.join()
exitcodes += proc.exitcode
if exitcodes != 0:
dbif.close()
msgr.fatal(_("Error in computation process"))
# Empty process list
proc_list = []
# Store the new maps
new_maps[row["id"]] = new_map
msgr.percent(0, num_rows, 1)
temporal_type, semantic_type, title, description = sp.get_initial_values()
new_sp = open_new_stds(output, type, sp.get_temporal_type(), title,
description, semantic_type, dbif,
gscript.overwrite())
# collect empty maps to remove them
empty_maps = []
# Register the maps in the database
count = 0
for row in rows:
count += 1
if count % 10 == 0:
msgr.percent(count, num_rows, 1)
old_map = sp.get_new_map_instance(row["id"])
old_map.select(dbif)
if expression:
# Register the new maps
if row["id"] in new_maps:
new_map = new_maps[row["id"]]
# Read the raster map data
new_map.load()
                    # In case of an empty map, continue; do not register empty
                    # maps
if type == "raster" or type == "raster3d":
if new_map.metadata.get_min() is None and \
new_map.metadata.get_max() is None:
if not register_null:
empty_maps.append(new_map)
continue
elif type == "vector":
if new_map.metadata.get_number_of_primitives() == 0 or \
new_map.metadata.get_number_of_primitives() is None:
if not register_null:
empty_maps.append(new_map)
continue
# Set the time stamp
new_map.set_temporal_extent(old_map.get_temporal_extent())
# Insert map in temporal database
new_map.insert(dbif)
new_sp.register_map(new_map, dbif)
else:
new_sp.register_map(old_map, dbif)
# Update the spatio-temporal extent and the metadata table entries
new_sp.update_from_registered_maps(dbif)
msgr.percent(num_rows, num_rows, 1)
# Remove empty maps
if len(empty_maps) > 0:
names = ""
count = 0
for map in empty_maps:
if count == 0:
names += "%s" % (map.get_name())
else:
names += ",%s" % (map.get_name())
count += 1
if type == "raster":
gscript.run_command("g.remove", flags='f', type='raster',
name=names, quiet=True)
elif type == "raster3d":
gscript.run_command("g.remove", flags='f', type='raster_3d',
name=names, quiet=True)
elif type == "vector":
gscript.run_command("g.remove", flags='f', type='vector',
name=names, quiet=True)
dbif.close()
###############################################################################
def run_mapcalc2d(expr):
"""Helper function to run r.mapcalc in parallel"""
try:
gscript.run_command("r.mapcalc", expression=expr,
overwrite=gscript.overwrite(), quiet=True)
except CalledModuleError:
exit(1)
def run_mapcalc3d(expr):
"""Helper function to run r3.mapcalc in parallel"""
try:
gscript.run_command("r3.mapcalc", expression=expr,
overwrite=gscript.overwrite(), quiet=True)
except CalledModuleError:
exit(1)
def run_vector_extraction(input, output, layer, type, where):
    """Helper function to run v.extract in parallel"""
try:
gscript.run_command("v.extract", input=input, output=output,
layer=layer, type=type, where=where,
overwrite=gscript.overwrite(), quiet=True)
except CalledModuleError:
exit(1)
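# Illustrative call (a sketch with made-up dataset and map names, not part of the module):
#
#   extract_dataset(input="temperature@climate", output="temperature_recent",
#                   type="raster", where="start_time > '2010-01-01'",
#                   expression="temperature + 1.0", base="temp_recent",
#                   time_suffix="gran", nprocs=2)
#
# Each selected raster map is run through r.mapcalc with the dataset name in the
# expression replaced by the concrete map id, and the results are registered in the
# new space time dataset.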
| python |
import FWCore.ParameterSet.Config as cms
enableSonicTriton = cms.Modifier()
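# Illustrative sketch (an assumption, not taken from CMSSW sources): a cms.Modifier is
# typically passed to the cms.Process constructor and then used to alter configuration,
# for example:
#   process = cms.Process("TEST", enableSonicTriton)
#   enableSonicTriton.toModify(process.someProducer, someParameter=True)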
| python |
# PLY package
# Author: David Beazley ([email protected])
# https://dabeaz.com/ply/index.html
__version__ = '4.0'
__all__ = ['lex','yacc']
| python |
import hvac
import os
client = hvac.Client(url='https://localhost:8200', verify=False) # use false for testing only (self signed cert on dev machine)
client.token = os.environ['VAULT_TOKEN']
secret = client.secrets.kv.v2.read_secret_version(mount_point="apikeys_prod", path='keys') # https://hvac.readthedocs.io/en/stable/source/hvac_api_secrets_engines.html#hvac.api.secrets_engines.KvV2.read_secret_version
print(secret['data']['data']['foobar']) | python |
from selenium import webdriver
import time
import arrow
from datetime import datetime
from bs4 import BeautifulSoup
import threading
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
import os
'''
Created on 13 Sep 2013
Updated 12 Nov 2017
@author: rob dobson
'''
class ExDivDates():
hourToRunAt = 4
bRunAlready = False
bFirstRunDone = False
conversionRatesSymbols = {
"C$": {"iso":"CAD","def":1.6},
"$": {"iso":"USD","def":1.3},
"€": {"iso":"EUR","def":1.1},
"R": {"iso":"ZAR","def":18.9},
"p": {"iso":"","def":100},
"£": {"iso":"GBP","def":1.0}
}
def __init__(self, exchangeRates):
self._exchangeRates = exchangeRates
self.running = False
self.stocksExDivInfo = {}
self.lock = threading.Lock()
self.runHeadless = True
def run(self):
self.running = True
self.t = threading.Thread(target=self.do_thread_scrape)
self.t.start()
def stop(self):
self.running = False
def setTimeToRunAt(self, hourToRunAt):
self.hourToRunAt = hourToRunAt
def addToStockInfo(self, symbol, stkInfoDict):
itemsToAdd = ['exDivDate','exDivAmount','paymentDate']
self.lock.acquire()
if symbol in self.stocksExDivInfo:
for iti in itemsToAdd:
if iti in self.stocksExDivInfo[symbol]:
stkInfoDict[iti] = self.stocksExDivInfo[symbol][iti]
self.lock.release()
def setFromStockHoldings(self, stockHoldings):
itemsToAdd = ['exDivDate','exDivAmount','paymentDate']
exDivOnly = {}
for stock in stockHoldings:
sym = stock['symbol']
if stock['exDivDate'] == "" or stock['exDivAmount'] == 0 or stock['paymentDate'] == "":
continue
if sym in exDivOnly:
if 'exDivDate' in exDivOnly[sym]:
if exDivOnly[sym]['exDivDate'] != "":
continue
exDivOnly[sym] = { 'symbol':sym, 'exDivDate':stock['exDivDate'], 'exDivAmount':stock['exDivAmount'], 'paymentDate':stock['paymentDate'] }
for stock in exDivOnly.values():
if "symbol" in stock:
newDict = { 'exDivDataFromHoldings': True }
for item in itemsToAdd:
if item in stock:
newDict[item] = stock[item]
self.stocksExDivInfo[stock["symbol"]] = newDict
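    # convertFromPence: strip the currency marker from a price string and convert to GBP,
    # using the live exchange rate when available and the hard-coded default otherwise
    # (e.g. "12.5p" typically becomes 0.125, "$13.00" becomes 13.00 divided by the USD rate).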
def convertFromPence(self, val):
newVal = None
try:
for sym, exRateInfo in self.conversionRatesSymbols.items():
if sym in val:
val = val.replace(sym, "")
newVal = float(val)
exchgRate = self._exchangeRates.getExVsGBPByIso(exRateInfo["iso"])
if exchgRate is not None:
newVal /= exchgRate
else:
newVal /= exRateInfo["def"]
break
if newVal is None:
newVal = float(val)
except:
newVal = None
return newVal
def convertFromShortDate(self, val):
newVal = ""
try:
newVal = arrow.get(val, "DD-MMM")
newVal = newVal.replace(year=arrow.now().year)
if newVal < arrow.now():
newVal = newVal.shift(years=+1)
newVal = newVal.format("YYYY-MM-DD")
except:
newVal = ""
return newVal
def extractDataFromPage(self, pageText):
# parse and extract ex dividend table
soup = BeautifulSoup(pageText, "html5lib")
exDivTable = soup.select("body section table tbody tr")
# print(exDivTable)
# Extract rows and columns from table
exDivInfo = {}
attrNames = ["exDivEPIC", "exDivName", "exDivMarket", "exDivSharePrice", "exDivAmount", "exDivImpact",
"exDivDeclared", "exDivDate", "paymentDate"]
exDivTableLine = 0
for exDivRow in exDivTable:
exDivValid = True
exDivItems = {"exDivTableLine": exDivTableLine}
exDivTableLine += 1
exDivCells = exDivRow.select("td")
for elIdx in range(len(exDivCells)):
if elIdx >= len(attrNames):
break
attrName = attrNames[elIdx]
val = exDivCells[elIdx].getText().strip()
# Convert currency fields
if attrName == "exDivSharePrice" or attrName == "exDivAmount":
val = self.convertFromPence(val)
if val is None and attrName == "exDivAmount":
exDivValid = False
break
# Convert time fields
if attrName == "paymentDate" or attrName == "exDivDate" or attrName == "exDivDeclared":
val = self.convertFromShortDate(val)
if val == "" and (attrName == "exDivDate" or attrName == "paymentDate"):
exDivValid = False
break
exDivItems[attrName] = val
if exDivValid and "exDivEPIC" in exDivItems:
if not exDivItems["exDivEPIC"] in exDivInfo:
exDivInfo[exDivItems["exDivEPIC"]] = exDivItems
else:
print("Got 2 or more dividend lines, returning only earliest for", exDivItems["exDivEPIC"])
else:
print("Skipping", exDivItems)
# for sym, vals in exDivInfo.items():
# print(vals)
print("ExDivDates: Processed", len(exDivTable), "rows, got", len(exDivInfo), "symbols")
return exDivInfo
def do_thread_scrape(self):
while(self.running):
# Check if it is time to run
bRunNow = False
hourNow = datetime.now().hour
if self.bFirstRunDone:
testHour = hourNow
if testHour < self.hourToRunAt:
testHour = hourNow + 24
if testHour >= self.hourToRunAt and testHour < self.hourToRunAt + 1:
if not self.bRunAlready:
bRunNow = True
else:
self.bRunAlready = False
else:
bRunNow = True
if bRunNow:
pageURL = "http://www.dividenddata.co.uk"
print("ExDivDates:", datetime.now().strftime("%Y-%m-%d %H:%M"), ", URL", pageURL)
self.bFirstRunDone = True
self.bRunAlready = True
if self.runHeadless:
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--no-sandbox")
chrome_options.add_argument("--disable-extensions")
browser = webdriver.Chrome(chrome_options=chrome_options)
else:
browser = webdriver.Firefox() # Get local session of firefox
browser.get(pageURL) # Load page
exDivInfoDict = self.extractDataFromPage(browser.page_source)
# Close the browser now we're done
browser.close()
# Put found stocks into the dictionary of current data
for sym, vals in exDivInfoDict.items():
ySymbol = sym
if "exDivMarket" in vals:
market = vals["exDivMarket"]
if market.startswith("FTSE"):
ySymbol = sym + "L" if sym.endswith(".") else sym + ".L"
self.lock.acquire()
self.stocksExDivInfo[ySymbol] = vals
self.lock.release()
for i in range(60):
if not self.running:
break
time.sleep(1)
if __name__ == '__main__':
## Test code
    ss = ExDivDates(None)  # note: the constructor expects an exchange-rate provider; with None, currency conversion returns None
ss.run()
| python |
import argparse
import logging
MEDIUM_CHOICES = ["CD", "SACD", "DVD", "DVD-A", "Blu-ray",
"Web",
"Vinyl", "78RPM Vinyl",
"LP", "Vinyl LP", "45RPM Vinyl LP",
"EP", "Vinyl EP", "45RPM Vinyl EP",
"180g Vinyl LP", "180g 45RPM Vinyl LP",
"200g Vinyl LP", "200g 45RPM Vinyl LP",
"220g Vinyl LP", "220g 45RPM Vinyl LP",
"Reel-to-reel", "8-Track", "Cassette", "VHS"]
def ParseArguments():
args = argparse.ArgumentParser(prog="FLAC to MKA")
args.add_argument("directory",
help="Directory containing source files")
args.add_argument("--output",
help="Output file name")
args.add_argument("--image",
help="Manually specify cover art file")
args.add_argument("--forceimage",
action="store_true",
help="Skip resolution and aspect ratio check of cover art")
args.add_argument("--genre",
help="Manually specify genre")
args.add_argument("--year",
help="Manually specify year (first release)")
args.add_argument("--artist",
help="Manually specify artist")
args.add_argument("--album",
help="Manually specify album")
args.add_argument("--label",
help="Specify the label that issued this release; useful for re-releases")
args.add_argument("--issuedate",
help="Specify the date this version was released; useful for re-releases")
args.add_argument("--version",
help="Specify version of release; useful for regional releases, volumes, or special editions")
args.add_argument("--medium",
help="Specify source medium of release",
choices=MEDIUM_CHOICES)
args.add_argument("--disc",
help="Disc number (must specify number of discs)")
args.add_argument("--discs",
help="Number of discs (must specify disc number)")
args.add_argument("--no-confirm",
action="store_true",
help="Don't print metadata before running")
args.add_argument("--cue",
action="store_true",
help="Produce only CUE file")
args.add_argument("--cueflac",
action="store_true",
help="Produce CUE+FLAC as output instead of MKA")
args.add_argument("--skipmerge",
action="store_true",
help="Skip merging of FLAC files, requires file already exists")
disc_group = args.add_mutually_exclusive_group()
disc_group.add_argument("--multidisc",
action="store_true",
help="MKA output should merge multiple discs preserving disc and track numbering")
disc_group.add_argument("--nodiscs",
action="store_true",
help="Ignore disc tags; track numbers start at 1 and all tracks are merged")
logging_group = args.add_mutually_exclusive_group()
logging_group.add_argument("--error",
action="store_const",
help="Report logging messages at error level or higher",
dest="logging_level",
const=logging.ERROR)
logging_group.add_argument("--warning",
action="store_const",
help="Report logging messages at warning level or higher",
dest="logging_level",
const=logging.WARNING)
logging_group.add_argument("--info",
action="store_const",
help="Report logging messages at information level or higher",
dest="logging_level",
const=logging.INFO)
logging_group.add_argument("--debug",
action="store_const",
help="Report all logging messages",
dest="logging_level",
const=logging.DEBUG)
args.set_defaults(logging_level=logging.CRITICAL)
return args.parse_args() # Uses sys.argv
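# Minimal sketch of how this parser might be driven (assumed entry point, not part of
# the original module):
if __name__ == "__main__":
    options = ParseArguments()
    logging.basicConfig(level=options.logging_level)
    logging.debug("Source directory: %s", options.directory)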
| python |
"""
=====================================
Hawkes simulation with exotic kernels
=====================================
Simulation of Hawkes processes with usage of custom kernels
"""
import matplotlib.pyplot as plt
import numpy as np
from tick.base import TimeFunction
from tick.hawkes import SimuHawkes, HawkesKernelExp, HawkesKernelTimeFunc
from tick.plot import plot_point_process
t_values = np.array([0, 1, 1.5], dtype=float)
y_values = np.array([0, .2, 0], dtype=float)
tf1 = TimeFunction([t_values, y_values],
inter_mode=TimeFunction.InterConstRight, dt=0.1)
kernel_1 = HawkesKernelTimeFunc(tf1)
t_values = np.array([0, .1, 2], dtype=float)
y_values = np.array([0, .4, -0.2], dtype=float)
tf2 = TimeFunction([t_values, y_values],
inter_mode=TimeFunction.InterLinear, dt=0.1)
kernel_2 = HawkesKernelTimeFunc(tf2)
hawkes = SimuHawkes(kernels=[[kernel_1, kernel_1],
[HawkesKernelExp(.07, 4), kernel_2]],
baseline=[1.5, 1.5], verbose=False, seed=23983)
run_time = 40
dt = 0.01
hawkes.track_intensity(dt)
hawkes.end_time = run_time
hawkes.simulate()
fig, ax = plt.subplots(hawkes.n_nodes, 1, figsize=(14, 8))
plot_point_process(hawkes, t_max=20, ax=ax)
plt.show()
| python |
#!/usr/bin/env python3
import subprocess
import os
from libsw import file_filter, settings, build_queue, build_index, logger
def register_ip(ip):
path = settings.get('install_path') + 'etc/remote-deploy'
return file_filter.AppendUnique(path, ip, True).run()
def unregister_ip(ip):
path = settings.get('install_path') + 'etc/remote-deploy'
return file_filter.RemoveExact(path, ip).run()
def get_registered_ips():
path = settings.get('install_path') + 'etc/remote-deploy'
if not os.path.exists(path):
return []
ip_list = []
with open(path, 'r') as ip_file:
for line in ip_file:
line = line.strip()
if len(line) > 0:
ip_list.append(line)
return ip_list
def deploy(force):
log_path = settings.get('install_path') + 'var/log/remote-deploy'
with open(log_path, 'a+') as log_file:
log = logger.Log(log_file)
queue = build_queue.new_queue(force)
build_index.Index().populate_builders(queue)
queue.run()
if queue.failed():
log.log('Error: Unable to deploy. Build failed.')
else:
for ip in get_registered_ips():
log.log('')
log.log('Checking deployment for ' + ip)
queue.set_failed_file(settings.get('install_path') + 'etc/deploy-failures/' + ip)
for builder, status in queue.queue:
status = get_deploy_live_status(ip, log, force, queue, builder)
log.log(builder.slug + ' ' + status)
if status == 'ready':
builder.deploy(ip, log)
def check_deploy():
log = logger.Log()
queue = build_queue.new_queue()
build_index.Index().populate_builders(queue)
update_list = queue.run_check()
if len(update_list) > 0:
log.log("Error: Software must be updated locally first.")
else:
for ip in get_registered_ips():
log.log('')
log.log('Checking deployment for ' + ip)
queue.set_failed_file(settings.get('install_path') + 'etc/deploy-failures/' + ip)
for builder, status in queue.queue:
status = get_deploy_live_status(ip, log, False, queue, builder)
log.log(builder.slug + ' ' + status)
debug = False
def get_deploy_live_status(ip, log, force, queue, builder, level=0):
"""
    Recalculate the status of a builder deployment by checking its dependencies.
Args:
builder - The builder to check
level - The recursive depth level the status check is in
"""
status = 'missing'
for b, s in queue.queue:
if b is builder:
status = s
if status == '' or status == 'waiting':
if status == '' and not builder.needs_deploy(ip, log, force):
status = 'pass'
else:
status = 'ready'
deps = builder.dependencies()
if len(deps) > 0:
for slug in deps:
dep_builder, dep_status = queue.entry(slug)
if dep_status == False:
log.log('Unable to find package "' + slug + '" needed for "' + builder.slug + '"')
return 'failed'
dep_status = get_deploy_live_status(ip, log, force, queue, dep_builder, level + 1)
if dep_status == 'failed' or dep_status == 'missing':
return 'failed'
elif dep_status == 'waiting' or dep_status == 'ready':
status = 'waiting'
elif dep_status == 'done':
if status != 'waiting':
status = 'ready'
if debug:
dmsg = 'Checking:'
for i in range(level):
dmsg += ' '
dmsg += builder.slug + ' ' + status
print(dmsg)
return status | python |
# Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tempfile
import unittest
import dill
import numpy as np
from fastestimator.op.numpyop.univariate import Calibrate
from fastestimator.test.unittest_util import is_equal
class TestCalibrate(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.single_input = [np.array([1, 2, 3, 5])]
cls.single_output = [np.array([0.5, 1.0, 1.5, 2.5])]
cls.multi_input = [np.array([2, 2]), np.array([0, 1, 2])]
cls.multi_output = [np.array([1, 1]), np.array([0, 0.5, 1])]
def test_single_input(self):
op = Calibrate(inputs='x', outputs='x', calibration_fn=lambda x: x/2)
data = op.forward(data=self.single_input, state={})
self.assertTrue(is_equal(data, self.single_output))
def test_multi_input(self):
op = Calibrate(inputs=['x', 'y'], outputs=['x', 'y'], calibration_fn=lambda x: x/2)
data = op.forward(data=self.multi_input, state={})
self.assertTrue(is_equal(data, self.multi_output))
def test_single_input_fn_from_disk(self):
tmpdirname = tempfile.mkdtemp()
fn_path = os.path.join(tmpdirname, 'fn.pkl')
fn = lambda x: x/2
with open(fn_path, 'wb') as f:
dill.dump(fn, f)
op = Calibrate(inputs='x', outputs='x', calibration_fn=fn_path)
with self.subTest("Do nothing on Warmup"):
resp = op.forward(self.single_input, state={'warmup': True})
self.assertTrue(np.array_equal(resp, self.single_input))
with self.subTest("Load function during regular execution"):
resp = op.forward(self.single_input, state={'warmup': False})
self.assertTrue(np.array_equal(resp, self.single_output))
os.remove(fn_path)
with self.subTest("Continue to use function without re-loading"):
resp = op.forward(self.single_input, state={'warmup': False})
self.assertTrue(np.array_equal(resp, self.single_output))
| python |
import sys
from itertools import product
import intake
def all_params():
all_params = {}
cat = intake.open_catalog('catalog.yaml')
for item in cat:
description = cat[item].describe()
params = description["user_parameters"]
params = {params[i]["name"]: params[i]["allowed"] for i in range(len(params))}
# clean-up blank values if needed
seasons = [s for s in params["season"] if s != ""]
if "grid" in params.keys():
grids = [g for g in params["grid"] if g != ""]
# FESOM is currently the only item without a "region" parameter
if "region" not in params.keys():
cat_kwargs = [p for p in product(params["datatype"], seasons)]
cat_kwargs = [{"datatype": i[0], "season": i[1]} for i in cat_kwargs]
else:
non_grid_datatypes = [d for d in params["datatype"] if d != "grid"]
cat_kwargs = [
p for p in product(params["region"], non_grid_datatypes, seasons)
]
cat_kwargs = [{"region": i[0], "datatype": i[1], "season": i[2]} for i in cat_kwargs]
if "grid" in params.keys():
more_kwargs = [p for p in product(params["region"], ["grid"], grids)]
more_kwargs = [
{"region": i[0], "datatype": i[1], "grid": i[2]} for i in more_kwargs
]
cat_kwargs = cat_kwargs + more_kwargs
all_params.update({item: cat_kwargs})
return all_params, cat
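# Illustrative shape of the return value (values shown are examples only):
#   {"FESOM": [{"datatype": "int", "season": "aso"}, ...],
#    "<item with regions>": [{"region": ..., "datatype": ..., "season": ...}, ...]}
# i.e. one list of catalog keyword combinations per catalog item, plus the open catalog.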
def main(params_only=False, all_params=all_params):
all_params, cat = all_params()
for item in all_params.keys():
print(f"\n{item}")
print(f"{len(all_params[item])} parameterizations for {item}: {all_params[item]}")
if not params_only:
for d in all_params[item]:
print(f"\n\n{item}: loading parameterization {d}")
# this specific dataset has not been added yet, and I don't think there's a way
                # to skip it in the `catalog.yaml` user parameters, so we skip it manually here:
if item == "FESOM" and d["datatype"] == "int" and d["season"] == "aso":
pass
else:
ds = cat[item](**d).to_dask()
print(ds)
if __name__ == "__main__":
if "params_only" in sys.argv:
main(params_only=True)
else:
main()
| python |
from logging import error, info, basicConfig, getLogger, warning
from os import environ as env
from gitlabdata.orchestration_utils import (
postgres_engine_factory,
snowflake_engine_factory,
query_executor,
)
from google_sheets_client import GoogleSheetsClient
from qualtrics_client import QualtricsClient
from sheetload_dataframe_utils import dw_uploader
def construct_qualtrics_contact(result):
return {
"firstName": result["first_name"],
"lastName": result["last_name"],
"email": result["email_address"],
"language": result["language"],
"embeddedData": {"gitlabUserID": result["user_id"]},
}
def get_qualtrics_request_table_name(file_id):
return "".join(x for x in file_id if x.isalpha()).lower()
def should_file_be_processed(file, qualtrics_mailing_lists):
file_name = file.title
_, tab = file_name.split(".")
if tab in qualtrics_mailing_lists:
info(
f"{file_name}: Qualtrics already has mailing list with corresponding name -- not processing."
)
return False
if file.sheet1.title != tab:
error(f"{file_name}: First worksheet did not match expected name of {tab}")
return False
return True
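# Illustrative example (hypothetical sheet name): a file titled
# "qualtrics_mailing_list.onboarding_survey" is only processed when Qualtrics has
# no mailing list called "onboarding_survey" and the file's first worksheet is
# itself titled "onboarding_survey".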
def process_qualtrics_file(
file, is_test, google_sheet_client, schema, qualtrics_client
):
tab = file.sheet1.title
dataframe = google_sheet_client.load_google_sheet(None, file.title, tab)
if list(dataframe.columns.values)[0].lower() != "id":
warning(f"{file.title}: First column did not match expected name of id")
return
if not is_test:
file.sheet1.update_acell("A1", "processing")
engine = snowflake_engine_factory(env, "LOADER", schema)
analytics_engine = snowflake_engine_factory(env, "CI_USER")
table = get_qualtrics_request_table_name(file.id)
dw_uploader(engine, table, dataframe, schema)
query = f"""
SELECT first_name, last_name, email_address, language, user_id
FROM ANALYTICS_SENSITIVE.QUALTRICS_API_FORMATTED_CONTACTS WHERE user_id in
(
SELECT id
FROM RAW.{schema}.{table}
WHERE TRY_TO_NUMBER(id) IS NOT NULL
)
"""
results = []
if not is_test:
results = query_executor(analytics_engine, query)
qualtrics_contacts = [construct_qualtrics_contact(result) for result in results]
final_status = "processed"
if not is_test:
try:
mailing_id = qualtrics_client.create_mailing_list(
env["QUALTRICS_POOL_ID"], tab, env["QUALTRICS_GROUP_ID"]
)
except:
file.sheet1.update_acell(
"A1",
"Mailing list could not be created in Qualtrics. Try changing mailing list name.",
)
raise
else:
error_contacts = qualtrics_client.upload_contacts_to_mailing_list(
env["QUALTRICS_POOL_ID"], mailing_id, qualtrics_contacts
)
error_contacts_ids = [
contact["embeddedData"]["gitlabUserID"] for contact in error_contacts
]
if error_contacts_ids:
final_status = f"{final_status} except {error_contacts_ids}"
if is_test:
info(f"Not renaming file for test.")
else:
file.sheet1.update_acell("A1", final_status)
def qualtrics_loader(load_type: str):
is_test = load_type == "test"
google_sheet_client = GoogleSheetsClient()
prefix = "qualtrics_mailing_list."
if is_test:
prefix = "test_" + prefix
all_qualtrics_files_to_load = [
file
for file in google_sheet_client.get_visible_files()
if file.title.lower().startswith(prefix)
]
schema = "qualtrics_mailing_list"
if not is_test:
qualtrics_client = QualtricsClient(
env["QUALTRICS_API_TOKEN"], env["QUALTRICS_DATA_CENTER"]
)
qualtrics_mailing_lists = [
mailing_list for mailing_list in qualtrics_client.get_mailing_lists()
]
else:
qualtrics_client = None
qualtrics_mailing_lists = []
qualtrics_files_to_load = list(
filter(
lambda file: should_file_be_processed(file, qualtrics_mailing_lists),
all_qualtrics_files_to_load,
)
)
info(f"Found {len(qualtrics_files_to_load)} files to process.")
for file in qualtrics_files_to_load:
process_qualtrics_file(
file, is_test, google_sheet_client, schema, qualtrics_client
)
| python |
# Interactive Help
'''typing help() in the Python console
Or help(print)
or print(input.__doc__)'''
# Docstrings
'''def contador(i, f, p):
    """==> Counts from one value to another and prints the values on screen.
    :param i: start of the count
    :param f: end of the count
    :param p: step of the count
    :return: no return value"""
    c = i
    while c <= f:
        print(f'{c}', end=' ')
        c += p
    print('END')
contador(2, 10, 2)
help(contador)'''
# Optional parameters
'''def somar(a=0, b=0, c=0):
    s = a + b + c
    print(f'The sum is {s}')
somar(3, 2, 5)
somar(8, 4)
somar()'''
# Variable scope
'''def teste():
    x = 8
    print(f'Inside the teste function, n is {n}')
    print(f'Inside the teste function, x is {x}')
n = 2
print(f'In the main program, n is {n}')
teste()
print(f'In the main program, x is {x}')'''
'''def funcao():
    global n2
    n1 = 4
    n2 = 6
    print(f'N1 inside is {n1}')
    print(f'N2 inside is {n2}')
n1 = 2
n2 = 4
funcao()
print(f'N2 outside is {n2}')
print(f'N1 outside is {n1}')'''
# Return values
'''def somar(a=0, b=0, c=0):
    s = a + b + c
    return s
resp = somar(3, 2, 5)
resp2 = somar(8, 4)
resp3 = somar()
print(f'My calculations gave {resp}, {resp2}, and {resp3}')'''
# Playing around
'''def fatorial(num=1):
    f = 1
    for c in range(num, 0, -1):
        f *= c
    return f
f1 = fatorial(5)
f2 = fatorial(4)
f3 = fatorial()
print(f'The results are {f1}, {f2} and {f3}')
n = int(input('Enter a number: '))
print(f'The factorial of {n} is {fatorial(n)}')'''
'''def parouimpar(n=0):
    if n % 2 == 0:
        return True
    else:
        return False
num = int(input('Enter a number: '))
if parouimpar(num):
    print('It is even!')
else:
    print('It is not even!')'''
| python |
from .sql import SQL
from .sac import SAC
from .drsac import DRSAC
| python |
import jaxopt
import numpy as np
import pandas as pd
import tinygp
import jax
import jax.numpy as jnp
from io import StringIO
import matplotlib.pyplot as plt
from plotting import *
import pickle
from jax.config import config
config.update("jax_enable_x64", True)
bands = 'ugrizY'
N_bands = len(bands)
class Multiband(tinygp.kernels.Kernel):
    """Multi-band GP kernel: a shared time kernel scaled by a cross-band covariance.
    Parameters
    ----------
    time_kernel : tinygp.kernels.Kernel
        Kernel describing correlations in time, shared by all bands.
    diagonal : array of shape (n_bands,)
        Diagonal entries of the factor used to build the band covariance.
    off_diagonal : array of shape (n_bands * (n_bands - 1) // 2,)
        Strictly lower-triangular entries of the same factor.
    Attributes
    ----------
    band_kernel : array of shape (n_bands, n_bands)
        Cross-band covariance matrix, factor @ factor.T.
    time_kernel : tinygp.kernels.Kernel
        The time kernel passed in.
    """
def __init__(self, time_kernel, diagonal, off_diagonal):
ndim = diagonal.size
if off_diagonal.size != ((ndim - 1) * ndim) // 2:
raise ValueError(
"Dimension mismatch: expected "
f"(ndim-1)*ndim/2 = {((ndim - 1) * ndim) // 2} elements in "
f"'off_diagonal'; got {off_diagonal.size}"
)
factor = jnp.zeros((ndim, ndim))
factor = factor.at[jnp.diag_indices(ndim)].add(diagonal)
factor = factor.at[jnp.tril_indices(ndim, -1)].add(off_diagonal)
self.band_kernel = factor @ factor.T
self.time_kernel = time_kernel
def evaluate(self, X1, X2):
t1, b1 = X1
t2, b2 = X2
return self.band_kernel[b1, b2] * self.time_kernel.evaluate(t1, t2)
# Right now the GP DOES NOT have a pad. What it SHOULD have (I think) is the following:
# Define newTime = np.linspace(-30, 150, 100)
# Initialize gpF_err = np.ones(100)
# gpF = np.zeros(100)
# Go band-by-band:
# For band i, get t0 as np.nanmax(-30, min(bandT))
# tf as np.nanmin(150, max(bandT))
# Define gpTime as newTime truncated between t0 and tf, get index i of first value of gpTime in newTime
# Run GP and evaluate at gpTime values
# Fill gpF and gpF_err starting at i for all values in gpTime
# DONE if all goes well, you should have 100 points for every band between -30 and 150 days
# evenly spaced for all observations and without needing to extrapolate.
# then you don't even need to pass in the time parameter for every LC, reducing the dimensionality!
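# A minimal sketch of the padding scheme described above (hypothetical helper,
# not wired into the functions below): place per-band GP predictions onto a
# fixed -30..150 day grid and pad outside the observed window, so every light
# curve ends up with the same number of points without extrapolating.
def _pad_band_to_grid(band_t, band_mu, band_std, n_points=100):
    new_time = np.linspace(-30, 150, n_points)
    gp_f = np.zeros(n_points)
    gp_f_err = np.ones(n_points)
    # keep only the grid points that fall inside this band's observed time span
    mask = (new_time >= np.nanmin(band_t)) & (new_time <= np.nanmax(band_t))
    gp_f[mask] = np.interp(new_time[mask], band_t, band_mu)
    gp_f_err[mask] = np.interp(new_time[mask], band_t, band_std)
    return new_time, gp_f, gp_f_err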
#def gp_withPad(df, savepath='./',plotpath='./', bands='ugrizY', Ntstp=100, ts='0000000', fn='GPSet'):
#will this cause issues on the front end? Potentially. Let's find out.
def gp_withPad(df, savepath='./',plotpath='./', bands='ugrizY', Ntstp=100, ts='0000000', fn='GPSet'):
    """Fit a multi-band GP to each light curve and resample it from tmin to tmax.
    Parameters
    ----------
    df : pandas.DataFrame
        Light curves with 'T', 'Flux', 'Flux_Err', 'Filter' and 'CID' columns.
    savepath : str
        Directory where the pickled GP dictionary is written.
    plotpath : str
        Directory where diagnostic GP plots are saved.
    bands : str
        Photometric bands, in the order used for the band index.
    Ntstp : int
        Number of grid points per band.
    ts : str
        Run identifier used in the output file name.
    fn : str
        Base name of the output pickle file.
    Returns
    -------
    pandas.DataFrame
        The input dataframe with 'T', 'Filter', 'Flux' and 'Flux_Err' replaced
        by the GP-interpolated values.
    """
#num_bands = len(np.unique(band_idx))
num_bands = len(bands)
GP_dict = {}
#only interpolate from tmin to tmax, and then pad the ends in order to get to 100 points!
for idx, row in df.iterrows():
t = np.array(row["T"])
f = np.array(row["Flux"])
f[f<0.] = 0. #getting rid of negative flux
#the magnitude-like array for the sake of the conversion
y = np.log(f + 1)
yerr = np.array(row["Flux_Err"]) / np.array(row["Flux"])
t_test = np.linspace(np.nanmin(t), np.nanmax(t), Ntstp) #only go from tmin to tmax
band = row["Filter"]
band_idx = pd.Series(row['Filter']).astype('category').cat.codes.values
#padL = Ntstp - len(t_test) #how many observations to we need to tack onto the end?
##generate spacing
#padT = np.arange(padL)+1 #one-day spacing tacked onto the end of the interpolated sequence
#df_T = np.concatenate([t_test, padT])
#matrix = [df_T]
#we shouldn't need to pad -- figure this out later
padL = 0 # don't do any padding for now
matrix = [t_test]
def build_gp(params):
time_kernel = tinygp.kernels.Matern32(jnp.exp(params["log_scale"]))
kernel = Multiband(time_kernel, jnp.exp(params["log_diagonal"]), params["off_diagonal"])
diag = yerr ** 2 + jnp.exp(2 * params["log_jitter"][X[1]])
return tinygp.GaussianProcess(kernel, X, diag=diag, mean=lambda x: params["mean"][x[1]])
#the GP parameters
params = {
"mean": np.zeros(num_bands),
"log_scale": np.log(100.0),
"log_diagonal": np.zeros(num_bands),
"off_diagonal": np.zeros(((num_bands - 1) * num_bands) // 2),
"log_jitter": np.zeros(num_bands),
}
@jax.jit
def loss(params):
return -build_gp(params).condition(y)
X = (t, band_idx)
solver = jaxopt.ScipyMinimize(fun=loss)
soln = solver.run(params)
gp = build_gp(soln.params)
df_t = []
df_flux = []
df_flux_err = []
df_filt = []
if idx%50 == 0:
print("Plotting %i...\n"%idx)
plt.figure(figsize=(10,7))
for n in np.unique(band_idx):
m = band_idx == n
plt.errorbar(t[m], np.exp(y[m])-1,yerr=row['Flux_Err'][m], fmt="o", color=f"C{n}")
mu, var = gp.predict(y, X_test=(t_test, np.full_like(t_test, n, dtype=int)), return_var=True)
std = np.sqrt(var)
if idx%50 == 0:
plt.plot(t_test, np.exp(mu)-1, '.-', ms=2, color=f"C{n}")
                plt.fill_between(t_test,np.exp(mu - std)-1, np.exp(mu + std)-1, color=f"C{n}", alpha=0.3, label=bands[n])
#going in order of band here--don't forget it! (ugrizY)
#now pad the end
padF = np.zeros(padL) #one-day spacing tacked onto the end of the interpolated sequence
padFerr = np.ones(padL)
gp_f = np.concatenate([np.exp(mu)-1, padF])
gp_f_err = np.concatenate([std, padFerr])
matrix.append(gp_f)
matrix.append(gp_f_err)
df_t.append(t_test)
df_flux.append(gp_f)
df_flux_err.append(gp_f_err)
df_filt.append([bands[n]]*len(gp_f_err))
if idx%50 == 0:
plotmin = np.nanmin([t_test[0], -30])
plotmax = np.nanmax([t_test[-1], 150])
plt.xlim((plotmin, plotmax))
plt.xlabel("Phase from Trigger (Days)")
plt.ylabel("Flux")
plt.legend()
plt.savefig(plotpath + "/GP_%i.png"%row.CID,dpi=200, bbox_inches='tight')
stacked = np.vstack(matrix)
GP_dict[row.CID] = stacked
# overwrite the original data (not a great solution, but potentially better
# for all the other functions that will use these column names)
df.at[idx, 'T'] = np.concatenate(df_t)
df.at[idx, 'Filter'] = np.concatenate(df_filt)
df.at[idx, 'Flux'] = np.concatenate(df_flux)
df.at[idx, 'Flux_Err'] = np.concatenate(df_flux_err)
#save the dictionary separately just to have them
with open(savepath + '/%s_%i.pkl'%(fn, ts), 'wb') as f:
pickle.dump(GP_dict, f)
return df
def getGPLCs(df, savepath='./',plotpath='./', bands='ugrizY', ts='0000000', fn='GPSet'):
    """Fit a multi-band GP to each light curve on a fixed phase grid (-30 to 150 days).
    Parameters
    ----------
    df : pandas.DataFrame
        Light curves with 'T', 'Flux', 'Flux_Err', 'Filter' and 'CID' columns.
    savepath : str
        Directory where the pickled GP dictionary is written.
    plotpath : str
        Directory where diagnostic GP plots are saved.
    bands : str
        Photometric bands, in the order used for the band index.
    ts : str
        Run identifier used in the output file name.
    fn : str
        Base name of the output pickle file.
    Returns
    -------
    dict
        Maps each CID to a stacked array holding the time grid followed by the
        GP flux and uncertainty for each band.
    """
#num_bands = len(np.unique(band_idx))
Npt = 100
tmin = -30
tmax = 150
num_bands = len(bands)
GP_dict = {}
# make our plots look nice
stylePlots()
for idx, row in df.iterrows():
t = np.array(row["T"])
f = np.array(row["Flux"])
f[f<0.] = 0. #getting rid of negative flux
#the magnitude-like array for the sake of the conversion
y = np.log(f + 1)
yerr = np.array(row["Flux_Err"]) / np.array(row["Flux"])
t_test = np.linspace(tmin, tmax, Npt)
band = row["Filter"]
band_idx = pd.Series(row['Filter']).astype('category').cat.codes.values
matrix = [t_test]
def build_gp(params):
time_kernel = tinygp.kernels.Matern32(jnp.exp(params["log_scale"]))
kernel = Multiband(time_kernel, jnp.exp(params["log_diagonal"]), params["off_diagonal"])
diag = yerr ** 2 + jnp.exp(2 * params["log_jitter"][X[1]])
return tinygp.GaussianProcess(kernel, X, diag=diag, mean=lambda x: params["mean"][x[1]])
        #the GP parameters
        params = {
            "mean": np.zeros(num_bands),
            "log_scale": np.log(100.0),
            "log_diagonal": np.zeros(num_bands),
            "off_diagonal": np.zeros(((num_bands - 1) * num_bands) // 2),
            "log_jitter": np.zeros(num_bands),
        }
@jax.jit
def loss(params):
return -build_gp(params).condition(y)
X = (t, band_idx)
solver = jaxopt.ScipyMinimize(fun=loss)
soln = solver.run(params)
gp = build_gp(soln.params)
df_t = []
df_flux = []
df_flux_err = []
df_filt = []
if idx%50 == 0:
plt.figure(figsize=(10,7))
for n in np.unique(band_idx):
m = band_idx == n
plt.errorbar(t[m], np.exp(y[m])-1,yerr=row['Flux_Err'][m], fmt="o", color=f"C{n}")
mu, var = gp.predict(y, X_test=(t_test, np.full_like(t_test, n, dtype=int)), return_var=True)
std = np.sqrt(var)
if idx%50 == 0:
plt.plot(t_test, np.exp(mu)-1, '.-', ms=2, color=f"C{n}")
                plt.fill_between(t_test,np.exp(mu - std)-1, np.exp(mu + std)-1, color=f"C{n}", alpha=0.3, label=bands[n])
#going in order of band here--don't forget it!
matrix.append(np.exp(mu)-1)
matrix.append(std)
if idx%50 == 0:
plt.xlim((t_test[0], t_test[-1]))
plt.xlabel("Phase from Trigger (Days)")
plt.ylabel("Flux")
plt.legend()
plt.savefig(plotpath + "/GP_%i.png"%row.CID,dpi=200, bbox_inches='tight')
stacked = np.vstack(matrix)
GP_dict[row.CID] = stacked
with open(savepath + '/%s_%i.pkl'%(fn, ts), 'wb') as f:
pickle.dump(GP_dict, f)
return GP_dict
| python |
# A function can return only one value.
#
# If the value is a tuple ...
# the effect is the same as returning multiple values.
# Quotient & Remainder:
#
# To compute the quotient and the remainder it is better to ...
# compute both at the same time.
quot = 7//3
rem = 7%3
assert (quot, rem) == (2, 1)
quot, rem = divmod(7, 3) # built-in function
assert (quot, rem) == (2, 1)
# Function arguments:
#
# Functions can take a variable number of arguments.
# A parameter name that begins with * gathers arguments into a tuple.
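# For example (illustrative): the gathered arguments arrive as one tuple,
# and the function can also return several results as a single tuple.
def min_max(*values):
    return min(values), max(values)
assert min_max(7, 3, 5) == (3, 7)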
t = (7, 3)
# divmod(t)
# TypeError: divmod expected 2 arguments, got 1
assert divmod(*t) == (2, 1) # it works! | python |
from llvmlite.ir import IdentifiedStructType
from rial.ir.metadata.StructDefinition import StructDefinition
class RIALIdentifiedStructType(IdentifiedStructType):
definition: StructDefinition
module_name: str
def __init__(self, context, name, packed=False):
super().__init__(context, name, packed)
self.definition = None
self.module_name = ""
| python |
# -*- coding: cp1254 -*-
#Use this script if external software (Excel, Weka, R, etc.) was used for the analysis
#This script converts an Excel file to a raster (susceptibility map) and calculates the ROC
#The Excel file must include x and y coordinates and probability values as z
#To calculate the AUC, test and train data are required; they are produced by the DATA PREPARATION script
#Ali POLAT (2018)
#////////////////////IMPORTING THE REQUIRED LIBRARIES/////////////////////////
import arcpy, os
from arcpy.sa import *
arcpy.env.overwriteOutput = True
from matplotlib import pyplot as plt
#////////////////////////////Getting Input Parameters//////////////////////////
out_folder_path=arcpy.GetParameterAsText(0)#The folder containing the exported files
exc=arcpy.GetParameterAsText(1)#Excel file
train_1=arcpy.GetParameterAsText(2)#Train data, located in Rec_folder as train_1.shp
test_1=arcpy.GetParameterAsText(3)#Validation data, located in Rec_folder as test_1.shp
koordinat=arcpy.GetParameterAsText(4)#Coordinate system of the map
raster_name=arcpy.GetParameterAsText(5)#The name of the LSM map
cell_size=arcpy.GetParameterAsText(6)#Cell size
field=arcpy.GetParameterAsText(7)#Probability field name, i.e. the column holding the probability values. Default is "ones".
#////////////////////////////////////Starting Analysis/////////////////////////
arcpy.AddMessage(field)
arcpy.env.workspace=out_folder_path
arcpy.CreateFileGDB_management(out_folder_path, "g.gdb")
arcpy.AddMessage("{} file is imported".format(exc))
arcpy.ExcelToTable_conversion(exc,"g.gdb")
arcpy.MakeXYEventLayer_management("g.dbf","point_x","point_y","deneme",koordinat,field)
arcpy.FeatureToRaster_conversion("deneme",field,raster_name,cell_size)
arcpy.AddMessage("Susceptibility map is saved as {}".format(raster_name))
#///////////////////Calculating AUC Values/////////////////////////////////////
arcpy.AddMessage("ROC is calculating")
mx=float (arcpy.GetRasterProperties_management (raster_name, "MAXIMUM").getOutput (0))
mn=float (arcpy.GetRasterProperties_management (raster_name, "MINIMUM").getOutput (0))
e=(float(mx)-float(mn))/100
d=[]
x=0
y=0
z=0
for f in range (100):
x=x+1
y=mn+e
z=z+mn
q=[]
q.append(z)
q.append(y)
q.append(x)
d.append(q)
mn=y
z=0
total=Reclassify(raster_name,"VALUE",RemapRange(d),"NODATA")
total_exp="total.tif"
total.save(total_exp)
trn=ExtractByMask(total,train_1)
train_exp="train.tif"
trn.save(train_exp)
tes=ExtractByMask(total,test_1)
test_exp="test.tif"
tes.save(test_exp)
##.............................................
arcpy.AddField_management(total_exp,"total","DOUBLE")
arcpy.AddField_management(total_exp,"NID","LONG")
block="""rec=0
def yaz():
global rec
pstart=1
pinterval=1
if(rec==0):
rec=pstart
else:
rec+=pinterval
return rec"""
expression="yaz()"
arcpy.CalculateField_management(total_exp,"NID",expression,"PYTHON",block)
lst_nid=list()
with arcpy.da.SearchCursor(total_exp,"NID") as dd:
for row in dd:
lst_nid.append(row[0])
del row
del dd
mx=max(lst_nid)
crs=arcpy.da.InsertCursor(total_exp,["NID"])
for i in range(mx+1,101):
crs.insertRow("0")
arcpy.CalculateField_management(total_exp,"NID",expression,"PYTHON",block)
lst_value=[]
lst_count=[]
lst_nc=[]
lst_nid_2=[]
sc_fields="value","count","total","NID"
with arcpy.da.SearchCursor(total_exp,sc_fields) as scur:
for row in scur:
lst_value.append(row[0])
lst_count.append(row[1])
lst_nc.append(row[2])
lst_nid_2.append(row[3])
del row
for i in range(len(lst_nid_2)):
if lst_value[i]!=i+1:
lst_value.insert(i,0)
h=0
for k in range (len(lst_nid_2)):
if lst_value[k]!=lst_nid_2[k]:
d=lst_count.insert(lst_nid_2[k]-1,0)
with arcpy.da.UpdateCursor(total_exp,"total") as ucur:
for row in ucur:
row[0]=lst_count[h]
ucur.updateRow(row)
h=h+1
del row
##...........................................................................
arcpy.AddField_management(train_exp,"train","DOUBLE")
arcpy.AddField_management(train_exp,"NID","LONG")
block="""rec=0
def yaz():
global rec
pstart=1
pinterval=1
if(rec==0):
rec=pstart
else:
rec+=pinterval
return rec"""
expression="yaz()"
arcpy.CalculateField_management(train_exp,"NID",expression,"PYTHON",block)
lst_nid=list()
with arcpy.da.SearchCursor(train_exp,"NID") as dd:
for row in dd:
lst_nid.append(row[0])
del row
del dd
mx=max(lst_nid)
crs=arcpy.da.InsertCursor(train_exp,["NID"])
for i in range(mx+1,101):
crs.insertRow("0")
arcpy.CalculateField_management(train_exp,"NID",expression,"PYTHON",block)
lst_value=[]
lst_count=[]
lst_nc=[]
lst_nid_2=[]
sc_fields="value","count","train","NID"
with arcpy.da.SearchCursor(train_exp,sc_fields) as scur:
for row in scur:
lst_value.append(row[0])
lst_count.append(row[1])
lst_nc.append(row[2])
lst_nid_2.append(row[3])
del row
for i in range(len(lst_nid_2)):
if lst_value[i]!=i+1:
lst_value.insert(i,0)
h=0
for k in range (len(lst_nid_2)):
if lst_value[k]!=lst_nid_2[k]:
d=lst_count.insert(lst_nid_2[k]-1,0)
with arcpy.da.UpdateCursor(train_exp,"train") as ucur:
for row in ucur:
row[0]=lst_count[h]
ucur.updateRow(row)
h=h+1
del row
##...........................................................
arcpy.AddField_management(test_exp,"test","DOUBLE")
arcpy.AddField_management(test_exp,"NID","LONG")
block="""rec=0
def yaz():
global rec
pstart=1
pinterval=1
if(rec==0):
rec=pstart
else:
rec+=pinterval
return rec"""
expression="yaz()"
arcpy.CalculateField_management(test_exp,"NID",expression,"PYTHON",block)
lst_nid=list()
with arcpy.da.SearchCursor(test_exp,"NID") as dd:
for row in dd:
lst_nid.append(row[0])
del row
del dd
mx=max(lst_nid)
crs=arcpy.da.InsertCursor(test_exp,["NID"])
for i in range(mx+1,101):
crs.insertRow("0")
arcpy.CalculateField_management(test_exp,"NID",expression,"PYTHON",block)
lst_value=[]
lst_count=[]
lst_nc=[]
lst_nid_2=[]
sc_fields="value","count","test","NID"
with arcpy.da.SearchCursor(test_exp,sc_fields) as scur:
for row in scur:
lst_value.append(row[0])
lst_count.append(row[1])
lst_nc.append(row[2])
lst_nid_2.append(row[3])
del row
for i in range(len(lst_nid_2)):
if lst_value[i]!=i+1:
lst_value.insert(i,0)
h=0
for k in range (len(lst_nid_2)):
if lst_value[k]!=lst_nid_2[k]:
d=lst_count.insert(lst_nid_2[k]-1,0)
with arcpy.da.UpdateCursor(test_exp,"test") as ucur:
for row in ucur:
row[0]=lst_count[h]
ucur.updateRow(row)
h=h+1
del row
##..........................................................................
arcpy.JoinField_management(total_exp,"NID",train_exp,"NID","train")
arcpy.JoinField_management(total_exp,"NID",test_exp,"NID","test")
#/////////////////Calculating Sum of Cumulative ///////////////////////////////
arcpy.AddField_management(total_exp,"kum_total","DOUBLE")
arcpy.AddField_management(total_exp,"kum_train","DOUBLE")
arcpy.AddField_management(total_exp,"kum_test","DOUBLE")
block2="""rec=0
def kum_tot(r):
global rec
pstart=r
pinterval=r
if(rec==0):
rec=pstart
else:
rec+=pinterval
return rec"""
expression2="kum_tot(!total!)"
arcpy.CalculateField_management(total_exp,"kum_total",expression2,"PYTHON",block2)
arcpy.CalculateField_management(total_exp,"kum_train","kum_tot(!train!)","PYTHON",block2)
arcpy.CalculateField_management(total_exp,"kum_test","kum_tot(!test!)","PYTHON",block2)
tot_fields="kum_total","kum_train","kum_test"
lst_tot=[]
lst_tr=[]
lst_tst=[]
with arcpy.da.SearchCursor(total_exp,tot_fields) as scur2:
for row in scur2:
lst_tot.append(row[0])
lst_tr.append(row[1])
lst_tst.append(row[2])
del row
del scur2
toplam_tot=max(lst_tot)
toplam_tr=max(lst_tr)
toplam_tst=max(lst_tst)
##......................................................................
arcpy.AddField_management(total_exp,"c_tot","DOUBLE")
arcpy.AddField_management(total_exp,"c_tr","DOUBLE")
arcpy.AddField_management(total_exp,"c_tst","DOUBLE")
c="kum_total","kum_train","kum_test","c_tot","c_tr","c_tst"
with arcpy.da.UpdateCursor(total_exp,c) as ucur2:
for row in ucur2:
v=row[0]/toplam_tot
k=row[1]/toplam_tr
l=row[2]/toplam_tst
row[3]=1-v
row[4]=1-k
row[5]=1-l
ucur2.updateRow(row)
y="c_tot","c_tr","c_tst"
tot=[]
tr=[]
ts=[]
with arcpy.da.SearchCursor(total_exp,y) as scur2:
for row in scur2:
tot.append(row[0])
tr.append(row[1])
ts.append(row[2])
del row
del scur2
tot.insert(0,1)
tr.insert(0,1)
ts.insert(0,1)
tr_son=[]
ts_son=[]
for i in range(100):
b=tot[i]-tot[i+1]
n=tr[i]
m=ts[i]
p=b*n
t=b*m
tr_son.append(p)
ts_son.append(t)
f=round(sum(tr_son)*100,2)
g=round(sum(ts_son)*100,2)
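#The sums above are a rectangle approximation of the area under the ROC curve:
#for each of the 100 classes, width = drop in (1 - specificity) across the class
#and height = sensitivity at its start; tr_son uses the train pixels (success
#rate) and ts_son the test pixels (prediction rate).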
arcpy.AddMessage("Success rate is: {}".format(sum(tr_son)*100))
arcpy.AddMessage("prediction rate is: {}".format(sum(ts_son)*100))
sc=plt.plot(tot,tr,color="red",label=":Success Rate"+"("+str(f)+")")
#///////////////////////////////AUC graph is plotting//////////////////////////
pr=plt.plot(tot,ts,color="blue",label=":Prediction Rate"+"("+str(g)+")")
plt.xlabel("1-Specifity")
plt.ylabel("Sensitivity")
plt.legend(loc="lower right")
arcpy.AddMessage("AUC Graph is saved as auc.png")
auc_graph=os.path.join(out_folder_path,"auc.png")
plt.savefig(auc_graph,dpi=150)
plt.close("all")
arcpy.AddMessage("FINISHED")
#//////////////////////////FINISHED////////////////////////////////////////////
| python |
objConstructors = {'dyn_vals.get' : {'constructor' : 'DynamicValuec',
'type' : 'DynamicValuei',
'fields' : ['bucket_size', 'bucket_time']}}
typeConstructors = {'DynamicValuec' : 'DynamicValuei'}
stateObjects = {'flow_emap' : emap,
'dyn_vals' : vector}
| python |
"""
This is the CoLA Bot code that uses the slurk interface.
The CoLA bot handles the dialogue between two players
who need to collaborate together to solve a task.
In each game room, we show the players images, text
information, and logical rules. They need to discuss
and reach an agreement.
The two important commands are:
/answer: propose a description / reasoning
/agree: agree with the other player's answer
"""
# import packages
import configparser
import os
import json
import random
import sys
import string
import argparse
from threading import Timer
import requests
from time import sleep
from socketIO_client import SocketIO, BaseNamespace
from game_db import ColaGameDb
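# Typical round (illustrative): both players type /ready, one of them sends
# "/answer <description>", the partner replies "/agree", and the bot either
# shows the next item or ends the game and hands out an AMT token.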
# Global variables
TASK_ID = None
# --- class implementation --------------------------------------------------------
# ChatNamespace
# ---------------------------------------------------------------------------------
class ChatNamespace(BaseNamespace):
""" Moderates dialogues between players and handles the commands in the game"""
# Called when connected
def __init__(self, io, path):
super().__init__(io, path)
self.WAITING_TIMER = Timer(1, print, args=["Timer"])
self.id = None
self.COLA_GAME_DB = []
self.emit('ready')
def on_text_message(self, data):
if data['user']['name'] != 'Cola Bot':
for each_room_db in self.COLA_GAME_DB:
if data['room'] == each_room_db.room:
if data['msg'] == "ready":
self._command_ready(data)
elif data["msg"].startswith("answer"):
data["command"] = data["msg"]
self._command_answer(data)
elif data["msg"] == "agree":
self._command_agree(data)
elif data["msg"] == "noreply" or data["msg"] == "no reply":
self._command_noreply(data)
each_room_db.count_msg += 1
def on_new_task_room(self, data):
"""
This gets called as soon as new task (cola) room is created.
:param
data: A dict. Information about the new room.
"""
#global COLA_GAME_DB
print("new task room: ", data)
# As a new room opens, an instance of cola game class is created
cola_db = ColaGameDb(data['room'])
# add both players information
for user in data['users']:
cola_db.add_users(user)
# Generate the data for each game instance
print("generate data every time cola is called")
cola_db.generate_cola_data()
self.WAITING_TIMER.cancel()
cola_db.ready_timer = Timer(60*1, self.emit, args=['text',
{
'msg': "Are you ready? Please type **/ready** to begin the game.",
'room': cola_db.room,
'html': True
}
])
cola_db.ready_timer.start()
        # Keep information about all the rooms, i.e. one ColaGameDb instance per room
self.COLA_GAME_DB.append(cola_db)
self.emit("join_room", {'user': self.id, 'room': data['room']}) # join cola
sys.stdout.flush()
# --- public functions -----------------------------------------------------------------
def on_joined_room(self, data):
""" This is called once, when the bot joins a room.
:param
data: A dict. All information about the new room.
"""
self.id = data['user']
#global COLA_GAME_DB
        # Search for the correct database (according to the current room)
for cola_db in self.COLA_GAME_DB:
if data['room'] == cola_db.room:
cola_db.add_users(data['user'])
print("on_joined_room", data)
sys.stdout.flush()
# Send a welcome message to both users (via the room-id).
if data['room'] != "waiting_room":
# Welcome message for the cola room #
sleep(.5)
self.emit('text', {'msg': ' **Welcome to the CoLa Game!**'
' Discussion and providing reason(s)'
' for your answer is crucial for this game.',
'room': data['room'],
'html': True})
sleep(.5)
self.emit('text', {'msg': ' Remember the following commands to play the game:'
' \n\n(1) Propose answer to your partner: Type "/answer'
' ...your description here...".'
' \n\n(2) Agree on the answer proposed by your partner:'
' Type "/agree".\n\n',
'room': data['room'],
'html': True})
sleep(.5)
self.emit('text', {'msg': ' Please type **/ready** to begin the game.',
'room': data['room'],
'html': True})
sleep(.5)
self.emit('set_text',{'room': data['room'],
'id': "status-box",
'text': 'Please type /ready to begin the game.'})
def on_command(self, data):
print("on_command", data)
sys.stdout.flush()
if data["command"].startswith("ready"):
self._command_ready(data)
elif data["command"].startswith("answer"):
self._command_answer(data)
elif data["command"].startswith("agree"):
self._command_agree(data)
elif data["command"].startswith("noreply"):
self._command_noreply(data)
#elif data["command"].startswith("change"):
# self._command_change(data)
else:
for each_room_db in self.COLA_GAME_DB:
if data['room'] == each_room_db.room:
all_players = each_room_db.players
self_id = [all_players[i]['id'] for i in range(0, len(all_players))
if data['user']['id'] == all_players[i]['id']]
self.emit('text',
{
'msg': '{} is not a valid command. '.format(data["command"]),
'receiver_id': self_id,
'room': data['room']
})
def _command_ready(self, data):
""" Test slash command skills of the players """
print("_command_ready", data)
sys.stdout.flush()
for each_room_db in self.COLA_GAME_DB:
if data['room'] == each_room_db.room:
self_id = [player['id'] for player in each_room_db.players
if player['id'] == data['user']['id']]
other_user = [player['id'] for player in each_room_db.players
if player['id'] != data['user']['id']]
if not each_room_db.ready_id:
each_room_db.ready_id.add(self_id[0])
self.emit('text', {
'msg': 'Now, waiting for your partner to type /ready. ',
'receiver_id': self_id[0],
'room': each_room_db.room
})
each_room_db.ready_timer.cancel()
each_room_db.ready_timer = Timer(60*.5,
self.emit,
args=['text', {
'msg': "Your partner is ready. Please, also type /ready!",
'room': each_room_db.room,
'receiver_id': other_user
}
]
)
each_room_db.ready_timer.start()
elif self_id[0] not in each_room_db.ready_id and len(each_room_db.ready_id) == 1:
# game starts #
self.emit('text', {
'msg': 'Woo-Hoo! Game begins now. ',
'room': each_room_db.room})
each_room_db.ready_id.add(self_id[0])
each_room_db.ready_flag = True
each_room_db.first_answer = False
self.on_show_and_query(each_room_db)
each_room_db.ready_timer.cancel()
# conversation timer starts
each_room_db.conversation_timer = Timer(60*5,
self.emit,
args=['text',
{
'msg': 'You both seem to be having a discussion for a '
'long time. Could you reach an agreement and '
'provide an answer?',
'room': each_room_db.room
}
]
)
each_room_db.conversation_timer.start()
elif self_id[0] in each_room_db.ready_id:
self.emit('text', {
'msg': 'You have already typed /ready. ',
'receiver_id': self_id[0],
'room': each_room_db.room})
def on_show_and_query(self, game_room_db):
"""
Start the game by showing the images and asking questions
:param data: current room database dict
:return:
"""
# start the game and update the current state of game
# pop-out the current question from room data
curr_data = game_room_db.room_data.pop(0)
game_room_db.current_state = curr_data
print(curr_data)
sys.stdout.flush()
self.emit('set_attribute',
{
'room': game_room_db.room,
'id': "current-image",
'attribute': "src",
'value': curr_data['data']
})
self.emit('set_text',
{
'room': game_room_db.room,
'id': "status-box",
'text': curr_data['question']
})
def _command_answer(self, data):
"""
Providing your own (individual player's) answer / reason
:param data: dict of user data
:return:
"""
for each_room_db in self.COLA_GAME_DB:
if data['room'] == each_room_db.room:
all_players = each_room_db.players
self_id = [all_players[i]['id'] for i in range(0, len(all_players))
if data['user']['id'] == all_players[i]['id']]
if not each_room_db.first_answer and each_room_db.count_msg < 5:
self.emit('text',
{
'msg': 'There is no discussion so far. You should discuss first, then suggest and update'
' your answers.',
'receiver_id': self_id[0],
'room': each_room_db.room
})
elif not each_room_db.ready_flag:
self.emit('text',
{
'msg': 'Both players have not typed /ready yet. ',
'receiver_id': self_id[0],
'room': each_room_db.room
})
elif not each_room_db.game_over_status:
sent_id = [all_players[i]['id'] for i in range(0, len(all_players))
if data['user']['id'] != all_players[i]['id']]
self_name = [all_players[i]['name'] for i in range(0, len(all_players))
if data['user']['id'] == all_players[i]['id']]
self_id = [all_players[i]['id'] for i in range(0, len(all_players))
if data['user']['id'] == all_players[i]['id']]
proposal = " ".join(data['command'].split("answer ")[1:]).strip()
if proposal:
each_room_db.answer_status = True
self.emit('text', {'msg': 'The current proposal from '
'{} is **"{}"** '.format(self_name[0]
, proposal),
'room': each_room_db.room,
'html': True})
each_room_db.curr_player_ans_id = self_id[0]
self.emit('text', {'msg': 'Do you agree with your partner\'s answer?'
' If not, please continue the discussion.',
'receiver_id': sent_id[0],
'room': each_room_db.room})
else:
self.emit('text', {
'msg': 'This command cannot be processed.\n\n Answer comes with a'
' description, for example, /answer This is a... because '
'...your description here...',
'receiver_id': self_id[0],
'room': each_room_db.room,
'html': True})
else:
self.emit('text', {
'msg': 'Cannot process this command. The game is already finished.'
' ',
'room': each_room_db.room})
def _command_agree(self, data):
"""
Function where one player can agree to another player's answer
new query automatically begins or the game ends.
:param data:
:return:
"""
#global COLA_GAME_DB
for each_room_db in self.COLA_GAME_DB:
if data['room'] == each_room_db.room:
# ID of the user #
all_players = each_room_db.players
self_id = [all_players[i]['id'] for i in range(0, len(all_players))
if data['user']['id'] == all_players[i]['id']]
if not each_room_db.ready_flag:
self.emit('text', {
'msg': 'Both players have not typed /ready yet. ',
'receiver_id': self_id[0],
'room': each_room_db.room})
elif each_room_db.room_data:
if each_room_db.answer_status:
if self_id[0] == each_room_db.curr_player_ans_id:
self.emit('text', {
'msg': 'You cannot agree to your own answer. ',
'receiver_id': self_id[0],
'room': each_room_db.room})
return
# if the game list is non-empty, the game continues.
self.emit('text', {
'msg': 'Bravo! You have now moved to the next round. ',
'room': each_room_db.room})
# timer cancels
each_room_db.conversation_timer.cancel()
self.on_show_and_query(each_room_db)
each_room_db.answer_status = False
each_room_db.count_msg = 0
else:
self.emit('text',
{'msg': 'This command cannot be processed. You have not'
' started discussion with your partner. You have to '
'propose answers to each other and reach an agreement.',
'receiver_id': self_id[0],
'room': each_room_db.room})
else:
# as soon as the list is empty, game end #
if each_room_db.game_over_status is False and\
each_room_db.answer_status is False:
self.emit('text',
{'msg': 'This command cannot be processed. You have not '
'started discussion with your partner. You have to '
'propose answers to each other and reach an agreement.'
' ',
'receiver_id': self_id[0],
'room': each_room_db.room})
elif each_room_db.game_over_status is False and\
each_room_db.answer_status is True:
if self_id[0] == each_room_db.curr_player_ans_id:
self.emit('text', {
'msg': 'You cannot agree to your own answer. ',
'receiver_id': self_id[0],
'room': each_room_db.room})
return
self.game_over(each_room_db.room)
each_room_db.game_over_status = True
elif each_room_db.game_over_status is True:
self.emit('text', {
'msg': 'Cannot process this command. The game is already finished.'
' ',
'room': each_room_db.room})
else:
print("Something is wrong!!!")
# self.game_over(data)
# message to end the game #
    def game_over(self, room):
        """ Called when the game is over; generates a token for the players """
#global COLA_GAME_DB
self.emit('text', {'msg': 'Please enter the following token into' \
' the field on the HIT webpage, and close this' \
' browser window. ', 'room': room})
amt_token = self.confirmation_code(room)
self.emit('text', {'msg': 'Here\'s your token: {}'.format(f'{amt_token}'),
'room': room})
self.close_game(room)
# message to end the game #
    def no_partner(self, room):
        """ Called when no partner could be found; generates a token and closes the game """
#global COLA_GAME_DB
self.emit('text', {'msg': 'Unfortunately we could not find a partner for you!', 'room': room})
self.emit('text', {'msg': 'Please enter the following token into' \
' the field on the HIT webpage, and close this' \
' browser window. ', 'room': room})
amt_token = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
status_txt = 'no_partner'
self.emit('log', {'room': room, 'type': "confirmation_log", 'amt_token':amt_token, 'status_txt':status_txt})
self.emit('text', {'msg': 'Here\'s your token: {}'.format(f'{amt_token}'),
'room': room})
self.close_game(room)
def confirmation_code(self, room):
""" Generate AMT token that will be sent to each player """
amt_token = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
status_txt = 'success'
#token_log = random.choices(string.ascii_uppercase + string.digits, k=6)
self.emit('log', {'room': room, 'type': "confirmation_log", 'amt_token':amt_token, 'status_txt':status_txt})
return amt_token
def _command_noreply(self, data):
""" If the partner does not reply """
#global COLA_GAME_DB
for each_room_db in self.COLA_GAME_DB:
if data['room'] == each_room_db.room:
room = each_room_db.room
# ID of the user #
all_players = each_room_db.players
self_id = [player['id'] for player in all_players
if data['user']['id'] == player['id']]
other_id = [player['id'] for player in all_players
if player['id'] != data['user']['id']]
# generate AMT token that will be sent to each player
amt_token = ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
status_txt = 'no_reply'
self.emit('log', {'room': room, 'type': "confirmation_log", 'amt_token':amt_token, 'status_txt':status_txt})
self.emit('text', {'msg': 'Here\'s your token: {}'.format(f'{amt_token}'),
'room': room,
'receiver_id': self_id[0]})
self.emit('text', {'msg': 'Your partner closed the game, because you were not responding for a while.',
'room': room,
'receiver_id': other_id[0]})
self.close_game(room)
def close_game(self, room):
self.emit('text', {'msg': 'The game is over! Thank you for your participation!',
'room': room})
self.emit('set_attribute', {
'room': room,
'id': "type-area",
'attribute': "style",
'value': 'visibility:hidden'
})
if room != "waiting_room":
response = requests.put(f"{uri}/room/{room}",
headers={'Authorization': f"Token {token}"},
json=dict(read_only=True)
)
print(response)
sys.stdout.flush()
for each_room_db in self.COLA_GAME_DB:
each_room_db.game_closed = True
if room == each_room_db.room:
if each_room_db.ready_timer:
each_room_db.ready_timer.cancel()
if each_room_db.conversation_timer:
each_room_db.conversation_timer.cancel()
if each_room_db.answer_timer:
each_room_db.answer_timer.cancel()
if each_room_db.join_timer:
each_room_db.join_timer.cancel()
# all_players = each_room_db.players
# for player in all_players:
# print(player)
# sys.stdout.flush()
# self.emit("leave_room", {'user': player["id"], 'room': room})
# user_id = player["id"]
# response = requests.get(f"{uri}/user/{user_id}",
# headers={'Authorization': f"Token {token}"})
# print(response.text)
# sys.stdout.flush()
# user_token = response.json()["token"]
# response = requests.delete(f"{uri}/token/{user_token}",
# headers={'Authorization': f"Token {token}"})
# print(response)
# sys.stdout.flush()
def on_status(self, data):
""" determine join/leave/rejoin status and display corresponding messages """
#global COLA_GAME_DB
print("status:", data)
sys.stdout.flush()
# If this function is called because a player joins the room ...
# Occurs when the player re-joins the room
if data['type'] == "join":
if data['room'] == "waiting_room":
if not self.WAITING_TIMER.is_alive():
self.WAITING_TIMER = Timer(5*60,
self.no_partner,
args=[data['room']]
)
self.WAITING_TIMER.start()
else:
# ... find the correct database.
for each_room_db in self.COLA_GAME_DB:
if each_room_db.room == data['room']:
# update the display for the rejoined user.
curr_data = each_room_db.current_state
if curr_data is not None:
rejoin_timer = Timer(3*1, self.emit, args=['set_attribute',
{
'room':data['room'],
'id': "current-image",
'attribute': "src",
'value': curr_data['data'],
'receiver_id': data['user']['id']
}
])
rejoin_timer.start()
rejoin_timer2 = Timer(3*1, self.emit, args=['set_text',
{
'room': data['room'],
'id': "status-box",
'text': curr_data['question'],
'receiver_id': data['user']['id']
}
])
rejoin_timer2.start()
other_user = [player for player in each_room_db.players
if player['id'] != data['user']['id']]
user_name = data['user']['name']
# Send a message to the other user, that the current user has
# rejoined the chat.
self.emit('text',
{
'msg': f'{user_name} has rejoined the game.',
'room': each_room_db.room,
'receiver_id': other_user[0]['id']
})
# If this function is called because a player left the room ...
if data['type'] == "leave":
for each_room_db in self.COLA_GAME_DB:
# ... find the correct database.
if each_room_db.room == data['room']:
# if data['user']['token']['task'] is not None:
other_user = [player for player in each_room_db.players if
player['id'] != data['user']['id']]
user_name = data['user']['name']
# Send a message to the other user, that the current user has left the chat.
self.emit('text', {'msg': f'{user_name} has left the game. Please wait a '
f'bit, your partner may rejoin.',
'room': each_room_db.room,
'receiver_id': other_user[0]['id']})
if __name__ == '__main__':
print("bot started")
parser = argparse.ArgumentParser(description='Cola Bot')
if 'TOKEN' in os.environ:
token = {'default': os.environ['TOKEN']}
else:
token = {'required': True}
if 'CHAT_HOST' in os.environ:
chat_host = {'default': os.environ['CHAT_HOST']}
else:
chat_host = {'default': 'http://localhost'}
if 'CHAT_PORT' in os.environ:
chat_port = {'default': os.environ['CHAT_PORT']}
else:
chat_port = {'default': None}
if 'COLA_TASK_ID' in os.environ:
task_id = {'default': os.environ['COLA_TASK_ID']}
else:
task_id = {'default': None}
parser.add_argument('-t', '--token',
help='token for logging in as bot (see SERVURL/token)',
**token)
parser.add_argument('-c', '--chat_host',
help='full URL (protocol, hostname; ending with /) of chat server',
**chat_host)
parser.add_argument('-p', '--chat_port',
type=int,
help='port of chat server',
**chat_port)
parser.add_argument('--task_id',
type=int,
help='Task to join',
**task_id)
args = parser.parse_args()
TASK_ID = args.task_id
uri = args.chat_host
if args.chat_port:
uri += f":{args.chat_port}"
print("running cola bot on", uri, "with token", args.token)
sys.stdout.flush()
uri += "/api/v2"
token = args.token
# We pass token and name in request header
socketIO = SocketIO(args.chat_host, args.chat_port,
headers={'Authorization': args.token, 'Name': 'Cola Bot'},
Namespace=ChatNamespace)
socketIO.wait()
| python |
from pykitml.testing import pktest_graph, pktest_nograph
@pktest_graph
def test_adult():
import os.path
import numpy as np
import pykitml as pk
from pykitml.datasets import adult
# Download the dataset
if(not os.path.exists('adult.data.pkl')): adult.get()
# Load adult data set
inputs_train, outputs_train, inputs_test, outputs_test = adult.load()
# Normalize dataset
array_min, array_max = pk.get_minmax(inputs_train)
inputs_train = pk.normalize_minmax(inputs_train, array_min, array_max, cols=[0, 2, 9, 10, 11])
inputs_test = pk.normalize_minmax(inputs_test, array_min, array_max, cols=[0, 2, 9, 10, 11])
# Convert categorical values to one-hot values
inputs_train, inputs_test = pk.onehot_cols_traintest(inputs_train, inputs_test, cols=[1, 3, 4, 5, 6, 7, 8, 9, 12])
# Create model
adult_classifier = pk.LogisticRegression(104, 1)
# Train the model
adult_classifier.train(
training_data=inputs_train,
targets=outputs_train,
batch_size=10,
epochs=1500,
optimizer=pk.Adam(learning_rate=0.015, decay_rate=0.99),
testing_data=inputs_test,
testing_targets=outputs_test,
testing_freq=30,
decay_freq=40
)
# Save it
pk.save(adult_classifier, 'adult_classifier.pkl')
# Plot performance
adult_classifier.plot_performance()
# Print accuracy
accuracy = adult_classifier.accuracy(inputs_train, outputs_train)
print('Train accuracy:', accuracy)
accuracy = adult_classifier.accuracy(inputs_test, outputs_test)
print('Test accuracy:', accuracy)
# Plot confusion matrix
adult_classifier.confusion_matrix(inputs_test, outputs_test)
# Assert if it has enough accuracy
assert adult_classifier.accuracy(inputs_test, outputs_test) >= 82
if __name__ == '__main__':
try:
test_adult.__wrapped__()
except AssertionError:
pass | python |
#!/usr/bin/env python
# encoding: utf-8
class OsfStorageError(Exception):
pass
class PathLockedError(OsfStorageError):
pass
class SignatureConsumedError(OsfStorageError):
pass
class VersionNotFoundError(OsfStorageError):
pass
class SignatureMismatchError(OsfStorageError):
pass
class VersionStatusError(OsfStorageError):
pass
class DeleteError(OsfStorageError):
pass
class UndeleteError(OsfStorageError):
pass
class InvalidVersionError(OsfStorageError):
pass
class MissingFieldError(OsfStorageError):
pass
class InvalidPath(OsfStorageError):
pass
| python |
"""
Suggest types for untyped code.
"""
import ast
from collections import defaultdict
from dataclasses import dataclass, field
from types import FunctionType
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple, Union
from .safe import safe_getattr, safe_isinstance
from .error_code import ErrorCode
from .node_visitor import Failure, ErrorContext
from .value import (
NO_RETURN_VALUE,
AnnotatedValue,
AnySource,
AnyValue,
CallableValue,
CanAssignError,
GenericValue,
KnownValue,
SequenceIncompleteValue,
SubclassValue,
TypedDictValue,
TypedValue,
Value,
MultiValuedValue,
VariableNameValue,
replace_known_sequence_value,
stringify_object,
unite_values,
)
from .signature import Signature
CallArgs = Mapping[str, Value]
FunctionNode = Union[ast.FunctionDef, ast.AsyncFunctionDef]
@dataclass
class CallableData:
node: FunctionNode
ctx: ErrorContext
sig: Signature
calls: List[CallArgs] = field(default_factory=list)
def check(self) -> Iterator[Failure]:
if not self.calls:
return
for param in _extract_params(self.node):
if param.annotation is not None:
continue
sig_param = self.sig.parameters.get(param.arg)
if sig_param is None or not isinstance(sig_param.annotation, AnyValue):
continue # e.g. inferred type for self
all_values = [call[param.arg] for call in self.calls]
all_values = [prepare_type(v) for v in all_values]
all_values = [v for v in all_values if not isinstance(v, AnyValue)]
if not all_values:
continue
suggested = unite_values(*all_values)
if not should_suggest_type(suggested):
continue
detail, metadata = display_suggested_type(suggested)
failure = self.ctx.show_error(
param,
f"Suggested type for parameter {param.arg}",
ErrorCode.suggested_parameter_type,
detail=detail,
# Otherwise we record it twice in tests. We should ultimately
# refactor error tracking to make it less hacky for things that
# show errors outside of files.
save=False,
extra_metadata=metadata,
)
if failure is not None:
yield failure
@dataclass
class CallableTracker:
callable_to_data: Dict[object, CallableData] = field(default_factory=dict)
callable_to_calls: Dict[object, List[CallArgs]] = field(
default_factory=lambda: defaultdict(list)
)
def record_callable(
self, node: FunctionNode, callable: object, sig: Signature, ctx: ErrorContext
) -> None:
"""Record when we encounter a callable."""
self.callable_to_data[callable] = CallableData(node, ctx, sig)
def record_call(self, callable: object, arguments: Mapping[str, Value]) -> None:
"""Record the actual arguments passed in in a call."""
self.callable_to_calls[callable].append(arguments)
def check(self) -> List[Failure]:
failures = []
for callable, calls in self.callable_to_calls.items():
if callable in self.callable_to_data:
data = self.callable_to_data[callable]
data.calls += calls
failures += data.check()
return failures
def display_suggested_type(value: Value) -> Tuple[str, Optional[Dict[str, Any]]]:
value = prepare_type(value)
if isinstance(value, MultiValuedValue) and value.vals:
cae = CanAssignError("Union", [CanAssignError(str(val)) for val in value.vals])
else:
cae = CanAssignError(str(value))
# If the type is simple enough, add extra_metadata for autotyping to apply.
if isinstance(value, TypedValue) and type(value) is TypedValue:
# For now, only for exactly TypedValue
if value.typ is FunctionType:
# It will end up suggesting builtins.function, which doesn't
# exist, and we should be using a Callable type instead anyway.
metadata = None
else:
suggested_type = stringify_object(value.typ)
imports = []
if isinstance(value.typ, str):
if "." in value.typ:
imports.append(value.typ)
elif safe_getattr(value.typ, "__module__", None) != "builtins":
imports.append(suggested_type.split(".")[0])
metadata = {"suggested_type": suggested_type, "imports": imports}
else:
metadata = None
return str(cae), metadata
def should_suggest_type(value: Value) -> bool:
# Literal[<some function>] isn't useful. In the future we should suggest a
# Callable type.
if isinstance(value, KnownValue) and isinstance(value.val, FunctionType):
return False
# These generally aren't useful.
if isinstance(value, TypedValue) and value.typ in (FunctionType, type):
return False
if isinstance(value, AnyValue):
return False
if isinstance(value, MultiValuedValue) and len(value.vals) > 5:
# Big unions probably aren't useful
return False
# We emptied out a Union
if value is NO_RETURN_VALUE:
return False
return True
def prepare_type(value: Value) -> Value:
"""Simplify a type to turn it into a suggestion."""
if isinstance(value, AnnotatedValue):
return prepare_type(value.value)
elif isinstance(value, SequenceIncompleteValue):
if value.typ is tuple:
return SequenceIncompleteValue(
tuple, [prepare_type(elt) for elt in value.members]
)
else:
return GenericValue(value.typ, [prepare_type(arg) for arg in value.args])
elif isinstance(value, (TypedDictValue, CallableValue)):
return value
elif isinstance(value, GenericValue):
# TODO maybe turn DictIncompleteValue into TypedDictValue?
return GenericValue(value.typ, [prepare_type(arg) for arg in value.args])
elif isinstance(value, VariableNameValue):
return AnyValue(AnySource.unannotated)
elif isinstance(value, KnownValue):
if value.val is None:
return value
elif safe_isinstance(value.val, type):
return SubclassValue(TypedValue(value.val))
elif callable(value.val):
return value # TODO get the signature instead and return a CallableValue?
value = replace_known_sequence_value(value)
if isinstance(value, KnownValue):
return TypedValue(type(value.val))
else:
return prepare_type(value)
elif isinstance(value, MultiValuedValue):
vals = [prepare_type(subval) for subval in value.vals]
# Throw out Anys
vals = [val for val in vals if not isinstance(val, AnyValue)]
type_literals: List[Tuple[Value, type]] = []
rest: List[Value] = []
for subval in vals:
if (
isinstance(subval, SubclassValue)
and isinstance(subval.typ, TypedValue)
and safe_isinstance(subval.typ.typ, type)
):
type_literals.append((subval, subval.typ.typ))
else:
rest.append(subval)
if type_literals:
shared_type = get_shared_type([typ for _, typ in type_literals])
if shared_type is object:
type_val = TypedValue(type)
else:
type_val = SubclassValue(TypedValue(shared_type))
return unite_values(type_val, *rest)
return unite_values(*[v for v, _ in type_literals], *rest)
else:
return value
def get_shared_type(types: Sequence[type]) -> type:
mros = [t.mro() for t in types]
first, *rest = mros
rest_sets = [set(mro) for mro in rest]
for candidate in first:
if all(candidate in mro for mro in rest_sets):
return candidate
assert False, "should at least have found object"
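# For example, get_shared_type([bool, int]) returns int, while
# get_shared_type([int, str]) falls back to object.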
# We exclude *args and **kwargs by default because it's not currently possible
# to give useful types for them.
def _extract_params(
node: FunctionNode, *, include_var: bool = False
) -> Iterator[ast.arg]:
yield from node.args.args
if include_var and node.args.vararg is not None:
yield node.args.vararg
yield from node.args.kwonlyargs
if include_var and node.args.kwarg is not None:
yield node.args.kwarg
| python |
from __future__ import division
import casadi as ca
from planner import Planner
__author__ = 'belousov'
class Simulator:
# ========================================================================
# True noisy trajectory
# ========================================================================
@staticmethod
def simulate_trajectory(model, u_all):
xk = model.x0.cat
x_all = [xk]
for uk in u_all[:]:
[xk_next] = model.Fn([xk, uk])
x_all.append(xk_next)
xk = xk_next
x_all = model.x.repeated(ca.horzcat(x_all))
return x_all
# ========================================================================
# Observations
# ========================================================================
@staticmethod
def simulate_observed_trajectory(model, x_all):
z_all = []
for xk in x_all[:]:
[zk] = model.hn([xk])
z_all.append(zk)
z_all = model.z.repeated(ca.horzcat(z_all))
return z_all
# ========================================================================
# Filtered observations
# ========================================================================
@staticmethod
def filter_observed_trajectory(model, z_all, u_all):
n = len(u_all[:])
bk = model.b0
b_all = [bk]
for k in range(n):
[bk_next] = model.EKF([bk, u_all[k], z_all[k+1]])
b_all.append(bk_next)
bk = bk_next
b_all = model.b.repeated(ca.horzcat(b_all))
return b_all
# ========================================================================
# Extended belief trajectory
# ========================================================================
@staticmethod
def simulate_eb_trajectory(model, u_all):
ebk = model.eb0
eb_all = [ebk]
for uk in u_all[:]:
[ebk_next] = model.EBF([ebk, uk])
eb_all.append(ebk_next)
ebk = ebk_next
eb_all = model.eb.repeated(ca.horzcat(eb_all))
return eb_all
# ========================================================================
# Model predictive control
# ========================================================================
@classmethod
def mpc(cls, model, model_p):
# cls: simulate first n_delay time-steps with zero controls
u_all = model.u.repeated(ca.DMatrix.zeros(model.nu, model.n_delay))
x_all = cls.simulate_trajectory(model, u_all)
z_all = cls.simulate_observed_trajectory(model, x_all)
b_all = cls.filter_observed_trajectory(model_p, z_all, u_all)
# Store simulation results
X_all = x_all.cast()
Z_all = z_all.cast()
U_all = u_all.cast()
B_all = b_all.cast()
# Advance time
model.set_initial_state(x_all[-1], b_all[-1, 'm'], b_all[-1, 'S'])
# Iterate until the ball hits the ground
EB_all = []
k = 0 # pointer to current catcher observation (= now - n_delay)
while model.n != 0:
# Reaction delay compensation
eb_all_head = cls.simulate_eb_trajectory(
model_p,
model_p.u.repeated(U_all[:, k:k+model_p.n_delay])
)
model_p.set_initial_state(
eb_all_head[-1, 'm'],
eb_all_head[-1, 'm'],
eb_all_head[-1, 'L'] + eb_all_head[-1, 'S']
)
if model_p.n == 0:
break
# Planner: plan for model_p.n time steps
plan, lam_x, lam_g = Planner.create_plan(model_p)
# plan, lam_x, lam_g = Planner.create_plan(
# model_p, warm_start=True,
# x0=plan, lam_x0=lam_x, lam_g0=lam_g
# )
belief_plan, _, _ = Planner.create_belief_plan(
model_p, warm_start=True,
x0=plan, lam_x0=lam_x, lam_g0=lam_g
)
u_all = model_p.u.repeated(ca.horzcat(belief_plan['U']))
# u_all = model_p.u.repeated(ca.horzcat(plan['U']))
            # cls: simulate extended belief trajectory for plotting
eb_all_tail = cls.simulate_eb_trajectory(model_p, u_all)
# cls: execute the first action
x_all = cls.simulate_trajectory(model, [u_all[0]])
z_all = cls.simulate_observed_trajectory(model, x_all)
b_all = cls.filter_observed_trajectory(
model_p, z_all, [u_all[0]]
)
# Save simulation results
X_all.appendColumns(x_all.cast()[:, 1:])
Z_all.appendColumns(z_all.cast()[:, 1:])
U_all.appendColumns(u_all.cast()[:, 0])
B_all.appendColumns(b_all.cast()[:, 1:])
EB_all.append([eb_all_head, eb_all_tail])
# Advance time
model.set_initial_state(x_all[-1], b_all[-1, 'm'], b_all[-1, 'S'])
model_p.set_initial_state(
model_p.b(B_all[:, k+1])['m'],
model_p.b(B_all[:, k+1])['m'],
model_p.b(B_all[:, k+1])['S']
)
k += 1
return X_all, U_all, Z_all, B_all, EB_all
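# Example (hypothetical) driver, assuming `model` (the true plant) and `model_p`
# (the planner/filter model) are constructed elsewhere:
#   X_all, U_all, Z_all, B_all, EB_all = Simulator.mpc(model, model_p)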
| python |