content | avg_line_length | max_line_length | alphanum_fraction | licenses | repository_name | path | size | lang |
stringlengths 7..928k | float64 3.5..33.8k | int64 6..139k | float64 0.08..0.96 | sequence | stringlengths 7..104 | stringlengths 4..230 | int64 7..928k | stringclasses 1 value |
---|---|---|---|---|---|---|---|---|
# Created by MechAviv
# ID :: [140010000]
# Snow Island : Dangerous Forest
if not "o" in sm.getQuestEx(21019, "arr"):
sm.avatarOriented("Effect/OnUserEff.img/guideEffect/aranTutorial/tutorialArrow3")
sm.setQuestEx(21019, "arr", "o") | 40 | 85 | 0.720833 | [
"MIT"
] | Bia10/MapleEllinel-v203.4 | scripts/field/rienArrow.py | 240 | Python |
import json
import random
import uuid
import numpy as np
import time
import requests
import traceback
import pdb
import math
import ast
import pandas as pd
import pickle
from qwikidata.linked_data_interface import get_entity_dict_from_api
from qwikidata.sparql import return_sparql_query_results
from urllib3.exceptions import MaxRetryError, ConnectionError
from qwikidata.linked_data_interface import LdiResponseNotOk
import hashlib
class CachedWikidataAPI():
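    """Thin wrapper around the qwikidata API that caches entity lookups and
    SPARQL query results in a pickle file (flushed every `save_every_x_queries`
    queries), so repeated lookups avoid extra network calls."""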
def __init__(self, cache_path = 'entity_cache.p', save_every_x_queries=1):
self.save_every_x_queries = save_every_x_queries
self.x_queries_passed = 0
self.languages = ['en','fr','es','pt','pt-br','it','de']
self.cache_path = cache_path
try:
with open(self.cache_path,'rb') as f:
self.entity_cache = pickle.load(f)
except FileNotFoundError:
self.entity_cache = {}
def get_unique_id_from_str(self, my_str):
return hashlib.md5(str.encode(my_str)).hexdigest()
def save_entity_cache(self, force=False):
if force:
self.x_queries_passed = self.save_every_x_queries
self.x_queries_passed = self.x_queries_passed+1
if self.x_queries_passed >= self.save_every_x_queries:
with open(self.cache_path,'wb') as f:
pickle.dump(self.entity_cache,f)
self.x_queries_passed = 0
def get_entity(self, item_id):
if item_id in self.entity_cache:
return self.entity_cache[item_id]
while True:
try:
entity = get_entity_dict_from_api(item_id)
self.entity_cache[item_id] = entity
self.save_entity_cache()
return entity
except (ConnectionError, MaxRetryError) as e:
#traceback.print_exc()
time.sleep(1)
continue
except LdiResponseNotOk:
#traceback.print_exc()
self.entity_cache[item_id] = 'deleted'
self.save_entity_cache()
return 'deleted'
def get_label(self, item, non_language_set=False):
if type(item) == str:
entity = self.get_entity(item)
if entity == 'deleted':
return (entity, 'none')
labels = entity['labels' if 'labels' in entity else 'lemmas']
elif type(item) == dict:
if 'labels' in item:
labels = item['labels']
elif 'lemmas' in item:
labels = item['lemmas']
for l in self.languages:
if l in labels:
return (labels[l]['value'], l)
if non_language_set:
all_labels = list(labels.keys())
if len(all_labels)>0:
return (labels[all_labels[0]]['value'], all_labels[0])
return ('no-label', 'none')
def get_desc(self, item, non_language_set=False):
if type(item) == str:
entity = self.get_entity(item)
if entity == 'deleted':
return (entity, 'none')
descriptions = entity['descriptions']
elif type(item) == dict:
if 'descriptions' in item:
descriptions = item['descriptions']
for l in self.languages:
if l in descriptions:
return (descriptions[l]['value'], l)
if non_language_set:
all_descriptions = list(descriptions.keys())
if len(all_descriptions)>0:
return (descriptions[all_descriptions[0]]['value'], all_descriptions[0])
return ('no-desc', 'none')
def get_alias(self, item, non_language_set=False):
if type(item) == str:
entity = self.get_entity(item)
if entity == 'deleted':
return ([entity], 'none')
aliases = entity['aliases']
elif type(item) == dict:
if 'aliases' in item:
aliases = item['aliases']
for l in self.languages:
if l in aliases:
return ([alias['value'] for alias in aliases[l]], l)
if non_language_set:
all_aliases = list(aliases.keys())
if len(all_aliases)>0:
                # aliases[lang] is a list of {'language', 'value'} dicts, so
                # return every alias value for the first available language
                return ([alias['value'] for alias in aliases[all_aliases[0]]], all_aliases[0])
return ('no-alias', 'none')
def get_datatype(self, item):
try:
if type(item) == str:
entity = self.get_entity(item)
if entity == 'deleted':
return entity
datatype = entity['datatype']
elif type(item) == dict:
datatype = item['datatype']
return datatype
except KeyError:
return 'none'
def get_claim_values_of(self, item, property_id):
if type(item) == str:
entity = self.get_entity(item)
if entity == 'deleted':
return entity
claims = entity['claims']
elif type(item) == dict:
claims = item['claims']
if property_id in claims:
instance_of_claims = claims[property_id]
return [i['mainsnak']['datavalue']['value']['id'] for i in instance_of_claims]
else:
return []
def query_sparql_endpoint(self, sparql_query):
sparql_query_id = self.get_unique_id_from_str(sparql_query)
if sparql_query_id in self.entity_cache:
return self.entity_cache[sparql_query_id]
else:
wikidata_sparql_url = 'https://query.wikidata.org/sparql'
try:
while True:
res = requests.get(wikidata_sparql_url, params={"query": sparql_query, "format": "json"})
if res.status_code in (429,504):
time.sleep(1)
continue
elif res.status_code == 200:
res = res.json()
self.entity_cache[sparql_query_id] = res
self.save_entity_cache()
return res
else:
print(res.status_code)
raise Exception
except json.JSONDecodeError as e:
#pdb.set_trace()
print(res, res.__dict__)
raise e
| 37.780347 | 109 | 0.54896 | [
"CC0-1.0"
] | gabrielmaia7/WDV | WikidataClaims/wikidata_utils.py | 6,536 | Python |
# Generated by Django 3.2.3 on 2021-05-26 11:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('spa', '0003_alter_service_master'),
('user_profile', '0002_auto_20210526_1647'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='age',
field=models.IntegerField(),
),
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True)),
('status', models.CharField(max_length=40)),
('service', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='spa.service')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
),
]
| 35.484848 | 130 | 0.624253 | [
"MIT"
] | zhumakova/Django | django_project/user_profile/migrations/0003_auto_20210526_1731.py | 1,171 | Python |
from pypy.module.clr.interp_clr import split_fullname
def test_split_fullname():
split = split_fullname
assert split('Foo') == ('', 'Foo')
assert split('System.Foo') == ('System', 'Foo')
assert split('System.Foo.Bar') == ('System.Foo', 'Bar')
assert split('System.Foo.A+B') == ('System.Foo', 'A+B')
assert split('System.') == ('System', '')
| 33.727273 | 59 | 0.606469 | [
"MIT"
] | benoitc/pypy | pypy/module/clr/test/test_interp_clr.py | 371 | Python |
#climber.py
#Robot Code For BlueCrew 6153
import wpilib
#Commands to make the robot climb.
class Climber:
climb_motor = wpilib.Talon
#Set robot to climb when motor is on.
def climb(self):
self.climb_motor.set(1)
#Stops the robot from climbing when motor is off.
def stop_climb(self):
self.climb_motor.set(0)
#Execute is a necessary method for robotpy
    #DO NOT DELETE
def execute(self):
pass
| 21.952381 | 53 | 0.657267 | [
"MIT"
] | BlueCrewRobotics/2017Robot | components/climber.py | 461 | Python |
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import numbers
from abc import ABCMeta
from itertools import chain
from typing import Any, Optional, Union
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
from pyspark.sql import functions as F, Column
from pyspark.sql.types import (
ArrayType,
BinaryType,
BooleanType,
DataType,
DateType,
DecimalType,
FractionalType,
IntegralType,
MapType,
NullType,
NumericType,
StringType,
StructType,
TimestampType,
TimestampNTZType,
UserDefinedType,
)
from pyspark.pandas._typing import Dtype, IndexOpsLike, SeriesOrIndex
from pyspark.pandas.spark import functions as SF
from pyspark.pandas.typedef import extension_dtypes
from pyspark.pandas.typedef.typehints import (
extension_dtypes_available,
extension_float_dtypes_available,
extension_object_dtypes_available,
spark_type_to_pandas_dtype,
)
if extension_dtypes_available:
from pandas import Int8Dtype, Int16Dtype, Int32Dtype, Int64Dtype
if extension_float_dtypes_available:
from pandas import Float32Dtype, Float64Dtype
if extension_object_dtypes_available:
from pandas import BooleanDtype, StringDtype
def is_valid_operand_for_numeric_arithmetic(operand: Any, *, allow_bool: bool = True) -> bool:
"""Check whether the `operand` is valid for arithmetic operations against numerics."""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, numbers.Number):
return not isinstance(operand, bool) or allow_bool
elif isinstance(operand, IndexOpsMixin):
if isinstance(operand.dtype, CategoricalDtype):
return False
else:
return isinstance(operand.spark.data_type, NumericType) or (
allow_bool and isinstance(operand.spark.data_type, BooleanType)
)
else:
return False
def transform_boolean_operand_to_numeric(
operand: Any, *, spark_type: Optional[DataType] = None
) -> Any:
"""Transform boolean operand to numeric.
If the `operand` is:
- a boolean IndexOpsMixin, transform the `operand` to the `spark_type`.
- a boolean literal, transform to the int value.
Otherwise, return the operand as it is.
"""
from pyspark.pandas.base import IndexOpsMixin
if isinstance(operand, IndexOpsMixin) and isinstance(operand.spark.data_type, BooleanType):
assert spark_type, "spark_type must be provided if the operand is a boolean IndexOpsMixin"
assert isinstance(spark_type, NumericType), "spark_type must be NumericType"
dtype = spark_type_to_pandas_dtype(
spark_type, use_extension_dtypes=operand._internal.data_fields[0].is_extension_dtype
)
return operand._with_new_scol(
operand.spark.column.cast(spark_type),
field=operand._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type),
)
elif isinstance(operand, bool):
return int(operand)
else:
return operand
def _as_categorical_type(
index_ops: IndexOpsLike, dtype: CategoricalDtype, spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to categorical dtype, given `dtype` and `spark_type`."""
assert isinstance(dtype, CategoricalDtype)
if dtype.categories is None:
codes, uniques = index_ops.factorize()
return codes._with_new_scol(
codes.spark.column,
field=codes._internal.data_fields[0].copy(dtype=CategoricalDtype(categories=uniques)),
)
else:
categories = dtype.categories
if len(categories) == 0:
scol = SF.lit(-1)
else:
kvs = chain(
*[(SF.lit(category), SF.lit(code)) for code, category in enumerate(categories)]
)
map_scol = F.create_map(*kvs)
scol = F.coalesce(map_scol[index_ops.spark.column], SF.lit(-1))
return index_ops._with_new_scol(
scol.cast(spark_type),
field=index_ops._internal.data_fields[0].copy(
dtype=dtype, spark_type=spark_type, nullable=False
),
)
def _as_bool_type(index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
"""Cast `index_ops` to BooleanType Spark type, given `dtype`."""
spark_type = BooleanType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
scol = F.when(index_ops.spark.column.isNull(), SF.lit(False)).otherwise(
index_ops.spark.column.cast(spark_type)
)
return index_ops._with_new_scol(
scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
)
def _as_string_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], *, null_str: str = str(None)
) -> IndexOpsLike:
"""Cast `index_ops` to StringType Spark type, given `dtype` and `null_str`,
representing null Spark column. Note that `null_str` is for non-extension dtypes only.
"""
spark_type = StringType()
if isinstance(dtype, extension_dtypes):
scol = index_ops.spark.column.cast(spark_type)
else:
casted = index_ops.spark.column.cast(spark_type)
scol = F.when(index_ops.spark.column.isNull(), null_str).otherwise(casted)
return index_ops._with_new_scol(
scol, field=index_ops._internal.data_fields[0].copy(dtype=dtype, spark_type=spark_type)
)
def _as_other_type(
index_ops: IndexOpsLike, dtype: Union[str, type, Dtype], spark_type: DataType
) -> IndexOpsLike:
"""Cast `index_ops` to a `dtype` (`spark_type`) that needs no pre-processing.
Destination types that need pre-processing: CategoricalDtype, BooleanType, and StringType.
"""
from pyspark.pandas.internal import InternalField
need_pre_process = (
isinstance(dtype, CategoricalDtype)
or isinstance(spark_type, BooleanType)
or isinstance(spark_type, StringType)
)
assert not need_pre_process, "Pre-processing is needed before the type casting."
scol = index_ops.spark.column.cast(spark_type)
return index_ops._with_new_scol(scol, field=InternalField(dtype=dtype))
def _sanitize_list_like(operand: Any) -> None:
"""Raise TypeError if operand is list-like."""
if isinstance(operand, (list, tuple, dict, set)):
raise TypeError("The operation can not be applied to %s." % type(operand).__name__)
def _is_valid_for_logical_operator(right: Any) -> bool:
from pyspark.pandas.base import IndexOpsMixin
return isinstance(right, (int, bool)) or (
isinstance(right, IndexOpsMixin)
and (
isinstance(right.spark.data_type, BooleanType)
or isinstance(right.spark.data_type, IntegralType)
)
)
def _is_boolean_type(right: Any) -> bool:
from pyspark.pandas.base import IndexOpsMixin
return isinstance(right, bool) or (
isinstance(right, IndexOpsMixin) and isinstance(right.spark.data_type, BooleanType)
)
class DataTypeOps(object, metaclass=ABCMeta):
"""The base class for binary operations of pandas-on-Spark objects (of different data types)."""
def __new__(cls, dtype: Dtype, spark_type: DataType) -> "DataTypeOps":
from pyspark.pandas.data_type_ops.binary_ops import BinaryOps
from pyspark.pandas.data_type_ops.boolean_ops import BooleanOps, BooleanExtensionOps
from pyspark.pandas.data_type_ops.categorical_ops import CategoricalOps
from pyspark.pandas.data_type_ops.complex_ops import ArrayOps, MapOps, StructOps
from pyspark.pandas.data_type_ops.date_ops import DateOps
from pyspark.pandas.data_type_ops.datetime_ops import DatetimeOps, DatetimeNTZOps
from pyspark.pandas.data_type_ops.null_ops import NullOps
from pyspark.pandas.data_type_ops.num_ops import (
DecimalOps,
FractionalExtensionOps,
FractionalOps,
IntegralExtensionOps,
IntegralOps,
)
from pyspark.pandas.data_type_ops.string_ops import StringOps, StringExtensionOps
from pyspark.pandas.data_type_ops.udt_ops import UDTOps
if isinstance(dtype, CategoricalDtype):
return object.__new__(CategoricalOps)
elif isinstance(spark_type, DecimalType):
return object.__new__(DecimalOps)
elif isinstance(spark_type, FractionalType):
if extension_float_dtypes_available and type(dtype) in [Float32Dtype, Float64Dtype]:
return object.__new__(FractionalExtensionOps)
else:
return object.__new__(FractionalOps)
elif isinstance(spark_type, IntegralType):
if extension_dtypes_available and type(dtype) in [
Int8Dtype,
Int16Dtype,
Int32Dtype,
Int64Dtype,
]:
return object.__new__(IntegralExtensionOps)
else:
return object.__new__(IntegralOps)
elif isinstance(spark_type, StringType):
if extension_object_dtypes_available and isinstance(dtype, StringDtype):
return object.__new__(StringExtensionOps)
else:
return object.__new__(StringOps)
elif isinstance(spark_type, BooleanType):
if extension_object_dtypes_available and isinstance(dtype, BooleanDtype):
return object.__new__(BooleanExtensionOps)
else:
return object.__new__(BooleanOps)
elif isinstance(spark_type, TimestampType):
return object.__new__(DatetimeOps)
elif isinstance(spark_type, TimestampNTZType):
return object.__new__(DatetimeNTZOps)
elif isinstance(spark_type, DateType):
return object.__new__(DateOps)
elif isinstance(spark_type, BinaryType):
return object.__new__(BinaryOps)
elif isinstance(spark_type, ArrayType):
return object.__new__(ArrayOps)
elif isinstance(spark_type, MapType):
return object.__new__(MapOps)
elif isinstance(spark_type, StructType):
return object.__new__(StructOps)
elif isinstance(spark_type, NullType):
return object.__new__(NullOps)
elif isinstance(spark_type, UserDefinedType):
return object.__new__(UDTOps)
else:
raise TypeError("Type %s was not understood." % dtype)
def __init__(self, dtype: Dtype, spark_type: DataType):
self.dtype = dtype
self.spark_type = spark_type
@property
def pretty_name(self) -> str:
raise NotImplementedError()
def add(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def sub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def mul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def truediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def floordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def mod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def pow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def radd(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Addition can not be applied to %s." % self.pretty_name)
def rsub(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Subtraction can not be applied to %s." % self.pretty_name)
def rmul(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Multiplication can not be applied to %s." % self.pretty_name)
def rtruediv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("True division can not be applied to %s." % self.pretty_name)
def rfloordiv(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Floor division can not be applied to %s." % self.pretty_name)
def rmod(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Modulo can not be applied to %s." % self.pretty_name)
def rpow(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Exponentiation can not be applied to %s." % self.pretty_name)
def __and__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise and can not be applied to %s." % self.pretty_name)
def xor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise xor can not be applied to %s." % self.pretty_name)
def __or__(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("Bitwise or can not be applied to %s." % self.pretty_name)
def rand(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left.__and__(right)
def rxor(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left ^ right
def ror(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
_sanitize_list_like(right)
return left.__or__(right)
def neg(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary - can not be applied to %s." % self.pretty_name)
def abs(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("abs() can not be applied to %s." % self.pretty_name)
def lt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("< can not be applied to %s." % self.pretty_name)
def le(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("<= can not be applied to %s." % self.pretty_name)
def gt(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError("> can not be applied to %s." % self.pretty_name)
def ge(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
raise TypeError(">= can not be applied to %s." % self.pretty_name)
def eq(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
_sanitize_list_like(right)
return column_op(Column.__eq__)(left, right)
def ne(self, left: IndexOpsLike, right: Any) -> SeriesOrIndex:
from pyspark.pandas.base import column_op
_sanitize_list_like(right)
return column_op(Column.__ne__)(left, right)
def invert(self, operand: IndexOpsLike) -> IndexOpsLike:
raise TypeError("Unary ~ can not be applied to %s." % self.pretty_name)
def restore(self, col: pd.Series) -> pd.Series:
"""Restore column when to_pandas."""
return col
def prepare(self, col: pd.Series) -> pd.Series:
"""Prepare column when from_pandas."""
return col.replace({np.nan: None})
def isnull(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops._with_new_scol(
index_ops.spark.column.isNull(),
field=index_ops._internal.data_fields[0].copy(
dtype=np.dtype("bool"), spark_type=BooleanType(), nullable=False
),
)
def nan_to_null(self, index_ops: IndexOpsLike) -> IndexOpsLike:
return index_ops.copy()
def astype(self, index_ops: IndexOpsLike, dtype: Union[str, type, Dtype]) -> IndexOpsLike:
raise TypeError("astype can not be applied to %s." % self.pretty_name)
| 40.271635 | 100 | 0.685429 | [
"BSD-2-Clause",
"Apache-2.0",
"CC0-1.0",
"MIT",
"MIT-0",
"ECL-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | Chinazhanhuli/spark | python/pyspark/pandas/data_type_ops/base.py | 16,753 | Python |
#! python
from nose.tools import assert_true, assert_raises
import random
from networkx import random_reference, lattice_reference, sigma, omega
import networkx as nx
rng = random.Random(0)
rng = 42
def test_random_reference():
G = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
Gr = random_reference(G, niter=1, seed=rng)
C = nx.average_clustering(G)
Cr = nx.average_clustering(Gr)
assert_true(C > Cr)
assert_raises(nx.NetworkXError, random_reference, nx.Graph())
assert_raises(nx.NetworkXNotImplemented, random_reference, nx.DiGraph())
H = nx.Graph(((0, 1), (2, 3)))
Hl = random_reference(H, niter=1, seed=rng)
def test_lattice_reference():
G = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
Gl = lattice_reference(G, niter=1, seed=rng)
L = nx.average_shortest_path_length(G)
Ll = nx.average_shortest_path_length(Gl)
assert_true(Ll > L)
assert_raises(nx.NetworkXError, lattice_reference, nx.Graph())
assert_raises(nx.NetworkXNotImplemented, lattice_reference, nx.DiGraph())
H = nx.Graph(((0, 1), (2, 3)))
Hl = lattice_reference(H, niter=1)
def test_sigma():
Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
sigmas = sigma(Gs, niter=1, nrand=2, seed=rng)
sigmar = sigma(Gr, niter=1, nrand=2, seed=rng)
assert_true(sigmar < sigmas)
def test_omega():
Gl = nx.connected_watts_strogatz_graph(50, 6, 0, seed=rng)
Gr = nx.connected_watts_strogatz_graph(50, 6, 1, seed=rng)
Gs = nx.connected_watts_strogatz_graph(50, 6, 0.1, seed=rng)
omegal = omega(Gl, niter=1, nrand=1, seed=rng)
omegar = omega(Gr, niter=1, nrand=1, seed=rng)
omegas = omega(Gs, niter=1, nrand=1, seed=rng)
print("omegas, omegal, omegar")
print(omegas, omegal, omegar)
assert_true(omegal < omegas and omegas < omegar)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import numpy
except:
raise SkipTest("NumPy not available")
| 31.059701 | 77 | 0.694858 | [
"BSD-3-Clause"
] | fanglab/6mASCOPE | SLpackage/private/thirdparty/pythonpkgs/networkx/networkx_2.2/lib/python2.7/site-packages/networkx/algorithms/tests/test_smallworld.py | 2,081 | Python |
from setuptools import find_packages, setup
def find_required():
with open("requirements.txt") as f:
return f.read().splitlines()
def find_dev_required():
with open("requirements-dev.txt") as f:
return f.read().splitlines()
setup(
name="vedro-allure-reporter",
version="0.2.4",
description="Allure reporter for Vedro framework",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
author="Nikita Tsvetkov",
author_email="[email protected]",
python_requires=">=3.7",
url="https://github.com/nikitanovosibirsk/vedro-allure-reporter",
license="Apache-2.0",
packages=find_packages(exclude=("tests",)),
package_data={"vedro_allure_reporter": ["py.typed"]},
install_requires=find_required(),
tests_require=find_dev_required(),
classifiers=[
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Typing :: Typed",
],
)
| 30.243243 | 69 | 0.662198 | [
"Apache-2.0"
] | nikitanovosibirsk/vedro-allure-reporter | setup.py | 1,119 | Python |
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
from sgtk.platform.qt import QtCore, QtGui
import sgtk
class WorkAreaButton(QtGui.QToolButton):
"""
UX for switching work area.
This displays a "change work area" button which a user can interact with
The button is designed to expand so that it is subtle until a user
hovers over it.
:signal clicked(str, int): Fires when someone clicks the change
work area button. Arguments passed are the entity type and entity id
"""
WIDGET_WIDTH_COLLAPSED = 30
WIDGET_HEIGHT = 30
NON_WORK_AREA_TYPES = [
"PublishedFile",
"Project",
"TankPublishedFile",
"Version",
"Note",
"Group",
"HumanUser",
"ScriptUser",
"ApiUser",
"ClientUser",
"Department",
"Cut",
"CutItem",
"Delivery",
"Playlist",
"Ticket"
]
change_work_area = QtCore.Signal(str, int)
def __init__(self, parent):
"""
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(WorkAreaButton, self).__init__(parent)
# an icon to represent all items which
# aren't the current work area
self._normal_icon = QtGui.QIcon()
self._normal_icon.addPixmap(
QtGui.QPixmap(":/tk_multi_infopanel/pin.png"),
QtGui.QIcon.Normal,
QtGui.QIcon.Off
)
# an icon to represent the current work area
self._current_work_area_icon = QtGui.QIcon()
self._current_work_area_icon.addPixmap(
QtGui.QPixmap(":/tk_multi_infopanel/pin_blue.png"),
QtGui.QIcon.Disabled,
QtGui.QIcon.Off
)
self.setIcon(self._normal_icon)
self.setIconSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self._bundle = sgtk.platform.current_bundle()
self._entity_type = None
self._entity_id = None
self._is_static = False
self._caption = "Set Work Area"
self._width = 120
self.clicked.connect(self._on_click)
self.setVisible(False)
def set_up(self, entity_type, entity_id):
"""
Sets up the button for a given entity.
:param entity_type: Entity type to set up button for
:param entity_id: Entity id to set up button for
"""
self._entity_id = entity_id
self._entity_type = entity_type
if not self._bundle.get_setting("enable_context_switch"):
# context switch button not enabled
return
# figure out if this is the current project
context = self._bundle.context
context_entity = context.task or context.entity or context.project or None
self.setVisible(True)
self.setEnabled(True)
self.setIcon(self._normal_icon)
self._is_static = False
if context_entity and context_entity["type"] == entity_type and context_entity["id"] == entity_id:
# the current work area
self.setPopupMode(QtGui.QToolButton.DelayedPopup)
self.setToolTip(
"This is your current work area.\n"
"The work you do will be associated with this item in Shotgun."
)
# set blue icon
self.setIcon(self._current_work_area_icon)
# disable the button
self.setEnabled(False)
# make sure it doesn't pop on mouseover
self._is_static = True
elif entity_type in self.NON_WORK_AREA_TYPES:
# don't show the ctx selector for some types
self.setToolTip("This cannot be a work area.")
# disable the button
self.setEnabled(False)
# make sure it doesn't pop on mouse over
self._is_static = True
else:
if entity_type == "Task":
self._caption = "Set Work Area"
self.setToolTip("Click to set your work area to the current task.")
else:
self._caption = "Pick Work Area"
self.setToolTip("Click to select a task.")
self._init_default_state()
def _init_default_state(self):
"""
Sets up the default collapsed state of the button
"""
self.setText("")
self.setToolButtonStyle(QtCore.Qt.ToolButtonIconOnly)
self.setMinimumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self.WIDGET_WIDTH_COLLAPSED, self.WIDGET_HEIGHT))
# tell the style sheet to adjust
self.setProperty("is_expanded", False)
self.style().unpolish(self)
self.style().polish(self)
def _on_click(self):
"""
Executed when the button is clicked
"""
self.change_work_area.emit(self._entity_type, self._entity_id)
def enterEvent(self, evt):
"""
QT Mouse enter event
"""
if not self._is_static:
# not the current work area. so expand the button
self.setText(self._caption)
self.setToolButtonStyle(QtCore.Qt.ToolButtonTextBesideIcon)
self.setMinimumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
self.setMaximumSize(QtCore.QSize(self._width, self.WIDGET_HEIGHT))
# tell the style sheet to adjust
self.setProperty("is_expanded", True)
self.style().unpolish(self)
self.style().polish(self)
return super(WorkAreaButton, self).enterEvent(evt)
def leaveEvent(self, evt):
"""
QT Mouse leave event
"""
if not self._is_static:
# collapse button after a delay
QtCore.QTimer.singleShot(300, self._init_default_state)
return super(WorkAreaButton, self).leaveEvent(evt)
class FloatingWorkAreaButton(WorkAreaButton):
"""
UX for switching work area.
    This displays a "change work area" button which a user can interact with.
The button is designed to expand so that it is subtle until a user
hovers over it.
Derives from :class:`WorkAreaButton` and positions the widget
relative to the bottom-right corner of the parent widget.
:signal clicked(str, int): Fires when someone clicks the change
work area button. Arguments passed are the entity type and entity id
"""
RIGHT_OFFSET = 6
BOTTOM_OFFSET = 6
def __init__(self, parent):
"""
:param right_side_offset: Right hand side offset in pixels
:param bottom_offset: Bottom offset in pixels
:param parent: The model parent.
:type parent: :class:`~PySide.QtGui.QObject`
"""
super(FloatingWorkAreaButton, self).__init__(parent)
# hook up a listener to the parent window so this widget
# follows along when the parent window changes size
filter = ResizeEventFilter(parent)
filter.resized.connect(self._on_parent_resized)
parent.installEventFilter(filter)
def set_up(self, entity_type, entity_id):
"""
Sets up the button for a given entity.
:param entity_type: Entity type to set up button for
:param entity_id: Entity id to set up button for
"""
if entity_type in self.NON_WORK_AREA_TYPES:
# hide the widget
self.setVisible(False)
else:
# base class implementation
super(FloatingWorkAreaButton, self).set_up(entity_type, entity_id)
def __position_widget(self):
"""
Moves the widget to the bottom-right corner of the parent widget.
"""
self.move(
self.parentWidget().width() - self.width() - self.RIGHT_OFFSET,
self.parentWidget().height() - self.height() - self.BOTTOM_OFFSET
)
def _init_default_state(self):
"""
Sets up the default collapsed state of the button
"""
super(FloatingWorkAreaButton, self)._init_default_state()
self.__position_widget()
def enterEvent(self, evt):
"""
QT Mouse enter event
"""
status = super(FloatingWorkAreaButton, self).enterEvent(evt)
if not self._is_static:
self.__position_widget()
return status
def _on_parent_resized(self):
"""
Special slot hooked up to the event filter.
When associated widget is resized this slot is being called.
"""
self.__position_widget()
class ResizeEventFilter(QtCore.QObject):
"""
Utility and helper.
Event filter which emits a resized signal whenever
the monitored widget resizes.
You use it like this:
# create the filter object. Typically, it's
# it's easiest to parent it to the object that is
# being monitored (in this case self.ui.thumbnail)
filter = ResizeEventFilter(self.ui.thumbnail)
# now set up a signal/slot connection so that the
# __on_thumb_resized slot gets called every time
# the widget is resized
filter.resized.connect(self.__on_thumb_resized)
# finally, install the event filter into the QT
# event system
self.ui.thumbnail.installEventFilter(filter)
"""
resized = QtCore.Signal()
def eventFilter(self, obj, event):
"""
Event filter implementation.
For information, see the QT docs:
http://doc.qt.io/qt-4.8/qobject.html#eventFilter
This will emit the resized signal (in this class)
whenever the linked up object is being resized.
:param obj: The object that is being watched for events
:param event: Event object that the object has emitted
:returns: Always returns False to indicate that no events
should ever be discarded by the filter.
"""
# peek at the message
if event.type() == QtCore.QEvent.Resize:
# re-broadcast any resize events
self.resized.emit()
# pass it on!
return False
| 32.583851 | 106 | 0.629527 | [
"MIT"
] | JoanAzpeitia/lp_sg | install/app_store/tk-multi-shotgunpanel/v1.4.8/python/app/work_area_button.py | 10,492 | Python |
#!/usr/bin/env python
"""The setup script."""
from setuptools import find_packages, setup
with open('requirements.txt') as f:
INSTALL_REQUIREs = f.read().strip().split('\n')
with open('README.md', encoding='utf8') as f:
LONG_DESCRIPTION = f.read()
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering',
]
setup(
name='repo2singularity',
    description='Repo2singularity: Wrapper around repo2docker producing Jupyter-enabled Singularity images.',
long_description=LONG_DESCRIPTION,
python_requires='>=3.6',
maintainer='Anderson Banihirwe',
classifiers=CLASSIFIERS,
url='https://github.com/andersy005/repo2singularity',
packages=find_packages(exclude=('tests',)),
include_package_data=True,
install_requires=INSTALL_REQUIREs,
license='Apache 2.0',
zip_safe=False,
entry_points={'console_scripts': ['repo2singularity = repo2singularity.core:main']},
keywords='reproducible science environments docker singularity',
use_scm_version={'version_scheme': 'post-release', 'local_scheme': 'dirty-tag'},
setup_requires=['setuptools_scm', 'setuptools>=30.3.0'],
)
| 37.390244 | 119 | 0.700587 | [
"BSD-3-Clause"
] | andersy005/repo2singularity | setup.py | 1,533 | Python |
from functools import partial
from itertools import product
import numpy as np
from tlz import curry
from ..base import tokenize
from ..utils import funcname
from .blockwise import BlockwiseCreateArray
from .core import Array, normalize_chunks
from .utils import (
meta_from_array,
empty_like_safe,
full_like_safe,
ones_like_safe,
zeros_like_safe,
)
def _parse_wrap_args(func, args, kwargs, shape):
if isinstance(shape, np.ndarray):
shape = shape.tolist()
if not isinstance(shape, (tuple, list)):
shape = (shape,)
name = kwargs.pop("name", None)
chunks = kwargs.pop("chunks", "auto")
dtype = kwargs.pop("dtype", None)
if dtype is None:
dtype = func(shape, *args, **kwargs).dtype
dtype = np.dtype(dtype)
chunks = normalize_chunks(chunks, shape, dtype=dtype)
name = name or funcname(func) + "-" + tokenize(
func, shape, chunks, dtype, args, kwargs
)
return {
"shape": shape,
"dtype": dtype,
"kwargs": kwargs,
"chunks": chunks,
"name": name,
}
def wrap_func_shape_as_first_arg(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
if "shape" not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop("shape")
if isinstance(shape, Array):
raise TypeError(
"Dask array input not supported. "
"Please use tuple, list, or a 1D numpy array instead."
)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
func = partial(func, dtype=dtype, **kwargs)
graph = BlockwiseCreateArray(
name,
func,
shape,
chunks,
)
return Array(graph, name, chunks, dtype=dtype, meta=kwargs.get("meta", None))
def wrap_func_like(func, *args, **kwargs):
"""
Transform np creation function into blocked version
"""
x = args[0]
meta = meta_from_array(x)
shape = kwargs.get("shape", x.shape)
parsed = _parse_wrap_args(func, args, kwargs, shape)
shape = parsed["shape"]
dtype = parsed["dtype"]
chunks = parsed["chunks"]
name = parsed["name"]
kwargs = parsed["kwargs"]
keys = product([name], *[range(len(bd)) for bd in chunks])
shapes = product(*chunks)
shapes = list(shapes)
kw = [kwargs for _ in shapes]
for i, s in enumerate(list(shapes)):
kw[i]["shape"] = s
vals = ((partial(func, dtype=dtype, **k),) + args for (k, s) in zip(kw, shapes))
dsk = dict(zip(keys, vals))
return Array(dsk, name, chunks, meta=meta.astype(dtype))
def wrap_func_like_safe(func, func_like, *args, **kwargs):
"""
    Safe implementation for wrap_func_like(): attempts to use func_like();
    if that raises a TypeError (e.g. because of the shape keyword argument),
    falls back to func().
"""
try:
return func_like(*args, **kwargs)
except TypeError:
return func(*args, **kwargs)
@curry
def wrap(wrap_func, func, **kwargs):
func_like = kwargs.pop("func_like", None)
if func_like is None:
f = partial(wrap_func, func, **kwargs)
else:
f = partial(wrap_func, func_like, **kwargs)
template = """
Blocked variant of %(name)s
Follows the signature of %(name)s exactly except that it also features
optional keyword arguments ``chunks: int, tuple, or dict`` and ``name: str``.
Original signature follows below.
"""
if func.__doc__ is not None:
f.__doc__ = template % {"name": func.__name__} + func.__doc__
f.__name__ = "blocked_" + func.__name__
return f
w = wrap(wrap_func_shape_as_first_arg)
@curry
def _broadcast_trick_inner(func, shape, meta=(), *args, **kwargs):
if shape == ():
return np.broadcast_to(func(meta, shape=(), *args, **kwargs), shape)
else:
return np.broadcast_to(func(meta, shape=1, *args, **kwargs), shape)
def broadcast_trick(func):
"""
    Provide a decorator to wrap common numpy functions with a broadcast trick.
Dask arrays are currently immutable; thus when we know an array is uniform,
we can replace the actual data by a single value and have all elements point
to it, thus reducing the size.
>>> x = np.broadcast_to(1, (100,100,100))
>>> x.base.nbytes
8
    Those arrays are not only more efficient locally, but dask serialisation is
    aware of the _real_ size of those arrays and thus can send them around
    efficiently and schedule accordingly.
    Note that those arrays are read-only and numpy will refuse to assign to them,
so should be safe.
"""
inner = _broadcast_trick_inner(func)
if func.__doc__ is not None:
inner.__doc__ = func.__doc__
inner.__name__ = func.__name__
if inner.__name__.endswith("_like_safe"):
inner.__name__ = inner.__name__[:-10]
return inner
ones = w(broadcast_trick(ones_like_safe), dtype="f8")
zeros = w(broadcast_trick(zeros_like_safe), dtype="f8")
empty = w(broadcast_trick(empty_like_safe), dtype="f8")
w_like = wrap(wrap_func_like_safe)
empty_like = w_like(np.empty, func_like=np.empty_like)
# full and full_like require special casing due to argument check on fill_value
# Generate wrapped functions only once
_full = w(broadcast_trick(full_like_safe))
_full_like = w_like(np.full, func_like=np.full_like)
# workaround for numpy doctest failure: https://github.com/numpy/numpy/pull/17472
_full.__doc__ = _full.__doc__.replace(
"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
"array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])",
)
def full(shape, fill_value, *args, **kwargs):
# np.isscalar has somewhat strange behavior:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full(shape=shape, fill_value=fill_value, *args, **kwargs)
def full_like(a, fill_value, *args, **kwargs):
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
return _full_like(
a=a,
fill_value=fill_value,
*args,
**kwargs,
)
full.__doc__ = _full.__doc__
full_like.__doc__ = _full_like.__doc__
| 27.896552 | 87 | 0.645705 | [
"BSD-3-Clause"
] | BlueOwlDev/dask | dask/array/wrap.py | 6,472 | Python |
"""
Tests for lyrics_tagger
"""
from __future__ import unicode_literals
from __future__ import print_function
import unittest
import mock
import lyricstagger.misc as misc
import test.fakers as fakers
# pylint: disable=R0904
class MiscCheck(unittest.TestCase):
"""Test miscelanous functions"""
def test_get_tags_multi(self):
"""Test get_tags with multi-tag file"""
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, ['Artist'], ['Album'],
['Title'], 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], "Album")
self.assertEqual(tags['artist'], "Artist")
self.assertEqual(tags['title'], "Title")
self.assertEqual(tags['lyrics'], "Lyrics")
def test_get_tags_single(self):
"""Test get_tags with single-tag file"""
for mime in ['audio/mp3', 'audio/ogg']:
audio = fakers.FakeFile(mime, 'Artist', 'Album', 'Title', 'Lyrics')
tags = misc.get_tags(audio)
self.assertEqual(tags['album'], "Album")
self.assertEqual(tags['artist'], "Artist")
self.assertEqual(tags['title'], "Title")
self.assertEqual(tags['lyrics'], "Lyrics")
def test_get_tags_broken(self):
"""Test get_tags with broken tags"""
audio = fakers.BrokenFile('audio/ogg', {'test': 'Test',
'album': 'Album',
'title': 'Title'})
tags = misc.get_tags(audio)
self.assertEqual(tags, None)
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_empty_ok(self):
"""Test edit_lyrics with empty lyrics and correct edit"""
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, "")
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_empty_fail(self):
"""Test edit_lyrics with empty lyrics and errored edit"""
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album', 'Title')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None)
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_ok)
def test_edit_lyrics_nonempty_ok(self):
"""Test edit_lyrics with non-empty lyrics and correct edit"""
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album',
'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, "Lyrics")
@mock.patch('lyricstagger.misc.click.edit', fakers.mock_edit_fail)
def test_edit_lyrics_nonempty_fail(self):
"""Test edit_lyrics with non-empty lyrics and errored edit"""
audio = fakers.FakeFile('audio/ogg', 'Artist', 'Album',
'Title', 'Lyrics')
lyrics = misc.edit_lyrics(audio)
self.assertEqual(lyrics, None)
def test_get_file_list(self):
file_list = list(misc.get_file_list(["test/test_data"]))
self.assertIn("test/test_data/test_dir_0/test_file_0.ogg", file_list)
self.assertIn("test/test_data/test_dir_1/test_file_1.ogg", file_list)
# pylint: enable=R0904
if __name__ == '__main__':
unittest.main()
| 41.280488 | 79 | 0.612703 | [
"MIT"
] | abulimov/lyricstagger | test/test_misc.py | 3,385 | Python |
import copy
import torch
import logging
import numpy as np
from sacred import Experiment
from noge.data_loaders import get_datasets, get_test_loader, get_train_generator
from noge.factory import make_env, make_memory
from noge.network import make_network
from noge.agent import Actor, main_loop, loop_ing
from noge.trainers import DQNTrainer, Replay
from noge.policies import LinearSchedule, GraphDQNPolicy
from noge.preprocessors import Preprocessor
from noge.evaluation import Evaluator, eval_ing
from noge.constants import CONFIGS_DIR, EVAL_DIR
from xlog.utils import get_logger
from xlog.mlflow_observer import MlflowObserver
ex = Experiment(name='NOGE_DQN', ingredients=[eval_ing, loop_ing])
ex.add_config(str(CONFIGS_DIR / 'dqn.yaml'))
ex.logger = get_logger(__name__, level=logging.INFO)
ex.observers = [MlflowObserver(tracking_uri=str(EVAL_DIR.absolute()))]
@ex.automain
def train(dataset, test_size, max_episode_steps, reward_type, input_meas_type, meas_transform,
target_transform, node_history, gamma, target_update_freq,
cat_features, feature_range, replay_capacity, min_horizon, epsilon_start, epsilon_end,
exploration_frac, n_train_steps, train_freq, loss, batch_size, lr, n_test_episodes, init_eval,
n_eval_artifacts, test_freq, log_freq, device, seed, data_seed, save_model, _log, _run, _config):
np.set_printoptions(precision=2, suppress=True)
if device.startswith('cuda'):
assert torch.cuda.is_available()
logger = _log
device = torch.device(device)
# data source
train_set, test_set = get_datasets(dataset, seed=data_seed, test_size=test_size)
max_nodes = max(train_set.max_nodes, test_set.max_nodes)
max_edges = 2 * max(train_set.max_edges, test_set.max_edges) # for undirected graphs, consider both directions
test_loader = get_test_loader(test_set, seed=seed, num_samples=n_test_episodes)
train_gen = get_train_generator(train_set, seed=seed)
preprocessor = Preprocessor(input_meas_type=input_meas_type,
output_meas_type=input_meas_type,
feature_range=feature_range,
meas_transform=meas_transform,
target_transform=target_transform,
temporal_offsets=[1.],
max_nodes=max_nodes,
device=device)
# environment
train_env_config = dict(
max_episode_steps=max_episode_steps,
reward_type=reward_type,
max_nodes=max_nodes,
max_edges=max_edges,
nn_feat='N' in cat_features,
)
train_env = make_env(**train_env_config, data_generator=train_gen, seed=seed)
test_env_config = copy.deepcopy(train_env_config)
test_env_config.update(sample_goals=False, data_generator=None)
test_env = make_env(**test_env_config, seed=seed)
# graph memory + graph preprocessing
neg_label, pos_label = feature_range
mem_features = dict(cat=cat_features)
graph_mem_config = dict(
max_episode_steps=max_episode_steps,
max_nodes=max_nodes,
max_edges=max_edges,
history=node_history,
memory_type='cat',
features=mem_features,
neg_label=neg_label,
pos_label=pos_label
)
eval_memory = make_memory(online=True, **graph_mem_config)
acting_memory = make_memory(online=True, **graph_mem_config)
# model
model_config = dict(
dim_node=eval_memory.dim_node,
dim_meas=preprocessor.dim_input_meas,
dim_goal=1,
max_edges=max_edges,
**_config['model']
)
network = make_network(**model_config).to(device)
# evaluation
eval_policy = GraphDQNPolicy(network, eval_memory, preprocessor=preprocessor, device=device)
evaluator = Evaluator(test_loader, test_env, eval_policy)
# experience collecting policy
exploration_steps = int(exploration_frac * n_train_steps)
exploration_schedule = LinearSchedule(epsilon_start, epsilon_end, exploration_steps)
acting_policy = GraphDQNPolicy(network,
graph_memory=acting_memory,
preprocessor=preprocessor,
exploration_schedule=exploration_schedule,
device=device)
# replay buffer
replay_buffer = Replay(capacity=replay_capacity,
ob_space=train_env.observation_space,
graph_mem_config=graph_mem_config,
min_horizon=min_horizon)
# actor: runs the simulation forward and stores to the replay buffer
actor = Actor(train_env, acting_policy, replay_buffer)
# trainer
optimizer = torch.optim.Adam(network.parameters(), lr=lr)
if loss == 'mse':
criterion = torch.nn.MSELoss()
else:
raise ValueError(f"Unsupported loss: {loss}")
trainer = DQNTrainer(gamma=gamma,
target_update_freq=target_update_freq,
replay_buffer=replay_buffer,
batch_size=batch_size,
network=network,
preprocessor=preprocessor,
criterion=criterion,
optimizer=optimizer,
device=device)
# fill up the replay buffer
network.eval()
logger.info(f"Filling up the replay buffer...")
actor.step(n=replay_capacity, use_tqdm=True)
logger.info(f"Replay buffer filled: [{len(replay_buffer)} / {replay_capacity}]")
# fit the preprocessor with buffer data
preprocessor.fit(replay_buffer._measurements)
best_perf = main_loop(actor, trainer, evaluator, network, exploration_schedule,
init_eval, n_eval_artifacts, n_train_steps, train_freq, log_freq, test_freq, save_model)
train_env.close()
evaluator.close()
return best_perf
| 38.987013 | 115 | 0.673051 | [
"MIT"
] | johny-c/noge | scripts/train_dqn.py | 6,004 | Python |
from django.shortcuts import render
# Create your views here.
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from Constants import *
from Gifts.getRecommendations.RS import Users, Recommendations
import json
# Create your views here.
def check_input(request, mandatory_fields, optional_fields=None):
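    """Validate that every mandatory field is present in `request` and that no
    key outside the mandatory/optional sets appears; returns a dict with
    'result' set to 'Success' or 'Error' (plus a 'message' on error)."""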
if not optional_fields: optional_fields = []
for key in request.keys():
if key not in mandatory_fields and key not in optional_fields:
return {'result': 'Error', 'message': key + ' is not a valid field'}
for field in mandatory_fields:
if field not in request.keys():
return {'result': 'Error', 'message': field + ' do not presented'}
return {"result": "Success"}
def add_user(request):
if 'userProfile' not in request:
return JsonResponse({'result': 'Error', 'message': 'userProfile do not presented'})
result = check_input(request['userProfile'], ["sex", "age", "hobbies", "userType"],
["alreadyGifted", "lovedCategories"])
if result['result'] == "Error":
return JsonResponse(result)
if request['userProfile']['sex'] not in ['Female', 'Male']:
return JsonResponse({'result': 'Error', 'message': request['userProfile']['sex'] +
' is not a valid sex'})
if 'alreadyGifted' not in request['userProfile']:
request['userProfile']['alreadyGifted'] = []
if 'lovedCategories' not in request['userProfile']:
request['userProfile']['lovedCategories'] = []
try:
user_id = Users.add_user(request['userProfile'])
except Exception as e:
print e
return JsonResponse({'result': 'Error', 'message': 'error while adding user'})
return JsonResponse({'result': 'Success', 'data': {'userId': user_id}})
def make_list(request):
result = check_input(request, ["userId"], ["filter"])
if result['result'] == "Error":
return JsonResponse(result)
if 'filter' in request:
result = check_input(request['filter'], [], ["minPrice", "maxPrice"])
if result['result'] == "Error":
return JsonResponse(result)
min_price = None
max_price = None
if 'filter' in request:
if 'minPrice' in request['filter']:
min_price = request['filter']['minPrice']
if 'maxPrice' in request['filter']:
max_price = request['filter']['maxPrice']
try:
Recommendations.generate_list(request['userId'], min_price, max_price)
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
print e
return JsonResponse({'result': 'error', 'message': 'error while making list'})
return JsonResponse({'result': 'Success', 'data': {'numberOfPages': number_of_pages}})
def get_suggestions(request):
result = check_input(request, ["page", "userId"])
if result['result'] == "Error":
return JsonResponse(result)
try:
items = Recommendations.get_page(request['userId'], request['page'])
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
print e
return JsonResponse({'result': 'Error', 'message': 'error during getting list'})
if items:
request = {'result': 'Success', 'data': {'items': items, "numberOfPages": number_of_pages}}
elif items == []:
request = {'result': 'Error', 'message': 'page out of range'}
else:
request = {'result': 'Error', 'message': 'error during getting list'}
return JsonResponse(request)
def rate_item(request):
result = check_input(request, ["userId", "itemId", "rating"])
if result['result'] == "Error":
return JsonResponse(result)
try:
Recommendations.rate_and_remove(request['userId'], request['itemId'], request['rating'])
number_of_pages = Recommendations.get_number_of_pages(request['userId'])
except Exception as e:
print e
return JsonResponse({"result": "Error", "message": "error during rating item"})
return JsonResponse({"result": "Success", 'data': {'numberOfPages': number_of_pages}})
@csrf_exempt
def home(request):
if request.method == "POST":
try:
request_dict = json.loads(request.body)
print(request_dict)
if 'task' not in request_dict:
return JsonResponse({'result': 'Error', 'message': 'task do not presented'})
if 'data' not in request_dict:
return JsonResponse({'result': 'Error', 'message': 'data do not presented'})
if request_dict['task'] == 'addUser':
return add_user(request_dict['data'])
if request_dict['task'] == 'makeList':
return make_list(request_dict['data'])
if request_dict['task'] == 'getSuggestions':
return get_suggestions(request_dict['data'])
if request_dict['task'] == 'rateItem':
return rate_item(request_dict['data'])
return JsonResponse({'result': 'Error', 'message':
request_dict['task'] + " is not a valid task"})
except Exception as e:
print e
return JsonResponse({'result': 'Error', 'message': "strange error"})
return HttpResponse('''
        <h1>Welcome to GRS</h1>
''')
| 39.085714 | 99 | 0.6197 | [
"Apache-2.0"
] | exarus/GiftRecommenderSystem | backend/Gifts/views.py | 5,472 | Python |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from runner.koan import *
class AboutTuples(Koan):
def test_creating_a_tuple(self):
count_of_three = (1, 2, 5)
self.assertEqual(5, count_of_three[2])
def test_tuples_are_immutable_so_item_assignment_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three[2] = "three"
except TypeError as ex:
self.assertMatch('upl', ex[0])
def test_tuples_are_immutable_so_appending_is_not_possible(self):
count_of_three = (1, 2, 5)
try:
count_of_three.append("boom")
except Exception as ex:
self.assertEqual(AttributeError, type(ex))
# Note, assertMatch() uses regular expression pattern matching,
# so you don't have to copy the whole message.
self.assertMatch('object', ex[0])
# Tuples are less flexible than lists, but faster.
def test_tuples_can_only_be_changed_through_replacement(self):
count_of_three = (1, 2, 5)
list_count = list(count_of_three)
list_count.append("boom")
count_of_three = tuple(list_count)
self.assertEqual((1, 2, 5, 'boom'), count_of_three)
def test_tuples_of_one_look_peculiar(self):
self.assertEqual(type(int(1)), (1).__class__)
self.assertEqual(type((1,2)), (1,).__class__)
self.assertEqual(('Hello comma!',), ("Hello comma!", ))
def test_tuple_constructor_can_be_surprising(self):
self.assertEqual(('S', 'u', 'r', 'p', 'r', 'i', 's', 'e', '!'), tuple("Surprise!"))
def test_creating_empty_tuples(self):
self.assertEqual(tuple(), ())
self.assertEqual((), tuple()) # Sometimes less confusing
def test_tuples_can_be_embedded(self):
lat = (37, 14, 6, 'N')
lon = (115, 48, 40, 'W')
place = ('Area 51', lat, lon)
self.assertEqual(('Area 51',(37,14,6,'N'),(115,48,40,'W')), place)
def test_tuples_are_good_for_representing_records(self):
locations = [
("Illuminati HQ", (38, 52, 15.56, 'N'), (77, 3, 21.46, 'W')),
("Stargate B", (41, 10, 43.92, 'N'), (1, 49, 34.29, 'W')),
]
locations.append(
("Cthulhu", (26, 40, 1, 'N'), (70, 45, 7, 'W'))
)
self.assertEqual('Cthulhu', locations[2][0])
self.assertEqual(15.56, locations[0][1][2])
| 33.985915 | 91 | 0.590137 | [
"MIT"
] | rhgraysonii/python_koan_solutions | koans/koans/about_tuples.py | 2,413 | Python |
# NLP written by GAMS Convert at 04/21/18 13:51:47
#
# Equation counts
# Total E G L N X C B
# 73 73 0 0 0 0 0 0
#
# Variable counts
# x b i s1s s2s sc si
# Total cont binary integer sos1 sos2 scont sint
# 116 116 0 0 0 0 0 0
# FX 0 0 0 0 0 0 0 0
#
# Nonzero counts
# Total const NL DLL
# 576 128 448 0
#
# Reformulation has removed 1 variable and 1 equation
from pyomo.environ import *
model = m = ConcreteModel()
m.x2 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x3 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x4 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x5 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x6 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x7 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x8 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x9 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x10 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x11 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x12 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x13 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x14 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x15 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x16 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x17 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x18 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x19 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x20 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x21 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x22 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x23 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x24 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x25 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x26 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x27 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x28 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x29 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x30 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x31 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x32 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x33 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x34 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x35 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x36 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x37 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x38 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x39 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x40 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x41 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x42 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x43 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x44 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x45 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x46 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x47 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x48 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x49 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x50 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x51 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x52 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x53 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x54 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x55 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x56 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x57 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x58 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x59 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x60 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x61 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x62 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x63 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x64 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x65 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x66 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x67 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x68 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x69 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x70 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x71 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x72 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x73 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x74 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x75 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x76 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x77 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x78 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x79 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x80 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x81 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x82 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x83 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x84 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x85 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x86 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x87 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x88 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x89 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x90 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x91 = Var(within=Reals,bounds=(0,1000),initialize=50)
m.x92 = Var(within=Reals,bounds=(0,1000),initialize=100)
m.x93 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x94 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x95 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x96 = Var(within=Reals,bounds=(0,1),initialize=0.2)
m.x97 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x98 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x99 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x100 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x101 = Var(within=Reals,bounds=(0,10000),initialize=1)
m.x102 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x103 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x104 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x105 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x106 = Var(within=Reals,bounds=(300,800),initialize=400)
m.x107 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x108 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x109 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x110 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x111 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x112 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x113 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x114 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x115 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.x116 = Var(within=Reals,bounds=(0,10000),initialize=0)
m.obj = Objective(expr= - 100*m.x95 + m.x97 + m.x98 + m.x99 + m.x100 + m.x101, sense=minimize)
m.c2 = Constraint(expr= - m.x2 - m.x3 - m.x4 - m.x5 - m.x6 == -50)
m.c3 = Constraint(expr= - m.x7 - m.x8 - m.x9 - m.x10 - m.x11 == -50)
m.c4 = Constraint(expr= - m.x2 - m.x7 + m.x12 - m.x62 - m.x67 - m.x72 - m.x77 - m.x82 == 0)
m.c5 = Constraint(expr= - m.x3 - m.x8 + m.x13 - m.x63 - m.x68 - m.x73 - m.x78 - m.x83 == 0)
m.c6 = Constraint(expr= - m.x4 - m.x9 + m.x14 - m.x64 - m.x69 - m.x74 - m.x79 - m.x84 == 0)
m.c7 = Constraint(expr= - m.x5 - m.x10 + m.x15 - m.x65 - m.x70 - m.x75 - m.x80 - m.x85 == 0)
m.c8 = Constraint(expr= - m.x6 - m.x11 + m.x16 - m.x66 - m.x71 - m.x76 - m.x81 - m.x86 == 0)
m.c9 = Constraint(expr=m.x17*m.x12 - (m.x42*m.x62 + m.x46*m.x67 + m.x50*m.x72 + m.x54*m.x77 + m.x58*m.x82) - m.x2 == 0)
m.c10 = Constraint(expr=m.x18*m.x12 - (m.x43*m.x62 + m.x47*m.x67 + m.x51*m.x72 + m.x55*m.x77 + m.x59*m.x82) - m.x7 == 0)
m.c11 = Constraint(expr=m.x19*m.x12 - (m.x44*m.x62 + m.x48*m.x67 + m.x52*m.x72 + m.x56*m.x77 + m.x60*m.x82) == 0)
m.c12 = Constraint(expr=m.x20*m.x12 - (m.x45*m.x62 + m.x49*m.x67 + m.x53*m.x72 + m.x57*m.x77 + m.x61*m.x82) == 0)
m.c13 = Constraint(expr=m.x21*m.x13 - (m.x42*m.x63 + m.x46*m.x68 + m.x50*m.x73 + m.x54*m.x78 + m.x58*m.x83) - m.x3 == 0)
m.c14 = Constraint(expr=m.x22*m.x13 - (m.x43*m.x63 + m.x47*m.x68 + m.x51*m.x73 + m.x55*m.x78 + m.x59*m.x83) - m.x8 == 0)
m.c15 = Constraint(expr=m.x23*m.x13 - (m.x44*m.x63 + m.x48*m.x68 + m.x52*m.x73 + m.x56*m.x78 + m.x60*m.x83) == 0)
m.c16 = Constraint(expr=m.x24*m.x13 - (m.x45*m.x63 + m.x49*m.x68 + m.x53*m.x73 + m.x57*m.x78 + m.x61*m.x83) == 0)
m.c17 = Constraint(expr=m.x25*m.x14 - (m.x42*m.x64 + m.x46*m.x69 + m.x50*m.x74 + m.x54*m.x79 + m.x58*m.x84) - m.x4 == 0)
m.c18 = Constraint(expr=m.x26*m.x14 - (m.x43*m.x64 + m.x47*m.x69 + m.x51*m.x74 + m.x55*m.x79 + m.x59*m.x84) - m.x9 == 0)
m.c19 = Constraint(expr=m.x27*m.x14 - (m.x44*m.x64 + m.x48*m.x69 + m.x52*m.x74 + m.x56*m.x79 + m.x60*m.x84) == 0)
m.c20 = Constraint(expr=m.x28*m.x14 - (m.x45*m.x64 + m.x49*m.x69 + m.x53*m.x74 + m.x57*m.x79 + m.x61*m.x84) == 0)
m.c21 = Constraint(expr=m.x29*m.x15 - (m.x42*m.x65 + m.x46*m.x70 + m.x50*m.x75 + m.x54*m.x80 + m.x58*m.x85) - m.x5 == 0)
m.c22 = Constraint(expr=m.x30*m.x15 - (m.x43*m.x65 + m.x47*m.x70 + m.x51*m.x75 + m.x55*m.x80 + m.x59*m.x85) - m.x10
== 0)
m.c23 = Constraint(expr=m.x31*m.x15 - (m.x44*m.x65 + m.x48*m.x70 + m.x52*m.x75 + m.x56*m.x80 + m.x60*m.x85) == 0)
m.c24 = Constraint(expr=m.x32*m.x15 - (m.x45*m.x65 + m.x49*m.x70 + m.x53*m.x75 + m.x57*m.x80 + m.x61*m.x85) == 0)
m.c25 = Constraint(expr=m.x33*m.x16 - (m.x42*m.x66 + m.x46*m.x71 + m.x50*m.x76 + m.x54*m.x81 + m.x58*m.x86) - m.x6 == 0)
m.c26 = Constraint(expr=m.x34*m.x16 - (m.x43*m.x66 + m.x47*m.x71 + m.x51*m.x76 + m.x55*m.x81 + m.x59*m.x86) - m.x11
== 0)
m.c27 = Constraint(expr=m.x35*m.x16 - (m.x44*m.x66 + m.x48*m.x71 + m.x52*m.x76 + m.x56*m.x81 + m.x60*m.x86) == 0)
m.c28 = Constraint(expr=m.x36*m.x16 - (m.x45*m.x66 + m.x49*m.x71 + m.x53*m.x76 + m.x57*m.x81 + m.x61*m.x86) == 0)
m.c29 = Constraint(expr= - m.x12 + m.x37 == 0)
m.c30 = Constraint(expr= - m.x13 + m.x38 == 0)
m.c31 = Constraint(expr= - m.x14 + m.x39 == 0)
m.c32 = Constraint(expr= - m.x15 + m.x40 == 0)
m.c33 = Constraint(expr= - m.x16 + m.x41 == 0)
m.c34 = Constraint(expr=m.x42*m.x37 - (m.x17*m.x12 + m.x97*(-m.x107 - m.x108)) == 0)
m.c35 = Constraint(expr=m.x43*m.x37 - (m.x18*m.x12 + m.x97*(-m.x107 - m.x108)) == 0)
m.c36 = Constraint(expr=m.x44*m.x37 - (m.x19*m.x12 + m.x97*m.x107) == 0)
m.c37 = Constraint(expr=m.x45*m.x37 - (m.x20*m.x12 + m.x97*m.x108) == 0)
m.c38 = Constraint(expr=m.x46*m.x38 - (m.x21*m.x13 + m.x98*(-m.x109 - m.x110)) == 0)
m.c39 = Constraint(expr=m.x47*m.x38 - (m.x22*m.x13 + m.x98*(-m.x109 - m.x110)) == 0)
m.c40 = Constraint(expr=m.x48*m.x38 - (m.x23*m.x13 + m.x98*m.x109) == 0)
m.c41 = Constraint(expr=m.x49*m.x38 - (m.x24*m.x13 + m.x98*m.x110) == 0)
m.c42 = Constraint(expr=m.x50*m.x39 - (m.x25*m.x14 + m.x99*(-m.x111 - m.x112)) == 0)
m.c43 = Constraint(expr=m.x51*m.x39 - (m.x26*m.x14 + m.x99*(-m.x111 - m.x112)) == 0)
m.c44 = Constraint(expr=m.x52*m.x39 - (m.x27*m.x14 + m.x99*m.x111) == 0)
m.c45 = Constraint(expr=m.x53*m.x39 - (m.x28*m.x14 + m.x99*m.x112) == 0)
m.c46 = Constraint(expr=m.x54*m.x40 - (m.x29*m.x15 + m.x100*(-m.x113 - m.x114)) == 0)
m.c47 = Constraint(expr=m.x55*m.x40 - (m.x30*m.x15 + m.x100*(-m.x113 - m.x114)) == 0)
m.c48 = Constraint(expr=m.x56*m.x40 - (m.x31*m.x15 + m.x100*m.x113) == 0)
m.c49 = Constraint(expr=m.x57*m.x40 - (m.x32*m.x15 + m.x100*m.x114) == 0)
m.c50 = Constraint(expr=m.x58*m.x41 - (m.x33*m.x16 + m.x101*(-m.x115 - m.x116)) == 0)
m.c51 = Constraint(expr=m.x59*m.x41 - (m.x34*m.x16 + m.x101*(-m.x115 - m.x116)) == 0)
m.c52 = Constraint(expr=m.x60*m.x41 - (m.x35*m.x16 + m.x101*m.x115) == 0)
m.c53 = Constraint(expr=m.x61*m.x41 - (m.x36*m.x16 + m.x101*m.x116) == 0)
m.c54 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x102)*m.x42*m.x43**0.3 + m.x107 == 0)
m.c55 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x103)*m.x46*m.x47**0.3 + m.x109 == 0)
m.c56 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x104)*m.x50*m.x51**0.3 + m.x111 == 0)
m.c57 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x105)*m.x54*m.x55**0.3 + m.x113 == 0)
m.c58 = Constraint(expr=-54000000*exp(-9631.60543532964/m.x106)*m.x58*m.x59**0.3 + m.x115 == 0)
m.c59 = Constraint(expr=-360000*exp(-4815.80271766482/m.x102)*m.x42**0.5*m.x43**1.8 + m.x108 == 0)
m.c60 = Constraint(expr=-360000*exp(-4815.80271766482/m.x103)*m.x46**0.5*m.x47**1.8 + m.x110 == 0)
m.c61 = Constraint(expr=-360000*exp(-4815.80271766482/m.x104)*m.x50**0.5*m.x51**1.8 + m.x112 == 0)
m.c62 = Constraint(expr=-360000*exp(-4815.80271766482/m.x105)*m.x54**0.5*m.x55**1.8 + m.x114 == 0)
m.c63 = Constraint(expr=-360000*exp(-4815.80271766482/m.x106)*m.x58**0.5*m.x59**1.8 + m.x116 == 0)
m.c64 = Constraint(expr= m.x37 - m.x62 - m.x63 - m.x64 - m.x65 - m.x66 - m.x87 == 0)
m.c65 = Constraint(expr= m.x38 - m.x67 - m.x68 - m.x69 - m.x70 - m.x71 - m.x88 == 0)
m.c66 = Constraint(expr= m.x39 - m.x72 - m.x73 - m.x74 - m.x75 - m.x76 - m.x89 == 0)
m.c67 = Constraint(expr= m.x40 - m.x77 - m.x78 - m.x79 - m.x80 - m.x81 - m.x90 == 0)
m.c68 = Constraint(expr= m.x41 - m.x82 - m.x83 - m.x84 - m.x85 - m.x86 - m.x91 == 0)
m.c69 = Constraint(expr= - m.x87 - m.x88 - m.x89 - m.x90 - m.x91 + m.x92 == 0)
m.c70 = Constraint(expr=m.x92*m.x93 - (m.x87*m.x42 + m.x88*m.x46 + m.x89*m.x50 + m.x90*m.x54 + m.x91*m.x58) == 0)
m.c71 = Constraint(expr=m.x92*m.x94 - (m.x87*m.x43 + m.x88*m.x47 + m.x89*m.x51 + m.x90*m.x55 + m.x91*m.x59) == 0)
m.c72 = Constraint(expr=m.x92*m.x95 - (m.x87*m.x44 + m.x88*m.x48 + m.x89*m.x52 + m.x90*m.x56 + m.x91*m.x60) == 0)
m.c73 = Constraint(expr=m.x92*m.x96 - (m.x87*m.x45 + m.x88*m.x49 + m.x89*m.x53 + m.x90*m.x57 + m.x91*m.x61) == 0)
| 48.913194 | 120 | 0.632001 | [
"MIT"
] | ouyang-w-19/decogo | tests/examples/minlplib/ex8_3_13.py | 14,087 | Python |
from setuptools import setup
from requests_tor import __version__
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name="requests_tor",
version=__version__,
author="deedy5",
description="Multithreading requests via TOR with automatic TOR new identity",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/deedy5/requests_tor",
license="MIT",
py_modules=["requests_tor"],
install_requires=["requests>=2.25.0", "stem>=1.8.0"],
classifiers=[
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: Implementation :: CPython",
],
python_requires=">=3.6",
zip_safe=False,
)
| 35.393939 | 82 | 0.638699 | [
"MIT"
] | Omarnabk/requests_tor | setup.py | 1,168 | Python |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Hashing function to make a stochastic classifier deterministic."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
from absl import app
import numpy as np
def compute_hash(features, hash_matrix, hash_vector):
"""Compute hash values for features using the hash function (A * x + c) mod 2.
Args:
features: NumPy float array of shape (n, d), the features to hash.
hash_matrix: NumPy float array of shape (num_feature_bits, num_hash_bits),
a random matrix A to construct the hash function.
hash_vector: NumPy float array of shape (1, num_hash_bits),
a random vector c to construct the hash function.
Returns:
NumPy float array of shape (n, 1) containing the hashed values in [0, 1].
"""
# Helper function to convert an int array to a bit string array.
def convert_int_to_bin(x, dimension):
# Converts x to an array of bit strings of size dimension.
return '{:b}'.format(x).zfill(dimension)[-dimension:]
convert_int_to_bin = np.vectorize(convert_int_to_bin)
# Helper function to convert a bit string array to an into array.
convert_bin_to_int = np.vectorize(lambda x: int(x, 2))
# Number of features and hash bits.
num_features = features.shape[0]
num_feature_bits, num_hash_bits = hash_matrix.shape
# Concatenate features and apply MD5 hash to get a fixed length encoding.
feature_sum_str = [''.join(x) for x in features.astype('str')]
  # hashlib requires bytes, so encode each concatenated feature string first.
  feature_sum_hex = [
      hashlib.md5(s.encode('utf-8')).hexdigest() for s in feature_sum_str
  ]
feature_sum_int = [int(h, 16) for h in feature_sum_hex]
# Binarize features
feature_sum_bin = convert_int_to_bin(
feature_sum_int, dimension=num_feature_bits)
feature_sum_bin_matrix = np.array(
[[int(c) for c in s] for s in feature_sum_bin])
# Compute hash (Ax + c) mod 2.
feature_hashed = (
np.dot(feature_sum_bin_matrix, hash_matrix) +
np.repeat(hash_vector, repeats=num_features, axis=0))
feature_hashed_bits = np.mod(feature_hashed, 2)
# Convert hash to bit string.
feature_hashed_bit_char = convert_int_to_bin(feature_hashed_bits, 1)
feature_hashed_bit_str = [''.join(s) for s in feature_hashed_bit_char]
feature_hashed_int = convert_bin_to_int(feature_hashed_bit_str)
hashed_val = feature_hashed_int * 1. / 2 ** num_hash_bits
# Return normalized hashed values in [0, 1].
return hashed_val.reshape(-1, 1)
def main(argv):
"""Example usage of hash function."""
del argv
num_feature_bits = 128
num_hash_bits = 32
# Random hash matrix and vector to construct hash function.
hash_matrix = (np.random.rand(
num_feature_bits, num_hash_bits) > 0.5).astype('int')
hash_vector = (np.random.rand(1, num_hash_bits) > 0.5).astype('int')
# Generate random features.
num_examples = 10
dimension = 4
features = np.random.normal(size=(num_examples, dimension)).astype(np.float32)
# Compute hash.
hash_val = compute_hash(features, hash_matrix, hash_vector)
print('Feature matrix:')
print(features)
print('\nHashed values:')
print(hash_val)
if __name__ == '__main__':
app.run(main)
| 34.330275 | 80 | 0.736772 | [
"Apache-2.0"
] | 3rd/google-research | stochastic_to_deterministic/hashing.py | 3,742 | Python |
# Copyright (C) 2018-2022 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import numpy as np
from openvino.tools.mo.front.common.partial_infer.elemental import copy_shape_infer
from openvino.tools.mo.graph.graph import Graph
from openvino.tools.mo.ops.op import Op
class PReLU(Op):
op = 'PReLU'
enabled = True
def __init__(self, graph: Graph, attrs: dict):
super().__init__(graph, {
'op': self.op,
'type': self.op,
'version': 'opset1',
'infer': self.infer,
'force_precision_in_ports': {1: 'float'},
'in_ports_count': 2,
'out_ports_count': 1,
}, attrs)
@staticmethod
def infer(node):
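        # A second input, when present, carries the learned slope (gamma) values:
        # note whether a single slope is shared across all channels, then
        # propagate the input shape to the output.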
if len(node.in_nodes()) == 2:
gamma_vector = node.in_node(1)
if np.all(gamma_vector.shape == [1]):
node['channel_shared'] = 1
else:
node['channel_shared'] = 0
node.in_node(1)['correct_data_type'] = True
copy_shape_infer(node)
| 26.075 | 83 | 0.57814 | [
"Apache-2.0"
] | 3Demonica/openvino | tools/mo/openvino/tools/mo/ops/prelu.py | 1,043 | Python |
import sys, os
import pytest
import tenseal.sealapi as sealapi
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
from utils import *
@pytest.mark.parametrize(
"compr_type",
[sealapi.COMPR_MODE_TYPE.NONE, sealapi.COMPR_MODE_TYPE.ZLIB, sealapi.COMPR_MODE_TYPE.ZSTD],
)
def test_serialization_compression(compr_type):
assert sealapi.Serialization.IsSupportedComprMode(compr_type) is True
assert sealapi.Serialization.ComprSizeEstimate(8, compr_type) > 0
def test_serialization_sanity():
assert int(sealapi.COMPR_MODE_TYPE.NONE) == 0
assert int(sealapi.COMPR_MODE_TYPE.ZLIB) == 1
assert int(sealapi.COMPR_MODE_TYPE.ZSTD) == 2
header = sealapi.Serialization.SEALHeader()
assert header.magic == 0xA15E
assert header.header_size == 0x10
assert header.version_major == 3
assert header.version_minor == 0x6
assert header.compr_mode == sealapi.COMPR_MODE_TYPE.NONE
assert header.size == 0
assert header.reserved == 0
assert sealapi.Serialization.IsSupportedComprMode(15) is False
header = sealapi.Serialization.SEALHeader()
assert sealapi.Serialization.IsValidHeader(header) is True
header = sealapi.Serialization.SEALHeader()
header.compr_mode = sealapi.COMPR_MODE_TYPE.ZLIB
def save_load(path):
sealapi.Serialization.SaveHeader(header, path)
save_test = sealapi.Serialization.SEALHeader()
sealapi.Serialization.LoadHeader(path, save_test, True)
assert save_test.compr_mode == sealapi.COMPR_MODE_TYPE.ZLIB
sealapi.Serialization.LoadHeader(path, save_test, False)
assert save_test.compr_mode == sealapi.COMPR_MODE_TYPE.ZLIB
tmp_file(save_load)
@pytest.mark.parametrize(
"factory",
[
sealapi.Blake2xbPRNGFactory.DefaultFactory(),
sealapi.Blake2xbPRNGFactory(),
sealapi.Blake2xbPRNGFactory([sealapi.random_uint64() for i in range(8)]),
sealapi.Shake256PRNGFactory.DefaultFactory(),
sealapi.Shake256PRNGFactory(),
sealapi.Shake256PRNGFactory([sealapi.random_uint64() for i in range(8)]),
],
)
def test_randomgen(factory):
assert sealapi.random_uint64() != sealapi.random_uint64()
for generator in [
factory.create(),
factory.create([sealapi.random_uint64() for i in range(8)]),
]:
assert generator.generate() != generator.generate()
adapter = sealapi.RandomToStandardAdapter(generator)
assert adapter() != adapter()
for i in range(1024):
generator.refresh()
generator.generate()
def test_intarray():
testcase = sealapi.Plaintext("3x^3 + 1x^1 + 3")
int_arr = testcase.dyn_array()
assert int_arr[0] == 3
assert int_arr.at(3) == 3
assert int_arr.empty() is False
assert int_arr.max_size() == 2 ** 64 - 1
assert int_arr.size() == 4
assert int_arr.capacity() == 4
def save_load(path):
int_arr.save(path)
save_test = sealapi.DynArray()
save_test.load(path)
assert save_test[0] == 3
tmp_file(save_load)
int_arr.resize(10, True)
assert int_arr.capacity() == 10
assert int_arr.size() == 10
int_arr.reserve(30)
assert int_arr.capacity() == 30
assert int_arr.capacity() == 30
int_arr.shrink_to_fit()
assert int_arr.capacity() == 10
assert int_arr.size() == 10
int_arr.clear()
assert int_arr.size() == 0
assert int_arr.capacity() == 10
assert int_arr.empty() is True
int_arr.release()
assert int_arr.capacity() == 0
def test_plaintext():
testcase = sealapi.Plaintext()
assert testcase.coeff_count() == 0
testcase = sealapi.Plaintext(15)
assert testcase.coeff_count() == 15
testcase = sealapi.Plaintext(100, 15)
assert testcase.coeff_count() == 15
assert testcase.capacity() == 100
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.coeff_count() == 4
assert testcase.significant_coeff_count() == 4
assert testcase.capacity() == 4
testcase2 = testcase
assert testcase2.coeff_count() == 4
assert testcase2.capacity() == 4
testcase = sealapi.Plaintext(100, 15)
testcase.reserve(200)
assert testcase.capacity() == 200
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.capacity() == 4
testcase.reserve(200)
assert testcase.capacity() == 200
testcase.shrink_to_fit()
assert testcase.capacity() == 4
assert testcase.dyn_array()[3] == 0x7FF
assert testcase.data(3) == 0x7FF
assert testcase.parms_id() == [0, 0, 0, 0]
assert testcase.scale == 1.0
assert testcase[3] == 0x7FF
assert testcase.to_string() == "7FFx^3 + 1x^1 + 3"
testcase.release()
assert testcase.coeff_count() == 0
testcase = sealapi.Plaintext("7FFx^3 + 1x^1 + 3")
assert testcase.coeff_count() == 4
assert testcase.nonzero_coeff_count() == 3
testcase.resize(10)
assert testcase.coeff_count() == 10
testcase.set_zero()
assert testcase.is_zero()
assert testcase.nonzero_coeff_count() == 0
testcase = sealapi.Plaintext("7FFx^3 + 2x^1 + 3")
assert testcase.is_ntt_form() is False
def save_load(path):
testcase = sealapi.Plaintext("7FFx^3 + 2x^1 + 3")
testcase.save(path)
ctx = helper_context_bfv()
save_test = sealapi.Plaintext()
save_test.load(ctx, path)
assert save_test.coeff_count() == 4
tmp_file(save_load)
@pytest.mark.parametrize("testcase", [[1, 2, 3, 4, 5, 6, 7, 8], [i for i in range(200)]])
@pytest.mark.parametrize(
"scheme,ctx",
[
(sealapi.SCHEME_TYPE.BFV, helper_context_bfv()),
(sealapi.SCHEME_TYPE.CKKS, helper_context_ckks()),
],
)
def test_ciphertext(testcase, scheme, ctx):
poly_modulus_degree = helper_poly_modulus_degree(ctx)
ctx_data = ctx.key_context_data()
parms = ctx_data.parms()
coeff_mod_count = len(parms.coeff_modulus())
keygen = sealapi.KeyGenerator(ctx)
ciphertext = sealapi.Ciphertext(ctx)
plaintext = helper_encode(scheme, ctx, testcase)
pk = sealapi.PublicKey()
keygen.create_public_key(pk)
encryptor = sealapi.Encryptor(ctx, pk)
decryptor = sealapi.Decryptor(ctx, keygen.secret_key())
encryptor.encrypt(plaintext, ciphertext)
assert len(ciphertext.parms_id()) > 0
assert ciphertext.scale > 0
assert ciphertext.coeff_modulus_size() == coeff_mod_count - 1
assert ciphertext.poly_modulus_degree() == poly_modulus_degree
assert ciphertext.dyn_array().size() > 0
assert ciphertext.size() == 2
assert ciphertext.size_capacity() == 2
assert ciphertext.is_transparent() is False
assert ciphertext.is_ntt_form() is (scheme == sealapi.SCHEME_TYPE.CKKS)
def save_load(path):
ciphertext.save(path)
save_test = sealapi.Ciphertext(ctx)
save_test.load(ctx, path)
decryptor.decrypt(save_test, plaintext)
decoded = helper_decode(scheme, ctx, plaintext)
is_close_enough(decoded[: len(testcase)], testcase)
tmp_file(save_load)
ciphertext.resize(ctx, 10)
assert ciphertext.size() == 10
assert ciphertext.size_capacity() == 10
ciphertext.reserve(15)
assert ciphertext.size() == 10
assert ciphertext.size_capacity() == 15
| 30.472803 | 95 | 0.679665 | [
"Apache-2.0"
] | CerineBnsd/TenSEAL | tests/python/sealapi/test_sanity.py | 7,283 | Python |
import assemblyline_client
import mocks
import mock
from base64 import b64decode
def test_bad_cert():
"""Make sure that the client detects that the test cert is self signed."""
with mocks.Server() as server:
try:
assemblyline_client.get_client(server.address)
assert False
except assemblyline_client.ClientError as ce:
assert 'CERTIFICATE_VERIFY_FAILED' in str(ce) or 'certificate verify failed' in str(ce)
def test_noauth():
"""The test server should let us login with no authentication."""
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False)
assert len(server.logins) == 1
def test_noauth_submit(mocker):
"""Submit a file and ensure that the same file is unpacked."""
with mocks.Server() as server:
client = assemblyline_client.get_client(server.address, verify=False)
submits = server.submits
# Submit a file with contents
client.submit(path='readme.txt', contents=b'abc123')
assert len(submits) == 1
assert b64decode(submits[0]['binary']) == b'abc123'
assert submits[0]['name'] == 'readme.txt'
submits.pop()
# Submit a file from a file
mocker.patch('os.path.exists', return_value=True)
mocker.patch('assemblyline_client.v3_client.open', mock.mock_open(read_data=b'abc123'), create=True)
client.submit(path='readme.txt')
assert len(submits) == 1
assert b64decode(submits[0]['binary']) == b'abc123'
assert submits[0]['name'] == 'readme.txt'
submits.pop()
def test_encrypt_password_auth():
"""Send an encryped password and decrypt it."""
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, auth=('username', 'password'))
assert len(server.logins) == 1
assert server.logins[0]['user'] == 'username'
assert server.logins[0]['password'] != 'password'
assert server.private_key.decrypt(b64decode(server.logins[0]['password']), 'ERROR') == b'password'
def test_encrypt_apikey_auth():
"""Send an encryped apikey and decrypt it."""
with mocks.Server() as server:
assemblyline_client.get_client(server.address, verify=False, apikey=('username', 'ANAPIKEY'))
assert len(server.logins) == 1
assert server.logins[0]['user'] == 'username'
assert server.logins[0]['apikey'] != 'ANAPIKEY'
assert server.private_key.decrypt(b64decode(server.logins[0]['apikey']), 'ERROR') == b'ANAPIKEY'
| 38 | 108 | 0.662152 | [
"MIT"
] | IanLee1521/assemblyline_client | test/test_v3_client.py | 2,584 | Python |
from datetime import datetime, timedelta
from typing import List, Optional
from sqlalchemy import or_
from dispatch.plugin import service as plugin_service
from dispatch.event import service as event_service
from dispatch.incident import flows as incident_flows
from dispatch.incident.flows import incident_service
from dispatch.ticket import service as ticket_service
from .models import Task, TaskStatus, TaskUpdate, TaskCreate
def get(*, db_session, task_id: int) -> Optional[Task]:
"""Get a single task by ID."""
return db_session.query(Task).filter(Task.id == task_id).first()
def get_by_resource_id(*, db_session, resource_id: str) -> Optional[Task]:
"""Get a single task by resource id."""
return db_session.query(Task).filter(Task.resource_id == resource_id).first()
def get_all(*, db_session) -> List[Optional[Task]]:
"""Return all tasks."""
return db_session.query(Task)
def get_all_by_incident_id(*, db_session, incident_id: int) -> List[Optional[Task]]:
"""Get all tasks by incident id."""
return db_session.query(Task).filter(Task.incident_id == incident_id)
def get_all_by_incident_id_and_status(
*, db_session, incident_id: int, status: str
) -> List[Optional[Task]]:
"""Get all tasks by incident id and status."""
return (
db_session.query(Task).filter(Task.incident_id == incident_id).filter(Task.status == status)
)
def get_overdue_tasks(*, db_session) -> List[Optional[Task]]:
"""Returns all tasks that have not been resolved and are past due date."""
# TODO ensure that we don't send reminders more than their interval
return (
db_session.query(Task)
.filter(Task.status == TaskStatus.open)
.filter(Task.reminders == True) # noqa
.filter(Task.resolve_by < datetime.utcnow())
.filter(
or_(
Task.last_reminder_at + timedelta(days=1)
< datetime.utcnow(), # daily reminders after due date.
Task.last_reminder_at == None,
)
)
.all()
)
def create(*, db_session, task_in: TaskCreate) -> Task:
"""Create a new task."""
incident = incident_service.get(db_session=db_session, incident_id=task_in.incident.id)
tickets = [
ticket_service.get_or_create_by_weblink(
db_session=db_session, weblink=t.weblink, resource_type="task-ticket"
)
for t in task_in.tickets
]
assignees = []
for i in task_in.assignees:
assignee = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=i.individual.email,
)
# due to the freeform nature of task assignment, we can sometimes pick up other emails
        # e.g. a google group that we cannot resolve to an individual assignee
if assignee:
assignees.append(assignee)
creator_email = None
if not task_in.creator:
creator_email = task_in.owner.individual.email
else:
creator_email = task_in.creator.individual.email
# add creator as a participant if they are not one already
creator = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=creator_email,
)
# if we cannot find any assignees, the creator becomes the default assignee
if not assignees:
assignees.append(creator)
# we add owner as a participant if they are not one already
if task_in.owner:
owner = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=incident.id,
user_email=task_in.owner.individual.email,
)
else:
owner = incident.commander
task = Task(
**task_in.dict(exclude={"assignees", "owner", "incident", "creator", "tickets"}),
creator=creator,
owner=owner,
assignees=assignees,
incident=incident,
tickets=tickets,
)
event_service.log(
db_session=db_session,
source="Dispatch Core App",
description="New incident task created",
details={"weblink": task.weblink},
incident_id=incident.id,
)
db_session.add(task)
db_session.commit()
return task
def update(*, db_session, task: Task, task_in: TaskUpdate, sync_external: bool = True) -> Task:
"""Update an existing task."""
# ensure we add assignee as participant if they are not one already
assignees = []
for i in task_in.assignees:
assignees.append(
incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=task.incident.id,
user_email=i.individual.email,
)
)
task.assignees = assignees
# we add owner as a participant if they are not one already
if task_in.owner:
task.owner = incident_flows.incident_add_or_reactivate_participant_flow(
db_session=db_session,
incident_id=task.incident.id,
user_email=task_in.owner.individual.email,
)
update_data = task_in.dict(
skip_defaults=True, exclude={"assignees", "owner", "creator", "incident", "tickets"}
)
for field in update_data.keys():
setattr(task, field, update_data[field])
# if we have an external task plugin enabled, attempt to update the external resource as well
    # we don't currently have a good way to get the correct file_id (we don't store a task <-> document relationship)
    # so let's try both the incident doc and the PIR doc
drive_task_plugin = plugin_service.get_active(db_session=db_session, plugin_type="task")
if drive_task_plugin:
if sync_external:
try:
if task.incident.incident_document:
file_id = task.incident.incident_document.resource_id
drive_task_plugin.instance.update(
file_id, task.resource_id, resolved=task.status
)
except Exception:
if task.incident.incident_review_document:
file_id = task.incident.incident_review_document.resource_id
drive_task_plugin.instance.update(
file_id, task.resource_id, resolved=task.status
)
db_session.add(task)
db_session.commit()
return task
def delete(*, db_session, task_id: int):
"""Delete an existing task."""
task = db_session.query(Task).filter(Task.id == task_id).first()
db_session.delete(task)
db_session.commit()
| 34.613402 | 108 | 0.656739 | [
"Apache-2.0"
] | WouldYouKindly/dispatch | src/dispatch/task/service.py | 6,715 | Python |
import logging
import sentry_sdk
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import LoggingIntegration
from sentry_sdk.integrations.celery import CeleryIntegration
from .base import * # noqa
from .base import env
# GENERAL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
SECRET_KEY = env("DJANGO_SECRET_KEY")
# https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = env.list("DJANGO_ALLOWED_HOSTS", default=["veritas.ke"])
# DATABASES
# ------------------------------------------------------------------------------
DATABASES["default"] = env.db("DATABASE_URL") # noqa F405
DATABASES["default"]["ATOMIC_REQUESTS"] = True # noqa F405
DATABASES["default"]["CONN_MAX_AGE"] = env.int("CONN_MAX_AGE", default=60) # noqa F405
# CACHES
# ------------------------------------------------------------------------------
CACHES = {
"default": {
"BACKEND": "django_redis.cache.RedisCache",
"LOCATION": env("REDIS_URL"),
"OPTIONS": {
"CLIENT_CLASS": "django_redis.client.DefaultClient",
# Mimicing memcache behavior.
# http://niwinz.github.io/django-redis/latest/#_memcached_exceptions_behavior
"IGNORE_EXCEPTIONS": True,
},
}
}
# SECURITY
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-proxy-ssl-header
SECURE_PROXY_SSL_HEADER = ("HTTP_X_FORWARDED_PROTO", "https")
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-ssl-redirect
SECURE_SSL_REDIRECT = env.bool("DJANGO_SECURE_SSL_REDIRECT", default=True)
# https://docs.djangoproject.com/en/dev/ref/settings/#session-cookie-secure
SESSION_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/ref/settings/#csrf-cookie-secure
CSRF_COOKIE_SECURE = True
# https://docs.djangoproject.com/en/dev/topics/security/#ssl-https
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-seconds
# TODO: set this to 60 seconds first and then to 518400 once you prove the former works
SECURE_HSTS_SECONDS = 60
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-include-subdomains
SECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(
"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS", default=True
)
# https://docs.djangoproject.com/en/dev/ref/settings/#secure-hsts-preload
SECURE_HSTS_PRELOAD = env.bool("DJANGO_SECURE_HSTS_PRELOAD", default=True)
# https://docs.djangoproject.com/en/dev/ref/middleware/#x-content-type-options-nosniff
SECURE_CONTENT_TYPE_NOSNIFF = env.bool(
"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF", default=True
)
# STORAGES
# ------------------------------------------------------------------------------
# https://django-storages.readthedocs.io/en/latest/#installation
INSTALLED_APPS += ["storages"] # noqa F405
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = env("DJANGO_AWS_ACCESS_KEY_ID")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_SECRET_ACCESS_KEY = env("DJANGO_AWS_SECRET_ACCESS_KEY")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_STORAGE_BUCKET_NAME = env("DJANGO_AWS_STORAGE_BUCKET_NAME")
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_QUERYSTRING_AUTH = False
# DO NOT change these unless you know what you're doing.
_AWS_EXPIRY = 60 * 60 * 24 * 7
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_OBJECT_PARAMETERS = {
'CacheControl': 'max-age=86400',
}
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_DEFAULT_ACL = None
# https://django-storages.readthedocs.io/en/latest/backends/amazon-S3.html#settings
AWS_S3_REGION_NAME = env("DJANGO_AWS_S3_REGION_NAME", default=None)
AWS_S3_ENDPOINT_URL = env("AWS_S3_ENDPOINT_URL")
# STATIC
# ------------------------
STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# MEDIA
# ------------------------------------------------------------------------------
MEDIAFILES_STORAGE = "storages.backends.s3boto3.S3Boto3Storage"
# region http://stackoverflow.com/questions/10390244/
# Full-fledge class: https://stackoverflow.com/a/18046120/104731
from storages.backends.s3boto3 import S3Boto3Storage # noqa E402
class StaticRootS3Boto3Storage(S3Boto3Storage):
location = "static"
default_acl = "public-read"
class MediaRootS3Boto3Storage(S3Boto3Storage):
location = "media"
file_overwrite = False
# endregion
DEFAULT_FILE_STORAGE = "config.settings.production.MediaRootS3Boto3Storage"
MEDIA_URL = f"https://{AWS_STORAGE_BUCKET_NAME}.s3.amazonaws.com/media/"
# TEMPLATES
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#templates
TEMPLATES[0]["OPTIONS"]["loaders"] = [ # noqa F405
(
"django.template.loaders.cached.Loader",
[
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
],
)
]
# EMAIL
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#default-from-email
DEFAULT_FROM_EMAIL = env(
"DJANGO_DEFAULT_FROM_EMAIL", default="Veritas <[email protected]>"
)
# https://docs.djangoproject.com/en/dev/ref/settings/#server-email
SERVER_EMAIL = env("DJANGO_SERVER_EMAIL", default=DEFAULT_FROM_EMAIL)
# https://docs.djangoproject.com/en/dev/ref/settings/#email-subject-prefix
EMAIL_SUBJECT_PREFIX = env(
"DJANGO_EMAIL_SUBJECT_PREFIX", default="[Veritas]"
)
# ADMIN
# ------------------------------------------------------------------------------
# Django Admin URL regex.
ADMIN_URL = env("DJANGO_ADMIN_URL")
# Anymail (Mailgun)
# ------------------------------------------------------------------------------
# https://anymail.readthedocs.io/en/stable/installation/#installing-anymail
INSTALLED_APPS += ["anymail"] # noqa F405
EMAIL_BACKEND = "anymail.backends.mailgun.EmailBackend"
# https://anymail.readthedocs.io/en/stable/installation/#anymail-settings-reference
ANYMAIL = {
"MAILGUN_API_KEY": env("MAILGUN_API_KEY"),
"MAILGUN_SENDER_DOMAIN": env("MAILGUN_DOMAIN"),
"MAILGUN_API_URL": env("MAILGUN_API_URL", default="https://api.mailgun.net/v3"),
}
# WhiteNoise
# ------------------------------------------------------------------------------
# http://whitenoise.evans.io/en/latest/django.html#enable-whitenoise
MIDDLEWARE.insert(1, "whitenoise.middleware.WhiteNoiseMiddleware") # noqa F405
# LOGGING
# ------------------------------------------------------------------------------
# https://docs.djangoproject.com/en/dev/ref/settings/#logging
# See https://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
"version": 1,
"disable_existing_loggers": True,
"formatters": {
"verbose": {
"format": "%(levelname)s %(asctime)s %(module)s "
"%(process)d %(thread)d %(message)s"
}
},
"handlers": {
"console": {
"level": "DEBUG",
"class": "logging.StreamHandler",
"formatter": "verbose",
}
},
"root": {"level": "INFO", "handlers": ["console"]},
"loggers": {
"django.db.backends": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
# Errors logged by the SDK itself
"sentry_sdk": {"level": "ERROR", "handlers": ["console"], "propagate": False},
"django.security.DisallowedHost": {
"level": "ERROR",
"handlers": ["console"],
"propagate": False,
},
},
}
# Sentry
# ------------------------------------------------------------------------------
SENTRY_DSN = env("SENTRY_DSN")
SENTRY_LOG_LEVEL = env.int("DJANGO_SENTRY_LOG_LEVEL", logging.INFO)
sentry_logging = LoggingIntegration(
level=SENTRY_LOG_LEVEL, # Capture info and above as breadcrumbs
event_level=logging.ERROR, # Send errors as events
)
sentry_sdk.init(
dsn=SENTRY_DSN,
integrations=[sentry_logging, DjangoIntegration(), CeleryIntegration()],
)
# Your stuff...
# ------------------------------------------------------------------------------
| 39.256881 | 89 | 0.6276 | [
"MIT"
] | Musyimi97/veritasLtd | config/settings/production.py | 8,558 | Python |
#!/usr/bin/env python3
import boto3
s3_client = boto3.client('s3')
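# list_buckets() returns every bucket owned by the configured credentials;
# print only the bucket names.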
raw_response = s3_client.list_buckets()
for bucket in raw_response['Buckets']:
print(bucket['Name'])
| 24.571429 | 39 | 0.738372 | [
"MIT"
] | sw33tr0ll/aws-training | labs/lab2.py | 172 | Python |
# -*- coding: utf-8 -*-
import os
import sys
import datetime
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from django.conf import settings
from app1.models import Thing
class Command(BaseCommand):
args = '<id name>'
help = 'create or update thing model.'
use_settings = 'settings'
def handle(self, *args, **options):
"""
        Raising CommandError finishes with exit code 1; any other path exits with code 0.
"""
_retcode = 1
_dbname = 'default'
try:
print('settings.ENV_MODE = %s' % (settings.ENV_MODE))
print('settings.DATABASES = %s' % (settings.DATABASES))
_id = int(args[0])
_name = args[1]
print('id: %s, name:%s' % (_id, _name))
qs = Thing.objects.filter(id=_id)
_nowdt = timezone.now()
if 0 < len(qs):
print('do update.')
_r = qs[0]
# _r.id
_r.name = _name
# _r.create_at
_r.update_at = _nowdt
_r.save(using=_dbname)
else:
print('do insert.')
if _id < 1:
_id = None
_t = Thing(
id=_id,
name=_name,
create_at=_nowdt,
update_at=_nowdt)
_t.save(using=_dbname)
except:
print('EXCEPT: %s(%s)' % (sys.exc_info()[0], sys.exc_info()[1]))
print('finished(ng)')
raise CommandError('ng')
# raise CommandError('ok')
print('finished(ok)')
sys.exit(0)
| 27.318182 | 76 | 0.460344 | [
"BSD-2-Clause"
] | dictoss/proto | python-django/djmultidb/app1/management/commands/set_thing.py | 1,803 | Python |
import os
import yaml
from .cassette import Cassette
def load_cassette(cassette_path):
try:
pc = yaml.load(open(cassette_path))
cassette = Cassette(pc)
return cassette
except IOError:
return None
def save_cassette(cassette_path, cassette):
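    # Create the cassette directory if needed, then serialize the cassette to YAML.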
dirname, filename = os.path.split(cassette_path)
if not os.path.exists(dirname):
os.makedirs(dirname)
    with open(cassette_path, 'w') as cassette_file:
cassette_file.write(yaml.dump(cassette.serialize()))
| 24.571429 | 60 | 0.689922 | [
"MIT"
] | charlax/vcrpy | vcr/files.py | 516 | Python |
from audio import Stream, AudioSettings
class PhraseRecognizer(object):
def __init__(self, config, audio_settings: AudioSettings):
self._config = config
self._audio_settings = audio_settings
def get_config(self):
return self._config
def get_audio_settings(self) -> AudioSettings:
return self._audio_settings
async def recognize(self, stream: Stream, recv_callback):
raise Exception('Not implemented "recognize"')
class HotwordRecognizer(object):
def __init__(self, config):
self._config = config
def get_audio_settings(self) -> AudioSettings:
raise Exception('Not implemented "get_audio_settings"')
def start(self):
pass
def is_hotword(self, raw_frames) -> bool:
raise Exception('Not implemented "is_hotword"')
class VADRecognizer(object):
def __init__(self, config):
self._config = config
def get_audio_settings(self) -> AudioSettings:
raise Exception('Not implemented "get_audio_settings"')
def is_speech(self, raw_frames) -> bool:
raise Exception('Not implemented "is_speech"')
class PhraseRecognizerConfig(object):
def create_phrase_recognizer(self) -> PhraseRecognizer:
raise Exception('Not implemented "create_phrase_recognizer"')
class HotwordRecognizerConfig(object):
def create_hotword_recognizer(self) -> HotwordRecognizer:
raise Exception('Not implemented "create_hotword_recognizer"')
class VADRecognizerConfig(object):
def create_vad_recognizer(self) -> VADRecognizer:
raise Exception('Not implemented "create_vad_recognizer"')
| 28.719298 | 70 | 0.717776 | [
"Apache-2.0"
] | ReanGD/smart-home | recognition/base.py | 1,637 | Python |
import datetime as dt
from airflow.models import DAG
from airflow.operators.dummy import DummyOperator
from airflow.operators.python import PythonOperator
default_args = {
'start_date': dt.datetime.now() - dt.timedelta(days=7),
'owner': 'airflow'
}
def throw_error():
raise Exception('It failed!')
with DAG(dag_id='test_dag_failure', description='A DAG that always fail.', default_args=default_args, tags=['test'], schedule_interval=None) as dag:
should_succeed = DummyOperator(
task_id='should_succeed'
)
should_fail = PythonOperator(
task_id='should_fail',
python_callable=throw_error
)
should_succeed >> should_fail
| 23.586207 | 148 | 0.717836 | [
"MIT"
] | GrokData/grok-airflow-dags | dags/test_dag_failure.py | 684 | Python |
from django.db import models
from django.contrib.auth.models import User
from django.conf import settings
from django.utils import timezone
# Create your models here.
class Article(models.Model):
    title = models.CharField(max_length=100)
    slug = models.SlugField(blank=True)
    body = models.TextField()
    date = models.DateTimeField(default=timezone.now)
    thumb = models.ImageField(default='default.jpg', blank=True)
    Author = models.ForeignKey(User, default=None, on_delete=models.CASCADE)
#Thumbnails
def __str__(self):
return self.title
def snippets(self):
return self.body[:80] + '...'
| 31.809524 | 77 | 0.685629 | [
"MIT"
] | Blaise-design/Django-Hospital-Project | articles/models.py | 668 | Python |
import pygad
import functools
import operator
import numpy
def fitness_func(genes, solution_idx):
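    # Card-splitting fitness: the first five genes should sum to 36 and the last
    # five should multiply to 360; duplicated card values are penalized.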
group1Sum = sum(genes[0:5])
group2Product = functools.reduce(operator.mul, genes[5:10])
duplicateCount = (len(genes) - len(set(genes)))
return 1 / ((abs(36 - group1Sum) + abs(360 - group2Product)) + 1) - duplicateCount
geneset = numpy.array([[i + 1 for i in range(10)], [i + 1 for i in range(10)]])
ga_instance = pygad.GA(num_generations=50,
num_parents_mating=1,
sol_per_pop=50,
fitness_func=fitness_func,
initial_population=None,
num_genes=10,
gene_type=int,
init_range_low=1,
init_range_high=10,
parent_selection_type="rank",
keep_parents=-1,
crossover_type=None,
mutation_type="swap",
mutation_percent_genes=40,
gene_space=[i + 1 for i in range(10)],
allow_duplicate_genes=False,
stop_criteria="reach_1")
ga_instance.run()
solution, solution_fitness, solution_idx = ga_instance.best_solution()
print("Parameters of the best solution : {solution}".format(solution=solution))
print("Fitness value of the best solution = {solution_fitness}".format(
solution_fitness=solution_fitness))
print("Solution index of best solution = {solution_idx}".format(
solution_idx=solution_idx))
| 35.863636 | 86 | 0.581749 | [
"Apache-2.0"
] | monfared01/GeneticAlgorithmsWithPython | withPyGAD/ch06/cardTests.py | 1,578 | Python |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from botbuilder.dialogs.memory import scope_path
from .memory_scope import MemoryScope
class SettingsMemoryScope(MemoryScope):
def __init__(self):
super().__init__(scope_path.SETTINGS)
self._empty_settings = {}
self.include_in_snapshot = False
def get_memory(self, dialog_context: "DialogContext") -> object:
if not dialog_context:
raise TypeError(f"Expecting: DialogContext, but received None")
settings: dict = dialog_context.context.turn_state.get(
scope_path.SETTINGS, None
)
if not settings:
settings = self._empty_settings
return settings
def set_memory(self, dialog_context: "DialogContext", memory: object):
raise Exception(
f"{self.__class__.__name__}.set_memory not supported (read only)"
)
| 29.5 | 77 | 0.680085 | [
"MIT"
] | Ask-Waldo/botbuilder-python | libraries/botbuilder-dialogs/botbuilder/dialogs/memory/scopes/settings_memory_scope.py | 944 | Python |
# coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.11
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_11 import models
class PortCommon(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'iqn': 'str',
'nqn': 'str',
'portal': 'str',
'wwn': 'str'
}
attribute_map = {
'iqn': 'iqn',
'nqn': 'nqn',
'portal': 'portal',
'wwn': 'wwn'
}
required_args = {
}
def __init__(
self,
iqn=None, # type: str
nqn=None, # type: str
portal=None, # type: str
wwn=None, # type: str
):
"""
Keyword args:
iqn (str): The iSCSI Qualified Name (or `null` if target is not iSCSI).
nqn (str): NVMe Qualified Name (or `null` if target is not NVMeoF).
portal (str): IP and port number (or `null` if target is not iSCSI).
wwn (str): Fibre Channel World Wide Name (or `null` if target is not Fibre Channel).
"""
if iqn is not None:
self.iqn = iqn
if nqn is not None:
self.nqn = nqn
if portal is not None:
self.portal = portal
if wwn is not None:
self.wwn = wwn
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `PortCommon`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(PortCommon, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, PortCommon):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 28.992308 | 105 | 0.529053 | [
"BSD-2-Clause"
] | Flav-STOR-WL/py-pure-client | pypureclient/flasharray/FA_2_11/models/port_common.py | 3,769 | Python |
# Generated by Django 3.0.6 on 2020-06-22 13:39
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('src', '0010_temporaryuser'),
]
operations = [
migrations.RemoveField(
model_name='admin',
name='username',
),
migrations.AlterField(
model_name='temporaryuser',
name='mail',
field=models.EmailField(max_length=320),
),
]
| 21.217391 | 52 | 0.567623 | [
"MIT"
] | DipikaPawar12/Visitor-Management-System | visitor_manage/src/migrations/0011_auto_20200622_1909.py | 488 | Python |
# # Python Week-7 Day-42
# Python Classes and Objects 2
print(" -- Let us create a method in the Person class --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name )
p1 = Person("John", "36")
p1.myfunc()
print("----")
class Car:
def __init__(self, brand, price):
self.brand = brand
self.price = price
def myfunc(self):
print("Car brand Is: " + self.brand, "\nCar Price Is: " + self.price)
p1 = Car("Kia", "10000")
p1.myfunc()
print("\n -- Modify Object Properties -- ")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
p1.age = 40
print(p1.age)
print("\n -- Delete Object Properties --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
try :
del p1.age
print(p1.age)
except AttributeError as err:
print("Properties 'age' not Exist")
print("\n -- Delete Objects --")
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
def myfunc(self):
print("Hello my name is " + self.name)
p1 = Person("John", 36)
del p1
try :
print(p1.age)
except NameError as err:
print("p1 is not Defined")
| 20.847222 | 77 | 0.588941 | [
"Unlicense"
] | abusamrah2005/Python | Week-7/Day-42.py | 1,501 | Python |
"""
@brief Pure python implementation of the Bayesian Blocks algorithm
described by Jackson, Scargle et al. 2005, IEEE Signal Processing
Letters, 12, 105. (http://arxiv.org/abs/math/0309285)
@author J. Chiang <[email protected]>
"""
#
# $Id: BayesianBlocks_python.py,v 1.1.1.1 2011/09/03 00:55:59 jchiang Exp $
#
import copy
import numpy as num
def gammln(xx):
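    # Lanczos series approximation to ln(Gamma(xx)), as in Numerical Recipes.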
cof = [76.18009172947146, -86.50532032941677,
24.01409824083091, -1.231739572450155,
0.1208650973866179e-2, -0.5395239384953e-5]
y = xx
x = xx
tmp = x + 5.5
tmp -= (x + 0.5)*num.log(tmp)
ser = 1.000000000190015
for j in range(6):
y += 1
ser += cof[j]/y
return -tmp + num.log(2.5066282746310005*ser/x)
class BayesianBlocks(object):
"""
Unbinned mode:
>>> bb = BayesianBlocks(arrival_times)
Binned:
>>> bb = BayesianBlocks(bin_content, bin_sizes, start_time)
Point measurements:
>>> bb = BayesianBlocks(time, flux, errors)
Obtaining the piecewise constant light curve:
>>> time, rate = bb.globalOpt(ncp_prior=1)
"""
def __init__(self, *argv):
self.point_mode = False
self.use_ml = True
if len(argv) == 1:
events = list(argv[0])
events.sort()
events = num.array(events)
self.cellContent = num.ones(len(argv[0]))
self.cellSizes = self._generateCells(events)
self.binned = False
else:
try:
self._readPointData(argv)
except TypeError:
self.cellContent = copy.deepcopy(argv[0])
self.cellSizes = copy.deepcopy(argv[1])
self.tstart = argv[2]
self.binned = True
def _readPointData(self, argv):
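        # Point-measurement mode: pad the time axis by one sample at each end so
        # every measurement gets a finite cell whose edges sit at the midpoints.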
x, y, dy = (list(copy.deepcopy(argv[0])),
list(copy.deepcopy(argv[1])),
list(copy.deepcopy(argv[2])))
if len(x) != len(y) or len(y) != len(dy):
raise RuntimeError("Point measurement mode: " +
"input array sizes do not match")
x.insert(0, x[0] - (x[1] - x[0]))
x.append(x[-1] + (x[-1] - x[-2]))
x = num.array(x)
cell_bounds = (x[1:] + x[:-1])/2.
self.tstart = cell_bounds[0]
self.cellSizes = cell_bounds[1:] - cell_bounds[:-1]
self.cellContent = y
self.fluxes = num.array(y)
self.errors = num.array(dy)
self.point_mode = True
def lightCurve(self, ncp_prior=1, use_ml=True):
return self.globalOpt(ncp_prior, use_ml)
def globalOpt(self, ncp_prior=1, use_ml=True):
if self.point_mode:
blockCost = self.blockCost_point
else:
blockCost = self.blockCost
self.use_ml = use_ml
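        # Dynamic programming over change points: opt[n] is the best total block
        # cost for cells 0..n, last[n] the start index of the final block in that
        # optimal partition.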
opt, last = [], []
opt.append(blockCost(0, 0) - ncp_prior)
last.append(0)
npts = len(self.cellContent)
for nn in range(1, npts):
max_opt = blockCost(0, nn) - ncp_prior
jmax = 0
for j in range(1, nn+1):
my_opt = opt[j-1] + blockCost(j, nn) - ncp_prior
if my_opt > max_opt:
max_opt = my_opt
jmax = j
opt.append(max_opt)
last.append(jmax)
changePoints = []
indx = last[-1]
while indx > 0:
changePoints.insert(0, indx)
indx = last[indx-1]
changePoints.insert(0, 0)
changePoints.append(npts)
return self._lightCurve(changePoints)
def _lightCurve(self, changePoints):
xx = []
yy = []
cell_sizes = self.cellSizes
for imin, imax in zip(changePoints[:-1], changePoints[1:]):
try:
xx.extend([self.tstart + sum(cell_sizes[:imin]),
self.tstart + sum(cell_sizes[:imax])])
except IndexError:
xx.extend([self.tstart + imin*cell_sizes,
self.tstart + imax*cell_sizes])
if self.point_mode:
f, sig, weights = self._point_block_data(imin, imax-1)
yval = sum(weights*f)
else:
yval = (sum(self.cellContent[imin:imax])
/sum(cell_sizes[imin:imax]))
yy.extend([yval, yval])
return xx, yy
def _point_block_data(self, imin, imax):
f, sig = self.fluxes[imin:imax+1], self.errors[imin:imax+1]
weights = 1./sig**2/sum(1./sig**2)
return f, sig, weights
def blockCost_point(self, imin, imax):
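        # Block fitness for point measurements: minus half the weighted
        # variance of the fluxes, scaled by the total inverse-variance weight.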
f, sig, weights = self._point_block_data(imin, imax)
sigx2 = sum(weights*f**2) - (sum(weights*f))**2
return -sigx2/2*sum(1./sig**2)
def blockCost(self, imin, imax):
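        # Block fitness for event/binned data: maximum log-likelihood of a
        # constant Poisson rate over the block, N*(ln(N/T) - 1).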
size = self.blockSize(imin, imax)
content = self.blockContent(imin, imax)
if content == 0:
return 0
my_cost = content*(num.log(content/size) - 1)
return my_cost
def blockSize(self, imin, imax):
try:
return sum(self.cellSizes[imin:imax+1])
except IndexError:
return self.cellSizes*(imax - imin)
def blockContent(self, imin, imax):
return sum(self.cellContent[imin:imax+1])
def _generateCells(self, events):
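        # Build one cell per event: cell boundaries are the midpoints between
        # adjacent (sorted) event times, padded at both ends.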
self.tstart = (3*events[0] - events[1])/2.
bounds = ((events[1:] + events[:-1])/2.).tolist()
bounds.insert(0, self.tstart)
bounds.append((3*events[-1] - events[-2])/2.)
bounds = num.array(bounds)
return bounds[1:] - bounds[:-1]
if __name__ == '__main__':
# import hippoplotter as plot
# import distributions as dist
# nsamp = 200
# events = dist.sample(dist.stepFunction(0.5, 0.7, amp=0.7), nsamp)
#
# output = open('events.dat', 'w')
# for event in events:
# output.write("%12.4e\n" % event)
# output.close()
class Histogram(object):
def __init__(self, xmin, xmax, nx):
self.xmin = xmin
self.dx = (xmax - xmin)/float(nx)
self.binContent = num.zeros(nx)
self.binSizes = self.dx*num.ones(nx)
def add(self, xx, wt=1):
indx = int((xx - self.xmin)/self.dx)
self.binContent[indx] += wt
events = [float(x.strip()) for x in open('events.dat', 'r')]
hist = Histogram(0, 1, 50)
for event in events:
hist.add(event)
bb = BayesianBlocks(events)
xx, yy = bb.globalOpt(ncp_prior=1)
bb2 = BayesianBlocks(hist.binContent, hist.binSizes, 0)
xx2, yy2 = bb2.globalOpt(ncp_prior=1)
# plot.histogram(events)
# plot.scatter(xx, yy, oplot=1, pointRep='Line', color='red', autoscale=1)
# plot.scatter(xx2, yy2, oplot=1, pointRep='Line', color='blue')
| 35.171875 | 77 | 0.554568 | [
"BSD-3-Clause"
] | fermi-lat/BayesianBlocks | python/BayesianBlocks_python.py | 6,753 | Python |
#!/usr/bin/env python3
# PYTHON_ARGCOMPLETE_OK
"""Entry point for cwltool."""
import argparse
import copy
import functools
import io
import logging
import os
import signal
import subprocess # nosec
import sys
import time
import urllib
import warnings
from codecs import StreamWriter, getwriter
from collections.abc import MutableMapping, MutableSequence
from typing import (
IO,
Any,
Callable,
Dict,
List,
Mapping,
MutableMapping,
MutableSequence,
Optional,
Sized,
TextIO,
Tuple,
Union,
cast,
)
import argcomplete
import coloredlogs
import pkg_resources # part of setuptools
import ruamel.yaml
from ruamel.yaml.comments import CommentedMap, CommentedSeq
from ruamel.yaml.main import YAML
from schema_salad.exceptions import ValidationException
from schema_salad.ref_resolver import Loader, file_uri, uri_file_path
from schema_salad.sourceline import cmap, strip_dup_lineno
from schema_salad.utils import ContextType, FetcherCallableType, json_dumps, yaml_no_ts
from . import CWL_CONTENT_TYPES, workflow
from .argparser import arg_parser, generate_parser, get_default_args
from .context import LoadingContext, RuntimeContext, getdefault
from .cwlrdf import printdot, printrdf
from .errors import (
ArgumentException,
GraphTargetMissingException,
UnsupportedRequirement,
WorkflowException,
)
from .executors import JobExecutor, MultithreadedJobExecutor, SingleJobExecutor
from .load_tool import (
default_loader,
fetch_document,
jobloaderctx,
load_overrides,
make_tool,
resolve_and_validate_document,
resolve_overrides,
resolve_tool_uri,
)
from .loghandler import _logger, configure_logging, defaultStreamHandler
from .mpi import MpiConfig
from .mutation import MutationManager
from .pack import pack
from .process import (
CWL_IANA,
Process,
add_sizes,
mergedirs,
scandeps,
shortname,
use_custom_schema,
use_standard_schema,
)
from .procgenerator import ProcessGenerator
from .provenance import ResearchObject, WritableBagFile
from .resolver import ga4gh_tool_registries, tool_resolver
from .secrets import SecretStore
from .software_requirements import (
DependenciesConfiguration,
get_container_from_software_requirements,
)
from .stdfsaccess import StdFsAccess
from .subgraph import get_process, get_step, get_subgraph
from .update import ALLUPDATES, UPDATES
from .utils import (
DEFAULT_TMP_PREFIX,
CWLObjectType,
CWLOutputAtomType,
CWLOutputType,
HasReqsHints,
adjustDirObjs,
normalizeFilesDirs,
processes_to_kill,
trim_listing,
versionstring,
visit_class,
)
from .workflow import Workflow
def _terminate_processes() -> None:
"""Kill all spawned processes.
Processes to be killed must be appended to `utils.processes_to_kill`
as they are spawned.
An important caveat: since there's no supported way to kill another
thread in Python, this function cannot stop other threads from
continuing to execute while it kills the processes that they've
spawned. This may occasionally lead to unexpected behaviour.
"""
# It's possible that another thread will spawn a new task while
# we're executing, so it's not safe to use a for loop here.
while processes_to_kill:
process = processes_to_kill.popleft()
if isinstance(process.args, MutableSequence):
args = process.args
else:
args = [process.args]
cidfile = [str(arg).split("=")[1] for arg in args if "--cidfile" in str(arg)]
if cidfile: # Try to be nice
try:
with open(cidfile[0]) as inp_stream:
p = subprocess.Popen( # nosec
["docker", "kill", inp_stream.read()], shell=False # nosec
)
try:
p.wait(timeout=10)
except subprocess.TimeoutExpired:
p.kill()
except FileNotFoundError:
pass
if process.stdin:
process.stdin.close()
try:
process.wait(10)
except subprocess.TimeoutExpired:
pass
process.kill() # Always kill, even if we tried with the cidfile
def _signal_handler(signum: int, _: Any) -> None:
"""Kill all spawned processes and exit.
Note that it's possible for another thread to spawn a process after
all processes have been killed, but before Python exits.
Refer to the docstring for _terminate_processes() for other caveats.
"""
_terminate_processes()
sys.exit(signum)
def generate_example_input(
inptype: Optional[CWLOutputType],
default: Optional[CWLOutputType],
) -> Tuple[Any, str]:
"""Convert a single input schema into an example."""
example = None
comment = ""
defaults = {
"null": "null",
"Any": "null",
"boolean": False,
"int": 0,
"long": 0,
"float": 0.1,
"double": 0.1,
"string": "a_string",
"File": ruamel.yaml.comments.CommentedMap(
[("class", "File"), ("path", "a/file/path")]
),
"Directory": ruamel.yaml.comments.CommentedMap(
[("class", "Directory"), ("path", "a/directory/path")]
),
} # type: CWLObjectType
if isinstance(inptype, MutableSequence):
optional = False
if "null" in inptype:
inptype.remove("null")
optional = True
if len(inptype) == 1:
example, comment = generate_example_input(inptype[0], default)
if optional:
if comment:
comment = f"{comment} (optional)"
else:
comment = "optional"
else:
example = CommentedSeq()
for index, entry in enumerate(inptype):
value, e_comment = generate_example_input(entry, default)
example.append(value)
example.yaml_add_eol_comment(e_comment, index)
if optional:
comment = "optional"
elif isinstance(inptype, Mapping) and "type" in inptype:
if inptype["type"] == "array":
first_item = cast(MutableSequence[CWLObjectType], inptype["items"])[0]
items_len = len(cast(Sized, inptype["items"]))
if items_len == 1 and "type" in first_item and first_item["type"] == "enum":
# array of just an enum then list all the options
example = first_item["symbols"]
if "name" in first_item:
comment = 'array of type "{}".'.format(first_item["name"])
else:
value, comment = generate_example_input(inptype["items"], None)
comment = "array of " + comment
if items_len == 1:
example = [value]
else:
example = value
if default is not None:
example = default
elif inptype["type"] == "enum":
symbols = cast(List[str], inptype["symbols"])
if default is not None:
example = default
elif "default" in inptype:
example = inptype["default"]
elif len(cast(Sized, inptype["symbols"])) == 1:
example = symbols[0]
else:
example = "{}_enum_value".format(inptype.get("name", "valid"))
comment = 'enum; valid values: "{}"'.format('", "'.join(symbols))
elif inptype["type"] == "record":
example = ruamel.yaml.comments.CommentedMap()
if "name" in inptype:
comment = '"{}" record type.'.format(inptype["name"])
else:
comment = "Anonymous record type."
for field in cast(List[CWLObjectType], inptype["fields"]):
value, f_comment = generate_example_input(field["type"], None)
example.insert(0, shortname(cast(str, field["name"])), value, f_comment)
elif "default" in inptype:
example = inptype["default"]
comment = 'default value of type "{}".'.format(inptype["type"])
else:
example = defaults.get(cast(str, inptype["type"]), str(inptype))
comment = 'type "{}".'.format(inptype["type"])
else:
if not default:
example = defaults.get(str(inptype), str(inptype))
comment = f'type "{inptype}"'
else:
example = default
comment = f'default value of type "{inptype}".'
return example, comment
def realize_input_schema(
input_types: MutableSequence[Union[str, CWLObjectType]],
schema_defs: MutableMapping[str, CWLObjectType],
) -> MutableSequence[Union[str, CWLObjectType]]:
"""Replace references to named typed with the actual types."""
for index, entry in enumerate(input_types):
if isinstance(entry, str):
if "#" in entry:
_, input_type_name = entry.split("#")
else:
input_type_name = entry
if input_type_name in schema_defs:
entry = input_types[index] = schema_defs[input_type_name]
if isinstance(entry, MutableMapping):
if isinstance(entry["type"], str) and "#" in entry["type"]:
_, input_type_name = entry["type"].split("#")
if input_type_name in schema_defs:
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[Union[str, CWLObjectType]],
schema_defs[input_type_name],
),
schema_defs,
),
)
if isinstance(entry["type"], MutableSequence):
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[Union[str, CWLObjectType]], entry["type"]),
schema_defs,
),
)
if isinstance(entry["type"], Mapping):
entry["type"] = cast(
CWLOutputAtomType,
realize_input_schema(
[cast(CWLObjectType, entry["type"])], schema_defs
),
)
if entry["type"] == "array":
items = (
entry["items"]
if not isinstance(entry["items"], str)
else [entry["items"]]
)
entry["items"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(MutableSequence[Union[str, CWLObjectType]], items),
schema_defs,
),
)
if entry["type"] == "record":
entry["fields"] = cast(
CWLOutputAtomType,
realize_input_schema(
cast(
MutableSequence[Union[str, CWLObjectType]], entry["fields"]
),
schema_defs,
),
)
return input_types
def generate_input_template(tool: Process) -> CWLObjectType:
"""Generate an example input object for the given CWL process."""
template = ruamel.yaml.comments.CommentedMap()
for inp in cast(
List[MutableMapping[str, str]],
realize_input_schema(tool.tool["inputs"], tool.schemaDefs),
):
name = shortname(inp["id"])
value, comment = generate_example_input(inp["type"], inp.get("default", None))
template.insert(0, name, value, comment)
return template
def load_job_order(
args: argparse.Namespace,
stdin: IO[Any],
fetcher_constructor: Optional[FetcherCallableType],
overrides_list: List[CWLObjectType],
tool_file_uri: str,
) -> Tuple[Optional[CWLObjectType], str, Loader]:
job_order_object = None
job_order_file = None
_jobloaderctx = jobloaderctx.copy()
loader = Loader(_jobloaderctx, fetcher_constructor=fetcher_constructor)
if len(args.job_order) == 1 and args.job_order[0][0] != "-":
job_order_file = args.job_order[0]
elif len(args.job_order) == 1 and args.job_order[0] == "-":
yaml = yaml_no_ts()
job_order_object = yaml.load(stdin)
job_order_object, _ = loader.resolve_all(
job_order_object, file_uri(os.getcwd()) + "/"
)
else:
job_order_file = None
if job_order_object is not None:
input_basedir = args.basedir if args.basedir else os.getcwd()
elif job_order_file is not None:
input_basedir = (
args.basedir
if args.basedir
else os.path.abspath(os.path.dirname(job_order_file))
)
job_order_object, _ = loader.resolve_ref(
job_order_file,
checklinks=False,
content_types=CWL_CONTENT_TYPES,
)
if (
job_order_object is not None
and "http://commonwl.org/cwltool#overrides" in job_order_object
):
ov_uri = file_uri(job_order_file or input_basedir)
overrides_list.extend(
resolve_overrides(job_order_object, ov_uri, tool_file_uri)
)
del job_order_object["http://commonwl.org/cwltool#overrides"]
if job_order_object is None:
input_basedir = args.basedir if args.basedir else os.getcwd()
if job_order_object is not None and not isinstance(
job_order_object, MutableMapping
):
_logger.error(
"CWL input object at %s is not formatted correctly, it should be a "
"JSON/YAML dictionay, not %s.\n"
"Raw input object:\n%s",
job_order_file or "stdin",
type(job_order_object),
job_order_object,
)
sys.exit(1)
return (job_order_object, input_basedir, loader)
def init_job_order(
job_order_object: Optional[CWLObjectType],
args: argparse.Namespace,
process: Process,
loader: Loader,
stdout: Union[TextIO, StreamWriter],
print_input_deps: bool = False,
relative_deps: str = "primary",
make_fs_access: Callable[[str], StdFsAccess] = StdFsAccess,
input_basedir: str = "",
secret_store: Optional[SecretStore] = None,
input_required: bool = True,
runtime_context: Optional[RuntimeContext] = None,
) -> CWLObjectType:
secrets_req, _ = process.get_requirement("http://commonwl.org/cwltool#Secrets")
if job_order_object is None:
namemap = {} # type: Dict[str, str]
records = [] # type: List[str]
toolparser = generate_parser(
argparse.ArgumentParser(prog=args.workflow),
process,
namemap,
records,
input_required,
loader.fetcher.urljoin,
file_uri(os.getcwd()) + "/",
)
if args.tool_help:
toolparser.print_help(cast(IO[str], stdout))
exit(0)
cmd_line = vars(toolparser.parse_args(args.job_order))
for record_name in records:
record = {}
record_items = {
k: v for k, v in cmd_line.items() if k.startswith(record_name)
}
for key, value in record_items.items():
record[key[len(record_name) + 1 :]] = value
del cmd_line[key]
cmd_line[str(record_name)] = record
if "job_order" in cmd_line and cmd_line["job_order"]:
try:
job_order_object = cast(
CWLObjectType,
loader.resolve_ref(cmd_line["job_order"])[0],
)
except Exception:
_logger.exception(
"Failed to resolv job_order: %s", cmd_line["job_order"]
)
exit(1)
else:
job_order_object = {"id": args.workflow}
del cmd_line["job_order"]
job_order_object.update({namemap[k]: v for k, v in cmd_line.items()})
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if _logger.isEnabledFor(logging.DEBUG):
_logger.debug(
"Parsed job order from command line: %s",
json_dumps(job_order_object, indent=4, default=str),
)
for inp in process.tool["inputs"]:
if "default" in inp and (
not job_order_object or shortname(inp["id"]) not in job_order_object
):
if not job_order_object:
job_order_object = {}
job_order_object[shortname(inp["id"])] = inp["default"]
def path_to_loc(p: CWLObjectType) -> None:
if "location" not in p and "path" in p:
p["location"] = p["path"]
del p["path"]
ns = {} # type: ContextType
ns.update(cast(ContextType, job_order_object.get("$namespaces", {})))
ns.update(cast(ContextType, process.metadata.get("$namespaces", {})))
ld = Loader(ns)
def expand_formats(p: CWLObjectType) -> None:
if "format" in p:
p["format"] = ld.expand_url(cast(str, p["format"]), "")
visit_class(job_order_object, ("File", "Directory"), path_to_loc)
visit_class(
job_order_object,
("File",),
functools.partial(add_sizes, make_fs_access(input_basedir)),
)
visit_class(job_order_object, ("File",), expand_formats)
adjustDirObjs(job_order_object, trim_listing)
normalizeFilesDirs(job_order_object)
if print_input_deps:
if not runtime_context:
raise RuntimeError("runtime_context is required for print_input_deps.")
runtime_context.toplevel = True
builder = process._init_job(job_order_object, runtime_context)
builder.loadListing = "no_listing"
builder.bind_input(
process.inputs_record_schema, job_order_object, discover_secondaryFiles=True
)
basedir: Optional[str] = None
uri = cast(str, job_order_object["id"])
if uri == args.workflow:
basedir = os.path.dirname(uri)
uri = ""
printdeps(
job_order_object,
loader,
stdout,
relative_deps,
uri,
basedir=basedir,
nestdirs=False,
)
exit(0)
if secret_store and secrets_req:
secret_store.store(
[shortname(sc) for sc in cast(List[str], secrets_req["secrets"])],
job_order_object,
)
if "cwl:tool" in job_order_object:
del job_order_object["cwl:tool"]
if "id" in job_order_object:
del job_order_object["id"]
return job_order_object
def make_relative(base: str, obj: CWLObjectType) -> None:
"""Relativize the location URI of a File or Directory object."""
uri = cast(str, obj.get("location", obj.get("path")))
if ":" in uri.split("/")[0] and not uri.startswith("file://"):
pass
else:
if uri.startswith("file://"):
uri = uri_file_path(uri)
obj["location"] = os.path.relpath(uri, base)
def printdeps(
obj: CWLObjectType,
document_loader: Loader,
stdout: Union[TextIO, StreamWriter],
relative_deps: str,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> None:
"""Print a JSON representation of the dependencies of the CWL document."""
deps = find_deps(obj, document_loader, uri, basedir=basedir, nestdirs=nestdirs)
if relative_deps == "primary":
base = basedir if basedir else os.path.dirname(uri_file_path(str(uri)))
elif relative_deps == "cwd":
base = os.getcwd()
visit_class(deps, ("File", "Directory"), functools.partial(make_relative, base))
print(json_dumps(deps, indent=4, default=str), file=stdout)
def prov_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
) -> CWLObjectType:
deps = find_deps(obj, document_loader, uri, basedir=basedir)
def remove_non_cwl(deps: CWLObjectType) -> None:
if "secondaryFiles" in deps:
sec_files = cast(List[CWLObjectType], deps["secondaryFiles"])
for index, entry in enumerate(sec_files):
if not ("format" in entry and entry["format"] == CWL_IANA):
del sec_files[index]
else:
remove_non_cwl(entry)
remove_non_cwl(deps)
return deps
def find_deps(
obj: CWLObjectType,
document_loader: Loader,
uri: str,
basedir: Optional[str] = None,
nestdirs: bool = True,
) -> CWLObjectType:
"""Find the dependencies of the CWL document."""
deps = {
"class": "File",
"location": uri,
"format": CWL_IANA,
} # type: CWLObjectType
def loadref(base: str, uri: str) -> Union[CommentedMap, CommentedSeq, str, None]:
return document_loader.fetch(document_loader.fetcher.urljoin(base, uri))
sfs = scandeps(
basedir if basedir else uri,
obj,
{"$import", "run"},
{"$include", "$schemas", "location"},
loadref,
nestdirs=nestdirs,
)
if sfs is not None:
deps["secondaryFiles"] = cast(
MutableSequence[CWLOutputAtomType], mergedirs(sfs)
)
return deps
def print_pack(
loadingContext: LoadingContext,
uri: str,
) -> str:
"""Return a CWL serialization of the CWL document in JSON."""
packed = pack(loadingContext, uri)
if len(cast(Sized, packed["$graph"])) > 1:
return json_dumps(packed, indent=4, default=str)
return json_dumps(
cast(MutableSequence[CWLObjectType], packed["$graph"])[0], indent=4, default=str
)
def supported_cwl_versions(enable_dev: bool) -> List[str]:
# ALLUPDATES and UPDATES are dicts
if enable_dev:
versions = list(ALLUPDATES)
else:
versions = list(UPDATES)
versions.sort()
return versions
def setup_schema(
args: argparse.Namespace, custom_schema_callback: Optional[Callable[[], None]]
) -> None:
if custom_schema_callback is not None:
custom_schema_callback()
elif args.enable_ext:
with pkg_resources.resource_stream(__name__, "extensions.yml") as res:
ext10 = res.read().decode("utf-8")
with pkg_resources.resource_stream(__name__, "extensions-v1.1.yml") as res:
ext11 = res.read().decode("utf-8")
use_custom_schema("v1.0", "http://commonwl.org/cwltool", ext10)
use_custom_schema("v1.1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev1", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev2", "http://commonwl.org/cwltool", ext11)
use_custom_schema("v1.2.0-dev3", "http://commonwl.org/cwltool", ext11)
else:
use_standard_schema("v1.0")
use_standard_schema("v1.1")
use_standard_schema("v1.2")
use_standard_schema("v1.2.0-dev1")
use_standard_schema("v1.2.0-dev2")
use_standard_schema("v1.2.0-dev3")
class ProvLogFormatter(logging.Formatter):
"""Enforce ISO8601 with both T and Z."""
def __init__(self) -> None:
"""Use the default formatter with our custom formatstring."""
super().__init__("[%(asctime)sZ] %(message)s")
def formatTime(
self, record: logging.LogRecord, datefmt: Optional[str] = None
) -> str:
formatted_time = time.strftime(
"%Y-%m-%dT%H:%M:%S", time.gmtime(float(record.created))
)
with_msecs = f"{formatted_time},{record.msecs:03f}"
return with_msecs
ProvOut = Union[io.TextIOWrapper, WritableBagFile]
def setup_provenance(
args: argparse.Namespace,
argsl: List[str],
runtimeContext: RuntimeContext,
) -> Tuple[ProvOut, "logging.StreamHandler[ProvOut]"]:
if not args.compute_checksum:
_logger.error("--provenance incompatible with --no-compute-checksum")
raise ArgumentException()
ro = ResearchObject(
getdefault(runtimeContext.make_fs_access, StdFsAccess)(""),
temp_prefix_ro=args.tmpdir_prefix,
orcid=args.orcid,
full_name=args.cwl_full_name,
)
runtimeContext.research_obj = ro
log_file_io = ro.open_log_file_for_activity(ro.engine_uuid)
prov_log_handler = logging.StreamHandler(log_file_io)
prov_log_handler.setFormatter(ProvLogFormatter())
_logger.addHandler(prov_log_handler)
_logger.debug("[provenance] Logging to %s", log_file_io)
if argsl is not None:
# Log cwltool command line options to provenance file
_logger.info("[cwltool] %s %s", sys.argv[0], " ".join(argsl))
_logger.debug("[cwltool] Arguments: %s", args)
return log_file_io, prov_log_handler
def setup_loadingContext(
loadingContext: Optional[LoadingContext],
runtimeContext: RuntimeContext,
args: argparse.Namespace,
) -> LoadingContext:
"""Prepare a LoadingContext from the given arguments."""
if loadingContext is None:
loadingContext = LoadingContext(vars(args))
loadingContext.singularity = runtimeContext.singularity
loadingContext.podman = runtimeContext.podman
else:
loadingContext = loadingContext.copy()
loadingContext.loader = default_loader(
loadingContext.fetcher_constructor,
enable_dev=args.enable_dev,
doc_cache=args.doc_cache,
)
loadingContext.research_obj = runtimeContext.research_obj
loadingContext.disable_js_validation = args.disable_js_validation or (
not args.do_validate
)
loadingContext.construct_tool_object = getdefault(
loadingContext.construct_tool_object, workflow.default_make_tool
)
loadingContext.resolver = getdefault(loadingContext.resolver, tool_resolver)
if loadingContext.do_update is None:
loadingContext.do_update = not (args.pack or args.print_subgraph)
return loadingContext
def make_template(
tool: Process,
) -> None:
"""Make a template CWL input object for the give Process."""
def my_represent_none(
self: Any, data: Any
) -> Any: # pylint: disable=unused-argument
"""Force clean representation of 'null'."""
return self.represent_scalar("tag:yaml.org,2002:null", "null")
ruamel.yaml.representer.RoundTripRepresenter.add_representer(
type(None), my_represent_none
)
yaml = YAML()
yaml.default_flow_style = False
yaml.indent = 4
yaml.block_seq_indent = 2
yaml.dump(
generate_input_template(tool),
sys.stdout,
)
def inherit_reqshints(tool: Process, parent: Process) -> None:
"""Copy down requirements and hints from ancestors of a given process."""
for parent_req in parent.requirements:
found = False
for tool_req in tool.requirements:
if parent_req["class"] == tool_req["class"]:
found = True
break
if not found:
tool.requirements.append(parent_req)
for parent_hint in parent.hints:
found = False
for tool_req in tool.requirements:
if parent_hint["class"] == tool_req["class"]:
found = True
break
if not found:
for tool_hint in tool.hints:
if parent_hint["class"] == tool_hint["class"]:
found = True
break
if not found:
tool.hints.append(parent_hint)
def choose_target(
args: argparse.Namespace,
tool: Process,
loading_context: LoadingContext,
) -> Optional[Process]:
"""Walk the Workflow, extract the subset matches all the args.targets."""
if loading_context.loader is None:
raise Exception("loading_context.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
extracted = get_subgraph(
[tool.tool["id"] + "/" + r for r in args.target], tool, loading_context
)
else:
extracted = get_subgraph(
[
loading_context.loader.fetcher.urljoin(tool.tool["id"], "#" + r)
for r in args.target
],
tool,
loading_context,
)
else:
_logger.error("Can only use --target on Workflows")
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted["id"]] = extracted
tool = make_tool(extracted["id"], loading_context)
else:
raise Exception("Missing loading_context.loader.idx!")
return tool
def choose_step(
args: argparse.Namespace,
tool: Process,
loading_context: LoadingContext,
) -> Optional[Process]:
"""Walk the given Workflow and extract just args.single_step."""
if loading_context.loader is None:
raise Exception("loading_context.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
step_id = tool.tool["id"] + "/" + args.single_step
else:
step_id = loading_context.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_step
)
extracted = get_step(tool, step_id, loading_context)
else:
_logger.error("Can only use --single-step on Workflows")
return None
if isinstance(loading_context.loader.idx, MutableMapping):
loading_context.loader.idx[extracted["id"]] = cast(
Union[CommentedMap, CommentedSeq, str, None], cmap(extracted)
)
tool = make_tool(extracted["id"], loading_context)
else:
raise Exception("Missing loading_context.loader.idx!")
return tool
def choose_process(
args: argparse.Namespace,
tool: Process,
loadingContext: LoadingContext,
) -> Optional[Process]:
"""Walk the given Workflow and extract just args.single_process."""
if loadingContext.loader is None:
raise Exception("loadingContext.loader cannot be None")
if isinstance(tool, Workflow):
url = urllib.parse.urlparse(tool.tool["id"])
if url.fragment:
step_id = tool.tool["id"] + "/" + args.single_process
else:
step_id = loadingContext.loader.fetcher.urljoin(
tool.tool["id"], "#" + args.single_process
)
extracted, workflow_step = get_process(
tool,
step_id,
loadingContext,
)
else:
_logger.error("Can only use --single-process on Workflows")
return None
if isinstance(loadingContext.loader.idx, MutableMapping):
loadingContext.loader.idx[extracted["id"]] = extracted
new_tool = make_tool(extracted["id"], loadingContext)
else:
raise Exception("Missing loadingContext.loader.idx!")
inherit_reqshints(new_tool, workflow_step)
return new_tool
def check_working_directories(
runtimeContext: RuntimeContext,
) -> Optional[int]:
"""Make any needed working directories."""
for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
if (
getattr(runtimeContext, dirprefix)
and getattr(runtimeContext, dirprefix) != DEFAULT_TMP_PREFIX
):
sl = (
"/"
if getattr(runtimeContext, dirprefix).endswith("/")
or dirprefix == "cachedir"
else ""
)
setattr(
runtimeContext,
dirprefix,
os.path.abspath(getattr(runtimeContext, dirprefix)) + sl,
)
if not os.path.exists(os.path.dirname(getattr(runtimeContext, dirprefix))):
try:
os.makedirs(os.path.dirname(getattr(runtimeContext, dirprefix)))
except Exception:
_logger.exception("Failed to create directory.")
return 1
return None
def print_targets(
tool: Process,
stdout: Union[TextIO, StreamWriter],
loading_context: LoadingContext,
prefix: str = "",
) -> None:
"""Recursively find targets for --subgraph and friends."""
for f in ("outputs", "inputs"):
if tool.tool[f]:
_logger.info("%s %s%s targets:", prefix[:-1], f[0].upper(), f[1:-1])
print(
" "
+ "\n ".join([f"{prefix}{shortname(t['id'])}" for t in tool.tool[f]]),
file=stdout,
)
if "steps" in tool.tool:
loading_context = copy.copy(loading_context)
loading_context.requirements = tool.requirements
loading_context.hints = tool.hints
_logger.info("%s steps targets:", prefix[:-1])
for t in tool.tool["steps"]:
print(f" {prefix}{shortname(t['id'])}", file=stdout)
run: Union[str, Process, Dict[str, Any]] = t["run"]
if isinstance(run, str):
process = make_tool(run, loading_context)
elif isinstance(run, dict):
process = make_tool(cast(CommentedMap, cmap(run)), loading_context)
else:
process = run
print_targets(
process, stdout, loading_context, f"{prefix}{shortname(t['id'])}/"
)
def main(
argsl: Optional[List[str]] = None,
args: Optional[argparse.Namespace] = None,
job_order_object: Optional[CWLObjectType] = None,
stdin: IO[Any] = sys.stdin,
stdout: Optional[Union[TextIO, StreamWriter]] = None,
stderr: IO[Any] = sys.stderr,
versionfunc: Callable[[], str] = versionstring,
logger_handler: Optional[logging.Handler] = None,
custom_schema_callback: Optional[Callable[[], None]] = None,
executor: Optional[JobExecutor] = None,
loadingContext: Optional[LoadingContext] = None,
runtimeContext: Optional[RuntimeContext] = None,
input_required: bool = True,
) -> int:
if not stdout: # force UTF-8 even if the console is configured differently
if hasattr(sys.stdout, "encoding") and sys.stdout.encoding.upper() not in (
"UTF-8",
"UTF8",
):
if hasattr(sys.stdout, "detach"):
stdout = io.TextIOWrapper(sys.stdout.buffer, encoding="utf-8")
else:
stdout = getwriter("utf-8")(sys.stdout) # type: ignore
else:
stdout = sys.stdout
_logger.removeHandler(defaultStreamHandler)
stderr_handler = logger_handler
if stderr_handler is not None:
_logger.addHandler(stderr_handler)
else:
coloredlogs.install(logger=_logger, stream=stderr)
stderr_handler = _logger.handlers[-1]
workflowobj = None
prov_log_handler: Optional[logging.StreamHandler[ProvOut]] = None
try:
if args is None:
if argsl is None:
argsl = sys.argv[1:]
addl = [] # type: List[str]
if "CWLTOOL_OPTIONS" in os.environ:
addl = os.environ["CWLTOOL_OPTIONS"].split(" ")
parser = arg_parser()
argcomplete.autocomplete(parser)
args = parser.parse_args(addl + argsl)
if args.record_container_id:
if not args.cidfile_dir:
args.cidfile_dir = os.getcwd()
del args.record_container_id
if runtimeContext is None:
runtimeContext = RuntimeContext(vars(args))
else:
runtimeContext = runtimeContext.copy()
# If caller parsed its own arguments, it may not include every
# cwltool option, so fill in defaults to avoid crashing when
# dereferencing them in args.
for key, val in get_default_args().items():
if not hasattr(args, key):
setattr(args, key, val)
configure_logging(
stderr_handler,
args.quiet,
runtimeContext.debug,
args.enable_color,
args.timestamps,
)
if args.version:
print(versionfunc(), file=stdout)
return 0
_logger.info(versionfunc())
if args.print_supported_versions:
print("\n".join(supported_cwl_versions(args.enable_dev)), file=stdout)
return 0
if not args.workflow:
if os.path.isfile("CWLFile"):
args.workflow = "CWLFile"
else:
_logger.error("CWL document required, no input file was provided")
parser.print_help(stderr)
return 1
if args.ga4gh_tool_registries:
ga4gh_tool_registries[:] = args.ga4gh_tool_registries
if not args.enable_ga4gh_tool_registry:
del ga4gh_tool_registries[:]
if args.mpi_config_file is not None:
runtimeContext.mpi_config = MpiConfig.load(args.mpi_config_file)
setup_schema(args, custom_schema_callback)
prov_log_stream: Optional[Union[io.TextIOWrapper, WritableBagFile]] = None
if args.provenance:
if argsl is None:
raise Exception("argsl cannot be None")
try:
prov_log_stream, prov_log_handler = setup_provenance(
args, argsl, runtimeContext
)
except ArgumentException:
return 1
loadingContext = setup_loadingContext(loadingContext, runtimeContext, args)
uri, tool_file_uri = resolve_tool_uri(
args.workflow,
resolver=loadingContext.resolver,
fetcher_constructor=loadingContext.fetcher_constructor,
)
try_again_msg = (
"" if args.debug else ", try again with --debug for more information"
)
try:
job_order_object, input_basedir, jobloader = load_job_order(
args,
stdin,
loadingContext.fetcher_constructor,
loadingContext.overrides_list,
tool_file_uri,
)
if args.overrides:
loadingContext.overrides_list.extend(
load_overrides(
file_uri(os.path.abspath(args.overrides)), tool_file_uri
)
)
loadingContext, workflowobj, uri = fetch_document(uri, loadingContext)
if args.print_deps and loadingContext.loader:
printdeps(
workflowobj, loadingContext.loader, stdout, args.relative_deps, uri
)
return 0
loadingContext, uri = resolve_and_validate_document(
loadingContext,
workflowobj,
uri,
preprocess_only=(args.print_pre or args.pack),
skip_schemas=args.skip_schemas,
)
if loadingContext.loader is None:
raise Exception("Impossible code path.")
processobj, metadata = loadingContext.loader.resolve_ref(uri)
processobj = cast(Union[CommentedMap, CommentedSeq], processobj)
if args.pack:
print(print_pack(loadingContext, uri), file=stdout)
return 0
if args.provenance and runtimeContext.research_obj:
                # Can't really be combined with args.pack at the same time
runtimeContext.research_obj.packed_workflow(
print_pack(loadingContext, uri)
)
if args.print_pre:
print(
json_dumps(
processobj,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
try:
tool = make_tool(uri, loadingContext)
except GraphTargetMissingException as main_missing_exc:
if args.validate:
                    _logger.warning(
"File contains $graph of multiple objects and no default "
"process (#main). Validating all objects:"
)
for entry in workflowobj["$graph"]:
entry_id = entry["id"]
make_tool(entry_id, loadingContext)
print(f"{entry_id} is valid CWL.", file=stdout)
else:
raise main_missing_exc
if args.make_template:
make_template(tool)
return 0
if args.validate:
print(f"{args.workflow} is valid CWL.", file=stdout)
return 0
if args.print_rdf:
print(
printrdf(tool, loadingContext.loader.ctx, args.rdf_serializer),
file=stdout,
)
return 0
if args.print_dot:
printdot(tool, loadingContext.loader.ctx, stdout)
return 0
if args.print_targets:
print_targets(tool, stdout, loadingContext)
return 0
if args.target:
ctool = choose_target(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_step:
ctool = choose_step(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
elif args.single_process:
ctool = choose_process(args, tool, loadingContext)
if ctool is None:
return 1
else:
tool = ctool
if args.print_subgraph:
if "name" in tool.tool:
del tool.tool["name"]
print(
json_dumps(
tool.tool,
indent=4,
sort_keys=True,
separators=(",", ": "),
default=str,
),
file=stdout,
)
return 0
except (ValidationException) as exc:
_logger.error(
"Tool definition failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except (RuntimeError, WorkflowException) as exc:
_logger.error(
"Tool definition failed initialization:\n%s",
str(exc),
exc_info=args.debug,
)
return 1
except Exception as exc:
_logger.error(
"I'm sorry, I couldn't load this CWL file%s.\nThe error was: %s",
try_again_msg,
str(exc) if not args.debug else "",
exc_info=args.debug,
)
return 1
if isinstance(tool, int):
return tool
# If on MacOS platform, TMPDIR must be set to be under one of the
# shared volumes in Docker for Mac
# More info: https://dockstore.org/docs/faq
if sys.platform == "darwin":
default_mac_path = "/private/tmp/docker_tmp"
if runtimeContext.tmp_outdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmp_outdir_prefix = default_mac_path
if runtimeContext.tmpdir_prefix == DEFAULT_TMP_PREFIX:
runtimeContext.tmpdir_prefix = default_mac_path
if check_working_directories(runtimeContext) is not None:
return 1
if args.cachedir:
if args.move_outputs == "move":
runtimeContext.move_outputs = "copy"
runtimeContext.tmp_outdir_prefix = args.cachedir
runtimeContext.log_dir = args.log_dir
runtimeContext.secret_store = getdefault(
runtimeContext.secret_store, SecretStore()
)
runtimeContext.make_fs_access = getdefault(
runtimeContext.make_fs_access, StdFsAccess
)
if not executor:
if args.parallel:
temp_executor = MultithreadedJobExecutor()
runtimeContext.select_resources = temp_executor.select_resources
real_executor = temp_executor # type: JobExecutor
else:
real_executor = SingleJobExecutor()
else:
real_executor = executor
try:
runtimeContext.basedir = input_basedir
if isinstance(tool, ProcessGenerator):
tfjob_order = {} # type: CWLObjectType
if loadingContext.jobdefaults:
tfjob_order.update(loadingContext.jobdefaults)
if job_order_object:
tfjob_order.update(job_order_object)
tfout, tfstatus = real_executor(
tool.embedded_tool, tfjob_order, runtimeContext
)
if not tfout or tfstatus != "success":
raise WorkflowException(
"ProcessGenerator failed to generate workflow"
)
tool, job_order_object = tool.result(tfjob_order, tfout, runtimeContext)
if not job_order_object:
job_order_object = None
try:
initialized_job_order_object = init_job_order(
job_order_object,
args,
tool,
jobloader,
stdout,
print_input_deps=args.print_input_deps,
relative_deps=args.relative_deps,
make_fs_access=runtimeContext.make_fs_access,
input_basedir=input_basedir,
secret_store=runtimeContext.secret_store,
input_required=input_required,
runtime_context=runtimeContext,
)
except SystemExit as err:
return err.code
del args.workflow
del args.job_order
conf_file = getattr(
args, "beta_dependency_resolvers_configuration", None
) # str
use_conda_dependencies = getattr(
args, "beta_conda_dependencies", None
) # str
if conf_file or use_conda_dependencies:
runtimeContext.job_script_provider = DependenciesConfiguration(args)
else:
runtimeContext.find_default_container = functools.partial(
find_default_container,
default_container=runtimeContext.default_container,
use_biocontainers=args.beta_use_biocontainers,
)
(out, status) = real_executor(
tool, initialized_job_order_object, runtimeContext, logger=_logger
)
if out is not None:
if runtimeContext.research_obj is not None:
runtimeContext.research_obj.create_job(out, True)
def remove_at_id(doc: CWLObjectType) -> None:
for key in list(doc.keys()):
if key == "@id":
del doc[key]
else:
value = doc[key]
if isinstance(value, MutableMapping):
remove_at_id(value)
elif isinstance(value, MutableSequence):
for entry in value:
if isinstance(entry, MutableMapping):
remove_at_id(entry)
remove_at_id(out)
visit_class(
out,
("File",),
functools.partial(add_sizes, runtimeContext.make_fs_access("")),
)
def loc_to_path(obj: CWLObjectType) -> None:
for field in ("path", "nameext", "nameroot", "dirname"):
if field in obj:
del obj[field]
if cast(str, obj["location"]).startswith("file://"):
obj["path"] = uri_file_path(cast(str, obj["location"]))
visit_class(out, ("File", "Directory"), loc_to_path)
# Unsetting the Generation from final output object
visit_class(out, ("File",), MutationManager().unset_generation)
print(
json_dumps(out, indent=4, ensure_ascii=False, default=str),
file=stdout,
)
if hasattr(stdout, "flush"):
stdout.flush()
if status != "success":
_logger.warning("Final process status is %s", status)
return 1
_logger.info("Final process status is %s", status)
return 0
except (ValidationException) as exc:
_logger.error(
"Input object failed validation:\n%s", str(exc), exc_info=args.debug
)
return 1
except UnsupportedRequirement as exc:
_logger.error(
"Workflow or tool uses unsupported feature:\n%s",
str(exc),
exc_info=args.debug,
)
return 33
except WorkflowException as exc:
_logger.error(
"Workflow error%s:\n%s",
try_again_msg,
strip_dup_lineno(str(exc)),
exc_info=args.debug,
)
return 1
except Exception as exc: # pylint: disable=broad-except
_logger.error(
"Unhandled error%s:\n %s",
try_again_msg,
str(exc),
exc_info=args.debug,
)
return 1
finally:
if (
args
and runtimeContext
and runtimeContext.research_obj
and workflowobj
and loadingContext
):
research_obj = runtimeContext.research_obj
if loadingContext.loader is not None:
research_obj.generate_snapshot(
prov_deps(workflowobj, loadingContext.loader, uri)
)
else:
_logger.warning(
"Unable to generate provenance snapshot "
" due to missing loadingContext.loader."
)
if prov_log_handler is not None:
# Stop logging so we won't half-log adding ourself to RO
_logger.debug(
"[provenance] Closing provenance log file %s", prov_log_handler
)
_logger.removeHandler(prov_log_handler)
# Ensure last log lines are written out
prov_log_handler.flush()
# Underlying WritableBagFile will add the tagfile to the manifest
if prov_log_stream:
prov_log_stream.close()
# Why not use prov_log_handler.stream ? That is not part of the
# public API for logging.StreamHandler
prov_log_handler.close()
research_obj.close(args.provenance)
_logger.removeHandler(stderr_handler)
_logger.addHandler(defaultStreamHandler)
def find_default_container(
builder: HasReqsHints,
default_container: Optional[str] = None,
use_biocontainers: Optional[bool] = None,
) -> Optional[str]:
"""Find a container."""
if not default_container and use_biocontainers:
default_container = get_container_from_software_requirements(
use_biocontainers, builder
)
return default_container
def windows_check() -> None:
"""See if we are running on MS Windows and warn about the lack of support."""
if os.name == "nt":
warnings.warn(
"The CWL reference runner (cwltool) no longer supports running "
"CWL workflows natively on MS Windows as its previous MS Windows "
"support was incomplete and untested. Instead, please see "
"https://pypi.org/project/cwltool/#ms-windows-users "
"for instructions on running cwltool via "
"Windows Subsystem for Linux 2 (WSL2). If don't need to execute "
"CWL documents, then you can ignore this warning, but please "
"consider migrating to https://pypi.org/project/cwl-utils/ "
"for your CWL document processing needs."
)
def run(*args: Any, **kwargs: Any) -> None:
"""Run cwltool."""
windows_check()
signal.signal(signal.SIGTERM, _signal_handler)
try:
sys.exit(main(*args, **kwargs))
finally:
_terminate_processes()
if __name__ == "__main__":
run(sys.argv[1:])
| 35.69103 | 88 | 0.574365 | [
"Apache-2.0"
] | suecharo/cwltool | cwltool/main.py | 53,715 | Python |
from scrapy import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from UW_Madison.items import UwMadisonItem
class Madison_courses( CrawlSpider ):
name = 'uw_madison5'
allowed_domains = ['wisc.edu']
start_urls = [
"http://guide.wisc.edu/courses/",
]
rules = (
        Rule( LinkExtractor( allow = ( r'http://guide.wisc.edu/courses/' )),
callback = 'parse_httpbin',
follow = True
),
)
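    # Every course-guide page matched by the rule above is passed to
    # parse_httpbin, which scrapes the course code, title, credits,
    # description and prerequisites from the course blocks.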
'''
def start_requests( self ):
for u in self.start_urls:
yield scrapy.Request( u, callback = self.parse_httpbin,
errback = self.errback_httpbin,
dont_filter = True )
'''
def parse_httpbin( self, response ):
#self.logger.info("Got successful response {}".format(response.url) )
items = UwMadisonItem()
course = response.css('span.courseblockcode::text').extract()
#course = response.css('span.courseblockcode::text').extract_first()
title = response.css('div.sc_sccoursedescs > div.courseblock > p.courseblocktitle > strong::text').extract()
#title = response.css('div.sc_sccoursedescs > div.courseblock > p.courseblocktitle > strong::text').extract_first()
unit = response.css('.courseblockcredits::text').extract()
#unit = response.css('.courseblockcredits::text').extract_first()
description = response.css('.courseblockdesc::text').extract()
#description = response.css('.courseblockdesc::text').extract_first()
prerequisites = response.css('p.courseblockextra.noindent.clearfix > span.cbextra-data > .bubblelink::text').extract()
#prerequisites = response.css('p.courseblockextra.noindent.clearfix > span.cbextra-data > .bubblelink::text').extract_first()
items['course'] = course
items['title'] = title
items['unit'] = unit
items['description'] = description
items['prerequisites'] = prerequisites
yield items
'''
def errback_httpbin( self, failure):
# log all failures
self.logger.error(repr(failure))
# in case you want to do something special for some errors,
# you may need the failure's type:
if failure.check(HttpError):
# These exception come from HttpError spider middleware
# you can get the non-200 response
response = failure.value.response
self.logger.error("HttpError on %s", response.url )
elif failure.check(DNSLookupError):
# This is the original request
request = failure.request
self.logger.error('DNSLookupError on %s', request.url )
        elif failure.check(TimeoutError, TCPTimedOutError ):
request = failure.request
self.logger.error('TimeoutError on %s', request.url)
'''
| 29.2 | 133 | 0.638177 | [
"MIT"
] | Nouldine/CrawlerSystems | UW_Madison/UW_Madison/spiders/uw_madison_courses3.py | 3,358 | Python |
import unittest
import pandas as pd
import numpy as np
from resources.backend_scripts.is_data import DataEnsurer
from resources.backend_scripts.load_data import LoaderCreator
from resources.backend_scripts.split_data import SplitterReturner
class MyTestCase(unittest.TestCase):
_loader_creator = LoaderCreator()
def test_single_split_columns_match(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
expected_y_len, expected_x_len = df.shape # true prediction and data len with shape method
# shape returns original column value. x doesn't have prediction column, so it must be original value - 1
expected_x_len -= 1
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
x, y = splitter.split_x_y_from_df(df)
# do the values match in both x and y dataframes
self.assertEqual(len(x.columns), expected_x_len)
self.assertEqual(len(y), expected_y_len)
def test_single_split_returns_a_tuple(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y
data = splitter.split_x_y_from_df(df)
result = DataEnsurer.validate_py_data(data, tuple)
self.assertTrue(result)
def test_single_split_x_and_y_is_a_dataframe_and_numpy_array(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y
data = splitter.split_x_y_from_df(df)
results = [isinstance(data[0], pd.DataFrame), isinstance(data[-1], np.ndarray)]
# are all outputs True?
for r in results:
self.assertTrue(r)
def test_train_test_split_size_zero_is_wrong(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# use of splitterReturner with a NormalSplitter implementation
with self.assertRaises(ValueError):
splitter = SplitterReturner()
# split dataframe into x and y, then use train_and_test_split
x, y = splitter.split_x_y_from_df(df)
            _ = splitter.train_and_test_split(x, y, 0.0)  # a size of 0.0 is not a valid split value
def test_train_test_split_size_less_than_zero_is_wrong(self):
# load diabetes.csv from disk
folder_name = "datasets"
file_name = "diabetes.csv"
test_full_path = ".\\..\\" + folder_name + "\\" + file_name
csv_type = self._loader_creator.create_loader(test_full_path, "CSV")
df = csv_type.get_file_transformed()
# this should raise a ValueError because size = -0.5 is not a valid number
with self.assertRaises(ValueError):
# use of splitterReturner with a NormalSplitter implementation
splitter = SplitterReturner()
# split dataframe into x and y, then use train_and_test_split
x, y = splitter.split_x_y_from_df(df)
_ = splitter.train_and_test_split(x, y, -0.5) # -0.5 is not a valid value
def test_split_into_x_and_y_is_not_a_valid_dataframe(self):
# dummy dictionary
temp_dict = {'x': [i for i in range(200)]}
# transform dictionary to dataframe
df = pd.DataFrame.from_dict(temp_dict)
        # this should raise a TypeError because the dataframe doesn't meet column requirements
with self.assertRaises(TypeError):
splitter = SplitterReturner()
_, _ = splitter.split_x_y_from_df(df)
if __name__ == '__main__':
unittest.main()
| 45.368932 | 117 | 0.673015 | [
"BSD-3-Clause"
] | Noczio/VoorSpelling | AppVoor/tests/split_data_test.py | 4,673 | Python |
import sys
import os
import json
import re
import numpy as np
import pandas as pd
from Bio import motifs
from Bio import SeqIO
from Bio.Alphabet import IUPAC
from io import StringIO
def build_mfmd_command(inputFilePath, motiflen, prb):
if not os.path.exists('/kb/module/work/tmp/mfmd'):
os.mkdir('/kb/module/work/tmp/mfmd')
outputFilePath = '/kb/module/work/tmp/mfmd/mfmd_out/mfmd_output.txt'
    command = 'java -jar mfmd.jar ' + inputFilePath + ' ' + str(motiflen) + ' ' + str(prb) + ' > ' + outputFilePath
return command
def run_mfmd_command(command):
os.system(command)
def parse_mfmd_output(path):
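    # Parse mfmd_out.txt from the output directory: read the PPM matrix into
    # per-base frequency lists, collect each reported motif occurrence
    # (sequence id, start, end, matched subsequence) and assemble everything
    # into a MotifSet-style dictionary; the IUPAC consensus is derived with
    # Biopython's motifs module.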
pfmList = []
pfmDict={}
outputFileList = []
pfmMatrix=False
seqflag=False
motifList={}
motifDict={}
locList=[]
alphabet=['A','C','G','T']
motifSet=[]
motifList['Condition']='temp'
motifList['SequenceSet_ref']='123'
background={}
background['A']=0.0
background['C']=0.0
background['G']=0.0
background['T']=0.0
motifDict['Motif_Locations'] = []
motifDict['PWM'] = []
motifDict['PFM'] = []
motiflen=0
a=[]
c=[]
g=[]
t=[]
pwmList=[]
pwmDict={}
rowList = []
rowDict={}
for filename in os.listdir(path):
outputFileList.append(path + '/' + filename)
if(filename=="mfmd_out.txt"):
outputFilePath=path+'/'+filename
mfmdFile = open(outputFilePath,'r')
for line in mfmdFile:
if(re.search("PPM Matrix",line)):
pfmMatrix=True
if(pfmMatrix):
if(line[0].isdigit()):
line=line.strip()
out=line.split()
pfmList.append(out)
a.append(out[0])
c.append(out[1])
g.append(out[2])
t.append(out[3])
rowList = []
rowList.append(('A',float(out[0])))
rowList.append(('C',float(out[1])))
rowList.append(('G',float(out[2])))
rowList.append(('T',float(out[3])))
rowDict['A']=float(out[0])
rowDict['C']=float(out[1])
rowDict['G']=float(out[2])
rowDict['T']=float(out[3])
if(re.search("PSSM Matrix",line)):
pfmMatrix=False
if(re.search("Sequences",line)):
seqflag=True
if(seqflag==True):
line=line.strip()
if(re.search('\*',line)):
seqflag=False
if((line) and not (line.startswith("Seq")) and not (line.startswith("*"))):
line=line.rstrip()
seq=line.split()
seqid=seq[0]
seq_start=int(seq[1])
seq_end=int(seq_start)+int(motiflen)
sequence=seq[2]
orientation='+'
locDict={}
locDict['sequence_id']=seqid;
locDict['start']=seq_start;
locDict['end']=seq_end;
locDict['sequence']=sequence;
locDict['orientation']=orientation;
motifDict['Motif_Locations'].append(locDict)
if(re.search("Width",line)):
arr=line.split(" ")
motiflen=arr[1].split("\t")[0]
a=[float(x) for x in a]
c=[float(x) for x in c]
g=[float(x) for x in g]
t=[float(x) for x in t]
pwmDict['A']=a
pwmDict['C']=c
pwmDict['G']=g
pwmDict['T']=t
pfmDict['A']=[]
pfmDict['C']=[]
pfmDict['G']=[]
pfmDict['T']=[]
motifStr = '>test\n'
motifStr += 'A ' + str(a).replace(',','') + '\n'
motifStr += 'C ' + str(c).replace(',','') + '\n'
motifStr += 'G ' + str(g).replace(',','') + '\n'
motifStr += 'T ' + str(t).replace(',','') + '\n'
handle = StringIO(motifStr)
BioMotif = motifs.read(handle, 'jaspar')
motifDict['PWM']=pwmDict
motifDict['PFM']=pfmDict
motifDict['Iupac_sequence']=str(BioMotif.degenerate_consensus)
motifSet.append(motifDict) #keep in loop for multiple motifs
motifList['Motifs']=motifSet
motifList['Background']=background
motifList['Alphabet']=alphabet
return motifList
output=parse_mfmd_output("/home/manish/Desktop/Data/motifs/man4ish_guptamfmd/test_local/workdir/tmp/mfmd_out")
jsondata = json.dumps(output)
with open('ReportMotif.json', 'w') as outfile:
json.dump(output, outfile)
print(jsondata)
#print(output)
| 35.93038 | 199 | 0.42223 | [
"MIT"
] | kbasecollaborations/MotifFinderalgoMFMD | lib/MotifFindermfmd/Utils/obsolete/parser.py | 5,677 | Python |
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datasets of the Open Images Challange 2019.
https://storage.googleapis.com/openimages/web/challenge2019.html
"""
import abc
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_DESCRIPTION = """\
Open Images is a collaborative release of ~9 million images annotated with
image-level labels, object bounding boxes, object segmentation masks, and
visual relationships. This uniquely large and diverse dataset is designed to
spur state of the art advances in analyzing and understanding images.
"""
_DESCRIPTION_DETECTION = """\
This contains the data from the Object Detection track of the competition.
The goal in this track is to predict a tight bounding box around all object
instances of 500 classes.
The images are annotated with positive image-level labels, indicating certain
object classes are present, and with negative image-level labels, indicating
certain classes are absent. In the competition, all other unannotated classes
are excluded from evaluation in that image. For each positive image-level label
in an image, every instance of that object class in the image was annotated.
"""
_URL = "https://storage.googleapis.com/openimages/web/challenge2019.html"
_GOOGLE_URL_PREFIX = (
"https://storage.googleapis.com/openimages/challenge_2019/challenge-2019-")
_FIGURE_EIGHT_BASE_URL = (
"https://datasets.figure-eight.com/figure_eight_datasets/open-images/")
_TRAIN_IMAGES_URLS = [
"{}zip_files_copy/train_{:02d}.zip".format(_FIGURE_EIGHT_BASE_URL, n)
for n in range(9)
]
_VALIDATION_IMAGES_URL = (
_FIGURE_EIGHT_BASE_URL + "zip_files_copy/validation.zip")
_TEST_IMAGES_URL = _FIGURE_EIGHT_BASE_URL + "test_challenge.zip"
_NUM_CLASSES = 500
class OpenImagesChallenge2019Config(tfds.core.BuilderConfig):
"""BuilderConfig for OpenImages Challenge 2019 datasets."""
def __init__(self, target_pixels=None, **kwargs):
kwargs.setdefault("version", tfds.core.Version("1.0.0"))
super(OpenImagesChallenge2019Config, self).__init__(**kwargs)
self._target_pixels = target_pixels
@property
def target_pixels(self):
return self._target_pixels
class _OpenImagesChallenge2019(tfds.core.BeamBasedBuilder):
"""Base abstract class for Open Images Challenge 2019 datasets."""
BUILDER_CONFIGS = [
OpenImagesChallenge2019Config(
name="200k",
description="Images have at most 200,000 pixels, at 72 JPEG quality.",
target_pixels=200000),
OpenImagesChallenge2019Config(
name="300k",
description="Images have at most 300,000 pixels, at 72 JPEG quality.",
target_pixels=300000),
]
@property
@abc.abstractmethod
def annotation_urls(self):
"""Dictionary passed to the DownloadManager to download annotations.
An example:
{"test_annotations": "https://somewebpage.com/data/openimages/test.txt"}
Returns:
A dictionary whose values are the URLs to download the annotations of the
dataset, and the keys are some short string identifying the URL.
This dictionary is passed to the DownloadManager.
"""
def _split_generators(self, dl_manager):
urls = {
"train_images": _TRAIN_IMAGES_URLS,
"test_images": [_TEST_IMAGES_URL],
"validation_images": [_VALIDATION_IMAGES_URL]
}
urls.update(self.annotation_urls)
paths = dl_manager.download(urls)
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs=dict(paths=paths, split="train"),
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs=dict(paths=paths, split="test"),
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs=dict(paths=paths, split="validation"),
),
]
class OpenImagesChallenge2019Detection(_OpenImagesChallenge2019):
"""Dataset for the Detection Track."""
@property
def annotation_urls(self):
return {
"train_image_label":
_GOOGLE_URL_PREFIX + "train-detection-human-imagelabels.csv",
"train_boxes":
_GOOGLE_URL_PREFIX + "train-detection-bbox.csv",
"validation_image_label":
_GOOGLE_URL_PREFIX + "validation-detection-human-imagelabels.csv",
"validation_boxes":
_GOOGLE_URL_PREFIX + "validation-detection-bbox.csv",
"classes":
_GOOGLE_URL_PREFIX + "classes-description-500.csv",
"hierarchy":
_GOOGLE_URL_PREFIX + "label500-hierarchy.json",
}
def _info(self):
label = tfds.features.ClassLabel(num_classes=_NUM_CLASSES)
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION + "\n\n" + _DESCRIPTION_DETECTION,
features=tfds.features.FeaturesDict({
"id":
tfds.features.Text(),
"image":
tfds.features.Image(),
# A sequence of image-level labels.
"objects":
tfds.features.Sequence({
"label": label,
# All labels have been verified by humans.
# - If confidence is 1.0, the object IS in the image.
# - If confidence is 0.0, the object is NOT in the image.
"confidence": tf.float32,
"source": tfds.features.Text(),
}),
# A sequence of bounding boxes.
"bobjects":
tfds.features.Sequence({
"label": label,
"bbox": tfds.features.BBoxFeature(),
"is_group_of": tf.bool,
}),
}),
homepage=_URL,
)
def _build_pcollection(self, pipeline, paths, split):
beam = tfds.core.lazy_imports.apache_beam
# We need to lazily import the oi_beam module (and thus, violate the
# "imports only at the top" rule), so that beam is only required during the
# generation of the dataset, and not to use the dataset itself (once built).
# See: https://www.tensorflow.org/datasets/beam_datasets.
import tensorflow_datasets.object_detection.open_images_challenge2019_beam as oi_beam # pylint: disable=g-import-not-at-top,import-outside-toplevel
if split == "test":
# Note: annotations are not available for the test split.
generate_examples_kwargs = dict(
image_labels_filepath=None,
box_labels_filepath=None,
hierarchy_filepath=None,
classes_filepath=None,
)
else:
generate_examples_kwargs = dict(
image_labels_filepath=paths["{}_image_label".format(split)],
box_labels_filepath=paths["{}_boxes".format(split)],
hierarchy_filepath=paths["hierarchy"],
classes_filepath=paths["classes"],
)
# Fill class names after the data has been downloaded.
oi_beam.fill_class_names_in_tfds_info(paths["classes"], self.info.features)
return (pipeline | beam.Create(paths["{}_images".format(split)])
| "ReadImages" >> beam.ParDo(oi_beam.ReadZipFn())
| "ProcessImages" >> beam.ParDo(
oi_beam.ProcessImageFn(
target_pixels=self.builder_config.target_pixels,
jpeg_quality=72))
| "GenerateExamples" >> beam.ParDo(
oi_beam.CreateDetectionExampleFn(**generate_examples_kwargs)))
| 38.533654 | 152 | 0.675234 | [
"Apache-2.0"
] | 8bitmp3/datasets | tensorflow_datasets/object_detection/open_images_challenge2019.py | 8,015 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.28 on 2021-10-02 20:31
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('user', '0017_user_email_subscribed'),
]
operations = [
migrations.CreateModel(
name='LoginRequest',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.CharField(max_length=30)),
('latestRequest', models.DateTimeField()),
('login_tries', models.IntegerField(default=1)),
],
),
]
| 28.28 | 114 | 0.591231 | [
"MIT"
] | EncryptEx/myhackupc | user/migrations/0018_loginrequest.py | 707 | Python |
import logging
from typing import Text, List, Optional, Dict, Any
from rasa.nlu.config import RasaNLUModelConfig
from rasa.nlu.training_data import TrainingData, Message
from rasa.nlu.components import Component
from rasa.nlu.constants import (
RESPONSE_ATTRIBUTE,
TEXT_ATTRIBUTE,
CLS_TOKEN,
TOKENS_NAMES,
MESSAGE_ATTRIBUTES,
INTENT_ATTRIBUTE,
)
logger = logging.getLogger(__name__)
class Token(object):
def __init__(
self,
text: Text,
start: int,
data: Optional[Dict[Text, Any]] = None,
lemma: Optional[Text] = None,
end: Optional[int] = None,
) -> None:
self.start = start
self.text = text
self.end = start + len(text)
self.data = data if data else {}
self.lemma = lemma or text
self.end = end if end else start + len(text)
def set(self, prop: Text, info: Any) -> None:
self.data[prop] = info
def get(self, prop: Text, default: Optional[Any] = None) -> Any:
return self.data.get(prop, default)
def __eq__(self, other):
if not isinstance(other, Token):
return NotImplemented
return (self.start, self.end, self.text, self.lemma) == (
other.start,
other.end,
other.text,
other.lemma,
)
def __lt__(self, other):
if not isinstance(other, Token):
return NotImplemented
return (self.start, self.end, self.text, self.lemma) < (
other.start,
other.end,
other.text,
other.lemma,
)
class Tokenizer(Component):
def __init__(self, component_config: Dict[Text, Any] = None) -> None:
"""Construct a new tokenizer using the WhitespaceTokenizer framework."""
super().__init__(component_config)
# flag to check whether to split intents
self.intent_tokenization_flag = self.component_config.get(
"intent_tokenization_flag", False
)
# split symbol for intents
self.intent_split_symbol = self.component_config.get("intent_split_symbol", "_")
def tokenize(self, message: Message, attribute: Text) -> List[Token]:
"""Tokenizes the text of the provided attribute of the incoming message."""
raise NotImplementedError
def train(
self,
training_data: TrainingData,
config: Optional[RasaNLUModelConfig] = None,
**kwargs: Any,
) -> None:
"""Tokenize all training data."""
for example in training_data.training_examples:
for attribute in MESSAGE_ATTRIBUTES:
if example.get(attribute) is not None:
if attribute == INTENT_ATTRIBUTE:
tokens = self._split_intent(example)
else:
tokens = self.tokenize(example, attribute)
tokens = self.add_cls_token(tokens, attribute)
example.set(TOKENS_NAMES[attribute], tokens)
def process(self, message: Message, **kwargs: Any) -> None:
"""Tokenize the incoming message."""
tokens = self.tokenize(message, TEXT_ATTRIBUTE)
tokens = self.add_cls_token(tokens, TEXT_ATTRIBUTE)
message.set(TOKENS_NAMES[TEXT_ATTRIBUTE], tokens)
def _split_intent(self, message: Message):
text = message.get(INTENT_ATTRIBUTE)
words = (
text.split(self.intent_split_symbol)
if self.intent_tokenization_flag
else [text]
)
return self._convert_words_to_tokens(words, text)
@staticmethod
def _convert_words_to_tokens(words: List[Text], text: Text) -> List[Token]:
running_offset = 0
tokens = []
for word in words:
word_offset = text.index(word, running_offset)
word_len = len(word)
running_offset = word_offset + word_len
tokens.append(Token(word, word_offset))
return tokens
@staticmethod
def add_cls_token(tokens: List[Token], attribute: Text) -> List[Token]:
if attribute in [RESPONSE_ATTRIBUTE, TEXT_ATTRIBUTE] and tokens:
# +1 to have a space between the last token and the __cls__ token
idx = tokens[-1].end + 1
tokens.append(Token(CLS_TOKEN, idx))
return tokens
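# Illustrative note (not part of the original module): for the text
# "hello world", _convert_words_to_tokens(["hello", "world"], "hello world")
# should yield Token("hello", 0) with end=5 and Token("world", 6) with end=11;
# add_cls_token() for a TEXT attribute then appends a CLS token at offset 12,
# i.e. one position past the end of the last token.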
| 31.644928 | 88 | 0.608198 | [
"Apache-2.0"
] | Ali-vohra/final_project | rasa/nlu/tokenizers/tokenizer.py | 4,367 | Python |
from importlib import import_module
from rest_framework import status
from rest_framework.response import Response
from rest_framework.viewsets import GenericViewSet
class HookViewSet(GenericViewSet):
def post(self, request, *args, **kwargs):
data = request.data
action = data['action']
event = request.META.get('HTTP_X_GITHUB_EVENT', None)
if not event:
return Response({'result': False}, status=status.HTTP_200_OK)
if 'installation' in event:
event = 'installation'
try:
dirname = __name__.split('viewsets')[0]
module = import_module(f'{dirname}{event}.api')
result = getattr(module, f'hook_{action}')(data)
except ImportError:
result = False
return Response({'result': result}, status.HTTP_200_OK)
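# Illustrative example (assuming this module lives in a package named "hook",
# as the file path suggests): a webhook with X-GitHub-Event "issues" and
# action "opened" resolves to the module "hook.issues.api" and calls its
# hook_opened(data) function; if that module cannot be imported, the view
# simply responds with {'result': False}.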
| 33.8 | 73 | 0.648521 | [
"MIT"
] | PythonBenin/sphinx-bot | hook/viewsets.py | 845 | Python |
import os
imagedir = "/Users/titlis/cogsci/projects/stanford/projects/overinformativeness/experiments/5_norming_object_typicality/images"
for t in os.listdir(imagedir):
if not t.startswith("."):
for i in os.listdir(imagedir+"/"+t):
if not i.startswith("."):
print "{"
print "\"item\": \""+i[0:-4]+"\","
print "\"objecttype\": \""+t+"\""
print "}," | 30.916667 | 127 | 0.638814 | [
"MIT"
] | thegricean/overinformativeness | experiments/5_norming_object_typicality_phrasing1/results/scripts/makeItemList.py | 371 | Python |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-04 21:24
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('registration', '0013_eventresult_scoresubmittedby'),
]
operations = [
migrations.AddField(
model_name='eventresult',
name='timeStamp',
field=models.DateTimeField(default=django.utils.timezone.now, editable=False),
),
]
| 24.590909 | 90 | 0.661738 | [
"MIT"
] | arpanpathak/college-fest-management | registration/migrations/0014_eventresult_timestamp.py | 541 | Python |
# Copyright 2019, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Internal dispatcher for training loops."""
import contextlib
import os.path
import pprint
import time
from typing import Any, Callable, Dict, List, Optional
from absl import logging
import tensorflow as tf
import tensorflow_federated as tff
class IterativeProcessCompatibilityError(TypeError):
pass
def create_if_not_exists(path):
try:
tf.io.gfile.makedirs(path)
except tf.errors.OpError:
logging.info('Skipping creation of directory [%s], already exists', path)
def _setup_outputs(root_output_dir,
experiment_name,
rounds_per_profile=0):
"""Set up directories for experiment loops, write hyperparameters to disk."""
if not experiment_name:
raise ValueError('experiment_name must be specified.')
create_if_not_exists(root_output_dir)
checkpoint_dir = os.path.join(root_output_dir, 'checkpoints', experiment_name)
create_if_not_exists(checkpoint_dir)
checkpoint_mngr = tff.simulation.FileCheckpointManager(checkpoint_dir)
results_dir = os.path.join(root_output_dir, 'results', experiment_name)
create_if_not_exists(results_dir)
csv_file = os.path.join(results_dir, 'experiment.metrics.csv')
metrics_mngr = tff.simulation.CSVMetricsManager(csv_file)
summary_logdir = os.path.join(root_output_dir, 'logdir', experiment_name)
tb_mngr = tff.simulation.TensorBoardManager(summary_dir=summary_logdir)
logging.info('Writing...')
logging.info(' checkpoints to: %s', checkpoint_dir)
logging.info(' metrics csv to: %s', metrics_mngr.metrics_filename)
logging.info(' summaries to: %s', summary_logdir)
@contextlib.contextmanager
def profiler(round_num):
if (rounds_per_profile > 0 and round_num % rounds_per_profile == 0):
with tf.profiler.experimental.Profile(summary_logdir):
yield
else:
yield
return checkpoint_mngr, metrics_mngr, tb_mngr, profiler
def _write_metrics(metrics_mngr, tb_mngr, metrics, round_num):
"""Atomic metrics writer which inlines logic from MetricsHook class."""
if not isinstance(metrics, dict):
raise TypeError('metrics should be type `dict`.')
if not isinstance(round_num, int):
raise TypeError('round_num should be type `int`.')
logging.info('Metrics at round {:d}:\n{!s}'.format(round_num,
pprint.pformat(metrics)))
metrics_mngr.save_metrics(metrics, round_num)
tb_mngr.save_metrics(metrics, round_num)
def _compute_numpy_l2_difference(model, previous_model):
squared_norms = tf.nest.map_structure(lambda x, y: tf.linalg.norm(x - y)**2,
model, previous_model)
l2_total_tensor = tf.reduce_sum(tf.nest.flatten(squared_norms))**0.5
return l2_total_tensor.numpy()
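# Illustrative note: this computes sqrt(sum_i ||x_i - y_i||^2) over the model's
# tensors, i.e. the global L2 norm of the weight delta. For example, two
# tensors whose individual difference norms are 3 and 4 give sqrt(9 + 16) = 5.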
def _check_iterative_process_compatibility(iterative_process):
"""Checks the compatibility of an iterative process with the training loop."""
error_message = (
'The iterative_process argument must be of '
'type `tff.templates.IterativeProcess`, and must have an '
'attribute `get_model_weights`, which must be a `tff.Computation`. This '
'computation must accept as input the state of `iterative_process`, and '
'its output must be a nested structure of tensors matching the input '
'shape of `validation_fn`.')
compatibility_error = IterativeProcessCompatibilityError(error_message)
if not isinstance(iterative_process, tff.templates.IterativeProcess):
raise compatibility_error
if not hasattr(iterative_process, 'get_model_weights'):
raise compatibility_error
elif not callable(iterative_process.get_model_weights):
raise compatibility_error
get_model_weights_fn = iterative_process.get_model_weights
if not isinstance(get_model_weights_fn, tff.Computation):
raise compatibility_error
input_type = get_model_weights_fn.type_signature.parameter
server_state_type = iterative_process.state_type.member
server_state_type.is_assignable_from(input_type)
# TODO(b/174268978): Once we enforce federated evaluations, we can check
# compatibility with `validation_fn` without actually running the function.
def run(iterative_process: tff.templates.IterativeProcess,
client_datasets_fn: Callable[[int], List[tf.data.Dataset]],
validation_fn: Callable[[Any, int], Dict[str, float]],
total_rounds: int,
experiment_name: str,
test_fn: Optional[Callable[[Any], Dict[str, float]]] = None,
root_output_dir: Optional[str] = '/tmp/fed_opt',
rounds_per_eval: Optional[int] = 1,
rounds_per_checkpoint: Optional[int] = 50,
rounds_per_profile: Optional[int] = 0):
"""Runs federated training for a given `tff.templates.IterativeProcess`.
We assume that the iterative process has the following functional type
signatures:
* `initialize`: `( -> S@SERVER)` where `S` represents the server state.
* `next`: `<S@SERVER, {B*}@CLIENTS> -> <S@SERVER, T@SERVER>` where `S`
represents the server state, `{B*}` represents the client datasets,
and `T` represents a python `Mapping` object.
The iterative process must also have a callable attribute `get_model_weights`
that takes as input the state of the iterative process, and returns a
`tff.learning.ModelWeights` object.
Args:
iterative_process: A `tff.templates.IterativeProcess` instance to run.
client_datasets_fn: Function accepting an integer argument (the round
number) and returning a list of client datasets to use as federated data
for that round.
validation_fn: A callable accepting a `tff.learning.ModelWeights` and the
current round number, and returning a dict of evaluation metrics. Used to
compute validation metrics throughout the training process.
total_rounds: The number of federated training rounds to perform.
experiment_name: The name of the experiment being run. This will be appended
to the `root_output_dir` for purposes of writing outputs.
test_fn: An optional callable accepting a `tff.learning.ModelWeights` and
returning a dict of test set metrics. Used to compute test metrics at the
end of the training process.
root_output_dir: The name of the root output directory for writing
experiment outputs.
rounds_per_eval: How often to compute validation metrics.
rounds_per_checkpoint: How often to checkpoint the iterative process state.
If you expect the job to restart frequently, this should be small. If no
interruptions are expected, this can be made larger.
rounds_per_profile: Experimental setting. If set to a value greater than 0,
this dictates how often a TensorFlow profiler is run.
Returns:
The final `state` of the iterative process after training.
"""
_check_iterative_process_compatibility(iterative_process)
if not callable(client_datasets_fn):
raise TypeError('client_datasets_fn should be callable.')
if not callable(validation_fn):
raise TypeError('validation_fn should be callable.')
if test_fn is not None and not callable(test_fn):
raise TypeError('test_fn should be callable.')
logging.info('Starting iterative_process training loop...')
initial_state = iterative_process.initialize()
checkpoint_mngr, metrics_mngr, tb_mngr, profiler = _setup_outputs(
root_output_dir, experiment_name, rounds_per_profile)
logging.info('Asking checkpoint manager to load checkpoint.')
state, round_num = checkpoint_mngr.load_latest_checkpoint(initial_state)
if state is None:
logging.info('Initializing experiment from scratch.')
state = initial_state
round_num = 0
else:
logging.info('Restarted from checkpoint round %d', round_num)
round_num += 1 # Increment to avoid overwriting current checkpoint
metrics_mngr.clear_metrics(round_num)
current_model = iterative_process.get_model_weights(state)
loop_start_time = time.time()
loop_start_round = round_num
while round_num < total_rounds:
data_prep_start_time = time.time()
federated_train_data = client_datasets_fn(round_num)
train_metrics = {
'prepare_datasets_secs': time.time() - data_prep_start_time
}
training_start_time = time.time()
prev_model = current_model
# TODO(b/145604851): This try/except is used to circumvent ambiguous TF
# errors during training, and should be removed once the root cause is
# determined (and possibly fixed).
try:
with profiler(round_num):
state, round_metrics = iterative_process.next(state,
federated_train_data)
except (tf.errors.FailedPreconditionError, tf.errors.NotFoundError,
tf.errors.InternalError) as e:
logging.warning('Caught %s exception while running round %d:\n\t%s',
type(e), round_num, e)
continue # restart the loop without incrementing the round number
current_model = iterative_process.get_model_weights(state)
train_metrics['training_secs'] = time.time() - training_start_time
train_metrics['model_delta_l2_norm'] = _compute_numpy_l2_difference(
current_model, prev_model)
train_metrics['client_drift'] = state.client_drift
train_metrics.update(round_metrics)
loop_time = time.time() - loop_start_time
loop_rounds = (round_num - loop_start_round + 1)
logging.info('Round {:2d}, {:.2f}s per round in average.'.format(
round_num, loop_time / loop_rounds))
if (round_num % rounds_per_checkpoint == 0 or
round_num == total_rounds - 1):
save_checkpoint_start_time = time.time()
checkpoint_mngr.save_checkpoint(state, round_num)
train_metrics['save_checkpoint_secs'] = (
time.time() - save_checkpoint_start_time)
metrics = {'train': train_metrics}
if round_num % rounds_per_eval == 0:
# Compute validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(current_model, round_num)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
_write_metrics(metrics_mngr, tb_mngr, metrics, round_num)
round_num += 1
# Final metrics evaluation once the training has completed
metrics = {}
# Validation metrics
evaluate_start_time = time.time()
validation_metrics = validation_fn(current_model, round_num)
validation_metrics['evaluate_secs'] = time.time() - evaluate_start_time
metrics['eval'] = validation_metrics
# Test set metrics
if test_fn:
test_start_time = time.time()
test_metrics = test_fn(current_model)
test_metrics['evaluate_secs'] = time.time() - test_start_time
metrics['test'] = test_metrics
_write_metrics(metrics_mngr, tb_mngr, metrics, total_rounds)
return state
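# Minimal usage sketch (illustrative only; every name below is a placeholder,
# not part of this module):
#
# def client_datasets_fn(round_num):
#   return [dataset_for_client(c) for c in sample_clients(round_num)]
#
# def validation_fn(model_weights, round_num):
#   return {'accuracy': evaluate(model_weights)}
#
# final_state = run(iterative_process, client_datasets_fn, validation_fn,
#                   total_rounds=100, experiment_name='my_experiment')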
| 41.086643 | 80 | 0.734645 | [
"Apache-2.0"
] | houcharlie/federated | utils/training_loop.py | 11,381 | Python |
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nba_stats.settings.base")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| 23.363636 | 78 | 0.774319 | [
"MIT"
] | sparbz/nba-stats | nba_stats/manage.py | 257 | Python |
def setup_inp(inp):
"""Convert list of strings into list of lists, with glves/goblins replaced by tuples"""
grid = []
for rowI,row in enumerate(inp.split("\n")):
grid.append([x for x in row])
for colI,col in enumerate(row):
if col in ["G","E"]:
#Replace enemies with tuples so we can track them - (character_type, hit_points, moved_already_bool)
char_tup = (col, 200, False)
grid[rowI][colI] = char_tup
return grid
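# Illustrative example: for an input row "#G.E#", setup_inp produces
# ['#', ('G', 200, False), '.', ('E', 200, False), '#'] for that row, i.e.
# every goblin/elf cell becomes a (character_type, hit_points, moved_bool)
# tuple starting at 200 hit points.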
def print_board(inp):
for row in inp:
extra = []
print_row = [] #In case we append hitpoints
for char in row:
if isinstance(char,tuple):
print_row.append(char[0])
extra.append(str(char[1]))
else:
print_row.append(char)
print("".join(print_row)," ", " ".join(extra))
def move_character(inp, from_row, from_col, to_row, to_col, char):
"""Move character on grid, and increment the i value so we can tell we already moved it"""
inp[from_row][from_col] = "."
inp[to_row][to_col] = (char[0],char[1],True)
return inp
def attack(inp, row, col, enemy, damage=3):
"""
Attack weakest adjacent enemy, if one is there
If multiple weakest enemies, attack in reading order
Return the modified board, and a boolean indicating whether anyone died
"""
if not adjacent_enemy(inp, row, col, enemy):
return inp, False
#Create a dict of {coordinates: hp} for each adjacent enemy
enemies = {}
for coords in [(row-1,col), (row+1,col), (row,col-1), (row,col+1)]:
if inp[coords[0]][coords[1]][0] == enemy:
#enemy is a tuple, (char_type, hp, already_moved_bool)
enemies[coords] = inp[coords[0]][coords[1]][1]
#Filter to only the enemies with minimum hp
min_hp = min(enemies.values())
enemies = [x for x in enemies if enemies[x]==min_hp]
#Now we have a list of coordinates, we can sort to get reading order, then take the first to get our enemy
enemies.sort()
coords = enemies[0]
enemy = inp[coords[0]][coords[1]]
enemy_pts = enemy[1] - damage
enemy_tup = (enemy[0], enemy_pts, enemy[2])
#Check for killed
if enemy_pts <= 0:
inp[coords[0]][coords[1]] = "."
return inp, True
else:
inp[coords[0]][coords[1]] = enemy_tup
return inp, False
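# Illustrative tie-break example: if the adjacent enemies at (2, 3) and (3, 1)
# both share the minimum HP, sorting their coordinates picks (2, 3) first
# (reading order: by row, then by column), so that unit takes the damage.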
def adjacent_enemy(inp, rowI, colI, enemy):
"""Check for enemy in adjacent square"""
if any(x[0]==enemy for x in [inp[rowI+1][colI], inp[rowI-1][colI], inp[rowI][colI+1], inp[rowI][colI-1]]):
return True
return False
def get_best_move(best_moves):
"""
Takes a list of tuples of
(first_move, number_of_moves, tile_coordinates), which might look like -
((12, 22), 8, (17, 25))
((12, 22), 8, (18, 24))
((12, 22), 8, (19, 21))
((13, 21), 6, (19, 21))
((13, 23), 6, (17, 25))
((13, 23), 6, (18, 24))
((14, 22), 6, (17, 25))
((14, 22), 6, (18, 24))
((14, 22), 6, (19, 21))
And filters/sorts them to satisfy all the conditions
"""
if not best_moves:
return None
#First condition - fewest number of moves away
min_steps = min([x[1] for x in best_moves])
best_moves = [x for x in best_moves if x[1]==min_steps]
#Second condition - if tie, choose the first tile in reading order
best_moves.sort(key = lambda x:x[2])
best_moves = [x for x in best_moves if x[2]==best_moves[0][2]]
#Third condition - if tie, take the first step in reading order
best_moves.sort(key = lambda x:x[0])
best_moves = [x for x in best_moves if x[0]==best_moves[0][0]]
return best_moves[0][0]
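# Worked example using the docstring's sample input: the fewest-steps filter
# keeps only the 6-step entries, the reading-order tile filter keeps the
# entries targeting (17, 25), and the reading-order first-step filter then
# returns (13, 23).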
def count_characters(inp):
seen = {"G":0,"E":0}
for row in inp:
for col in row:
if col[0] in ["G","E"]:
seen[col[0]]+=1
return seen
def bfs_move(inp, rowI, colI, hero, enemy):
"""
Perform a breadth first search for each adjacent tile
Although not the most efficient, the approach is still fast and makes it
easy to sort in such a way that satisfies all the conditions
"""
#If an enemy is located adjacent to our current location - no move!
if adjacent_enemy(inp, rowI, colI, enemy):
return None
first_moves = [(rowI+1,colI),(rowI-1,colI),(rowI,colI-1),(rowI,colI+1)]
#Filter down to valid first moves - must be a '.' there
first_moves = [x for x in first_moves if inp[x[0]][x[1]]=="."]
#Keep the list of tuples nearest tiles we've found, in format -
#(first_move, number_of_moves, tile_coordinates)
#At the end we'll need to use all these values to find the proper move
best_moves = []
for move in first_moves:
r,c = move
#We might immediately have an adjacent enemy and not need to search further
if adjacent_enemy(inp, r, c, enemy):
best_moves.append((move, 1, move))
continue
#We'll need to keep track of two things -
#seen_coordinates - the tiles we've already visited
#stack - the "new" tiles accessible from the current furthest points
seen_coordinates = {(rowI,colI),(r,c)}
stack = [(r+1,c),(r-1,c),(r,c-1),(r,c+1)]
#Filter stack to only include "." tiles, which we haven't already seen
stack = [x for x in stack if inp[x[0]][x[1]]=="." and (x[0],x[1]) not in seen_coordinates]
#Now do the search -
i=1 #Already have moved one tile at this point
run = True
while run:
i+=1
#Keep track of the new tiles here
new_stack = []
#Loop through and look for new tiles to add
for tile in stack:
if tile in seen_coordinates:
continue
seen_coordinates.add(tile)
r,c = tile
if adjacent_enemy(inp, r, c, enemy):
best_moves.append((move,i,(r,c)))
#We want to complete this iteration to find all other reachable tiles at the same distance
run = False
continue
#Add all newly accessible tiles to stack
new_tiles = [(r+1,c),(r-1,c),(r,c-1),(r,c+1)]
new_stack += [x for x in new_tiles if inp[x[0]][x[1]]=="." and (x[0],x[1]) not in seen_coordinates]
stack = list(set(new_stack))
#We might also need to end at this point if we have no more newly accessible tiles
if not stack:
run = False
#Take our list of the best_moves from each starting point that we generated, and find the one move we'll take
return get_best_move(best_moves)
def score_game(inp, rounds):
pts = 0
for rowI,row in enumerate(inp):
for colI,col in enumerate(row):
if col[0] in ["G","E"]:
pts+=col[1]
return rounds*pts
def reset_moved_bools(inp):
"""Reset the third value in our character tuples, which tracks whether they've moved in a round"""
for rowI,row in enumerate(inp):
for colI,col in enumerate(row):
if col[0] in ["G","E"]:
char_tup = (col[0],col[1],False)
inp[rowI][colI] = char_tup
return inp
t0 = """#######
#.G...#
#...EG#
#.#.#G#
#..G#E#
#.....#
#######"""
t1 = """#######
#G..#E#
#E#E.E#
#G.##.#
#...#E#
#...E.#
#######"""
t2 = """#######
#E..EG#
#.#G.E#
#E.##E#
#G..#.#
#..E#.#
#######"""
t3 = """#######
#E.G#.#
#.#G..#
#G.#.G#
#G..#.#
#...E.#
#######"""
t4 = """#######
#.E...#
#.#..G#
#.###.#
#E#G#G#
#...#G#
#######"""
t5 = """#########
#G......#
#.E.#...#
#..##..G#
#...##..#
#...#...#
#.G...G.#
#.....G.#
#########"""
def problem1(inp, print_=False):
grid = setup_inp(inp)
rounds = 0
while True:
#Count the current number of each character type
#We can use this to determine if the game has ended in the middle or end of a round
counts = count_characters(grid)
seen = {}
for rowI,row in enumerate(grid):
for colI,col in enumerate(row):
char = grid[rowI][colI]
if isinstance(char, tuple):
#Indicates we already moved it this round
if char[2]:
continue
r,c = rowI,colI #Keep track of our current coordinates in case we move
hero = char[0]
enemy = "G" if hero=="E" else "E"
counts[hero]-=1
move_to = bfs_move(grid, rowI, colI, hero, enemy)
if move_to:
r,c = move_to #Need to update our current coordinates for the impending attack
grid = move_character(grid, rowI, colI, r, c, char)
grid, death = attack(grid, r, c, enemy)
if death:
#Check to see if it's over - all of one side dead
current_counts = count_characters(grid)
game_over = any(x==0 for x in current_counts.values())
#If game is over, we need to see if the round is complete or not
if game_over:
#Means we ended midround
if counts[hero]>0:
final_score = score_game(grid, rounds)
#Otherwise round is complete- add 1 to rounds when calculating
else:
rounds+=1
final_score = score_game(grid, rounds)
if print_:
print("GAME ENDED",rounds)
print_board(grid)
return final_score
#Reset the variable that tracks whether a character has moved in a round
grid = reset_moved_bools(grid)
rounds += 1
if print_:
print(rounds)
print_board(grid)
def problem2_loop(inp, damage_dict, print_=False):
grid = setup_inp(inp)
rounds = 0
while True:
#Count the current number of each character type
#We can use this to determine if the game has ended in the middle or end of a round
counts = count_characters(grid)
seen = {}
for rowI,row in enumerate(grid):
for colI,col in enumerate(row):
char = grid[rowI][colI]
if isinstance(char, tuple):
#Indicates we already moved it this round
if char[2]:
continue
r,c = rowI,colI #Keep track of our current coordinates in case we move
hero = char[0]
enemy = "G" if hero=="E" else "E"
counts[hero]-=1
move_to = bfs_move(grid, rowI, colI, hero, enemy)
if move_to:
r,c = move_to #Need to update our current coordinates for the impending attack
grid = move_character(grid, rowI, colI, r, c, char)
damage = damage_dict[hero]
grid, death = attack(grid, r, c, enemy, damage)
if death and enemy=="E":
#FAILED
return False
#If goblin death, same logic as before
elif death:
#Check to see if it's over - all of one side dead
current_counts = count_characters(grid)
game_over = any(x==0 for x in current_counts.values())
#If game is over, we need to see if the round is complete or not
if game_over:
#Means we ended midround
if counts[hero]>0:
final_score = score_game(grid, rounds)
#Otherwise round is complete- add 1 to rounds when calculating
else:
rounds+=1
final_score = score_game(grid, rounds)
if print_:
print("GAME ENDED",rounds)
print_board(grid)
return final_score
#Reset the variable that tracks whether a character has moved in a round
grid = reset_moved_bools(grid)
rounds += 1
if print_:
print(rounds)
print_board(grid)
def problem2(inp, print_=False):
score = False
damage_dict = {"G":3, "E":3}
while not score:
damage_dict["E"] += 1
print("Elf power", damage_dict["E"])
score = problem2_loop(inp, damage_dict, print_)
return score
if __name__=="__main__":
with open("input15.txt","r") as f:
data = f.read().strip()
for row in data.split("\n"):
print(row)
assert problem1(t0)==27730
assert problem1(t1)==36334
assert problem1(t2)==39514
assert problem1(t3)==27755
assert problem1(t4)==28944
assert problem1(t5)==18740
print(problem1(data))
print(problem2(data)) | 33.359223 | 117 | 0.511641 | [
"MIT"
] | mark-inderhees/aoc | 2018/15/helpme.py | 13,744 | Python |
#!/usr/bin/env python3
import math
import csv
import itertools
from pprint import pprint
import func
INPUTFILE = './task04.input'
def main():
accept = 0
with open(INPUTFILE, mode='r') as csvfile:
reader = csv.reader(csvfile, delimiter=" ")
lines = list(reader)
for line in lines:
reject_line = False
for word in line:
if line.count(word) > 1:
reject_line = True
break
if not reject_line:
accept = accept + 1
print("file {} has {} lines".format(INPUTFILE, len(lines)))
print("we accept {} of them".format(accept))
if __name__ == '__main__':
main()
| 20.5 | 67 | 0.533875 | [
"MIT"
] | mboehn/aoc2017 | task04_a.py | 738 | Python |
# Date 3/10/2020
# __Author__ : AdityaLata
# __Package__ : Python 3
# __GitHub__ : https://www.github.com/adityalata
from Python.DataStructure.TreeDS.Node import Node
# A utility function to check if 'c' is an operator
def isOperator(c):
if c == '+' or c == '-' or c == '*' or c == '/' or c == '^':
return True
else:
return False
# Returns root of constructed tree for given postfix expression string
def getExpressionTree(postfix):
stack = []
# Traverse through every character of input expression
for char in postfix:
# for space separated postfix
if char == " ":
continue
# if operand, simply push into stack
elif not isOperator(char):
t = Node(char)
stack.append(t)
# Operator
else:
# Pop two top nodes
t = Node(char)
t1 = stack.pop()
t2 = stack.pop()
# make them children
t.right = t1
t.left = t2
# Add this subexpression to stack
stack.append(t)
# Only element will be the root of expression tree
t = stack.pop()
return t
# Returns value evaluated from given root of valid(full binary tree) expression tree
def evaluateExpressionTree(rootNode):
# empty tree
if rootNode is None:
return 0
# leaf node
if rootNode.left is None and rootNode.right is None:
return int(rootNode.value)
# evaluate left tree
leftSubtreeValue = evaluateExpressionTree(rootNode.left)
# evaluate right tree
rightSubtreeValue = evaluateExpressionTree(rootNode.right)
# check which operation to apply on non leaf node
if rootNode.value == '+':
return leftSubtreeValue + rightSubtreeValue
elif rootNode.value == '-':
return leftSubtreeValue - rightSubtreeValue
elif rootNode.value == '*':
return leftSubtreeValue * rightSubtreeValue
elif rootNode.value == '^':
return leftSubtreeValue ** rightSubtreeValue
elif rootNode.value == '/':
return leftSubtreeValue / rightSubtreeValue
| 25.258824 | 84 | 0.615277 | [
"MIT"
] | adityalata/AlgorithmsAndDataStructure | Python/DataStructure/TreeDS/ExpressionTree.py | 2,147 | Python |
# Copyright (c) 2019 by LatentAI Inc.
# All rights reserved.
# This file is part of the LEIP(tm) SDK,
# and is released under the "LatentAI Commercial Software License".
# Please see the LICENSE file that should have been included as part of
# this package.
#
# @file tf_inference.py
#
# @author Videet Parekh
#
# @date Wed 16 Dec 20
#
# @brief TF inference engine designed with the same interface as leip_inference for parallel comparison
# from time import time
# import tensorflow as tf
import glob
import os
import logging
import utils.common_utils as utils
import argparse
import numpy as np
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
# tf.debugging.set_log_device_placement(True)
class TFModel():
def __init__(self, base_path, context, config):
self.context = context
self.config = config
self.load(base_path)
def load(self, base):
h5_path = glob.glob(os.path.join(base, '*.h5'))[0]
self.model = utils.load_keras_model(h5_path)
def infer(self, data):
# Here's how you may measure runtime speed
# start = time()
output_data = self.model.predict(data)
# end = time()
pred = {'label': output_data}
return pred
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_path', type=str, default=None, required=True, help='Path to model directory.')
parser.add_argument('--test_path', type=str, default=None, required=True, help='Path to output test file')
parser.add_argument('--class_names', type=str, default=None, required=True, help='Path to class names list.')
parser.add_argument('--data_type', type=str, default="float32", required=False, help='Data Type.')
parser.add_argument('--preprocessor', type=str, default="none", required=False, help='Preprocessor function')
parser.add_argument('--inference_context', type=str, default="none", required=False, help='cpu/gpu/cuda.')
parser.add_argument('--loglevel', type=str, default="WARNING", required=False, help='Logging verbosity.')
args = parser.parse_args()
base = args.input_path
test_path = args.test_path
class_names = args.class_names
data_type = args.data_type
preprocessor = args.preprocessor
context = args.inference_context
loglevel = args.loglevel
# Set Logger Parameters
logging.basicConfig(level=utils.get_numeric_loglevel(loglevel))
# Get class_names for model
with open(class_names) as f:
synset = f.readlines()
config = utils.load_json(os.path.join(base, 'model_schema.json'))
config['input_shapes'] = utils.parse_input_shapes(config['input_shapes'])
# Load dataset and collect preprocessor function
data_index = utils.load_index(test_path)
preprocessor = utils.collect_preprocessor(preprocessor)
# Create model object for inference
model = TFModel(base, context, config)
acc = 0
# Loop over data and call infer()
for data in data_index:
# Load and preprocess image
img = utils.collect_image(data[0], data_type, preprocessor, config['input_shapes'])
# Infer
pred = model.infer(img)
pred_label = np.argmax(pred['label'])
acc += 1 if pred_label == data[1] else 0
print(acc*100/len(data_index))
| 33.673267 | 113 | 0.69362 | [
"Apache-2.0"
] | videetparekh/latentai-sdk-examples | inference/tf_inference.py | 3,401 | Python |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A collection of tools for creating test patterns for evaluation.
"""
import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
from colour.colorimetry import CMFS, ILLUMINANTS
from colour.models import XYZ_to_xy, xy_to_XYZ, XYZ_to_RGB, RGB_to_XYZ
from colour.models import xy_to_xyY, xyY_to_XYZ, Lab_to_XYZ
from colour.models import BT709_COLOURSPACE
from colour.utilities import normalise_maximum
from colour import models
from colour import RGB_COLOURSPACES, COLOURCHECKERS
from scipy.spatial import Delaunay
from scipy.ndimage.filters import convolve
import math
import transfer_functions as tf
CMFS_NAME = 'CIE 1931 2 Degree Standard Observer'
D65_WHITE = ILLUMINANTS[CMFS_NAME]['D65']
YCBCR_CHECK_MARKER = [0, 0, 0]
UNIVERSAL_COLOR_LIST = ["#F6AA00", "#FFF100", "#03AF7A",
"#005AFF", "#4DC4FF", "#804000"]
def preview_image(img, order='rgb', over_disp=False):
if order == 'rgb':
cv2.imshow('preview', img[:, :, ::-1])
elif order == 'bgr':
cv2.imshow('preview', img)
elif order == 'mono':
cv2.imshow('preview', img)
else:
raise ValueError("order parameter is invalid")
if over_disp:
cv2.resizeWindow('preview', )
cv2.waitKey(0)
cv2.destroyAllWindows()
def equal_devision(length, div_num):
"""
# Overview
Divide `length` into `div_num` parts.
If the division leaves a remainder, spread it out using error diffusion.
"""
base = length / div_num
ret_array = [base for x in range(div_num)]
# Reconcile the fractional parts using error diffusion
# -------------------------------------------
diff = 0
for idx in range(div_num):
diff += math.modf(ret_array[idx])[0]
if diff >= 1.0:
diff -= 1.0
ret_array[idx] = int(math.floor(ret_array[idx]) + 1)
else:
ret_array[idx] = int(math.floor(ret_array[idx]))
# Handle the case where the last element misses its +1 due to floating point error
# -------------------------------------------
diff = length - sum(ret_array)
if diff != 0:
ret_array[-1] += diff
# Final sanity check
# -------------------------------------------
if length != sum(ret_array):
raise ValueError("the output of equal_division() is abnormal.")
return ret_array
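# Illustrative example: equal_devision(10, 4) returns [2, 3, 2, 3]; the 0.5
# remainders accumulate and every second element is rounded up, so the parts
# always sum back to the original length.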
def do_matrix(img, mtx):
"""
Apply the 3x3 matrix `mtx` to `img`.
"""
base_shape = img.shape
r, g, b = img[..., 0], img[..., 1], img[..., 2]
ro = r * mtx[0][0] + g * mtx[0][1] + b * mtx[0][2]
go = r * mtx[1][0] + g * mtx[1][1] + b * mtx[1][2]
bo = r * mtx[2][0] + g * mtx[2][1] + b * mtx[2][2]
out_img = np.dstack((ro, go, bo)).reshape(base_shape)
return out_img
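# Illustrative example: with the identity matrix
# [[1, 0, 0], [0, 1, 0], [0, 0, 1]] the output equals the input image, since
# each output channel is the row-weighted sum of the input R, G, B planes.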
def _get_cmfs_xy():
"""
Compute the xy values of the horseshoe-shaped outline (spectral locus) used to plot the xy chromaticity diagram.
Returns
-------
array_like
xy coordinate for chromaticity diagram
"""
# Basic parameter setup
# ------------------
cmf = CMFS.get(CMFS_NAME)
d65_white = D65_WHITE
# Compute the xy values of the spectral locus (horseshoe)
# --------------------------
cmf_xy = XYZ_to_xy(cmf.values, d65_white)
return cmf_xy
def get_primaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the primary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
primaries. [[rx, ry], [gx, gy], [bx, by], [rx, ry]]
"""
primaries = RGB_COLOURSPACES[name].primaries
primaries = np.append(primaries, [primaries[0, :]], axis=0)
rgb = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
return primaries, rgb
def xy_to_rgb(xy, name='ITU-R BT.2020', normalize='maximum', specific=None):
"""
Convert xy values to RGB values.
The result is also normalized in a convenient way.
Parameters
----------
xy : array_like
xy value.
name : string
color space name.
normalize : string
normalize method. You can select 'maximum', 'specific' or None.
Returns
-------
array_like
rgb value. the value is normalized.
"""
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
if normalize == 'specific':
xyY = xy_to_xyY(xy)
xyY[..., 2] = specific
large_xyz = xyY_to_XYZ(xyY)
else:
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video levels can be low, so normalize and maximize
the RGB value of each pixel, if requested.
"""
if normalize == 'maximum':
rgb = normalise_maximum(rgb, axis=-1)
else:
if(np.sum(rgb > 1.0) > 0):
print("warning: over flow has occured at xy_to_rgb")
if(np.sum(rgb < 0.0) > 0):
print("warning: under flow has occured at xy_to_rgb")
rgb[rgb < 0] = 0
rgb[rgb > 1.0] = 1.0
return rgb
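# Illustrative example: converting the D65 white point xy = (0.3127, 0.3290)
# with the default settings should give approximately (1.0, 1.0, 1.0), because
# the source and target illuminants match and the result is normalized to its
# per-pixel maximum.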
def get_white_point(name):
"""
Get the white point (based on CIE 1931).
"""
if name != "DCI-P3":
illuminant = RGB_COLOURSPACES[name].illuminant
white_point = ILLUMINANTS[CMFS_NAME][illuminant]
else:
white_point = ILLUMINANTS[CMFS_NAME]["D65"]
return white_point
def get_secondaries(name='ITU-R BT.2020'):
"""
Get the coordinates of the secondary colors.
Parameters
----------
name : str
a name of the color space.
Returns
-------
array_like
secondaries. the order is magenta, yellow, cyan.
"""
secondary_rgb = np.array([[1.0, 0.0, 1.0],
[1.0, 1.0, 0.0],
[0.0, 1.0, 1.0]])
illuminant_XYZ = D65_WHITE
illuminant_RGB = D65_WHITE
chromatic_adaptation_transform = 'CAT02'
rgb_to_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = RGB_to_XYZ(secondary_rgb, illuminant_RGB,
illuminant_XYZ, rgb_to_xyz_matrix,
chromatic_adaptation_transform)
xy = XYZ_to_xy(large_xyz, illuminant_XYZ)
return xy, secondary_rgb.reshape((3, 3))
# def plot_chromaticity_diagram(
# rate=480/755.0*2, xmin=0.0, xmax=0.8, ymin=0.0, ymax=0.9, **kwargs):
# # Default values for keyword arguments
# # ------------------------------------
# monitor_primaries = kwargs.get('monitor_primaries', None)
# secondaries = kwargs.get('secondaries', None)
# test_scatter = kwargs.get('test_scatter', None)
# intersection = kwargs.get('intersection', None)
# # Prepare data for plotting
# # ---------------------------------
# xy_image = get_chromaticity_image(
# xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax)
# cmf_xy = _get_cmfs_xy()
# bt709_gamut, _ = get_primaries(name=cs.BT709)
# bt2020_gamut, _ = get_primaries(name=cs.BT2020)
# dci_p3_gamut, _ = get_primaries(name=cs.P3_D65)
# ap0_gamut, _ = get_primaries(name=cs.ACES_AP0)
# ap1_gamut, _ = get_primaries(name=cs.ACES_AP1)
# xlim = (min(0, xmin), max(0.8, xmax))
# ylim = (min(0, ymin), max(0.9, ymax))
# ax1 = pu.plot_1_graph(fontsize=20 * rate,
# figsize=((xmax - xmin) * 10 * rate,
# (ymax - ymin) * 10 * rate),
# graph_title="CIE1931 Chromaticity Diagram",
# graph_title_size=None,
# xlabel=None, ylabel=None,
# axis_label_size=None,
# legend_size=18 * rate,
# xlim=xlim, ylim=ylim,
# xtick=[x * 0.1 + xmin for x in
# range(int((xlim[1] - xlim[0])/0.1) + 1)],
# ytick=[x * 0.1 + ymin for x in
# range(int((ylim[1] - ylim[0])/0.1) + 1)],
# xtick_size=17 * rate,
# ytick_size=17 * rate,
# linewidth=4 * rate,
# minor_xtick_num=2,
# minor_ytick_num=2)
# ax1.plot(cmf_xy[..., 0], cmf_xy[..., 1], '-k', lw=3.5*rate, label=None)
# ax1.plot((cmf_xy[-1, 0], cmf_xy[0, 0]), (cmf_xy[-1, 1], cmf_xy[0, 1]),
# '-k', lw=3.5*rate, label=None)
# ax1.plot(bt709_gamut[:, 0], bt709_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[0], label="BT.709", lw=2.75*rate)
# ax1.plot(bt2020_gamut[:, 0], bt2020_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[1], label="BT.2020", lw=2.75*rate)
# ax1.plot(dci_p3_gamut[:, 0], dci_p3_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[2], label="DCI-P3", lw=2.75*rate)
# ax1.plot(ap1_gamut[:, 0], ap1_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[3], label="ACES AP1", lw=2.75*rate)
# ax1.plot(ap0_gamut[:, 0], ap0_gamut[:, 1],
# c=UNIVERSAL_COLOR_LIST[4], label="ACES AP0", lw=2.75*rate)
# if monitor_primaries is not None:
# ax1.plot(monitor_primaries[:, 0], monitor_primaries[:, 1],
# c="#202020", label="???", lw=3*rate)
# if secondaries is not None:
# xy, rgb = secondaries
# ax1.scatter(xy[..., 0], xy[..., 1], s=700*rate, marker='s', c=rgb,
# edgecolors='#404000', linewidth=2*rate)
# if test_scatter is not None:
# xy, rgb = test_scatter
# ax1.scatter(xy[..., 0], xy[..., 1], s=300*rate, marker='s', c=rgb,
# edgecolors='#404040', linewidth=2*rate)
# if intersection is not None:
# ax1.scatter(intersection[..., 0], intersection[..., 1],
# s=300*rate, marker='s', c='#CCCCCC',
# edgecolors='#404040', linewidth=2*rate)
# ax1.imshow(xy_image, extent=(xmin, xmax, ymin, ymax))
# plt.legend(loc='upper right')
# plt.savefig('temp_fig.png', bbox_inches='tight')
# plt.show()
def get_chromaticity_image(samples=1024, antialiasing=True, bg_color=0.9,
xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0):
"""
Generate an image of the horseshoe-shaped (spectral locus) region of the xy chromaticity diagram.
Returns
-------
ndarray
rgb image.
"""
"""
Color gamut setting. sRGB was narrow and looked a bit odd, so BT.2020 was chosen.
A drawback is that the colors become slightly pale; improve this when there is time.
"""
# color_space = models.BT2020_COLOURSPACE
# color_space = models.S_GAMUT3_COLOURSPACE
color_space = models.ACES_CG_COLOURSPACE
# Compute the xy values of the spectral locus (horseshoe)
# --------------------------
cmf_xy = _get_cmfs_xy()
"""
To decide whether a point lies inside or outside the horseshoe, the region
is split into triangles (a Delaunay triangulation is built).
Once the triangulation exists, the inside/outside test can (probably) be done
with cross-product calculations.
The resulting triangulation can be plotted with the code below.
One note: the third argument of ```plt.triplot``` is the list of **indices**
used to form triangles from the first and second arguments,
something like [[0, 1, 2], [2, 4, 3], ...].
```python
plt.figure()
plt.triplot(xy[:, 0], xy[:, 1], triangulation.simplices.copy(), '-o')
plt.title('triplot of Delaunay triangulation')
plt.show()
```
"""
triangulation = Delaunay(cmf_xy)
"""
```triangulation.find_simplex()``` tells which simplex (index) each xy point
falls into. A return value of ```-1``` means the point is not in the region,
so the "< 0" test can be used to build the inside/outside mask.
"""
xx, yy\
= np.meshgrid(np.linspace(xmin, xmax, samples),
np.linspace(ymax, ymin, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = color_space.whitepoint
chromatic_adaptation_transform = 'XYZ Scaling'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
xy[xy == 0.0] = 1.0 # guard against division by zero
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video levels can be low, so normalize and maximize
the RGB value of each pixel.
"""
rgb[rgb == 0] = 1.0 # guard against division by zero
rgb = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb *= mask_rgb
# Change the background color to gray
# -------------------------------------
bg_rgb = np.ones_like(rgb)
bg_rgb *= (1 - mask_rgb) * bg_color
rgb += bg_rgb
rgb = rgb ** (1/2.2)
return rgb
def get_csf_color_image(width=640, height=480,
lv1=np.uint16(np.array([1.0, 1.0, 1.0]) * 1023 * 0x40),
lv2=np.uint16(np.array([1.0, 1.0, 1.0]) * 512 * 0x40),
stripe_num=18):
"""
Create a CSF-like pattern by overlaying several offset rectangles.
The input signal level is limited to 16 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : numeric
video level 1. this value must be 10bit.
lv2 : numeric
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
lv1_16bit = lv1
lv2_16bit = lv2
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1_16bit if (idx % 2) == 0 else lv2_16bit
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
# temp_img *= lv
temp_img[:, :] = lv
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
return img
def plot_xyY_color_space(name='ITU-R BT.2020', samples=1024,
antialiasing=True):
"""
Create an xyY plot like the one found in SONY's HDR explanation material.
Parameters
----------
name : str
name of the target color space.
Returns
-------
None
"""
# Build the data used for the inside/outside region test
# --------------------------
primary_xy, _ = get_primaries(name=name)
triangulation = Delaunay(primary_xy)
xx, yy\
= np.meshgrid(np.linspace(0, 1, samples), np.linspace(1, 0, samples))
xy = np.dstack((xx, yy))
mask = (triangulation.find_simplex(xy) < 0).astype(np.float)
# Anti-alias to smooth out the alpha channel
# ------------------------------------------------
if antialiasing:
kernel = np.array([
[0, 1, 0],
[1, 2, 1],
[0, 1, 0],
]).astype(np.float)
kernel /= np.sum(kernel)
mask = convolve(mask, kernel)
# Invert the mask (negative/positive flip)
# --------------------------------
mask = 1 - mask[:, :, np.newaxis]
# Recover colors from the xy mesh
# ------------------------
illuminant_XYZ = D65_WHITE
illuminant_RGB = RGB_COLOURSPACES[name].whitepoint
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = get_xyz_to_rgb_matrix(name)
rgb_to_large_xyz_matrix = get_rgb_to_xyz_matrix(name)
large_xyz = xy_to_XYZ(xy)
rgb = XYZ_to_RGB(large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix,
chromatic_adaptation_transform)
"""
The raw video levels can be low, so normalize and maximize
the RGB value of each pixel.
"""
rgb_org = normalise_maximum(rgb, axis=-1)
# Apply the mask
# -------------------------------------
mask_rgb = np.dstack((mask, mask, mask))
rgb = rgb_org * mask_rgb
rgba = np.dstack((rgb, mask))
# Convert back to XYZ once more, in order to obtain Y
# ---------------------------------------------
large_xyz2 = RGB_to_XYZ(rgb, illuminant_RGB, illuminant_XYZ,
rgb_to_large_xyz_matrix,
chromatic_adaptation_transform)
# Prepare for the conversion to log scale
# --------------------------
large_y = large_xyz2[..., 1] * 1000
large_y[large_y < 1] = 1.0
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
# ax.plot_wireframe(xy[..., 0], xy[..., 1], np.log10(large_y),
# rcount=100, ccount=100)
ax.plot_surface(xy[..., 0], xy[..., 1], np.log10(large_y),
rcount=64, ccount=64, facecolors=rgb_org)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("Y")
ax.set_zticks([0, 1, 2, 3])
ax.set_zticklabels([1, 10, 100, 1000])
# Get the chromaticity image and paste it at the z=0 plane
# ----------------------------------------------
cie1931_rgb = get_chromaticity_image(samples=samples, bg_color=0.0)
alpha = np.zeros_like(cie1931_rgb[..., 0])
rgb_sum = np.sum(cie1931_rgb, axis=-1)
alpha[rgb_sum > 0.00001] = 1
cie1931_rgb = np.dstack((cie1931_rgb[..., 0], cie1931_rgb[..., 1],
cie1931_rgb[..., 2], alpha))
zz = np.zeros_like(xy[..., 0])
ax.plot_surface(xy[..., 0], xy[..., 1], zz,
facecolors=cie1931_rgb)
plt.show()
def log_tick_formatter(val, pos=None):
return "{:.0e}".format(10**val)
def get_3d_grid_cube_format(grid_num=4):
"""
# Overview
Returns an array ordered like
(0, 0, 0), (1, 0, 0), (0, 1, 0), (1, 1, 0), (0, 0, 1), ...
Handy when creating a 3DLUT in CUBE format.
"""
base = np.linspace(0, 1, grid_num)
ones_x = np.ones((grid_num, grid_num, 1))
ones_y = np.ones((grid_num, 1, grid_num))
ones_z = np.ones((1, grid_num, grid_num))
r_3d = base[np.newaxis, np.newaxis, :] * ones_x
g_3d = base[np.newaxis, :, np.newaxis] * ones_y
b_3d = base[:, np.newaxis, np.newaxis] * ones_z
r_3d = r_3d.flatten()
g_3d = g_3d.flatten()
b_3d = b_3d.flatten()
return np.dstack((r_3d, g_3d, b_3d))
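# Illustrative example: get_3d_grid_cube_format(grid_num=2) returns an array
# of shape (1, 8, 3) whose rows are (0,0,0), (1,0,0), (0,1,0), (1,1,0),
# (0,0,1), (1,0,1), (0,1,1), (1,1,1); R varies fastest, then G, then B,
# matching the ordering described in the docstring.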
def quadratic_bezier_curve(t, p0, p1, p2, samples=1024):
# x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
# + (t ** 2) * p2[0]
# y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
# + (t ** 2) * p2[1]
x = ((1 - t) ** 2) * p0[0] + 2 * (1 - t) * t * p1[0]\
+ (t ** 2) * p2[0]
y = ((1 - t) ** 2) * p0[1] + 2 * (1 - t) * t * p1[1]\
+ (t ** 2) * p2[1]
# ax1 = pu.plot_1_graph(fontsize=20,
# figsize=(10, 8),
# graph_title="Title",
# graph_title_size=None,
# xlabel="X Axis Label", ylabel="Y Axis Label",
# axis_label_size=None,
# legend_size=17,
# xlim=None,
# ylim=None,
# xtick=None,
# ytick=None,
# xtick_size=None, ytick_size=None,
# linewidth=3,
# minor_xtick_num=None,
# minor_ytick_num=None)
# ax1.plot(x, y, label='aaa')
# plt.legend(loc='upper left')
# plt.show()
def gen_step_gradation(width=1024, height=128, step_num=17,
bit_depth=10, color=(1.0, 1.0, 1.0),
direction='h', debug=False):
"""
# Overview
Create a step-wise (staircase) gradation pattern.
With the right arguments it can also produce a pattern that changes
by exactly one code value per step.
# Note
To create a gradation that changes by exactly one code value per step,
set the parameters so that ```step_num = (2 ** bit_depth) + 1```.
See the Example below for concrete values.
# Example
```
grad_8 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=257, bit_depth=8,
color=(1.0, 1.0, 1.0), direction='h')
grad_10 = gen_step_gradation(width=grad_width, height=grad_height,
step_num=1025, bit_depth=10,
color=(1.0, 1.0, 1.0), direction='h')
```
"""
max = 2 ** bit_depth
# Gradation direction setting
# ----------------------
if direction == 'h':
pass
else:
temp = height
height = width
width = temp
if (max + 1 != step_num):
"""
Not the "one code value per step" pattern.
The last value would be 256 or 1024, so subtract 1 from it.
"""
val_list = np.linspace(0, max, step_num)
val_list[-1] -= 1
else:
"""
The pattern that changes by exactly one code value per step.
The last value would be 256 or 1024, so drop it.
"""
val_list = np.linspace(0, max, step_num)[0:-1]
step_num -= 1 # step_num is passed in with an extra +1, so subtract it here
# Just to be safe, verify that each step changes by exactly one code value
# ---------------------------
diff = val_list[1:] - val_list[0:-1]
if (diff == 1).all():
pass
else:
raise ValueError("calculated value is invalid.")
# First build the gradation for a single horizontal line
# -----------------------------------
step_length_list = equal_devision(width, step_num)
step_bar_list = []
for step_idx, length in enumerate(step_length_list):
step = [np.ones((length)) * color[c_idx] * val_list[step_idx]
for c_idx in range(3)]
if direction == 'h':
step = np.dstack(step)
step_bar_list.append(step)
step_bar = np.hstack(step_bar_list)
else:
step = np.dstack(step).reshape((length, 1, 3))
step_bar_list.append(step)
step_bar = np.vstack(step_bar_list)
# Expand to 2D using broadcasting
# ------------------------------------------
if direction == 'h':
img = step_bar * np.ones((height, 1, 3))
else:
img = step_bar * np.ones((1, height, 3))
# Convert to np.uint16
# ------------------------------
# img = np.uint16(np.round(img * (2 ** (16 - bit_depth))))
if debug:
preview_image(img, 'rgb')
return img
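# Illustrative example: with bit_depth=8 and step_num=257 the video levels are
# exactly 0, 1, 2, ..., 255, i.e. one code value per step, as in the
# docstring's Example section.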
def merge(img_a, img_b, pos=(0, 0)):
"""
Merge img_b into img_a.
The data in img_a is overwritten in place.
pos = (horizontal_st, vertical_st)
"""
b_width = img_b.shape[1]
b_height = img_b.shape[0]
img_a[pos[1]:b_height+pos[1], pos[0]:b_width+pos[0]] = img_b
def merge_with_alpha(bg_img, fg_img, tf_str=tf.SRGB, pos=(0, 0)):
"""
Alpha-composite fg_img over bg_img.
Parameters
----------
bg_img : array_like(float, 3-channel)
image data.
fg_img : array_like(float, 4-channel)
image data
tf : strings
transfer function
pos : list(int)
(pos_h, pos_v)
"""
f_width = fg_img.shape[1]
f_height = fg_img.shape[0]
bg_merge_area = bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]]
bg_linear = tf.eotf_to_luminance(bg_merge_area, tf_str)
fg_linear = tf.eotf_to_luminance(fg_img, tf_str)
alpha = fg_linear[:, :, 3:] / tf.PEAK_LUMINANCE[tf_str]
out_linear = (1 - alpha) * bg_linear + fg_linear[:, :, :-1]
out_merge_area = tf.oetf_from_luminance(out_linear, tf_str)
bg_img[pos[1]:f_height+pos[1], pos[0]:f_width+pos[0]] = out_merge_area
return bg_img
def dot_pattern(dot_size=4, repeat=4, color=np.array([1.0, 1.0, 1.0])):
"""
Create a dot pattern.
Parameters
----------
dot_size : integer
dot size.
repeat : integer
The number of high-low pairs.
color : array_like
color value.
Returns
-------
array_like
dot pattern image.
"""
# Number of pixels in the horizontal / vertical direction
pixel_num = dot_size * 2 * repeat
# Generate the High-Low boolean arrays
even_logic = [(np.arange(pixel_num) % (dot_size * 2)) - dot_size < 0]
even_logic = np.dstack((even_logic, even_logic, even_logic))
odd_logic = np.logical_not(even_logic)
# Apply the color
color = color.reshape((1, 1, 3))
even_line = (np.ones((1, pixel_num, 3)) * even_logic) * color
odd_line = (np.ones((1, pixel_num, 3)) * odd_logic) * color
# Copy in the vertical direction & combine the even and odd blocks
even_block = np.repeat(even_line, dot_size, axis=0)
odd_block = np.repeat(odd_line, dot_size, axis=0)
pair_block = np.vstack((even_block, odd_block))
img = np.vstack([pair_block for x in range(repeat)])
return img
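# Illustrative example: dot_pattern(dot_size=2, repeat=3) produces a 12x12x3
# image (pixel_num = dot_size * 2 * repeat), made of alternating 2x2 blocks
# of the given color and black, arranged as a checker pattern.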
def complex_dot_pattern(kind_num=3, whole_repeat=2,
fg_color=np.array([1.0, 1.0, 1.0]),
bg_color=np.array([0.15, 0.15, 0.15])):
"""
Create a composite dot pattern.
Parameters
----------
kind_num : integer
Number of different dot sizes to create.
For example, kind_num=3 creates 1-dot, 2-dot and 4-dot patterns.
whole_repeat : integer
Number of sets of the different dot patterns.
For example, kind_num=3 and whole_repeat=2 creates two sets of
the 1-dot, 2-dot and 4-dot patterns, horizontally and vertically.
fg_color : array_like
foreground color value.
bg_color : array_like
background color value.
reduce : bool
For the 3840x2160 HDR test pattern only. Halves the width and height.
Returns
-------
array_like
dot pattern image.
"""
max_dot_width = 2 ** kind_num
img_list = []
for size_idx in range(kind_num)[::-1]:
dot_size = 2 ** size_idx
repeat = max_dot_width // dot_size
dot_img = dot_pattern(dot_size, repeat, fg_color)
img_list.append(dot_img)
img_list.append(np.ones_like(dot_img) * bg_color)
# preview_image(dot_img)
line_upper_img = np.hstack(img_list)
line_upper_img = np.hstack([line_upper_img for x in range(whole_repeat)])
line_lower_img = line_upper_img.copy()[:, ::-1, :]
h_unit_img = np.vstack((line_upper_img, line_lower_img))
img = np.vstack([h_unit_img for x in range(kind_num * whole_repeat)])
# preview_image(img)
# cv2.imwrite("hoge.tiff", np.uint8(img * 0xFF)[..., ::-1])
return img
def make_csf_color_image(width=640, height=640,
lv1=np.array([940, 940, 940], dtype=np.uint16),
lv2=np.array([1023, 1023, 1023], dtype=np.uint16),
stripe_num=6):
"""
Create a CSF-like pattern by overlaying several offset rectangles.
The input signal level is limited to 10 bit.
Parameters
----------
width : numeric.
width of the pattern image.
height : numeric.
height of the pattern image.
lv1 : array_like
video level 1. this value must be 10bit.
lv2 : array_like
video level 2. this value must be 10bit.
stripe_num : numeric
number of the stripe.
Returns
-------
array_like
a cms pattern image.
"""
width_list = equal_devision(width, stripe_num)
height_list = equal_devision(height, stripe_num)
h_pos_list = equal_devision(width // 2, stripe_num)
v_pos_list = equal_devision(height // 2, stripe_num)
img = np.zeros((height, width, 3), dtype=np.uint16)
width_temp = width
height_temp = height
h_pos_temp = 0
v_pos_temp = 0
for idx in range(stripe_num):
lv = lv1 if (idx % 2) == 0 else lv2
temp_img = np.ones((height_temp, width_temp, 3), dtype=np.uint16)
temp_img = temp_img * lv.reshape((1, 1, 3))
ed_pos_h = h_pos_temp + width_temp
ed_pos_v = v_pos_temp + height_temp
img[v_pos_temp:ed_pos_v, h_pos_temp:ed_pos_h] = temp_img
width_temp -= width_list[stripe_num - 1 - idx]
height_temp -= height_list[stripe_num - 1 - idx]
h_pos_temp += h_pos_list[idx]
v_pos_temp += v_pos_list[idx]
# preview_image(img / 1023)
return img
def make_tile_pattern(width=480, height=960, h_tile_num=4,
v_tile_num=4, low_level=(940, 940, 940),
high_level=(1023, 1023, 1023)):
"""
    Create a tiled stripe (checkerboard) pattern.
"""
width_array = equal_devision(width, h_tile_num)
height_array = equal_devision(height, v_tile_num)
high_level = np.array(high_level, dtype=np.uint16)
low_level = np.array(low_level, dtype=np.uint16)
v_buf = []
for v_idx, height in enumerate(height_array):
h_buf = []
for h_idx, width in enumerate(width_array):
tile_judge = (h_idx + v_idx) % 2 == 0
h_temp = np.zeros((height, width, 3), dtype=np.uint16)
h_temp[:, :] = high_level if tile_judge else low_level
h_buf.append(h_temp)
v_buf.append(np.hstack(h_buf))
img = np.vstack(v_buf)
# preview_image(img/1024.0)
return img
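# Illustrative usage sketch (parameter values are arbitrary examples), assuming
# this module's preview_image() helper: a 4x4 checkerboard of 10-bit code
# values, normalized to [0, 1] for preview.
#   tile = make_tile_pattern(width=480, height=480, h_tile_num=4,
#                            v_tile_num=4, low_level=(64, 64, 64),
#                            high_level=(940, 940, 940))
#   preview_image(tile / 1023.0)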
def get_marker_idx(img, marker_value):
return np.all(img == marker_value, axis=-1)
def make_ycbcr_checker(height=480, v_tile_num=4):
"""
    Create a test pattern for checking YCbCr coefficient errors.
    Honestly, it is built in a fairly messy way: roughly made patterns are
    crudely merged together.
Parameters
----------
height : numeric.
height of the pattern image.
v_tile_num : numeric
number of the tile in the vertical direction.
Note
----
    The pattern is wider than it is tall. The following relations hold.
```
h_tile_num = v_tile_num * 2
width = height * 2
```
Returns
-------
array_like
ycbcr checker image
"""
cyan_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[0, 990, 990],
high_level=[0, 1023, 1023])
magenta_img = make_tile_pattern(width=height, height=height,
h_tile_num=v_tile_num,
v_tile_num=v_tile_num,
low_level=[990, 0, 312],
high_level=[1023, 0, 312])
out_img = np.hstack([cyan_img, magenta_img])
# preview_image(out_img/1023.0)
return out_img
def plot_color_checker_image(rgb, rgb2=None, size=(1920, 1080),
block_size=1/4.5, padding=0.01):
"""
    Plot a ColorChecker.
Parameters
----------
rgb : array_like
RGB value of the ColorChecker.
RGB's shape must be (24, 3).
rgb2 : array_like
        This is an optional parameter.
        If you want to draw two different ColorCheckers,
        set the second set of RGB values to this variable.
size : tuple
canvas size.
block_size : float
        Each block's size.
        This value is a ratio to the height of the canvas.
padding : float
A padding to the block.
Returns
-------
array_like
A ColorChecker image.
"""
IMG_HEIGHT = size[1]
IMG_WIDTH = size[0]
COLOR_CHECKER_SIZE = block_size
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
    COLOR_CHECKER_PADDING = padding
    # compute basic parameters
# --------------------------------------
COLOR_CHECKER_H_NUM = 6
COLOR_CHECKER_V_NUM = 4
img_height = IMG_HEIGHT
img_width = IMG_WIDTH
patch_st_h = int(IMG_WIDTH / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_H_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_H_NUM / 2.0 - 0.5)) / 2.0))
patch_st_v = int(IMG_HEIGHT / 2.0
- (IMG_HEIGHT * COLOR_CHECKER_SIZE
* COLOR_CHECKER_V_NUM / 2.0
+ (IMG_HEIGHT * COLOR_CHECKER_PADDING
* (COLOR_CHECKER_V_NUM / 2.0 - 0.5)) / 2.0))
patch_width = int(img_height * COLOR_CHECKER_SIZE)
patch_height = patch_width
patch_space = int(img_height * COLOR_CHECKER_PADDING)
    # draw the 24 patches onto a single image in a 24-iteration loop
# -------------------------------------------------
img_all_patch = np.zeros((img_height, img_width, 3), dtype=np.uint8)
for idx in range(COLOR_CHECKER_H_NUM * COLOR_CHECKER_V_NUM):
v_idx = idx // COLOR_CHECKER_H_NUM
h_idx = (idx % COLOR_CHECKER_H_NUM)
patch = np.ones((patch_height, patch_width, 3))
patch[:, :] = rgb[idx]
st_h = patch_st_h + (patch_width + patch_space) * h_idx
st_v = patch_st_v + (patch_height + patch_space) * v_idx
img_all_patch[st_v:st_v+patch_height, st_h:st_h+patch_width] = patch
# pt1 = (st_h, st_v) # upper left
pt2 = (st_h + patch_width, st_v) # upper right
pt3 = (st_h, st_v + patch_height) # lower left
pt4 = (st_h + patch_width, st_v + patch_height) # lower right
pts = np.array((pt2, pt3, pt4))
sub_color = rgb[idx].tolist() if rgb2 is None else rgb2[idx].tolist()
cv2.fillPoly(img_all_patch, [pts], sub_color)
preview_image(img_all_patch)
return img_all_patch
def get_log10_x_scale(
sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6):
"""
    Create x-axis data on a log10 scale.
Examples
--------
    >>> get_log10_x_scale(
... sample_num=8, ref_val=1.0, min_exposure=-1, max_exposure=6)
array([ 1.0000e-01 1.0000e+00 1.0000e+01 1.0000e+02
1.0000e+03 1.0000e+04 1.0000e+05 1.0000e+06])
"""
x_min = np.log10(ref_val * (10 ** min_exposure))
x_max = np.log10(ref_val * (10 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 10.0 ** x
def get_log2_x_scale(
sample_num=32, ref_val=1.0, min_exposure=-6.5, max_exposure=6.5):
"""
    Create x-axis data on a log2 scale.
Examples
--------
>>> get_log2_x_scale(sample_num=10, min_exposure=-4.0, max_exposure=4.0)
array([[ 0.0625 0.11573434 0.214311 0.39685026 0.73486725
1.36079 2.5198421 4.66611616 8.64047791 16. ]])
"""
x_min = np.log2(ref_val * (2 ** min_exposure))
x_max = np.log2(ref_val * (2 ** max_exposure))
x = np.linspace(x_min, x_max, sample_num)
return 2.0 ** x
def shaper_func_linear_to_log2(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Lin_to_Log2_param.ctl.
https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Lin_to_Log2_param.ctl
Parameters
----------
x : array_like
linear data.
mid_gray : float
18% gray value on linear scale.
min_exposure : float
minimum value on log scale.
max_exposure : float
maximum value on log scale.
Returns
-------
array_like
log2 value that is transformed from linear x value.
Examples
--------
>>> shaper_func_linear_to_log2(
... x=0.18, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
0.5
>>> shaper_func_linear_to_log2(
    ...     x=np.array([0.00198873782209, 16.2917402385]),
... mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([ 1.58232402e-13 1.00000000e+00])
"""
    # convert to log2 space, offset so that mid_gray becomes 0.0
y = np.log2(x / mid_gray)
    # normalize over the min/max exposure range.
y_normalized = (y - min_exposure) / (max_exposure - min_exposure)
y_normalized[y_normalized < 0] = 0
return y_normalized
def shaper_func_log2_to_linear(
x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5):
"""
    Implemented with reference to ACESutil.Log2_to_Lin_param.ctl.
    https://github.com/ampas/aces-dev/blob/master/transforms/ctl/utilities/ACESutil.Log2_to_Lin_param.ctl
    See the description of shaper_func_linear_to_log2() for notes on the log2 space.
Examples
--------
>>> x = np.array([0.0, 1.0])
>>> shaper_func_log2_to_linear(
... x, mid_gray=0.18, min_exposure=-6.5, max_exposure=6.5)
array([0.00198873782209, 16.2917402385])
"""
x_re_scale = x * (max_exposure - min_exposure) + min_exposure
y = (2.0 ** x_re_scale) * mid_gray
# plt.plot(x, y)
# plt.show()
return y
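# Illustrative round-trip sketch: for inputs inside the exposure range the two
# shaper functions are inverses, so encoding then decoding reproduces the
# original linear values within floating point error.
#   x_lin = np.array([0.01, 0.18, 1.0, 10.0])
#   y_log = shaper_func_linear_to_log2(x_lin, 0.18, -6.5, 6.5)
#   x_back = shaper_func_log2_to_linear(y_log, 0.18, -6.5, 6.5)
#   # np.allclose(x_lin, x_back) -> True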
def draw_straight_line(img, pt1, pt2, color, thickness):
"""
    Draw a straight line. OpenCV seems to support only 8-bit images, so this is a custom implementation.
Parameters
----------
img : array_like
image data.
pt1 : list(pos_h, pos_v)
start point.
pt2 : list(pos_h, pos_v)
end point.
color : array_like
color
thickness : int
thickness.
Returns
-------
array_like
image data with line.
Notes
-----
    The thickness parameter extends from pt1 toward the lower right.
    Note that the thickness is not centered on pt1.
Examples
--------
>>> pt1 = (0, 0)
>>> pt2 = (1920, 0)
>>> color = (940, 940, 940)
>>> thickness = 4
>>> draw_straight_line(img, pt1, pt2, color, thickness)
"""
# parameter check
if (pt1[0] != pt2[0]) and (pt1[1] != pt2[1]):
raise ValueError("invalid pt1, pt2 parameters")
# check direction
if pt1[0] == pt2[0]:
thickness_direction = 'h'
else:
thickness_direction = 'v'
if thickness_direction == 'h':
for h_idx in range(thickness):
img[pt1[1]:pt2[1], pt1[0] + h_idx, :] = color
elif thickness_direction == 'v':
for v_idx in range(thickness):
img[pt1[1] + v_idx, pt1[0]:pt2[0], :] = color
def draw_outline(img, fg_color, outline_width):
"""
    Draw an outline (border) on the image.
Parameters
----------
img : array_like
image data.
fg_color : array_like
color
outline_width : int
thickness.
Returns
-------
array_like
image data with line.
Examples
--------
>>> img = np.zeros((1080, 1920, 3))
>>> color = (940, 940, 940)
>>> thickness = 2
>>> draw_outline(img, color, thickness)
"""
width = img.shape[1]
height = img.shape[0]
# upper left
pt1 = (0, 0)
pt2 = (width, 0)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, 0)
pt2 = (0, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
# lower right
pt1 = (width - outline_width, 0)
pt2 = (width - outline_width, height)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
pt1 = (0, height - outline_width)
pt2 = (width, height - outline_width)
draw_straight_line(
img, pt1, pt2, fg_color, outline_width)
def convert_luminance_to_color_value(luminance, transfer_function):
"""
    Convert a luminance in [cd/m2] to an RGB code value.
    The unit of luminance is [cd/m2]. The result is achromatic.
Examples
--------
>>> convert_luminance_to_color_value(100, tf.GAMMA24)
>>> [ 1.0 1.0 1.0 ]
>>> convert_luminance_to_color_value(100, tf.ST2084)
>>> [ 0.50807842 0.50807842 0.50807842 ]
"""
code_value = convert_luminance_to_code_value(
luminance, transfer_function)
return np.array([code_value, code_value, code_value])
def convert_luminance_to_code_value(luminance, transfer_function):
"""
    Convert a luminance in [cd/m2] to a code value.
    The unit of luminance is [cd/m2].
"""
return tf.oetf_from_luminance(luminance, transfer_function)
def calc_rad_patch_idx2(outmost_num=5, current_num=3):
"""
    Get the conversion table for a list of RGB color patches like the one
    below: gray at the center, surrounded by patches on the a*b* plane of
    the CIELAB space.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The raw patch data is not yet sorted and must be reordered with the
    conversion table returned by calc_rad_patch_idx2(). This function
    computes exactly that conversion table.
"""
base = np.arange(outmost_num ** 2).reshape((outmost_num, outmost_num))
# print(base)
t_idx = (outmost_num - current_num) // 2
trimmed = base[t_idx:t_idx+current_num, t_idx:t_idx+current_num]
# print(trimmed)
# print(np.arange(current_num**2).reshape((current_num, current_num)))
half_num = current_num // 2
conv_idx = []
for idx in range(half_num):
val = (current_num ** 2) // 2 + half_num - current_num * idx
conv_idx.append(val)
for idx in range(current_num)[::-1]:
conv_idx.append(idx)
for idx in range(1, current_num - 1):
conv_idx.append(idx * current_num)
for idx in range(current_num):
val = (current_num ** 2) - current_num + idx
conv_idx.append(val)
for idx in range(1, half_num):
val = (current_num ** 2) - 1 - idx * current_num
conv_idx.append(val)
conv_idx = trimmed.flatten()[conv_idx]
return conv_idx
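# Illustrative usage sketch: for a 9x9 patch grid, the 5x5 ring consists of
# (current_num - 1) * 4 == 16 patches, and the returned array holds their
# indices (in drawing order) within the flattened 9x9 grid.
#   idx = calc_rad_patch_idx2(outmost_num=9, current_num=5)
#   # len(idx) == 16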
def _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space):
"""
    Get a list of RGB color patches like the one below: gray at the center,
    surrounded by patches on the a*b* plane of the CIELAB space.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The obtained data is not yet sorted and must be reordered with the
    conversion table returned by calc_rad_patch_idx2().
"""
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rad = np.linspace(0, 2 * np.pi, current_patch_num, endpoint=False)
ll = np.ones((current_patch_num)) * lstar
aa = np.cos(rad) * temp_chroma
bb = np.sin(rad) * temp_chroma
lab = np.dstack((ll, aa, bb))
large_xyz = Lab_to_XYZ(lab)
rgb = XYZ_to_RGB(large_xyz, D65_WHITE, D65_WHITE,
color_space.XYZ_to_RGB_matrix)
return np.clip(rgb, 0.0, 1.0)
def calc_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
"""
    Get a list of RGB color patches like the one below: gray at the center,
    surrounded by patches on the a*b* plane of the CIELAB space.
    https://user-images.githubusercontent.com/3609012/75444470-d3bc5600-59a6-11ea-962b-c315648782a9.png
    The returned list of RGB values is already **sorted** so that the first
    entry is the green patch at the upper left of the image and the last
    entry is the purple patch at the lower right. To plot the patches, take
    the values from the head of the list in order and lay them out toward
    the lower right.
"""
patch_num = outmost_num ** 2
rgb_list = np.ones((patch_num, 3))
current_num_list = range(1, outmost_num + 1, 2)
chroma_list = np.linspace(0, chroma, len(current_num_list))
for temp_chroma, current_num in zip(chroma_list, current_num_list):
current_patch_num = (current_num - 1) * 4 if current_num > 1 else 1
rgb = _calc_rgb_from_same_lstar_radial_data(
lstar, temp_chroma, current_num, color_space)
rgb = np.reshape(rgb, (current_patch_num, 3))
rgb = tf.oetf(rgb, transfer_function)
conv_idx = calc_rad_patch_idx2(
outmost_num=outmost_num, current_num=current_num)
for idx in range(current_patch_num):
rgb_list[conv_idx[idx]] = rgb[idx]
return rgb_list
def _plot_same_lstar_radial_color_patch_data(
lstar=58, chroma=32.5, outmost_num=9,
color_space=BT709_COLOURSPACE,
transfer_function=tf.GAMMA24):
patch_size = 1080 // outmost_num
img = np.ones((1080, 1080, 3)) * 0.0
rgb = calc_same_lstar_radial_color_patch_data(
lstar=lstar, chroma=chroma, outmost_num=outmost_num,
color_space=color_space, transfer_function=transfer_function)
for idx in range(outmost_num ** 2):
h_idx = idx % outmost_num
v_idx = idx // outmost_num
st_pos = (h_idx * patch_size, v_idx * patch_size)
temp_img = np.ones((patch_size, patch_size, 3))\
* rgb[idx][np.newaxis, np.newaxis, :]
merge(img, temp_img, st_pos)
cv2.imwrite("hoge2.tiff", np.uint16(np.round(img[:, :, ::-1] * 0xFFFF)))
def get_accelerated_x_1x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0 → 1 → 0.
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x1 = get_accelerated_x_1x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x1)
>>> [ 0. 0.049 0.188 0.388 0.611 0.811 0.950 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_2x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0 → 1 → 0.
    The acceleration is 2x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
Examples
--------
>>> x0 = np.linspace(0, 1, 8)
>>> x2 = get_accelerated_x_2x(8)
>>> print(x0)
>>> [ 0. 0.142 0.285 0.428 0.571 0.714 0.857 1. ]
>>> print(x2)
>>> [ 0. 0.006 0.084 0.328 0.671 0.915 0.993 1. ]
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_4x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0 → 1 → 0.
    The acceleration is 4x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def get_accelerated_x_8x(sample_num=64):
"""
    Create x values that do not increase uniformly; the acceleration goes 0 → 1 → 0.
    The acceleration is 8x that of `get_accelerated_x_1x`!!
Parameters
----------
sample_num : int
the number of the sample.
Returns
-------
array_like
accelerated value list
"""
rad = np.linspace(-0.5 * np.pi, 0.5 * np.pi, sample_num)
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
rad = np.sin(rad) * 0.5 * np.pi
x = (np.sin(rad) + 1) / 2
return x
def generate_color_checker_rgb_value(
color_space=BT709_COLOURSPACE, target_white=D65_WHITE):
"""
Generate the 24 RGB values of the color checker.
Parameters
----------
color_space : color space
color space object in `colour` module.
target_white : array_like
the xy values of the white point of target color space.
Returns
-------
array_like
24 RGB values. This is linear. OETF is not applied.
Examples
--------
>>> generate_color_checker_rgb_value(
... color_space=colour.models.BT709_COLOURSPACE,
... target_white=[0.3127, 0.3290])
>>> [[ 0.17289286 0.08205728 0.05714562]
>>> [ 0.5680292 0.29250401 0.21951748]
>>> [ 0.10435534 0.19656108 0.32958666]
>>> [ 0.1008804 0.14839018 0.05327639]
>>> [ 0.22303549 0.2169701 0.43166537]
>>> [ 0.10715338 0.513512 0.41415978]
>>> [ 0.74639182 0.20020473 0.03081343]
>>> [ 0.05947812 0.10659045 0.39897686]
>>> [ 0.5673215 0.08485376 0.11945382]
>>> [ 0.11177253 0.04285397 0.14166202]
>>> [ 0.34250836 0.5062777 0.0557734 ]
>>> [ 0.79262553 0.35803886 0.025485 ]
>>> [ 0.01864598 0.05139665 0.28886469]
>>> [ 0.054392 0.29876719 0.07187681]
>>> [ 0.45628547 0.03075684 0.04092033]
>>> [ 0.85379178 0.56503558 0.01475575]
>>> [ 0.53533883 0.09006355 0.3047824 ]
>>> [-0.03662977 0.24753781 0.39824679]
>>> [ 0.91177068 0.91497623 0.89427332]
>>> [ 0.57973934 0.59203191 0.59370647]
>>> [ 0.35495537 0.36538027 0.36772001]
>>> [ 0.19009594 0.19180133 0.19316719]
>>> [ 0.08524707 0.08890587 0.09255774]
>>> [ 0.03038879 0.03118623 0.03279615]]
"""
colour_checker_param = COLOURCHECKERS.get('ColorChecker 2005')
    # some of the data is not needed for this processing, so extract only xyY and the whitepoint
# -------------------------------------------------------------
_name, data, whitepoint = colour_checker_param
temp_xyY = []
for key in data.keys():
temp_xyY.append(data[key])
temp_xyY = np.array(temp_xyY)
large_xyz = xyY_to_XYZ(temp_xyY)
rgb_white_point = D65_WHITE
    illuminant_XYZ = whitepoint   # white point of the original ColorChecker data
    illuminant_RGB = rgb_white_point  # white point after the XYZ to RGB conversion
chromatic_adaptation_transform = 'CAT02'
large_xyz_to_rgb_matrix = color_space.XYZ_to_RGB_matrix
rgb = XYZ_to_RGB(
large_xyz, illuminant_XYZ, illuminant_RGB,
large_xyz_to_rgb_matrix, chromatic_adaptation_transform)
return rgb
def make_color_checker_image(rgb, width=1920, padding_rate=0.01):
"""
    Create a 6x4 ColorChecker image.
    The height is calculated automatically from the width. padding_rate changes it slightly.
"""
h_patch_num = 6
v_patch_num = 4
    # calculate the various parameters
each_padding = int(width * padding_rate + 0.5)
h_padding_total = each_padding * (h_patch_num + 1)
h_patch_width_total = width - h_padding_total
patch_height = h_patch_width_total // h_patch_num
height = patch_height * v_patch_num + each_padding * (v_patch_num + 1)
patch_width_list = equal_devision(h_patch_width_total, h_patch_num)
    # lay out the patches
img = np.zeros((height, width, 3))
for v_idx in range(v_patch_num):
h_pos_st = each_padding
v_pos_st = each_padding + v_idx * (patch_height + each_padding)
for h_idx in range(h_patch_num):
rgb_idx = v_idx * h_patch_num + h_idx
pos = (h_pos_st, v_pos_st)
patch_img = np.ones((patch_height, patch_width_list[h_idx], 3))\
* rgb[rgb_idx]
merge(img, patch_img, pos)
h_pos_st += (patch_width_list[h_idx] + each_padding)
return img
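# Illustrative end-to-end sketch, assuming the module-level BT709_COLOURSPACE
# and tf.GAMMA24 objects used elsewhere in this file: build the 24 linear
# ColorChecker values, apply a display OETF and lay them out as a 6x4 image.
#   rgb_linear = generate_color_checker_rgb_value(color_space=BT709_COLOURSPACE)
#   rgb_gamma = tf.oetf(np.clip(rgb_linear, 0.0, 1.0), tf.GAMMA24)
#   cc_img = make_color_checker_image(rgb_gamma, width=1920, padding_rate=0.01)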
def calc_st_pos_for_centering(bg_size, fg_size):
"""
    Calculate the start position for centering.
    Parameters
    ----------
    bg_size : tuple(int)
        (width, height) of the background image.
    fg_size : tuple(int)
        (width, height) of the foreground image.
    Returns
    -------
    tuple(int)
(st_pos_h, st_pos_v)
Examples
--------
>>> calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480))
>>> (640, 300)
"""
bg_width = bg_size[0]
bg_height = bg_size[1]
fg_width = fg_size[0]
fg_height = fg_size[1]
st_pos_h = bg_width // 2 - fg_width // 2
st_pos_v = bg_height // 2 - fg_height // 2
return (st_pos_h, st_pos_v)
def get_size_from_image(img):
"""
    Convenience helper because computing the arguments for `calc_st_pos_for_centering()` by hand was tedious.
"""
return (img.shape[1], img.shape[0])
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.abspath(__file__)))
# print(calc_rad_patch_idx(outmost_num=9, current_num=1))
# _plot_same_lstar_radial_color_patch_data(
# lstar=58, chroma=32.5, outmost_num=7,
# color_space=BT709_COLOURSPACE,
# transfer_function=tf.GAMMA24)
# calc_rad_patch_idx2(outmost_num=9, current_num=7)
# print(convert_luminance_to_color_value(100, tf.ST2084))
# print(generate_color_checker_rgb_value(target_white=[0.3127, 0.3290]))
print(calc_st_pos_for_centering(bg_size=(1920, 1080), fg_size=(640, 480)))
| 29.806202 | 105 | 0.573112 | [
"BSD-3-Clause"
] | colour-science/sample_code | ty_lib/test_pattern_generator2.py | 54,415 | Python |
__license__ = "MIT"
__copyright__ = r"""
MIT License
Copyright (c) 2017 Gregor Engberding
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import sys
import json
from PySide2.QtCore import QAbstractItemModel, QAbstractListModel, QByteArray, QDataStream, QJsonDocument, QJsonParseError, QJsonValue, QMimeData, QModelIndex, Qt
from PySide2.QtWidgets import QApplication, QFileDialog
class QJsonTreeItem(object):
def __init__(self, parent=None):
        self.mParent = parent
        self.mChilds = []
        self.mKey = None
        self.mType = None
        self.mValue = None
def appendChild(self, item):
self.mChilds.append(item)
def child(self, row: int):
return self.mChilds[row]
def parent(self):
return self.mParent
def childCount(self):
return len(self.mChilds)
def row(self):
if self.mParent is not None:
return self.mParent.mChilds.index(self)
return 0
def setKey(self, key: str):
self.mKey = key
def setValue(self, value: str):
self.mValue = value
def setType(self, type: QJsonValue.Type):
self.mType = type
def key(self):
return self.mKey
def value(self):
return self.mValue
def type(self):
return self.mType
def load(self, value, parent=None):
rootItem = QJsonTreeItem(parent)
rootItem.setKey("root")
jsonType = None
jsonType = value.__class__.__name__
if isinstance(value, dict):
# process the key/value pairs
for key in value:
v = value[key]
child = self.load(v, rootItem)
child.setKey(key)
child.setType(v.__class__.__name__)
rootItem.appendChild(child)
elif isinstance(value, list):
# process the values in the list
for i, v in enumerate(value):
child = self.load(v, rootItem)
child.setKey(str(i))
child.setType(v.__class__)
rootItem.appendChild(child)
else:
# value is processed
rootItem.setValue(value)
try:
rootItem.setType(value.type())
except AttributeError:
if jsonType is not None:
rootItem.setType(jsonType)
else:
rootItem.setType(value.__class__)
return rootItem
class QJsonModel(QAbstractItemModel):
def __init__(self, parent=None):
super().__init__(parent)
self.mRootItem = QJsonTreeItem()
self.mHeaders = ["key", "value", "type"]
def load(self, fileName):
if fileName is None or fileName is False:
return False
with open(fileName, "rb") as file:
if file is None:
return False
else:
jsonTxt = file.read()
self.loadJson(jsonTxt)
    def loadJson(self, json):
        error = QJsonParseError()
        # parse the raw JSON bytes and pass the resulting Python structure
        # (dict or list) on to loadDict()
        document = QJsonDocument.fromJson(json, error)
        return self.loadDict(document.toVariant())
def loadDict(self, dic):
self.mDocument = dic
if self.mDocument is not None:
self.beginResetModel()
if isinstance(self.mDocument, list):
                self.mRootItem = self.mRootItem.load(list(self.mDocument))
else:
self.mRootItem = self.mRootItem.load(self.mDocument)
self.endResetModel()
return True
# print("QJsonModel: error loading Json")
return False
def data(self, index: QModelIndex, role: int = ...):
if not index.isValid():
return None
item = index.internalPointer()
col = index.column()
if role == Qt.DisplayRole:
if col == 0:
return str(item.key())
elif col == 1:
return str(item.value())
elif col == 2:
return str(item.type())
return None
def headerData(self, section: int, orientation: Qt.Orientation, role: int = ...):
if role != Qt.DisplayRole:
return None
if orientation == Qt.Horizontal:
return self.mHeaders[section]
        return None
def index(self, row: int, column: int, parent: QModelIndex = ...):
if not self.hasIndex(row, column, parent):
return QModelIndex()
if not parent.isValid():
parentItem = self.mRootItem
else:
parentItem = parent.internalPointer()
try:
childItem = parentItem.child(row)
return self.createIndex(row, column, childItem)
except IndexError:
return QModelIndex()
def parent(self, index: QModelIndex):
if not index.isValid():
return QModelIndex()
childItem = index.internalPointer()
parentItem = childItem.parent()
if parentItem == self.mRootItem:
return QModelIndex()
return self.createIndex(parentItem.row(), 0, parentItem)
def rowCount(self, parent: QModelIndex = ...):
if parent.column() > 0:
return 0
if not parent.isValid():
parentItem = self.mRootItem
else:
parentItem = parent.internalPointer()
return parentItem.childCount()
def columnCount(self, parent: QModelIndex = ...):
return 3
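# Illustrative usage sketch (not part of the original file): load a plain
# Python dict into the model and display it in a tree view. QTreeView would
# additionally need to be imported from PySide2.QtWidgets.
#   if __name__ == "__main__":
#       app = QApplication(sys.argv)
#       model = QJsonModel()
#       model.loadDict({"name": "example", "values": [1, 2, 3]})
#       view = QTreeView()
#       view.setModel(model)
#       view.show()
#       sys.exit(app.exec_())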
| 24.810185 | 147 | 0.717485 | [
"Unlicense"
] | KOLANICH-tools/WindowsTelemetryViewer.py | WindowsTelemetryViewer/PyQtJsonModel.py | 5,359 | Python |
"""
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Ontology Engineering Group
http://www.oeg-upm.net/
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Copyright (C) 2016 Ontology Engineering Group.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
#-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=#
"""
import json
from setuptools import setup, find_packages
__author__ = 'Fernando Serena'
with open("kg_search/metadata.json", 'r') as stream:
metadata = json.load(stream)
setup(
name="kg-search",
version=metadata['version'],
author=metadata['author'],
author_email=metadata['email'],
description=metadata['description'],
license="Apache 2",
keywords=["knowledge graph", "wikidata"],
url=metadata['github'],
download_url="https://github.com/fserena/kg-search/tarball/{}".format(metadata['version']),
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
install_requires=['Flask', 'Flask-Cache', 'gunicorn', 'futures', 'requests', 'urllib3', 'rdflib==4.2.0',
'python-dateutil', 'pyld', 'rdflib-jsonld', 'shortuuid', 'wikipedia==1.4.0'],
classifiers=[],
package_dir={'kg_search': 'kg_search'},
package_data={'kg_search': ['metadata.json']},
scripts=['kg-search']
)
| 39.571429 | 108 | 0.581743 | [
"Apache-2.0"
] | fserena/kg-search | setup.py | 1,939 | Python |
"""
The primary specified return wavefunction quantities.
"""
result_wavefunction = {}
# Orbitals
result_wavefunction["orbitals_a"] = {
"type": "string",
"description": "Alpha-spin orbitals in the AO basis of the primary return. "
}
result_wavefunction["orbitals_b"] = {
"type": "string",
"description": "Beta-spin orbitals in the AO basis of the primary return."
}
# Density
result_wavefunction["density_a"] = {
"type": "string",
"description": "Alpha-spin density in the AO basis of the primary return."
}
result_wavefunction["density_b"] = {
"type": "string",
"description": "Beta-spin density in the AO basis of the primary return."
}
# Fock matrix
result_wavefunction["fock_a"] = {
"type": "string",
"description": "Alpha-spin Fock matrix in the AO basis of the primary return."
}
result_wavefunction["fock_b"] = {
"type": "string",
"description": "Beta-spin Fock matrix in the AO basis of the primary return."
}
# Eigenvalues
result_wavefunction["eigenvalues_a"] = {
"type": "string",
"description": "Alpha-spin orbital eigenvalues of the primary return."
}
result_wavefunction["eigenvalues_b"] = {
"type": "string",
"description": "Beta-spin orbital eigenvalues of the primary return."
}
# Occupations
result_wavefunction["occupations_a"] = {
"type": "string",
"description": "Alpha-spin orbital occupations of the primary return."
}
result_wavefunction["occupations_b"] = {
"type": "string",
"description": "Beta-spin orbital occupations of the primary return."
}
| 22.782609 | 82 | 0.683206 | [
"BSD-3-Clause"
] | MolSSI/QCSchema | qcschema/dev/wavefunction/result_wavefunction.py | 1,572 | Python |
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import uuid
from os_vif import objects as osv_objects
from oslo_config import cfg
from kuryr_kubernetes.cni.binding import base
from kuryr_kubernetes import objects
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests import fake
CONF = cfg.CONF
class TestDriverMixin(test_base.TestCase):
def setUp(self):
super(TestDriverMixin, self).setUp()
self.instance_info = osv_objects.instance_info.InstanceInfo(
uuid=uuid.uuid4(), name='foo')
self.ifname = 'c_interface'
self.netns = '/proc/netns/1234'
# Mock IPDB context managers
self.ipdbs = {}
self.m_bridge_iface = mock.Mock(__exit__=mock.Mock(return_value=None))
self.m_c_iface = mock.Mock()
self.m_h_iface = mock.Mock()
self.h_ipdb, self.h_ipdb_exit = self._mock_ipdb_context_manager(None)
self.c_ipdb, self.c_ipdb_exit = self._mock_ipdb_context_manager(
self.netns)
self.m_create = mock.Mock()
self.h_ipdb.create = mock.Mock(
return_value=mock.Mock(
__enter__=mock.Mock(return_value=self.m_create),
__exit__=mock.Mock(return_value=None)))
self.c_ipdb.create = mock.Mock(
return_value=mock.Mock(
__enter__=mock.Mock(return_value=self.m_create),
__exit__=mock.Mock(return_value=None)))
def _mock_ipdb_context_manager(self, netns):
mock_ipdb = mock.Mock(
interfaces={
'bridge': mock.Mock(
__enter__=mock.Mock(return_value=self.m_bridge_iface),
__exit__=mock.Mock(return_value=None),
),
'c_interface': mock.Mock(
__enter__=mock.Mock(return_value=self.m_c_iface),
__exit__=mock.Mock(return_value=None),
),
'h_interface': mock.Mock(
__enter__=mock.Mock(return_value=self.m_h_iface),
__exit__=mock.Mock(return_value=None),
),
}
)
mock_exit = mock.Mock(return_value=None)
mock_ipdb.__exit__ = mock_exit
mock_ipdb.__enter__ = mock.Mock(return_value=mock_ipdb)
self.ipdbs[netns] = mock_ipdb
return mock_ipdb, mock_exit
@mock.patch('kuryr_kubernetes.cni.binding.base.get_ipdb')
@mock.patch('os_vif.plug')
def _test_connect(self, m_vif_plug, m_get_ipdb, report=None):
def get_ipdb(netns=None):
return self.ipdbs[netns]
m_get_ipdb.side_effect = get_ipdb
base.connect(self.vif, self.instance_info, self.ifname, self.netns,
report)
m_vif_plug.assert_called_once_with(self.vif, self.instance_info)
self.m_c_iface.add_ip.assert_called_once_with('192.168.0.2/24')
if report:
report.assert_called_once()
@mock.patch('os_vif.unplug')
def _test_disconnect(self, m_vif_unplug, report=None):
base.disconnect(self.vif, self.instance_info, self.ifname, self.netns,
report)
m_vif_unplug.assert_called_once_with(self.vif, self.instance_info)
if report:
report.assert_called_once()
class TestOpenVSwitchDriver(TestDriverMixin, test_base.TestCase):
def setUp(self):
super(TestOpenVSwitchDriver, self).setUp()
self.vif = fake._fake_vif(osv_objects.vif.VIFOpenVSwitch)
@mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.'
'K8sCNIRegistryPlugin.report_drivers_health')
@mock.patch('os.getpid', mock.Mock(return_value=123))
@mock.patch('kuryr_kubernetes.linux_net_utils.create_ovs_vif_port')
def test_connect(self, mock_create_ovs, m_report):
self._test_connect(report=m_report)
self.assertEqual(3, self.h_ipdb_exit.call_count)
self.assertEqual(2, self.c_ipdb_exit.call_count)
self.c_ipdb.create.assert_called_once_with(
ifname=self.ifname, peer='h_interface', kind='veth')
self.assertEqual(1, self.m_create.mtu)
self.assertEqual(str(self.vif.address),
self.m_create.address)
self.m_create.up.assert_called_once_with()
self.assertEqual(123, self.m_h_iface.net_ns_pid)
self.assertEqual(1, self.m_h_iface.mtu)
self.m_h_iface.up.assert_called_once_with()
mock_create_ovs.assert_called_once_with(
'bridge', 'h_interface', '89eccd45-43e9-43d8-b4cc-4c13db13f782',
'3e:94:b7:31:a0:83', 'kuryr')
@mock.patch('kuryr_kubernetes.cni.plugins.k8s_cni_registry.'
'K8sCNIRegistryPlugin.report_drivers_health')
@mock.patch('kuryr_kubernetes.linux_net_utils.delete_ovs_vif_port')
def test_disconnect(self, mock_delete_ovs, m_report):
self._test_disconnect(report=m_report)
mock_delete_ovs.assert_called_once_with('bridge', 'h_interface')
class TestBridgeDriver(TestDriverMixin, test_base.TestCase):
def setUp(self):
super(TestBridgeDriver, self).setUp()
self.vif = fake._fake_vif(osv_objects.vif.VIFBridge)
@mock.patch('os.getpid', mock.Mock(return_value=123))
def test_connect(self):
self._test_connect()
self.m_h_iface.remove.assert_called_once_with()
self.assertEqual(3, self.h_ipdb_exit.call_count)
self.assertEqual(2, self.c_ipdb_exit.call_count)
self.c_ipdb.create.assert_called_once_with(
ifname=self.ifname, peer='h_interface', kind='veth')
self.assertEqual(1, self.m_create.mtu)
self.assertEqual(str(self.vif.address),
self.m_create.address)
self.m_create.up.assert_called_once_with()
self.assertEqual(123, self.m_h_iface.net_ns_pid)
self.assertEqual(1, self.m_h_iface.mtu)
self.m_h_iface.up.assert_called_once_with()
self.m_bridge_iface.add_port.assert_called_once_with('h_interface')
def test_disconnect(self):
self._test_disconnect()
class TestNestedVlanDriver(TestDriverMixin, test_base.TestCase):
def setUp(self):
super(TestNestedVlanDriver, self).setUp()
self.vif = fake._fake_vif(objects.vif.VIFVlanNested)
self.vif.vlan_id = 7
CONF.set_override('link_iface', 'bridge', group='binding')
self.addCleanup(CONF.clear_override, 'link_iface', group='binding')
def test_connect(self):
self._test_connect()
self.assertEqual(1, self.h_ipdb_exit.call_count)
self.assertEqual(2, self.c_ipdb_exit.call_count)
self.assertEqual(self.ifname, self.m_h_iface.ifname)
self.assertEqual(1, self.m_h_iface.mtu)
self.assertEqual(str(self.vif.address), self.m_h_iface.address)
self.m_h_iface.up.assert_called_once_with()
def test_disconnect(self):
self._test_disconnect()
class TestNestedMacvlanDriver(TestDriverMixin, test_base.TestCase):
def setUp(self):
super(TestNestedMacvlanDriver, self).setUp()
self.vif = fake._fake_vif(objects.vif.VIFMacvlanNested)
CONF.set_override('link_iface', 'bridge', group='binding')
self.addCleanup(CONF.clear_override, 'link_iface', group='binding')
def test_connect(self):
self._test_connect()
self.assertEqual(1, self.h_ipdb_exit.call_count)
self.assertEqual(2, self.c_ipdb_exit.call_count)
self.assertEqual(self.ifname, self.m_h_iface.ifname)
self.assertEqual(1, self.m_h_iface.mtu)
self.assertEqual(str(self.vif.address), self.m_h_iface.address)
self.m_h_iface.up.assert_called_once_with()
def test_disconnect(self):
self._test_disconnect()
class TestSriovDriver(TestDriverMixin, test_base.TestCase):
def setUp(self):
super(TestSriovDriver, self).setUp()
self.vif = fake._fake_vif(objects.vif.VIFSriov)
self.vif.physnet = 'test_physnet'
@mock.patch('kuryr_kubernetes.cni.binding.sriov.VIFSriovDriver.'
'_get_host_pf_names')
@mock.patch('kuryr_kubernetes.cni.binding.sriov.VIFSriovDriver.'
'_get_available_vf_info')
def test_connect(self, m_avail_vf_info, m_host_pf_names):
m_avail_vf_info.return_value = [self.ifname, 1, 'h_interface']
m_host_pf_names.return_value = 'h_interface'
self._test_connect()
self.assertEqual(self.ifname, self.m_c_iface.ifname)
self.assertEqual(1, self.m_c_iface.mtu)
self.assertEqual(str(self.vif.address), self.m_c_iface.address)
self.m_c_iface.up.assert_called_once_with()
def test_disconnect(self):
self._test_disconnect()
| 39.688034 | 78 | 0.676214 | [
"Apache-2.0"
] | dulek/kuryr-kubernetes | kuryr_kubernetes/tests/unit/cni/test_binding.py | 9,287 | Python |
# -*- coding: utf-8 -*- #
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command for creating VM instances running Docker images."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.compute import base_classes
from googlecloudsdk.api_lib.compute import containers_utils
from googlecloudsdk.api_lib.compute import image_utils
from googlecloudsdk.api_lib.compute import instance_utils
from googlecloudsdk.api_lib.compute import metadata_utils
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions
from googlecloudsdk.command_lib.compute import completers
from googlecloudsdk.command_lib.compute.instances import flags as instances_flags
from googlecloudsdk.command_lib.util.args import labels_util
from googlecloudsdk.core import log
from six.moves import zip
def _Args(parser, deprecate_maintenance_policy=False,
container_mount_enabled=False):
"""Add flags shared by all release tracks."""
parser.display_info.AddFormat(instances_flags.DEFAULT_LIST_FORMAT)
metadata_utils.AddMetadataArgs(parser)
instances_flags.AddDiskArgs(
parser, True, container_mount_enabled=container_mount_enabled)
instances_flags.AddCreateDiskArgs(
parser, container_mount_enabled=container_mount_enabled)
instances_flags.AddCanIpForwardArgs(parser)
instances_flags.AddAddressArgs(parser, instances=True)
instances_flags.AddMachineTypeArgs(parser)
instances_flags.AddMaintenancePolicyArgs(
parser, deprecate=deprecate_maintenance_policy)
instances_flags.AddNoRestartOnFailureArgs(parser)
instances_flags.AddPreemptibleVmArgs(parser)
instances_flags.AddServiceAccountAndScopeArgs(parser, False)
instances_flags.AddTagsArgs(parser)
instances_flags.AddCustomMachineTypeArgs(parser)
instances_flags.AddNetworkArgs(parser)
instances_flags.AddPrivateNetworkIpArgs(parser)
instances_flags.AddKonletArgs(parser)
instances_flags.AddPublicDnsArgs(parser, instance=True)
instances_flags.AddPublicPtrArgs(parser, instance=True)
instances_flags.AddImageArgs(parser)
labels_util.AddCreateLabelsFlags(parser)
parser.add_argument(
'--description',
help='Specifies a textual description of the instances.')
instances_flags.INSTANCES_ARG.AddArgument(parser, operation_type='create')
CreateWithContainer.SOURCE_INSTANCE_TEMPLATE = (
instances_flags.MakeSourceInstanceTemplateArg())
CreateWithContainer.SOURCE_INSTANCE_TEMPLATE.AddArgument(parser)
parser.display_info.AddCacheUpdater(completers.InstancesCompleter)
@base.ReleaseTracks(base.ReleaseTrack.GA)
class CreateWithContainer(base.CreateCommand):
"""Command for creating VM instances running container images."""
@staticmethod
def Args(parser):
"""Register parser args."""
_Args(parser)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.GA)
def _ValidateArgs(self, args):
instances_flags.ValidateNicFlags(args)
instances_flags.ValidateNetworkTierArgs(args)
instances_flags.ValidateKonletArgs(args)
instances_flags.ValidateDiskCommonFlags(args)
instances_flags.ValidateServiceAccountAndScopeArgs(args)
if instance_utils.UseExistingBootDisk(args.disk or []):
raise exceptions.InvalidArgumentException(
'--disk',
'Boot disk specified for containerized VM.')
def GetImageUri(self, args, client, holder, instance_refs):
if (args.IsSpecified('image') or args.IsSpecified('image_family') or
args.IsSpecified('image_project')):
image_expander = image_utils.ImageExpander(client, holder.resources)
image_uri, _ = image_expander.ExpandImageFlag(
user_project=instance_refs[0].project,
image=args.image,
image_family=args.image_family,
image_project=args.image_project)
if holder.resources.Parse(image_uri).project != 'cos-cloud':
log.warning('This container deployment mechanism requires a '
'Container-Optimized OS image in order to work. Select an '
                    'image from a cos-cloud project (cos-stable, cos-beta, '
'cos-dev image families).')
else:
image_uri = containers_utils.ExpandKonletCosImageFlag(client)
return image_uri
def _GetNetworkInterfaces(
self, args, client, holder, instance_refs, skip_defaults):
return instance_utils.GetNetworkInterfaces(args, client, holder,
instance_refs, skip_defaults)
def GetNetworkInterfaces(
self, args, resources, client, holder, instance_refs, skip_defaults):
if args.network_interface:
return instance_utils.CreateNetworkInterfaceMessages(
resources=resources,
compute_client=client,
network_interface_arg=args.network_interface,
instance_refs=instance_refs)
return self._GetNetworkInterfaces(
args, client, holder, instance_refs, skip_defaults)
def Run(self, args):
self._ValidateArgs(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
@base.ReleaseTracks(base.ReleaseTrack.BETA)
class CreateWithContainerBeta(CreateWithContainer):
"""Command for creating VM instances running container images."""
@staticmethod
def Args(parser):
"""Register parser args."""
_Args(parser, container_mount_enabled=True)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddContainerMountDiskFlag(parser)
instances_flags.AddLocalSsdArgsWithSize(parser)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)
def _ValidateArgs(self, args):
instances_flags.ValidateLocalSsdFlags(args)
super(CreateWithContainerBeta, self)._ValidateArgs(args)
def GetImageUri(self, args, client, holder, instance_refs):
if (args.IsSpecified('image') or args.IsSpecified('image_family') or
args.IsSpecified('image_project')):
image_expander = image_utils.ImageExpander(client, holder.resources)
image_uri, _ = image_expander.ExpandImageFlag(
user_project=instance_refs[0].project,
image=args.image,
image_family=args.image_family,
image_project=args.image_project)
if holder.resources.Parse(image_uri).project != 'cos-cloud':
log.warning('This container deployment mechanism requires a '
'Container-Optimized OS image in order to work. Select an '
                    'image from a cos-cloud project (cos-stable, cos-beta, '
'cos-dev image families).')
else:
image_uri = containers_utils.ExpandKonletCosImageFlag(client)
return image_uri
def Run(self, args):
self._ValidateArgs(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
container_mount_disk = instances_flags.GetValidatedContainerMountDisk(
holder,
args.container_mount_disk,
args.disk,
args.create_disk)
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata,
container_mount_disk_enabled=True,
container_mount_disk=container_mount_disk)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults, match_container_mount_disks=True)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
@base.ReleaseTracks(base.ReleaseTrack.ALPHA)
class CreateWithContainerAlpha(CreateWithContainerBeta):
"""Alpha version of compute instances create-with-container command."""
@staticmethod
def Args(parser):
_Args(parser, deprecate_maintenance_policy=True,
container_mount_enabled=True)
instances_flags.AddNetworkTierArgs(parser, instance=True)
instances_flags.AddContainerMountDiskFlag(parser)
instances_flags.AddLocalSsdArgsWithSize(parser)
instances_flags.AddLocalNvdimmArgs(parser)
instances_flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.ALPHA)
def _GetNetworkInterfaces(
self, args, client, holder, instance_refs, skip_defaults):
return instance_utils.GetNetworkInterfacesAlpha(
args, client, holder, instance_refs, skip_defaults)
def Run(self, args):
self._ValidateArgs(args)
instances_flags.ValidatePublicDnsFlags(args)
instances_flags.ValidatePublicPtrFlags(args)
holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
container_mount_disk = instances_flags.GetValidatedContainerMountDisk(
holder,
args.container_mount_disk,
args.disk,
args.create_disk)
client = holder.client
source_instance_template = instance_utils.GetSourceInstanceTemplate(
args, holder.resources, self.SOURCE_INSTANCE_TEMPLATE)
skip_defaults = instance_utils.GetSkipDefaults(source_instance_template)
scheduling = instance_utils.GetScheduling(args, client, skip_defaults)
service_accounts = instance_utils.GetServiceAccounts(
args, client, skip_defaults)
user_metadata = instance_utils.GetValidatedMetadata(args, client)
boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)
instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
network_interfaces = self.GetNetworkInterfaces(
args, holder.resources, client, holder, instance_refs, skip_defaults)
machine_type_uris = instance_utils.GetMachineTypeUris(
args, client, holder, instance_refs, skip_defaults)
image_uri = self.GetImageUri(args, client, holder, instance_refs)
labels = containers_utils.GetLabelsMessageWithCosVersion(
args.labels, image_uri, holder.resources, client.messages.Instance)
can_ip_forward = instance_utils.GetCanIpForward(args, skip_defaults)
tags = containers_utils.CreateTagsMessage(client.messages, args.tags)
requests = []
for instance_ref, machine_type_uri in zip(instance_refs, machine_type_uris):
metadata = containers_utils.CreateKonletMetadataMessage(
client.messages, args, instance_ref.Name(), user_metadata,
container_mount_disk_enabled=True,
container_mount_disk=container_mount_disk)
disks = instance_utils.CreateDiskMessages(
holder, args, boot_disk_size_gb, image_uri, instance_ref,
skip_defaults, match_container_mount_disks=True)
request = client.messages.ComputeInstancesInsertRequest(
instance=client.messages.Instance(
canIpForward=can_ip_forward,
disks=disks,
description=args.description,
labels=labels,
machineType=machine_type_uri,
metadata=metadata,
minCpuPlatform=args.min_cpu_platform,
name=instance_ref.Name(),
networkInterfaces=network_interfaces,
serviceAccounts=service_accounts,
scheduling=scheduling,
tags=tags),
sourceInstanceTemplate=source_instance_template,
project=instance_ref.project,
zone=instance_ref.zone)
requests.append((client.apitools_client.instances,
'Insert', request))
return client.MakeRequests(requests)
CreateWithContainer.detailed_help = {
'brief':
"""\
Creates Google Compute engine virtual machine instances running
container images.
""",
'DESCRIPTION':
"""\
*{command}* creates Google Compute Engine virtual
          machines that run a Docker image. For example:
$ {command} instance-1 --zone us-central1-a \
--container-image=gcr.io/google-containers/busybox
creates an instance called instance-1, in the us-central1-a zone,
running the 'busybox' image.
For more examples, refer to the *EXAMPLES* section below.
""",
'EXAMPLES':
"""\
To run the gcr.io/google-containers/busybox image on an instance named
'instance-1' that executes 'echo "Hello world"' as a run command, run:
$ {command} instance-1 \
--container-image=gcr.io/google-containers/busybox \
--container-command='echo "Hello world"'
To run the gcr.io/google-containers/busybox image in privileged mode,
run:
$ {command} instance-1 \
--container-image=gcr.io/google-containers/busybox
--container-privileged
"""
}
| 43.813433 | 81 | 0.732925 | [
"Apache-2.0"
] | bshaffer/google-cloud-sdk | lib/surface/compute/instances/create_with_container.py | 17,613 | Python |
"""Utility functions and classes
"""
import sys
import inspect
import warnings
import importlib.util
from enum import Enum
from pathlib import Path
from weakref import WeakSet
from collections import namedtuple
from functools import partial, wraps
from types import ModuleType, MethodType
from typing import Union, Callable, Optional, Mapping, Any, Dict, Tuple
import numpy as np
from numpy import random
from scipy import sparse
from anndata import AnnData, __version__ as anndata_version
from textwrap import dedent
from packaging import version
from ._settings import settings
from ._compat import Literal
from . import logging as logg
class Empty(Enum):
token = 0
_empty = Empty.token
# e.g. https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html
AnyRandom = Union[None, int, random.RandomState] # maybe in the future random.Generator
EPS = 1e-15
def check_versions():
from ._compat import pkg_version
umap_version = pkg_version("umap-learn")
if version.parse(anndata_version) < version.parse('0.6.10'):
from . import __version__
raise ImportError(
f'Scanpy {__version__} needs anndata version >=0.6.10, '
f'not {anndata_version}.\nRun `pip install anndata -U --no-deps`.'
)
if umap_version < version.parse('0.3.0'):
from . import __version__
# make this a warning, not an error
# it might be useful for people to still be able to run it
logg.warning(
f'Scanpy {__version__} needs umap ' f'version >=0.3.0, not {umap_version}.'
)
def getdoc(c_or_f: Union[Callable, type]) -> Optional[str]:
if getattr(c_or_f, '__doc__', None) is None:
return None
doc = inspect.getdoc(c_or_f)
if isinstance(c_or_f, type) and hasattr(c_or_f, '__init__'):
sig = inspect.signature(c_or_f.__init__)
else:
sig = inspect.signature(c_or_f)
def type_doc(name: str):
param: inspect.Parameter = sig.parameters[name]
cls = getattr(param.annotation, '__qualname__', repr(param.annotation))
if param.default is not param.empty:
return f'{cls}, optional (default: {param.default!r})'
else:
return cls
return '\n'.join(
f'{line} : {type_doc(line)}' if line.strip() in sig.parameters else line
for line in doc.split('\n')
)
def deprecated_arg_names(arg_mapping: Mapping[str, str]):
"""
Decorator which marks a functions keyword arguments as deprecated. It will
result in a warning being emitted when the deprecated keyword argument is
used, and the function being called with the new argument.
Parameters
----------
arg_mapping
Mapping from deprecated argument name to current argument name.
"""
def decorator(func):
@wraps(func)
def func_wrapper(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter
for old, new in arg_mapping.items():
if old in kwargs:
warnings.warn(
f"Keyword argument '{old}' has been "
f"deprecated in favour of '{new}'. "
f"'{old}' will be removed in a future version.",
category=DeprecationWarning,
stacklevel=2,
)
val = kwargs.pop(old)
kwargs[new] = val
# reset filter
warnings.simplefilter('default', DeprecationWarning)
return func(*args, **kwargs)
return func_wrapper
return decorator
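# Illustrative usage sketch (hypothetical function and argument names): calling
# the wrapped function with the old keyword emits a DeprecationWarning and the
# value is forwarded to the new keyword.
#   @deprecated_arg_names({"n_pcs": "n_comps"})
#   def reduce_dims(adata, n_comps=50):
#       return n_comps
#   reduce_dims(None, n_pcs=30)  # warns, then behaves like n_comps=30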
def _one_of_ours(obj, root: str):
return (
hasattr(obj, "__name__")
and not obj.__name__.split(".")[-1].startswith("_")
and getattr(
obj, '__module__', getattr(obj, '__qualname__', obj.__name__)
).startswith(root)
)
def descend_classes_and_funcs(mod: ModuleType, root: str, encountered=None):
if encountered is None:
encountered = WeakSet()
for obj in vars(mod).values():
if not _one_of_ours(obj, root):
continue
if callable(obj) and not isinstance(obj, MethodType):
yield obj
if isinstance(obj, type):
for m in vars(obj).values():
if callable(m) and _one_of_ours(m, root):
yield m
elif isinstance(obj, ModuleType) and obj not in encountered:
if obj.__name__.startswith('scanpy.tests'):
# Python’s import mechanism seems to add this to `scanpy`’s attributes
continue
encountered.add(obj)
yield from descend_classes_and_funcs(obj, root, encountered)
def annotate_doc_types(mod: ModuleType, root: str):
for c_or_f in descend_classes_and_funcs(mod, root):
c_or_f.getdoc = partial(getdoc, c_or_f)
def _doc_params(**kwds):
"""\
Docstrings should start with "\" in the first line for proper formatting.
"""
def dec(obj):
obj.__orig_doc__ = obj.__doc__
obj.__doc__ = dedent(obj.__doc__).format_map(kwds)
return obj
return dec
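# Illustrative usage sketch (hypothetical names): share one parameter
# description across several docstrings via str.format_map.
#   adata_doc = "adata\n    Annotated data matrix."
#   @_doc_params(adata=adata_doc)
#   def my_tool(adata):
#       """\
#       Parameters
#       ----------
#       {adata}
#       """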
def _check_array_function_arguments(**kwargs):
"""Checks for invalid arguments when an array is passed.
Helper for functions that work on either AnnData objects or array-likes.
"""
# TODO: Figure out a better solution for documenting dispatched functions
invalid_args = [k for k, v in kwargs.items() if v is not None]
if len(invalid_args) > 0:
raise TypeError(
f"Arguments {invalid_args} are only valid if an AnnData object is passed."
)
def _check_use_raw(adata: AnnData, use_raw: Union[None, bool]) -> bool:
"""
Normalize checking `use_raw`.
    My intention here is also to provide a single place to throw a deprecation warning from in the future.
"""
if use_raw is not None:
return use_raw
else:
if adata.raw is not None:
return True
else:
return False
# --------------------------------------------------------------------------------
# Graph stuff
# --------------------------------------------------------------------------------
def get_igraph_from_adjacency(adjacency, directed=None):
"""Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
    except Exception:
        # igraph can refuse the weight assignment, e.g. for a graph without edges
        pass
if g.vcount() != adjacency.shape[0]:
logg.warning(
f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.'
)
return g
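# Illustrative sketch of converting a small sparse adjacency matrix (values
# made up for demonstration) into an igraph graph:
#
#     from scipy.sparse import csr_matrix
#     adj = csr_matrix([[0.0, 0.5, 0.0],
#                       [0.5, 0.0, 1.0],
#                       [0.0, 1.0, 0.0]])
#     g = get_igraph_from_adjacency(adj, directed=False)
#     # one vertex per row/column, one edge per nonzero entry,
#     # weights stored in g.es['weight']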
def get_sparse_from_igraph(graph, weight_attr=None):
from scipy.sparse import csr_matrix
edges = graph.get_edgelist()
if weight_attr is None:
weights = [1] * len(edges)
else:
weights = graph.es[weight_attr]
if not graph.is_directed():
edges.extend([(v, u) for u, v in edges])
weights.extend(weights)
shape = graph.vcount()
shape = (shape, shape)
if len(edges) > 0:
return csr_matrix((weights, zip(*edges)), shape=shape)
else:
return csr_matrix(shape)
# --------------------------------------------------------------------------------
# Group stuff
# --------------------------------------------------------------------------------
def compute_association_matrix_of_groups(
adata: AnnData,
prediction: str,
reference: str,
normalization: Literal['prediction', 'reference'] = 'prediction',
threshold: float = 0.01,
max_n_names: Optional[int] = 2,
):
"""Compute overlaps between groups.
See ``identify_groups`` for identifying the groups.
Parameters
----------
adata
prediction
Field name of adata.obs.
reference
Field name of adata.obs.
normalization
Whether to normalize with respect to the predicted groups or the
reference groups.
threshold
Do not consider associations whose overlap is below this fraction.
max_n_names
Control how many reference names you want to be associated with per
predicted name. Set to `None`, if you want all.
Returns
-------
asso_names
List of associated reference names
(`max_n_names` for each predicted name).
asso_matrix
Matrix where rows correspond to the predicted labels and columns to the
reference labels, entries are proportional to degree of association.
"""
if normalization not in {'prediction', 'reference'}:
raise ValueError(
'`normalization` needs to be either "prediction" or "reference".'
)
sanitize_anndata(adata)
cats = adata.obs[reference].cat.categories
for cat in cats:
if cat in settings.categories_to_ignore:
logg.info(
f'Ignoring category {cat!r} '
'as it’s in `settings.categories_to_ignore`.'
)
asso_names = []
asso_matrix = []
for ipred_group, pred_group in enumerate(adata.obs[prediction].cat.categories):
if '?' in pred_group:
pred_group = str(ipred_group)
# starting from numpy version 1.13, subtractions of boolean arrays are deprecated
mask_pred = adata.obs[prediction].values == pred_group
mask_pred_int = mask_pred.astype(np.int8)
asso_matrix += [[]]
for ref_group in adata.obs[reference].cat.categories:
mask_ref = (adata.obs[reference].values == ref_group).astype(np.int8)
mask_ref_or_pred = mask_ref.copy()
mask_ref_or_pred[mask_pred] = 1
# e.g. if the pred group is contained in mask_ref, mask_ref and
# mask_ref_or_pred are the same
if normalization == 'prediction':
# compute which fraction of the predicted group is contained in
# the ref group
ratio_contained = (
np.sum(mask_pred_int) - np.sum(mask_ref_or_pred - mask_ref)
) / np.sum(mask_pred_int)
else:
# compute which fraction of the reference group is contained in
# the predicted group
ratio_contained = (
np.sum(mask_ref) - np.sum(mask_ref_or_pred - mask_pred_int)
) / np.sum(mask_ref)
asso_matrix[-1] += [ratio_contained]
name_list_pred = [
cats[i] if cats[i] not in settings.categories_to_ignore else ''
for i in np.argsort(asso_matrix[-1])[::-1]
if asso_matrix[-1][i] > threshold
]
asso_names += ['\n'.join(name_list_pred[:max_n_names])]
Result = namedtuple(
'compute_association_matrix_of_groups', ['asso_names', 'asso_matrix']
)
return Result(asso_names=asso_names, asso_matrix=np.array(asso_matrix))
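# Illustrative call sketch; assumes ``adata.obs`` holds categorical columns
# named 'leiden' and 'cell_type' (both column names are made up for the example):
#
#     asso_names, asso_matrix = compute_association_matrix_of_groups(
#         adata, prediction='leiden', reference='cell_type',
#         normalization='prediction', threshold=0.01, max_n_names=2,
#     )
#     # asso_names[i] lists the reference names overlapping predicted group i,
#     # asso_matrix[i, j] quantifies the association between the two groupings.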
def get_associated_colors_of_groups(reference_colors, asso_matrix):
return [
{
reference_colors[i_ref]: asso_matrix[i_pred, i_ref]
for i_ref in range(asso_matrix.shape[1])
}
for i_pred in range(asso_matrix.shape[0])
]
def identify_groups(ref_labels, pred_labels, return_overlaps=False):
"""Which predicted label explains which reference label?
A predicted label explains the reference label which maximizes the minimum
of ``relative_overlaps_pred`` and ``relative_overlaps_ref``.
Compare this with ``compute_association_matrix_of_groups``.
Returns
-------
A dictionary of length ``len(np.unique(ref_labels))`` that stores for each
reference label the predicted label that best explains it.
If ``return_overlaps`` is ``True``, this will in addition return the overlap
of the reference group with the predicted group; normalized with respect to
the reference group size and the predicted group size, respectively.
"""
ref_unique, ref_counts = np.unique(ref_labels, return_counts=True)
ref_dict = dict(zip(ref_unique, ref_counts))
pred_unique, pred_counts = np.unique(pred_labels, return_counts=True)
pred_dict = dict(zip(pred_unique, pred_counts))
associated_predictions = {}
associated_overlaps = {}
for ref_label in ref_unique:
sub_pred_unique, sub_pred_counts = np.unique(
pred_labels[ref_label == ref_labels], return_counts=True
)
relative_overlaps_pred = [
sub_pred_counts[i] / pred_dict[n] for i, n in enumerate(sub_pred_unique)
]
relative_overlaps_ref = [
sub_pred_counts[i] / ref_dict[ref_label]
for i, n in enumerate(sub_pred_unique)
]
relative_overlaps = np.c_[relative_overlaps_pred, relative_overlaps_ref]
relative_overlaps_min = np.min(relative_overlaps, axis=1)
pred_best_index = np.argsort(relative_overlaps_min)[::-1]
associated_predictions[ref_label] = sub_pred_unique[pred_best_index]
associated_overlaps[ref_label] = relative_overlaps[pred_best_index]
if return_overlaps:
return associated_predictions, associated_overlaps
else:
return associated_predictions
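# Minimal sketch on toy label arrays (values made up for illustration):
#
#     ref = np.array(['B', 'B', 'T', 'T', 'T'])
#     pred = np.array([0, 0, 1, 1, 0])
#     identify_groups(ref, pred)
#     # returns a dict mapping each reference label to the predicted labels
#     # ordered by how well they explain it (here 'T' is best explained by 1).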
# --------------------------------------------------------------------------------
# Other stuff
# --------------------------------------------------------------------------------
# backwards compat... remove this in the future
def sanitize_anndata(adata):
"""Transform string annotations to categoricals."""
adata._sanitize()
def view_to_actual(adata):
if adata.is_view:
warnings.warn(
"Revieved a view of an AnnData. Making a copy.",
stacklevel=2,
)
adata._init_as_actual(adata.copy())
def moving_average(a: np.ndarray, n: int):
"""Moving average over one-dimensional array.
Parameters
----------
a
One-dimensional array.
n
        Number of entries to average over. n=2 means averaging over the current
        and the previous entry.
Returns
-------
An array view storing the moving average.
"""
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
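# Quick behaviour sketch on a toy array (values made up):
#
#     moving_average(np.array([1.0, 2.0, 3.0, 4.0]), n=2)
#     # -> array([1.5, 2.5, 3.5]); each entry averages the current and the
#     # previous value, so the output is n - 1 elements shorter than the input.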
# --------------------------------------------------------------------------------
# Deal with tool parameters
# --------------------------------------------------------------------------------
def update_params(
old_params: Mapping[str, Any],
new_params: Mapping[str, Any],
check=False,
) -> Dict[str, Any]:
"""\
Update old_params with new_params.
If check==False, this merely adds and overwrites the content of old_params.
If check==True, this only allows updating of parameters that are already
present in old_params.
Parameters
----------
old_params
new_params
check
Returns
-------
updated_params
"""
updated_params = dict(old_params)
if new_params: # allow for new_params to be None
for key, val in new_params.items():
if key not in old_params and check:
raise ValueError(
'\''
+ key
+ '\' is not a valid parameter key, '
+ 'consider one of \n'
+ str(list(old_params.keys()))
)
if val is not None:
updated_params[key] = val
return updated_params
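# Illustrative sketch (parameter names below are made up):
#
#     defaults = {'resolution': 1.0, 'n_iterations': -1}
#     update_params(defaults, {'resolution': 0.5})
#     # -> {'resolution': 0.5, 'n_iterations': -1}
#     update_params(defaults, {'unknown_key': 3}, check=True)
#     # -> raises ValueError listing the valid parameter keys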
# --------------------------------------------------------------------------------
# Others
# --------------------------------------------------------------------------------
def check_nonnegative_integers(X: Union[np.ndarray, sparse.spmatrix]):
"""Checks values of X to ensure it is count data"""
from numbers import Integral
data = X if isinstance(X, np.ndarray) else X.data
# Check no negatives
if np.signbit(data).any():
return False
# Check all are integers
elif issubclass(data.dtype.type, Integral):
return True
elif np.any(~np.equal(np.mod(data, 1), 0)):
return False
else:
return True
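# Behaviour sketch on toy inputs (values made up for illustration):
#
#     check_nonnegative_integers(np.array([0, 1, 2]))    # True
#     check_nonnegative_integers(np.array([0.5, 1.0]))   # False (non-integer values)
#     check_nonnegative_integers(np.array([-1, 2]))      # False (negative values)
#     # Sparse matrices are checked through their ``.data`` attribute.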
def select_groups(adata, groups_order_subset='all', key='groups'):
"""Get subset of groups in adata.obs[key]."""
groups_order = adata.obs[key].cat.categories
if key + '_masks' in adata.uns:
groups_masks = adata.uns[key + '_masks']
else:
groups_masks = np.zeros(
(len(adata.obs[key].cat.categories), adata.obs[key].values.size), dtype=bool
)
for iname, name in enumerate(adata.obs[key].cat.categories):
# if the name is not found, fallback to index retrieval
if adata.obs[key].cat.categories[iname] in adata.obs[key].values:
mask = adata.obs[key].cat.categories[iname] == adata.obs[key].values
else:
mask = str(iname) == adata.obs[key].values
groups_masks[iname] = mask
groups_ids = list(range(len(groups_order)))
if groups_order_subset != 'all':
groups_ids = []
for name in groups_order_subset:
groups_ids.append(
np.where(adata.obs[key].cat.categories.values == name)[0][0]
)
if len(groups_ids) == 0:
# fallback to index retrieval
groups_ids = np.where(
np.in1d(
np.arange(len(adata.obs[key].cat.categories)).astype(str),
np.array(groups_order_subset),
)
)[0]
if len(groups_ids) == 0:
logg.debug(
f'{np.array(groups_order_subset)} invalid! specify valid '
f'groups_order (or indices) from {adata.obs[key].cat.categories}',
)
from sys import exit
exit(0)
groups_masks = groups_masks[groups_ids]
groups_order_subset = adata.obs[key].cat.categories[groups_ids].values
else:
groups_order_subset = groups_order.values
return groups_order_subset, groups_masks
def warn_with_traceback(message, category, filename, lineno, file=None, line=None):
"""Get full tracebacks when warning is raised by setting
warnings.showwarning = warn_with_traceback
See also
--------
http://stackoverflow.com/questions/22373927/get-traceback-of-warnings
"""
import traceback
traceback.print_stack()
log = file if hasattr(file, 'write') else sys.stderr
settings.write(warnings.formatwarning(message, category, filename, lineno, line))
def subsample(
X: np.ndarray,
subsample: int = 1,
seed: int = 0,
) -> Tuple[np.ndarray, np.ndarray]:
"""\
Subsample a fraction of 1/subsample samples from the rows of X.
Parameters
----------
X
Data array.
subsample
1/subsample is the fraction of data sampled, n = X.shape[0]/subsample.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if subsample == 1 and seed == 0:
return X, np.arange(X.shape[0], dtype=int)
if seed == 0:
# this sequence is defined simply by skipping rows
# is faster than sampling
rows = np.arange(0, X.shape[0], subsample, dtype=int)
n = rows.size
Xsampled = np.array(X[rows])
else:
if seed < 0:
raise ValueError(f'Invalid seed value < 0: {seed}')
n = int(X.shape[0] / subsample)
np.random.seed(seed)
Xsampled, rows = subsample_n(X, n=n)
logg.debug(f'... subsampled to {n} of {X.shape[0]} data points')
return Xsampled, rows
def subsample_n(
X: np.ndarray, n: int = 0, seed: int = 0
) -> Tuple[np.ndarray, np.ndarray]:
"""Subsample n samples from rows of array.
Parameters
----------
X
Data array.
n
Sample size.
seed
Seed for sampling.
Returns
-------
Xsampled
Subsampled X.
rows
Indices of rows that are stored in Xsampled.
"""
if n < 0:
        raise ValueError('n must not be negative')
np.random.seed(seed)
n = X.shape[0] if (n == 0 or n > X.shape[0]) else n
rows = np.random.choice(X.shape[0], size=n, replace=False)
Xsampled = X[rows]
return Xsampled, rows
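# Usage sketch (toy array; seed chosen arbitrarily):
#
#     X = np.arange(20).reshape(10, 2)
#     Xs, rows = subsample(X, subsample=2)     # deterministic: every 2nd row
#     Xs, rows = subsample_n(X, n=3, seed=1)   # 3 rows drawn at random
#     # `rows` holds the indices of the rows kept in `Xs`.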
def check_presence_download(filename: Path, backup_url):
"""Check if file is present otherwise download."""
if not filename.is_file():
from .readwrite import _download
_download(backup_url, filename)
def lazy_import(full_name):
"""Imports a module in a way that it’s only executed on member access"""
try:
return sys.modules[full_name]
except KeyError:
spec = importlib.util.find_spec(full_name)
module = importlib.util.module_from_spec(spec)
loader = importlib.util.LazyLoader(spec.loader)
# Make module with proper locking and get it inserted into sys.modules.
loader.exec_module(module)
return module
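# Usage sketch (the module name below is only an example):
#
#     np_lazy = lazy_import('numpy')
#     # the module body is executed only on first attribute access:
#     np_lazy.zeros(3)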
# --------------------------------------------------------------------------------
# Neighbors
# --------------------------------------------------------------------------------
def _fallback_to_uns(dct, conns, dists, conns_key, dists_key):
if conns is None and conns_key in dct:
conns = dct[conns_key]
if dists is None and dists_key in dct:
dists = dct[dists_key]
return conns, dists
class NeighborsView:
"""Convenience class for accessing neighbors graph representations.
Allows to access neighbors distances, connectivities and settings
dictionary in a uniform manner.
Parameters
----------
adata
AnnData object.
key
This defines where to look for neighbors dictionary,
connectivities, distances.
neigh = NeighborsView(adata, key)
neigh['distances']
neigh['connectivities']
neigh['params']
'connectivities' in neigh
'params' in neigh
is the same as
adata.obsp[adata.uns[key]['distances_key']]
adata.obsp[adata.uns[key]['connectivities_key']]
adata.uns[key]['params']
adata.uns[key]['connectivities_key'] in adata.obsp
'params' in adata.uns[key]
"""
def __init__(self, adata, key=None):
self._connectivities = None
self._distances = None
if key is None or key == 'neighbors':
if 'neighbors' not in adata.uns:
raise KeyError('No "neighbors" in .uns')
self._neighbors_dict = adata.uns['neighbors']
self._conns_key = 'connectivities'
self._dists_key = 'distances'
else:
if key not in adata.uns:
raise KeyError(f'No "{key}" in .uns')
self._neighbors_dict = adata.uns[key]
self._conns_key = self._neighbors_dict['connectivities_key']
self._dists_key = self._neighbors_dict['distances_key']
if self._conns_key in adata.obsp:
self._connectivities = adata.obsp[self._conns_key]
if self._dists_key in adata.obsp:
self._distances = adata.obsp[self._dists_key]
# fallback to uns
self._connectivities, self._distances = _fallback_to_uns(
self._neighbors_dict,
self._connectivities,
self._distances,
self._conns_key,
self._dists_key,
)
def __getitem__(self, key):
if key == 'distances':
if 'distances' not in self:
raise KeyError(f'No "{self._dists_key}" in .obsp')
return self._distances
elif key == 'connectivities':
if 'connectivities' not in self:
raise KeyError(f'No "{self._conns_key}" in .obsp')
return self._connectivities
else:
return self._neighbors_dict[key]
def __contains__(self, key):
if key == 'distances':
return self._distances is not None
elif key == 'connectivities':
return self._connectivities is not None
else:
return key in self._neighbors_dict
def _choose_graph(adata, obsp, neighbors_key):
"""Choose connectivities from neighbbors or another obsp column"""
if obsp is not None and neighbors_key is not None:
raise ValueError(
            "You can't specify both `obsp` and `neighbors_key`. Please select only one."
)
if obsp is not None:
return adata.obsp[obsp]
else:
neighbors = NeighborsView(adata, neighbors_key)
if 'connectivities' not in neighbors:
raise ValueError(
'You need to run `pp.neighbors` first '
'to compute a neighborhood graph.'
)
return neighbors['connectivities']
| 32.636719 | 105 | 0.596848 | [
"BSD-3-Clause"
] | VolkerBergen/scanpy | scanpy/_utils.py | 25,073 | Python |
# -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import operator
import pytest
import numpy as np
import pandas as pd
from pandas.compat.numpy import np_datetime64_compat
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning, NullFrequencyError
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
from pandas._libs import tslib
from pandas._libs.tslibs.offsets import shift_months
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexComparisons(object):
@pytest.mark.parametrize('other', [datetime(2016, 1, 1),
Timestamp('2016-01-01'),
np.datetime64('2016-01-01')])
def test_dti_cmp_datetimelike(self, other, tz):
dti = pd.date_range('2016-01-01', periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
elif isinstance(other, Timestamp):
other = other.tz_localize(dti.tzinfo)
else:
other = tslib._localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
def dti_cmp_non_datetime(self, tz):
# GH#19301 by convention datetime.date is not considered comparable
# to Timestamp or DatetimeIndex. This may change in the future.
dti = pd.date_range('2016-01-01', periods=2, tz=tz)
other = datetime(2016, 1, 1).date()
assert not (dti == other).any()
assert (dti != other).all()
with pytest.raises(TypeError):
dti < other
with pytest.raises(TypeError):
dti <= other
with pytest.raises(TypeError):
dti > other
with pytest.raises(TypeError):
dti >= other
@pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
def test_dti_eq_null_scalar(self, other, tz):
# GH#19301
dti = pd.date_range('2016-01-01', periods=2, tz=tz)
assert not (dti == other).any()
@pytest.mark.parametrize('other', [None, np.nan, pd.NaT])
def test_dti_ne_null_scalar(self, other, tz):
# GH#19301
dti = pd.date_range('2016-01-01', periods=2, tz=tz)
assert (dti != other).all()
@pytest.mark.parametrize('other', [None, np.nan])
def test_dti_cmp_null_scalar_inequality(self, tz, other):
# GH#19301
dti = pd.date_range('2016-01-01', periods=2, tz=tz)
with pytest.raises(TypeError):
dti < other
with pytest.raises(TypeError):
dti <= other
with pytest.raises(TypeError):
dti > other
with pytest.raises(TypeError):
dti >= other
def test_dti_cmp_nat(self):
left = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT,
pd.Timestamp('2011-01-03')])
right = pd.DatetimeIndex([pd.NaT, pd.NaT, pd.Timestamp('2011-01-03')])
for lhs, rhs in [(left, right),
(left.astype(object), right.astype(object))]:
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
didx2 = pd.DatetimeIndex(['2014-02-01', '2014-03-01', pd.NaT, pd.NaT,
'2014-06-01', '2014-07-01'])
darr = np.array([np_datetime64_compat('2014-02-01 00:00Z'),
np_datetime64_compat('2014-03-01 00:00Z'),
np_datetime64_compat('nat'), np.datetime64('nat'),
np_datetime64_compat('2014-06-01 00:00Z'),
np_datetime64_compat('2014-07-01 00:00Z')])
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, pd.NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
        # Check pd.NaT is handled the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize('op', [operator.eq, operator.ne,
operator.gt, operator.ge,
operator.lt, operator.le])
def test_comparison_tzawareness_compat(self, op):
# GH#18162
dr = pd.date_range('2016-01-01', periods=6)
dz = dr.tz_localize('US/Pacific')
with pytest.raises(TypeError):
op(dr, dz)
with pytest.raises(TypeError):
op(dr, list(dz))
with pytest.raises(TypeError):
op(dz, dr)
with pytest.raises(TypeError):
op(dz, list(dr))
        # Check that aware-aware and naive-naive comparisons do not raise
assert (dr == dr).all()
assert (dr == list(dr)).all()
assert (dz == dz).all()
assert (dz == list(dz)).all()
# Check comparisons against scalar Timestamps
ts = pd.Timestamp('2000-03-14 01:59')
ts_tz = pd.Timestamp('2000-03-14 01:59', tz='Europe/Amsterdam')
assert (dr > ts).all()
with pytest.raises(TypeError):
op(dr, ts_tz)
assert (dz > ts_tz).all()
with pytest.raises(TypeError):
op(dz, ts)
@pytest.mark.parametrize('op', [operator.eq, operator.ne,
operator.gt, operator.ge,
operator.lt, operator.le])
def test_nat_comparison_tzawareness(self, op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
dti = pd.DatetimeIndex(['2014-01-01', pd.NaT, '2014-03-01', pd.NaT,
'2014-05-01', '2014-07-01'])
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, pd.NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize('US/Pacific'), pd.NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_int_raises(self):
rng = date_range('1/1/2000', periods=10)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
def test_dti_cmp_list(self):
rng = date_range('1/1/2000', periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# DatetimeIndex.shift is used in integer addition
def test_dti_shift_tzaware(self, tz):
# GH#9903
idx = pd.DatetimeIndex([], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
tm.assert_index_equal(idx.shift(3, freq='H'), idx)
        idx = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01 11:00',
                                '2011-01-01 12:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(0, freq='H'), idx)
        exp = pd.DatetimeIndex(['2011-01-01 13:00', '2011-01-01 14:00',
                                '2011-01-01 15:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(3, freq='H'), exp)
        exp = pd.DatetimeIndex(['2011-01-01 07:00', '2011-01-01 08:00',
                                '2011-01-01 09:00'], name='xxx', tz=tz)
tm.assert_index_equal(idx.shift(-3, freq='H'), exp)
def test_dti_shift_freqs(self):
# test shift for DatetimeIndex and non DatetimeIndex
# GH#8083
drange = pd.date_range('20130101', periods=5)
result = drange.shift(1)
expected = pd.DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = pd.DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
tm.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = pd.DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
tm.assert_index_equal(result, expected)
def test_dti_shift_int(self):
rng = date_range('1/1/2000', periods=20)
result = rng + 5
expected = rng.shift(5)
tm.assert_index_equal(result, expected)
result = rng - 5
expected = rng.shift(-5)
tm.assert_index_equal(result, expected)
def test_dti_shift_no_freq(self):
# GH#19147
dti = pd.DatetimeIndex(['2011-01-01 10:00', '2011-01-01'], freq=None)
with pytest.raises(NullFrequencyError):
dti.shift(2)
@pytest.mark.parametrize('tzstr', ['US/Eastern', 'dateutil/US/Eastern'])
def test_dti_shift_localized(self, tzstr):
dr = date_range('2011/1/1', '2012/1/1', freq='W-FRI')
dr_tz = dr.tz_localize(tzstr)
result = dr_tz.shift(1, '10T')
assert result.tz == dr_tz.tz
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('freq', [None, 'D'])
def test_sub_period(self, freq):
# GH#13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('names', [('foo', None, None),
('baz', 'bar', None),
('bar', 'bar', 'bar')])
@pytest.mark.parametrize('tz', [None, 'America/Chicago'])
def test_dti_add_series(self, tz, names):
# GH#13905
index = DatetimeIndex(['2016-06-28 05:30', '2016-06-28 05:31'],
tz=tz, name=names[0])
ser = Series([Timedelta(seconds=5)] * 2,
index=index, name=names[1])
expected = Series(index + Timedelta(seconds=5),
index=index, name=names[2])
# passing name arg isn't enough when names[2] is None
expected.name = names[2]
assert expected.dtype == index.dtype
result = ser + index
tm.assert_series_equal(result, expected)
result2 = index + ser
tm.assert_series_equal(result2, expected)
expected = index + Timedelta(seconds=5)
result3 = ser.values + index
tm.assert_index_equal(result3, expected)
result4 = index + ser.values
tm.assert_index_equal(result4, expected)
def test_dti_add_offset_array(self, tz):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_add_offset_index(self, tz, names):
# GH#18849, GH#19744
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=names[2], freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
def test_dti_sub_offset_array(self, tz):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_sub_offset_index(self, tz, names):
# GH#18824, GH#19744
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = pd.Index([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=names[2], freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
def test_dti_add_offset_tzaware(self):
dates = date_range('2012-11-01', periods=3, tz='US/Pacific')
offset = dates + pd.offsets.Hour(5)
assert dates[0] + pd.offsets.Hour(5) == offset[0]
# GH#6818
for tz in ['UTC', 'US/Pacific', 'Asia/Tokyo']:
dates = date_range('2010-11-01 00:00', periods=3, tz=tz, freq='H')
expected = DatetimeIndex(['2010-11-01 05:00', '2010-11-01 06:00',
'2010-11-01 07:00'], freq='H', tz=tz)
offset = dates + pd.offsets.Hour(5)
tm.assert_index_equal(offset, expected)
offset = dates + np.timedelta64(5, 'h')
tm.assert_index_equal(offset, expected)
offset = dates + timedelta(hours=5)
tm.assert_index_equal(offset, expected)
@pytest.mark.parametrize('klass,assert_func', [
(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_offset_array(klass, assert_func):
# GH#10699
# array of offsets
box = Series if klass is Series else pd.Index
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + box([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')])
assert_func(result, exp)
# same offset
result = s + box([pd.offsets.DateOffset(years=1),
pd.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
@pytest.mark.parametrize('klass,assert_func', [
(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_DateOffsets_relativedelta(klass, assert_func):
# GH#10699
vec = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31'),
Timestamp('2000-05-15'),
Timestamp('2001-06-15')])
# DateOffset relativedelta fastpath
relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
('hours', 5), ('minutes', 10), ('seconds', 2),
('microseconds', 5)]
for i, kwd in enumerate(relative_kwargs):
op = pd.DateOffset(**dict([kwd]))
assert_func(klass([x + op for x in vec]), vec + op)
assert_func(klass([x - op for x in vec]), vec - op)
op = pd.DateOffset(**dict(relative_kwargs[:i + 1]))
assert_func(klass([x + op for x in vec]), vec + op)
assert_func(klass([x - op for x in vec]), vec - op)
@pytest.mark.parametrize('cls_and_kwargs', [
'YearBegin', ('YearBegin', {'month': 5}),
'YearEnd', ('YearEnd', {'month': 5}),
'MonthBegin', 'MonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'Week', ('Week', {'weekday': 3}),
'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
'CustomBusinessDay', 'CDay', 'CBMonthEnd',
'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
'BusinessHour', 'BYearBegin', 'BYearEnd',
'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
('FY5253Quarter', {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 2,
'variation': 'nearest'}),
('FY5253', {'weekday': 0, 'startingMonth': 2, 'variation': 'nearest'}),
('WeekOfMonth', {'weekday': 2, 'week': 2}),
'Easter', ('DateOffset', {'day': 4}),
('DateOffset', {'month': 5})])
@pytest.mark.parametrize('normalize', [True, False])
@pytest.mark.parametrize('klass,assert_func', [
(Series, tm.assert_series_equal),
(DatetimeIndex, tm.assert_index_equal)])
def test_dt64_with_DateOffsets(klass, assert_func, normalize, cls_and_kwargs):
# GH#10699
# assert these are equal on a piecewise basis
vec = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31'),
Timestamp('2000-05-15'),
Timestamp('2001-06-15')])
if isinstance(cls_and_kwargs, tuple):
# If cls_name param is a tuple, then 2nd entry is kwargs for
# the offset constructor
cls_name, kwargs = cls_and_kwargs
else:
cls_name = cls_and_kwargs
kwargs = {}
offset_cls = getattr(pd.offsets, cls_name)
with warnings.catch_warnings(record=True):
for n in [0, 5]:
if (cls_name in ['WeekOfMonth', 'LastWeekOfMonth',
'FY5253Quarter', 'FY5253'] and n == 0):
# passing n = 0 is invalid for these offset classes
continue
offset = offset_cls(n, normalize=normalize, **kwargs)
assert_func(klass([x + offset for x in vec]), vec + offset)
assert_func(klass([x - offset for x in vec]), vec - offset)
assert_func(klass([offset + x for x in vec]), offset + vec)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
@pytest.mark.parametrize('years', [-1, 0, 1])
@pytest.mark.parametrize('months', [-2, 0, 2])
def test_shift_months(years, months):
s = DatetimeIndex([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31')])
actual = DatetimeIndex(shift_months(s.asi8, years * 12 + months))
raw = [x + pd.offsets.DateOffset(years=years, months=months)
for x in s]
expected = DatetimeIndex(raw)
tm.assert_index_equal(actual, expected)
| 40.126904 | 79 | 0.559089 | [
"BSD-3-Clause"
] | wla80/pandas | pandas/tests/indexes/datetimes/test_arithmetic.py | 39,525 | Python |
def user_display_name(user):
"""
Returns the preferred display name for the given user object: the result of
user.get_full_name() if implemented and non-empty, or user.get_username() otherwise.
"""
try:
full_name = user.get_full_name().strip()
if full_name:
return full_name
except AttributeError:
pass
try:
return user.get_username()
except AttributeError:
# we were passed None or something else that isn't a valid user object; return
# empty string to replicate the behaviour of {{ user.get_full_name|default:user.get_username }}
return ''
| 33.894737 | 103 | 0.661491 | [
"BSD-3-Clause"
] | icanbwell/wagtail-review | wagtail_review/text.py | 644 | Python |
#
# Tencent is pleased to support the open source community by making Angel available.
#
# Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific language governing permissions and
# limitations under the License.
#
from enum import Enum, unique
@unique
class RunningMode(Enum):
"""
Enum for running mode
"""
# Run ParameterServer & ParameterServerAgent
ANGEL_PS_PSAGENT = 0
# Only Run ParameterServer
ANGEL_PS = 1
# Run ParameterServer & Worker(embedded ParameterServerAgent)
ANGEL_PS_WORKER = 2
| 31.967742 | 102 | 0.744702 | [
"Apache-2.0",
"BSD-3-Clause"
] | 20100507/angel | angel-ps/python/build/lib/pyangel/running_mode.py | 991 | Python |
#!/usr/bin/env python2
# OpenPOWER Automated Test Project
#
# Contributors Listed Below - COPYRIGHT 2015,2017
# [+] International Business Machines Corp.
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
# This test runs FWTS and munges the JSON report output into
# python unittest.TestCase objects, so we get the individual
# failure/successes into the TestResult output (e.g. junit XML)
import time
import subprocess
import re
import sys
import os
import OpTestConfiguration
import unittest
from common.OpTestSystem import OpSystemState
from common.Exceptions import CommandFailed
import json
class FWTSCommandFailed(unittest.TestCase):
FAIL = None
def runTest(self):
self.assertEqual(self.FAIL, None, str(self.FAIL))
class FWTSVersion(unittest.TestCase):
MAJOR = None
MINOR = None
def version_check(self):
if self.MAJOR is None and self.MINOR is None:
self.skipTest("Test not meant to be run this way.")
return (self.MAJOR == 17 and self.MINOR >=1) or self.MAJOR > 17
def runTest(self):
self.assertTrue(self.version_check(),
'FWTS must be at least Version 17.01'
)
class FWTSTest(unittest.TestCase):
SUBTEST_RESULT = None
CENTAURS_PRESENT = True
IS_FSP_SYSTEM = False
FWTS_MAJOR_VERSION = None
FWTS_MINOR_VERSION = None
def runTest(self):
if self.SUBTEST_RESULT is None:
self.skipTest("Test not meant to be run this way.")
if self.SUBTEST_RESULT.get('log_text') == 'dtc reports warnings from device tree:Warning (reg_format): "reg" property in /ibm,opal/flash@0 has invalid length (8 bytes) (#address-cells == 0, #size-cells == 0)\n':
self.skipTest('/ibm,opal/flash@0 known warning')
        # Some FWTS versions barfed (incorrectly) on missing nodes
# in the device tree. If we spot this, skip the test
# this work-around should be removed when the FWTS version readily
# available from the archives no longer has this problem
if not (self.SUBTEST_RESULT.get('failure_label') == 'None'):
log_text = self.SUBTEST_RESULT.get('log_text')
if re.match('Property of "(status|manufacturer-id|part-number|serial-number)" for "/sys/firmware/devicetree/base/memory-buffer' , log_text):
self.skipTest("FWTS bug: Incorrect Missing '(status|manufacturer-id|part-number|serial-number)' property in memory-buffer/dimm");
if re.match('property "serial-number" contains unprintable characters', log_text):
self.skipTest("FWTS bug: DIMM VPD has binary serial number")
if self.FWTS_MAJOR_VERSION <= 18:
# This is a guess based on when
# https://lists.ubuntu.com/archives/fwts-devel/2018-April/010318.html
# will be merged
if self.FWTS_MAJOR_VERSION < 18 or self.FWTS_MINOR_VERSION < 5:
if re.match('CPUFreqClaimedMax', self.SUBTEST_RESULT.get('failure_label')):
self.skipTest("Bug in FWTS r.e. boost frequencies, fixed sometime after April 2018")
# On FSP machines, memory-buffers (centaurs) aren't present in DT
# and FWTS 17.03 (at least) expects them to be, so skip those failures
if not self.CENTAURS_PRESENT and re.match('No MEM devices \(memory-buffer', log_text):
self.skipTest("FWTS assumes Centaurs present on FSP systems")
if self.IS_FSP_SYSTEM and re.match('Property of "(board-info|part-number|serial-number|vendor|ibm,slot-location-code)" for "/sys/firmware/devicetree/base/xscom@.*" was not able to be retrieved. Check the installation for the CPU device config for missing nodes in the device tree if you expect CPU devices', log_text):
self.skipTest("FWTS assumes some nodes present on FSP systems which aren't")
if re.match('Attempt was made to stop the opal-prd.service but was not successful', log_text):
self.skipTest("FWTS bug: prd did actually stop, and there's something strange with FWTS")
if re.match('OPAL "/ibm,firmware-versions" firmware version from device tree node "open-power" was not found', log_text):
self.skipTest("FWTS known issue: 'open-power' version no longer required")
# We currently guess that all these are going to be merged for FWTS 18.05 :)
# To be extra cautious, allowing them to fail for all of 18.XX though
if self.FWTS_MAJOR_VERSION <= 18:
if re.match('CPUPstateLimitsTestFail', self.SUBTEST_RESULT.get('failure_label')):
self.skipTest("FWTS known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010315.html")
if re.match('DeviceTreeBaseDTCWarnings', self.SUBTEST_RESULT.get('failure_label')):
self.skipTest("FWTS known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010326.html")
if re.match('Property of "(board-info|vendor|ibm,slot-location-code)" for "/sys/firmware/devicetree/base/xscom.*" was not able to be retrieved. Check the installation for the CPU device config for missing nodes in the device tree if you expect CPU devices.', log_text):
self.skipTest("FWTS/firmware known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010329.html")
if re.match('No MEM DIMM devices \(memory-buffer\) were found in "/sys/firmware/devicetree/base" with a status of "okay" or "ok". This is unexpected so please check your system setup for issues.', log_text):
self.skipTest("FWTS/firmware known issue: https://lists.ubuntu.com/archives/fwts-devel/2018-April/010330.html")
self.assertEqual(self.SUBTEST_RESULT.get('failure_label'), 'None', self.SUBTEST_RESULT)
class FWTS(unittest.TestSuite):
def add_fwts_results(self, major_version, minor_version):
host = self.cv_HOST
try:
fwtsjson = host.host_run_command('fwts -q -r stdout --log-type=json')
except CommandFailed as cf:
# FWTS will have exit code of 1 if any test fails,
# we want to ignore that and parse the output.
fwtsjson = cf.output
if cf.exitcode not in [0, 1]:
command_failed = FWTSCommandFailed()
command_failed.FAIL = cf
self.real_fwts_suite.addTest(command_failed)
r = json.loads('\n'.join(fwtsjson), encoding='latin-1')
tests = []
for fwts in r['fwts']:
for k in fwts:
if k == "tests":
tests = fwts[k]
for test_container in tests:
for tr in test_container:
js_suite = test_container[tr][0]
js_subtests = test_container[tr][1]
suite = unittest.TestSuite()
for sts in js_subtests:
if sts == "subtests":
for subtest in js_subtests[sts]:
for st_info in subtest['subtest']:
if not st_info.get('subtest_results'):
continue
for st_result in st_info.get('subtest_results'):
t = FWTSTest()
t.SUBTEST_RESULT = st_result
t.CENTAURS_PRESENT = self.centaurs_present
t.FWTS_MAJOR_VERSION = major_version
t.FWTS_MINOR_VERSION = minor_version
if self.bmc_type == 'FSP':
t.IS_FSP_SYSTEM = True
suite.addTest(t)
self.real_fwts_suite.addTest(suite)
def run(self, result):
conf = OpTestConfiguration.conf
self.cv_HOST = conf.host()
self.cv_SYSTEM = conf.system()
self.bmc_type = conf.args.bmc_type
self.real_fwts_suite = unittest.TestSuite()
try:
self.cv_SYSTEM.goto_state(OpSystemState.OS)
except Exception as e:
# In the event of something going wrong during IPL,
# We need to catch that here as we're abusing UnitTest
# TestSuite infra and we don't have the catch-all that
# a TestCase provides.
f = FWTSCommandFailed()
f.FAIL = e
self.real_fwts_suite.addTest(f)
self.real_fwts_suite.run(result)
return
self.centaurs_present = self.cv_SYSTEM.has_centaurs_in_dt()
host = self.cv_HOST
fwts_version = None
try:
fwts_version = host.host_run_command('fwts --version')
except CommandFailed as cf:
command_failed = FWTSCommandFailed()
command_failed.FAIL = cf
self.real_fwts_suite.addTest(command_failed)
if fwts_version:
# We want to ensure we're at least at version 17.01
# which means we need to parse this:
# fwts, Version V17.01.00, 2017-01-19 04:20:38
            v = re.search(r"fwts, Version V(\d+)\.(\d+)", ''.join(fwts_version))
            major, minor = int(v.group(1)), int(v.group(2))
checkver = FWTSVersion()
checkver.MAJOR = major
checkver.MINOR = minor
self.real_fwts_suite.addTest(checkver)
if checkver.version_check():
self.add_fwts_results(int(major),int(minor))
self.real_fwts_suite.run(result)
| 49.617647 | 330 | 0.625766 | [
"Apache-2.0"
] | jk-ozlabs/op-test-framework | testcases/FWTS.py | 10,122 | Python |
all=['optvaedatasets','optvaemodels','optvaeutils']
| 26 | 51 | 0.769231 | [
"MIT"
] | rahulk90/inference_introspection | __init__.py | 52 | Python |
"""
My Data My Consent - Developer API
Unleashing the power of data consent by establishing trust. The Platform Core Developer API defines a set of capabilities that can be used to request, issue, manage and update data, documents and credentials by organizations. The API can be used to request, manage and update Decentralised Identifiers, Financial Data, Health Data issue Documents, Credentials directly or using OpenID Connect flows, and verify Messages signed with DIDs and much more. # noqa: E501
The version of the OpenAPI document: v1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import com.mydatamyconsent
from com.mydatamyconsent.model.data_protection_officer import DataProtectionOfficer
class TestDataProtectionOfficer(unittest.TestCase):
"""DataProtectionOfficer unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testDataProtectionOfficer(self):
"""Test DataProtectionOfficer"""
# FIXME: construct object with mandatory attributes with example values
# model = DataProtectionOfficer() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 33.783784 | 469 | 0.7448 | [
"Apache-2.0"
] | My-Data-My-Consent/python-sdk | test/test_data_protection_officer.py | 1,250 | Python |
from .encoder import CKKSEncoder # noqa: F401
from .encrypter import CKKSEncrypter # noqa: F401
from .plaintext import CKKSPlaintext # noqa: F401
| 37.25 | 50 | 0.778523 | [
"MIT"
] | Koukyosyumei/AIJack | src/aijack/defense/ckks/__init__.py | 149 | Python |
"""Base classes for an Ambianic Edge device abstraction"""
from pydantic import BaseModel, Field
class DeviceInfo(BaseModel):
version: str = Field(None, description="Ambianic Edge software version.")
display_name: str = Field(
None, description="User friendly display name for this device."
)
notifications_enabled: bool = Field(
False, description="Indicates whether device notifications are enabled."
)
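# A minimal usage sketch (the field values below are only illustrative):
#   info = DeviceInfo(version="1.2.3", display_name="kitchen-pi",
#                     notifications_enabled=True)
#   info.json()  # standard pydantic serialization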
| 34.076923 | 80 | 0.722348 | [
"Apache-2.0"
] | ambianic/ambianic | src/ambianic/device.py | 443 | Python |
#!/usr/bin/env python
##############################################################
# $Id$
# Project: MiSeq Metagenomic Assembly pipeline for Nephele project
# Language: Python 2.7
# Author: Alex Levitsky
# History: July 2015 Start of development
##############################################################
__author__ = "Alex Levitsky"
__copyright__ = ""
__credits__ = ["Alex Levitsky"]
__license__ = ""
__version__ = "1.0.1-dev"
__maintainer__ = "Alex Levitsky"
__email__ = "[email protected]"
__status__ = "Development"
import sys, os, random, time, glob
syscall = lambda cmd: (os.popen(cmd).read()).rstrip("\n")
def read_config( file_name, config ): #########################
config_file=open( file_name, 'r')
l=[]
for line in config_file:
if("" == line): # check for end of file
break
s=line.rstrip("\n")
s.strip()
if("" == s): # ignore empty lines
continue
if("#"==s[:1]): # ignore comments
continue
del l[:] # clear list
l=s.split(',')
config[l[0]]=l[1]
config_file.close()
### read_config ###
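# Illustrative config file contents for read_config() above -- comma-separated
# key,value lines, with '#' marking comments (the file name values here are
# placeholders only):
#   INPUT_TYPE,FASTQ_FILES
#   ZIP_FILE,reads.zip
#   BLAST_STEP,YES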
def send2log( message, log_file ): #######################
date = syscall("TZ='America/New_York' date")
os.system( "echo >> "+log_file)
if 0!=os.system( "echo '"+date+' '+message+"' >>"+log_file):
sys.exit(777)
### send2log ###
def exec_sys(cmd): #######################
#print >> sys.stderr, "Executing:",cmd
if 0!=os.system(cmd):
print >> sys.stderr, "ERROR when executing:",cmd
sys.exit(777)
### exec_sys ###
########### main ##############################
def main():
if len( sys.argv ) < 2:
print >> sys.stderr, "\n\n\nUsage: " + sys.argv[0] + " <configuration file>\n\n\n"
sys.exit(551)
# Read config file
conf_file = sys.argv[1]
if not os.path.isfile( conf_file ):
print >> sys.stderr, "ERROR: no config file:" + conf_file
sys.exit(555)
config = {}
read_config( conf_file,config )
work_dir=os.getcwd()
config['LOG_FILE']='logfile.txt'
log_file=work_dir+'/'+config['LOG_FILE']
##### Define optional and default parameters
for key in ['INPUT_TYPE', 'R1', 'R2', 'ZIP_FILE', 'LIB_FILE', 'BLAST_STEP','PREFIX']:
if(key not in config.keys()):
config[key]=''
##### Predefined and calculated options
if(''==config['LIB_FILE']):
config['INPUT_TYPE']='FASTQ_FILES'
if(''==config['PREFIX']):
config['PREFIX']='MiSEQ_metagenomic'
if(''==config['BLAST_STEP']):
config['BLAST_STEP']='YES'
send2log( 'MiSeq Metagenomic Assembly pipeline started', log_file )
# get env.json if available
if os.path.isfile('./env.json'):
send2log( 'env.json=', log_file )
syscall( 'cat ./env.json >> '+log_file)
# get number of cores
config['NUM_OF_PROC']=syscall('cat /proc/cpuinfo | grep processor | wc -l')
num_proc=int(config['NUM_OF_PROC'])
if(num_proc > 1):
num_proc-=1
config['NUM_OF_PROC']=str(num_proc)
send2log( 'number of cores='+config['NUM_OF_PROC'], log_file )
# get machine's memory
config['MEMORY']=syscall("cat /proc/meminfo | grep MemTotal | awk '{ print $2 }'")
mem=int(config['MEMORY'])
send2log( 'Memory='+config['MEMORY']+'KB', log_file )
w="MiSeq Metagenomic Assembly pipeline configuration\n"
for k in sorted(config.keys()):
if 'UseCode'==k:
continue
config[k]=config[k].replace("\"", "_")
config[k]=config[k].replace("\'", "_")
w=w+k+','+config[k]+"\n"
# print configuration to log file
send2log( w, log_file )
####################################################
os.chdir(work_dir)
# unzip reads
if os.path.isfile(work_dir+'/'+config['ZIP_FILE']):
# check files extension
w=''
if config['ZIP_FILE'][-4:]=='.zip':
send2log( 'unzip -oqj '+config['ZIP_FILE'], log_file )
w=syscall('unzip -oqj '+config['ZIP_FILE'])
send2log( w, log_file )
if (config['ZIP_FILE'][-7:]=='.tar.gz') or (config['ZIP_FILE'][-4:]=='.tgz'):
send2log( 'tar -zxvf '+config['ZIP_FILE'], log_file )
w=syscall('tar -zxvf '+config['ZIP_FILE'])
send2log( w, log_file )
if config['ZIP_FILE'][-8:]=='.tar.bz2':
send2log( 'tar -jxvf '+config['ZIP_FILE'], log_file )
w=syscall('tar -jxvf '+config['ZIP_FILE'])
send2log( w, log_file )
# unzip gzip files if any
w=''
w=syscall('ls *.gz')
if len(w)>3:
send2log( 'running gzip -d for *.gz files', log_file )
w=''
w=syscall('gzip -d *.gz')
else:
send2log( "ERROR: no zip archive with reads. Can not continue\n", log_file)
sys.exit(777)
if 'FASTQ_FILES'==config['INPUT_TYPE']:
# check reads files
w=''
w=syscall('ls *.fastq')
if len(w)<3:
w=''
w=syscall('ls *.fq')
if len(w)<3:
send2log( "ERROR: no reads files. Can not continue\n", log_file)
sys.exit(777)
l=[]
l=w.split('\n')
config['R1']=l[0]
config['R2']=l[1]
if not( os.path.exists(work_dir+'/'+config['R1']) and os.path.exists(work_dir+'/'+config['R2']) ):
send2log( "ERROR: no reads files. Can not continue\n", log_file)
sys.exit(777)
cmd='./bin/a5_pipeline.pl '+'--threads='+config['NUM_OF_PROC']+' --end=5 '+config['R1']+' '+config['R2']+' '+config['PREFIX']
send2log( "Running pipeline:\n"+cmd, log_file )
w=''
w=syscall( cmd+' 2>&1' )
send2log( w, log_file )
else:
if os.path.isfile(work_dir+'/'+config['LIB_FILE']):
send2log("contents of LIB file:", log_file)
syscall( 'cat '+config['LIB_FILE']+ ' >> ' +log_file)
send2log("\n", log_file)
else:
send2log( "ERROR: no LIB file. Can not continue\n", log_file)
sys.exit(777)
#cmd='./bin/a5_pipeline.pl '+config['LIB_FILE']+' '+config['PREFIX']
cmd='/opt/a5/bin/a5_pipeline.pl '+'--threads='+config['NUM_OF_PROC']+' --end=5 '+config['LIB_FILE']+' '+config['PREFIX']
send2log( "Running pipeline: \n"+cmd, log_file )
w=''
w=syscall( cmd+' 2>&1' )
send2log( w, log_file )
if 'YES'==config['BLAST_STEP']:
cmd ='./blast2nr.sh '+config['PREFIX']+' '+config['NUM_OF_PROC']
send2log( 'Executing:'+cmd, log_file)
w=syscall(cmd)
send2log( w, log_file )
send2log( 'MiSeq Metagenomic Assembly pipeline DONE',log_file )
if __name__ == "__main__":
main()
| 33.515464 | 131 | 0.557521 | [
"Unlicense"
] | niaid/Nephele | Pipes/pipeline-scripts/NGOPT/ngopt.py | 6,502 | Python |
from django.shortcuts import render
from django.http import HttpResponseRedirect
# <HINT> Import any new Models here
from .models import Course, Enrollment, Question, Choice, Submission , Lesson
from django.contrib.auth.models import User
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.views import generic
from django.contrib.auth import login, logout, authenticate
import logging
# Get an instance of a logger
logger = logging.getLogger(__name__)
# Create your views here.
def registration_request(request):
context = {}
if request.method == 'GET':
return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
elif request.method == 'POST':
# Check if user exists
username = request.POST['username']
password = request.POST['psw']
first_name = request.POST['firstname']
last_name = request.POST['lastname']
user_exist = False
try:
User.objects.get(username=username)
user_exist = True
        except User.DoesNotExist:
logger.error("New user")
if not user_exist:
user = User.objects.create_user(username=username, first_name=first_name, last_name=last_name,
password=password)
login(request, user)
return redirect("onlinecourse:index")
else:
context['message'] = "User already exists."
return render(request, 'onlinecourse/user_registration_bootstrap.html', context)
def login_request(request):
context = {}
if request.method == "POST":
username = request.POST['username']
password = request.POST['psw']
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
return redirect('onlinecourse:index')
else:
context['message'] = "Invalid username or password."
return render(request, 'onlinecourse/user_login_bootstrap.html', context)
else:
return render(request, 'onlinecourse/user_login_bootstrap.html', context)
def logout_request(request):
logout(request)
return redirect('onlinecourse:index')
def check_if_enrolled(user, course):
is_enrolled = False
if user.id is not None:
# Check if user enrolled
num_results = Enrollment.objects.filter(user=user, course=course).count()
if num_results > 0:
is_enrolled = True
return is_enrolled
# CourseListView
class CourseListView(generic.ListView):
template_name = 'onlinecourse/course_list_bootstrap.html'
context_object_name = 'course_list'
def get_queryset(self):
user = self.request.user
courses = Course.objects.order_by('-total_enrollment')[:10]
for course in courses:
if user.is_authenticated:
course.is_enrolled = check_if_enrolled(user, course)
return courses
class CourseDetailView(generic.DetailView):
model = Course
template_name = 'onlinecourse/course_detail_bootstrap.html'
def enroll(request, course_id):
course = get_object_or_404(Course, pk=course_id)
user = request.user
is_enrolled = check_if_enrolled(user, course)
if not is_enrolled and user.is_authenticated:
# Create an enrollment
Enrollment.objects.create(user=user, course=course, mode='honor')
course.total_enrollment += 1
course.save()
return HttpResponseRedirect(reverse(viewname='onlinecourse:course_details', args=(course.id,)))
# <HINT> Create a submit view to create an exam submission record for a course enrollment,
# you may implement it based on following logic:
# Get user and course object, then get the associated enrollment object created when the user enrolled the course
# Create a submission object referring to the enrollment
# Collect the selected choices from exam form
# Add each selected choice object to the submission object
# Redirect to show_exam_result with the submission id
# <HINT> A example method to collect the selected choices from the exam form from the request object
def extract_answers(request):
submitted_anwsers = []
for key in request.POST:
if key.startswith('choice'):
value = request.POST[key]
choice_id = int(value)
submitted_anwsers.append(choice_id)
return submitted_anwsers
def submit(request, course_id):
user = request.user
course = Course.objects.get(pk=course_id)
enrollment = Enrollment.objects.get(user=user, course=course)
submitted_anwsers = extract_answers(request)
submission = Submission.objects.create(enrollment=enrollment)
submission.chocies.set(submitted_anwsers)
print(submission)
return HttpResponseRedirect(reverse(viewname='onlinecourse:result', args=(course_id, submission.chocies.first().question.lesson.pk, submission.pk)))
# <HINT> Create an exam result view to check if learner passed exam and show their question results and result for each question,
# you may implement it based on the following logic:
# Get course and submission based on their ids
# Get the selected choice ids from the submission record
# For each selected choice, check if it is a correct answer or not
# Calculate the total score
def show_exam_result(request, course_id, lesson_id, submission_id):
from django.db.models import Sum
course = Course.objects.get(pk=course_id)
submission = Submission.objects.get(pk=submission_id)
selected_choices = submission.chocies.all()
lesson = Lesson.objects.get(pk=lesson_id)
questions = lesson.question_set.all()
total_mark = round(lesson.question_set.all().aggregate(Sum("grade"))["grade__sum"])
grade = 0
for question in questions:
if question.is_get_score(selected_choices):
grade += question.grade
ctx = {
'grade': round(grade),
'total_mark': total_mark,
'questions': questions,
'lesson': lesson,
'selected_choices': selected_choices,
}
return render(request , 'onlinecourse/exam_result_bootstrap.html' , ctx) | 36.654971 | 153 | 0.690651 | [
"Apache-2.0"
] | Givindu98/Givindu-Final-Cloud-App-With-Database | onlinecourse/views.py | 6,268 | Python |
import os
import numpy as np
import rasterio
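# Reclassification helpers used below: codes 1-5 collapse into a single class 1
# (forest, per the variable name) and codes 12-20 collapse into class 21
# (aggregated agriculture); all other codes pass through unchanged.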
aggregate_forest = np.vectorize(lambda x: np.where(0 < x < 6, 1, x))
aggregate_agriculture = np.vectorize(lambda x: np.where(11 < x < 21, 21, x))
for dirs, subdirs, files in os.walk('../output/ceara/'):
for file in files:
wp_raster = rasterio.open('../output/ceara/' + file)
file_name = file.replace('id_', '')
wp_id = int(file_name.replace('.tif', ''))
out_raster_temp = aggregate_forest(wp_raster.read(range(1, 34)))
out_raster = aggregate_agriculture(out_raster_temp)
out_raster = out_raster.astype('uint8')
out_meta = wp_raster.meta
with rasterio.open('../output/ceara_agg_v2/' + 'agg_v2_id_' + str(wp_id) + '.tif', 'w', **out_meta) as raster:
raster.write(out_raster)
| 33.541667 | 118 | 0.643478 | [
"MIT"
] | olga-turkovska/2019-egu-poster | scripts/2-aggregate-land-cover.py | 805 | Python |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
'''
In this file, we define the classes that live inside 'worker 0', the worker
responsible for orchestration and aggregation. The main class is the
OptimizationServer, which sends clients to the other workers to process and
combines the resulting models.
'''
import json
import logging
import os
import random
import shutil
import time
from collections import defaultdict
import numpy as np
import torch
# Internal imports
from core.globals import TRAINING_FRAMEWORK_TYPE
if TRAINING_FRAMEWORK_TYPE == 'mpi':
import core.federated as federated
else:
raise NotImplementedError('{} is not supported'.format(TRAINING_FRAMEWORK_TYPE))
from core.evaluation import Evaluation
from core.client import Client
from .strategies import select_strategy
from .trainer import (
ModelUpdater,
Trainer,
set_component_wise_lr,
)
from utils import (
get_lr,
print_rank,
update_json_log,
)
# For profiling
import cProfile
import pstats
# AzureML-related libs
from azureml.core import Run
run = Run.get_context()
class OptimizationServer(federated.Server):
def __init__(self, num_clients, model, optimizer, ss_scheduler, data_path, model_path, train_dataloader,
val_dataloader, test_dataloader, config, config_server):
'''Implement Server's orchestration and aggregation.
This is the main Server class, that actually implements orchestration
and aggregation, inheriting from `federated.Server`, which deals with
communication only.
        The `train` method is central in FLUTE, as it defines a good part of what
happens during training.
Args:
num_clients (int): total available clients.
model (torch.nn.Module): neural network model.
optimizer (torch.optim.Optimizer): optimizer.
ss_scheduler: scheduled sampling scheduler.
data_path (str): points to where data is.
model_path (str): points to where pretrained model is.
train_dataloader (torch.utils.data.DataLoader): dataloader for training
val_dataloader (torch.utils.data.DataLoader): dataloader for validation
test_dataloader (torch.utils.data.DataLoader): dataloader for test, can be None
config (dict): JSON style configuration parameters
config_server: deprecated, kept for API compatibility only.
'''
super().__init__()
# Initialize all attributes from arguments
self.client_idx_list = list(range(num_clients))
self.config = config
server_config = config['server_config']
decoder_config = config.get('decoder_config', None)
self.max_iteration = server_config['max_iteration']
self.do_clustering = server_config.get('clustering', False)
self.num_clients_per_iteration = [int(x) for x in server_config['num_clients_per_iteration'].split(',')] \
if isinstance(server_config['num_clients_per_iteration'], str) \
else [server_config['num_clients_per_iteration']]
self.val_freq = server_config['val_freq']
self.req_freq = server_config['rec_freq']
self.evaluation = Evaluation(config, model_path, self.process_testvalidate, val_dataloader, test_dataloader)
# TODO: does this need to be adjusted for custom metrics?
self.metrics = {
'best_val_loss': float('inf'),
'best_val_acc': 0.0,
'best_test_loss': float('inf'),
'best_test_acc': 0.0
}
self.model_backup_freq = server_config.get('model_backup_freq', 100)
self.worker_trainer_config = server_config.get('trainer_config', {})
self.aggregate_median = server_config['aggregate_median']
self.initial_lr_client = server_config.get('initial_lr_client', -1.0)
self.lr_decay_factor = server_config.get('lr_decay_factor', 1.0)
self.model_type = config['model_config']['model_type']
self.quant_thresh = config['client_config'].get('quant_thresh', None)
self.quant_bits = config['client_config'].get('quant_bits', 10)
self.list_of_train_data = config['client_config']['data_config']['train']['list_of_train_data']
self.data_path = data_path
# Get max grad norm from data config
if 'train' in server_config['data_config']:
max_grad_norm = server_config['data_config']['train'].get('max_grad_norm', None)
else:
max_grad_norm = None
# Creating an instance to update the model with stats aggregated from workers
self.worker_trainer = ModelUpdater(
model=model,
optimizer=optimizer,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader if train_dataloader is not None else val_dataloader,
val_dataloader=val_dataloader,
max_grad_norm=max_grad_norm,
anneal_config=server_config['annealing_config'],
model_type=self.model_type,
decoder_config=decoder_config
)
self.metrics['worker_trainer'] = self.worker_trainer
# Creating an instance for the server-side trainer (runs mini-batch SGD)
self.server_replay_iterations = None
self.server_trainer = None
if train_dataloader is not None:
assert 'server_replay_config' in server_config, 'server_replay_config is not set'
assert 'optimizer_config' in server_config[
'server_replay_config'], 'server-side replay training optimizer is not set'
self.server_optimizer_config = server_config['server_replay_config']['optimizer_config']
self.server_trainer_config = server_config['server_replay_config'].get('trainer_config', {})
self.server_replay_iterations = server_config['server_replay_config']['server_iterations']
self.server_trainer = Trainer(
model=model,
optimizer=None,
ss_scheduler=ss_scheduler,
train_dataloader=train_dataloader,
server_replay_config=server_config['server_replay_config'],
val_dataloader=None,
max_grad_norm=server_config['server_replay_config']\
.get('max_grad_norm',server_config['data_config']['train']\
.get('max_grad_norm',None)),
anneal_config=server_config['server_replay_config'].get('annealing_config', None)
)
self.skip_model_update = False # will not update the model if True
self.train_loss = 0.0
self.model_path = model_path
self.best_model_criterion = server_config['best_model_criterion']
self.fall_back_to_best_model = server_config['fall_back_to_best_model']
self.last_model_path = os.path.join(self.model_path, 'latest_model.tar')
self.best_model_path = os.path.join(self.model_path,
'best_val_{}_model.tar'.format(self.best_model_criterion))
self.log_path = os.path.join(self.model_path, 'status_log.json')
self.cur_iter_no = 0 # keep the iteration number for Tensor board plotting
self.lr_weight = 1.0
self.losses = []
self.no_label_updates = 0 # no. label updates
# Update the parameters above if the log file
if server_config.get('resume_from_checkpoint', False):
self.load_saved_status()
# Decoding config
self.decoder_config = decoder_config
self.spm_model = server_config['data_config']['test'].get('spm_model', None)
self.do_profiling = server_config.get('do_profiling', False)
# Parallel processing
self.clients_in_parallel = config['client_config'].get('clients_in_parallel', None)
StrategyClass = select_strategy(config['strategy'])
self.strategy = StrategyClass('server', self.config, self.model_path)
print_rank(f'Server successfully instantiated strategy {self.strategy}', loglevel=logging.DEBUG)
def load_saved_status(self):
'''Load checkpoint from disk'''
# Check if model is on disk, if so loads it onto trainer
if os.path.exists(self.last_model_path):
print_rank('Resuming from checkpoint model {}'.format(self.last_model_path))
self.worker_trainer.load(self.last_model_path, update_lr_scheduler=True, update_ss_scheduler=True)
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
# Check if log is on disk, if so loads it onto current stats
if os.path.exists(self.log_path):
with open(self.log_path, 'r') as logfp: # loading the iteration no., best loss and CER
elems = json.load(logfp)
self.cur_iter_no = elems.get('i', 0)
self.metrics['best_val_loss'] = elems.get('best_val_loss', float('inf'))
self.metrics['best_val_acc'] = elems.get('best_val_acc', 0)
self.metrics['best_test_loss'] = elems.get('best_test_loss', float('inf'))
self.metrics['best_test_acc'] = elems.get('best_test_acc', 0)
self.lr_weight = elems.get('weight', 1.0)
self.no_label_updates = elems.get('num_label_updates', 0)
print_rank(f'Resuming from status_log: cur_iter: {self.cur_iter_no}')
def run(self):
'''Trigger training.
This is a simple wrapper to the `train` method.
'''
print_rank('server started')
self.train()
print_rank('server terminated')
def train(self):
'''Main method for training.'''
self.run_stats = {
'secsPerClientRound': [],
'secsPerClient': [],
'secsPerClientTraining': [],
'secsPerClientSetup': [],
'secsPerClientFull': [],
'secsPerRoundHousekeeping': [],
'secsPerRoundTotal': [],
'mpiCosts': []
}
run.log('Max iterations', self.max_iteration)
try:
self.worker_trainer.model.cuda() if torch.cuda.is_available() else None
# Do an initial validation round to understand the pretrained model's validation accuracy
# Skip if we resumed from a checkpoint (cur_iter_no > 0)
eval_list = []
if self.cur_iter_no == 0:
if self.config['server_config']['initial_rec']:
eval_list.append('test')
if self.config['server_config']['initial_val']:
eval_list.append('val')
run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
print_rank("Running {} at itr={}".format(eval_list, self.cur_iter_no))
self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
eval_list = [] # some cleanup
# Dump all the information in aggregate_metric
print_rank('Saving Model Before Starting Training', loglevel=logging.INFO)
for token in ['best_val_loss', 'best_val_acc', 'best_test_acc', 'latest']:
self.worker_trainer.save(
model_path=self.model_path,
token=token,
config=self.config['server_config']
)
# Training loop
self.worker_trainer.model.train()
for i in range(self.cur_iter_no, self.max_iteration):
begin = time.time()
metrics_payload = {}
def log_metric(k, v):
metrics_payload[k] = v
print_rank('==== iteration {}'.format(i))
log_metric('Current iteration', i)
# Initial value for the learning rate of the worker
initial_lr = self.initial_lr_client * self.lr_weight
print_rank('Client learning rate {}'.format(initial_lr))
# Run training on clients
self.worker_trainer.model.zero_grad()
self.train_loss = []
server_data = (
initial_lr,
[p.data.to(torch.device('cpu')) for p in self.worker_trainer.model.parameters()]
)
# Random number of clients per iteration
if len(self.num_clients_per_iteration) > 1:
num_clients_curr_iter = random.randint(
self.num_clients_per_iteration[0],
self.num_clients_per_iteration[1]
)
else:
num_clients_curr_iter = self.num_clients_per_iteration[0]
log_metric('Clients for round', num_clients_curr_iter)
# Perform annealing in quantization threshold
if self.quant_thresh is not None:
self.config['client_config']['quant_thresh'] *= self.config['client_config'].get('quant_anneal', 1.0)
self.quant_thresh = self.config['client_config']['quant_thresh']
log_metric('Quantization Thresh.', self.config['client_config']['quant_thresh'])
# Create the pool of clients -- sample from this pool to assign to workers
sampled_idx_clients = random.sample(self.client_idx_list,
num_clients_curr_iter) if num_clients_curr_iter > 0 else self.client_idx_list
sampled_clients = [
Client(
client_id,
self.config,
self.config['client_config']['type'] == 'optimization',
None
) for client_id in sampled_idx_clients
]
# Initialize stats
clients_begin = time.time()
client_losses = []
client_mag_grads = []
client_mean_grads = []
client_var_grads = []
client_norm_grads = []
self.run_stats['secsPerClient'].append([])
self.run_stats['secsPerClientFull'].append([])
self.run_stats['secsPerClientTraining'].append([])
self.run_stats['secsPerClientSetup'].append([])
self.run_stats['mpiCosts'].append([])
# Check if we want privacy metrics
apply_privacy_metrics = self.config.get('privacy_metrics_config', None) and \
self.config['privacy_metrics_config']['apply_metrics']
adaptive_leakage = apply_privacy_metrics and \
self.config['privacy_metrics_config'].get('adaptive_leakage_threshold', None)
if apply_privacy_metrics:
privacy_metrics_stats = defaultdict(list)
# Initialize profiler
profiler = None
if self.do_profiling:
profiler = cProfile.Profile()
profiler.enable()
# Reset gradient for the model before assigning the new gradients
self.worker_trainer.model.zero_grad()
for client_output in self.process_clients(sampled_clients, server_data, self.clients_in_parallel):
# Process client output
client_timestamp = client_output['ts']
client_stats = client_output['cs']
client_loss = client_output['tl']
client_mag_grad = client_output['mg']
client_mean_grad = client_output['ng']
client_var_grad = client_output['vg']
client_norm_grad = client_output['rg']
client_payload = client_output['pl']
if apply_privacy_metrics:
privacy_stats = client_output['ps']
for metric, value in privacy_stats.items():
privacy_metrics_stats[metric].append(value)
self.run_stats['mpiCosts'][-1].append(time.time() - client_timestamp)
# Get actual pseudo-gradients for aggregation
payload_processed = self.strategy.process_individual_payload(self.worker_trainer, client_payload)
if not payload_processed:
print_rank('Dropping client', loglevel=logging.DEBUG)
num_clients_curr_iter -= 1
continue
# Aggregate stats
self.train_loss.append(client_loss)
client_losses.append(client_loss)
client_mag_grads.append(client_mag_grad.item())
client_mean_grads.append(client_mean_grad.item())
client_var_grads.append(client_var_grad.item())
client_norm_grads.append(client_norm_grad.item())
# Mark the end of client processing
client_end = time.time()
self.run_stats['secsPerClientFull'][-1].append(client_stats['full cost'])
self.run_stats['secsPerClientTraining'][-1].append(client_stats['training'])
self.run_stats['secsPerClientSetup'][-1].append(client_stats['setup'])
self.run_stats['secsPerClient'][-1].append(client_end - clients_begin)
# Tear down profiler
if self.do_profiling:
profiler.disable()
stats = pstats.Stats(profiler)
stats.sort_stats('cumulative').print_stats()
# Prepare output
client_mag_grads = np.array(client_mag_grads)
client_mean_grads = np.array(client_mean_grads)
client_var_grads = np.array(client_var_grads)
client_norm_grads = np.array(client_norm_grads)
client_stats = (client_mag_grads, client_mean_grads, client_var_grads)
dump_norm_stats = self.config.get('dump_norm_stats', False)
if dump_norm_stats:
with open(os.path.join(self.model_path, 'norm_stats.txt'), 'a', encoding='utf-8') as outF:
outF.write('{}\n'.format(json.dumps(list(client_norm_grads))))
# Print the privacy metrics
if apply_privacy_metrics:
for metric, values in privacy_metrics_stats.items():
if metric == 'Dropped clients':
log_metric(metric, sum(values))
else:
log_metric(metric, max(values))
if type(adaptive_leakage) is float:
values = privacy_metrics_stats['Practical epsilon (Max leakage)']
new_threshold = list(sorted(values))[int(adaptive_leakage*len(values))]
print_rank('Updating leakage threshold to {}'.format(new_threshold))
self.config['privacy_metrics_config']['max_allowed_leakage'] = new_threshold
# Mark that all clients have been processed
end = time.time()
self.run_stats['secsPerClientRound'].append(end - begin)
begin = end
# Log the training loss to tensorboard/AML
log_metric('Training loss', sum(self.train_loss))
# Combine payloads
self.losses = self.strategy.combine_payloads(
worker_trainer=self.worker_trainer,
curr_iter=i,
num_clients_curr_iter=num_clients_curr_iter,
client_stats=client_stats,
logger=log_metric,
)
# Run a couple of iterations of training data on the server
if self.server_trainer is not None:
print_rank('Running replay iterations on server')
if 'updatable_names' in self.server_trainer_config:
set_component_wise_lr(
self.worker_trainer.model,
self.server_optimizer_config,
self.server_trainer_config['updatable_names']
)
self.server_trainer.prepare_iteration(self.worker_trainer.model)
self.server_trainer.train_desired_samples(self.server_replay_iterations)
self.worker_trainer.model.load_state_dict(self.server_trainer.model.state_dict())
torch.cuda.empty_cache()
# Update a sampling scheduler
print_rank('Run ss scheduler')
self.worker_trainer.run_ss_scheduler()
# Run inference and score on val/test depending on the iter. number
if ((i+1) % self.val_freq) == 0:
eval_list.append("val")
if ((i+1) % self.req_freq) == 0 :
eval_list.append("test")
if len(eval_list)> 0:
print_rank('Running {} at itr={}'.format(eval_list,i+1))
self.metrics['worker_trainer'] = self.worker_trainer
self.metrics = self.evaluation.run(eval_list, self.metrics, metric_logger=run.log)
self.losses = self.evaluation.losses
eval_list = []
# Create a schedule for the initial_lr (for the worker)
if 'val' in eval_list:
run.log('LR for agg. opt.', get_lr(self.worker_trainer.optimizer))
if not (self.losses[0] < self.metrics['best_val_loss']):
self.lr_weight *= self.lr_decay_factor
print_rank('LOG: Client weight of learning rate {}..'.format(self.lr_weight))
# Backup the current best models
self.backup_models(i)
# Fall back to the best model if the option is enabled
self.fall_back_to_prev_best_status()
# Logging the latest best values
update_json_log(
self.log_path,
{
'i': i + 1,
'best_val_loss': float(self.metrics['best_val_loss']),
'best_val_acc': float(self.metrics['best_val_acc']),
'best_test_loss': float(self.metrics['best_test_loss']),
'best_test_acc': float(self.metrics['best_test_acc']),
'weight': float(self.lr_weight),
'num_label_updates': int(self.no_label_updates)
},
)
end = time.time()
# Aggregate stats
self.run_stats['secsPerRoundHousekeeping'].append(end - begin)
self.run_stats['secsPerRoundTotal'].append(self.run_stats['secsPerClientRound'][-1] + \
self.run_stats['secsPerRoundHousekeeping'][-1])
log_metric('secsPerRoundTotal', self.run_stats['secsPerRoundTotal'][-1])
if self.do_profiling:
log_metric('secsPerClientRound', self.run_stats['secsPerClientRound'][-1])
log_metric('secsPerRoundHousekeeping', self.run_stats['secsPerRoundHousekeeping'][-1])
metrics_for_stats = [
'secsPerClient',
'secsPerClientTraining',
'secsPerClientFull',
'secsPerClientSetup',
'mpiCosts',
]
for metric in metrics_for_stats:
log_metric(f'{metric}Mean', np.mean(self.run_stats[metric][-1]))
log_metric(f'{metric}Median', np.median(self.run_stats[metric][-1]))
log_metric(f'{metric}Max', max(self.run_stats[metric][-1]))
for k in self.run_stats:
if k in metrics_for_stats:
print_rank('{}: {}'.format(k, max(self.run_stats[k][-1])), loglevel=logging.DEBUG)
else:
print_rank('{}: {}'.format(k, self.run_stats[k][-1]), loglevel=logging.DEBUG)
# Log all the metrics
for k in metrics_payload:
run.log(k, metrics_payload[k])
finally: # perform cleanup even if error was raised above
self.terminate_workers(terminate=(not self.do_clustering))
def backup_models(self, i):
'''Save the current best models.
Save CER model, the best loss model and the best WER model. This occurs
at a specified period.
Args:
i: no. of iterations.
'''
# Always save the latest model
self.worker_trainer.save(
model_path=self.model_path,
token='latest',
config=self.config['server_config'],
)
if (i % self.model_backup_freq) == 0: # save the current best models
self.worker_trainer.save(
model_path=self.model_path,
token='epoch{}'.format(i),
config=self.config['server_config']
)
for bodyname in ['best_val_acc', 'best_val_loss', 'best_test_acc']:
src_model_path = os.path.join(self.model_path, '{}_model.tar'.format(bodyname))
if os.path.exists(src_model_path):
dst_model_path = os.path.join(self.model_path, 'epoch{}_{}_model.tar'.format(i, bodyname))
shutil.copyfile(src_model_path, dst_model_path)
print_rank('Saved {}'.format(dst_model_path))
def fall_back_to_prev_best_status(self):
'''Go back to the past best status and switch to the recent best model.'''
if self.fall_back_to_best_model:
print_rank('falling back to model {}'.format(self.best_model_path))
# Save current learning rate
tmp_lr = get_lr(self.worker_trainer.optimizer)
# Load previous best model
self.worker_trainer.load(self.best_model_path, update_lr_scheduler=False, update_ss_scheduler=False)
# Update previous learning rate on optimizer
for g in self.worker_trainer.optimizer.param_groups:
g['lr'] = tmp_lr
if self.server_trainer is not None:
self.server_trainer.model = self.worker_trainer.model # make sure that the models are in sync
def select_server(server_type, config):
'''Select a server type using different possible strings.
Right now this just returns `OptimizationServer`, but this
function could be useful when there are multiple choices of
server.
Args:
server_type (str): indicates server choice.
config (dict): config parsed from YAML, passed so that
parameters can be used to select a given server.
'''
return OptimizationServer
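# Illustrative call: the arguments are currently placeholders for possible future
# server types, e.g. select_server('optimization', config) always returns
# OptimizationServer.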
| 45.271973 | 121 | 0.590534 | [
"MIT"
] | simra/msrflute | core/server.py | 27,299 | Python |
from .civet import Civet
from .building_blocks import *
from .builtin_scenario_sources import *
from .builtin_analyzers import *
from .builtin_outputs import *
| 26.666667 | 39 | 0.81875 | [
"MIT"
] | PMKielstra/Civet | civet/__init__.py | 160 | Python |
KEYBOARD_INTERRUPT = 1
ARGUMENT_ERROR = 2
# When playlists, albums, artists, users aren't found.
URI_NOT_FOUND_ERROR = 5
| 17.571429 | 54 | 0.772358 | [
"MIT"
] | DARHALL/spotify-downloader | spotdl/command_line/exitcodes.py | 123 | Python |
import random
import numpy as np
def write_to_file(filename,no_locations,no_agents):
info_dict={}
#ID enumerates from 0 to n-1
header='Location Index:Agents:Time Interval'
n=random.randint(10,20)
f=open(filename,'w')
f.write(str(n)+'\n')
f.write(header+'\n')
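    # Each event line written below has the form
    # "<location>:<agent>,<agent>,...:<interval>", e.g. "3:17,42,5:45"
    # (the numbers in this example are only illustrative).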
for i in range(n):
line=str(random.randint(0,no_locations-1))+':'
        for _ in range(random.randint(0,20)):  # avoid shadowing the outer loop variable
line+=str(random.randint(0,no_agents-1))+','
line+=str(random.randint(0,no_agents-1))
line+=':'+str(random.choice([10,30,45,60]))+'\n'
        f.write(line)
    f.close()
write_to_file('monday_events.txt',10,100)
write_to_file('tuesday_events.txt',10,100)
write_to_file('wednesday_events.txt',10,100)
write_to_file('thursday_events.txt',10,100)
write_to_file('friday_events.txt',10,100)
write_to_file('saturday_events.txt',5,100)
write_to_file('sunday_events.txt',2,100)
| 27.064516 | 51 | 0.72944 | [
"BSD-3-Clause"
] | healthbadge/episimmer | examples/Basic_Disease_Models/Example_1/generate_events.py | 839 | Python |
# Generated by Django 2.2.2 on 2019-09-01 09:08
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=255, unique=True)),
('name', models.CharField(max_length=255)),
('is_active', models.BooleanField(default=True)),
('is_staff', models.BooleanField(default=False)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
| 50.029412 | 266 | 0.637272 | [
"MIT"
] | Huxteen/Django-recipe-app-API-Docker-TDD-CI-CD | app/core/migrations/0001_initial.py | 1,701 | Python |
"""
Thank you Funkwhale for inspiration on the HTTP signatures parts <3
https://funkwhale.audio/
"""
import datetime
import logging
from typing import Union
import pytz
from Crypto.PublicKey.RSA import RsaKey
from requests_http_signature import HTTPSignatureHeaderAuth
from federation.types import RequestType
from federation.utils.network import parse_http_date
from federation.utils.text import encode_if_text
logger = logging.getLogger("federation")
def get_http_authentication(private_key: RsaKey, private_key_id: str) -> HTTPSignatureHeaderAuth:
"""
Get HTTP signature authentication for a request.
"""
key = private_key.exportKey()
return HTTPSignatureHeaderAuth(
headers=["(request-target)", "user-agent", "host", "date"],
algorithm="rsa-sha256",
key=key,
key_id=private_key_id,
)
def verify_request_signature(request: RequestType, public_key: Union[str, bytes]):
"""
Verify HTTP signature in request against a public key.
"""
key = encode_if_text(public_key)
date_header = request.headers.get("Date")
if not date_header:
raise ValueError("Rquest Date header is missing")
ts = parse_http_date(date_header)
dt = datetime.datetime.utcfromtimestamp(ts).replace(tzinfo=pytz.utc)
past_delta = datetime.timedelta(hours=24)
future_delta = datetime.timedelta(seconds=30)
now = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)
if dt < now - past_delta or dt > now + future_delta:
raise ValueError("Request Date is too far in future or past")
HTTPSignatureHeaderAuth.verify(request, key_resolver=lambda **kwargs: key)
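# A minimal end-to-end sketch (key objects, URLs and payload are illustrative):
#   auth = get_http_authentication(private_key, "https://example.com/u/alice#main-key")
#   requests.post(inbox_url, json=payload, auth=auth)
# and on the receiving side, once the sender's public key has been fetched:
#   verify_request_signature(request, public_key)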
| 31.769231 | 97 | 0.734867 | [
"BSD-3-Clause"
] | jaywink/federation | federation/protocols/activitypub/signing.py | 1,652 | Python |
# -*- coding=utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from ctypes import c_uint64
import numpy as np
from .utils import CxxPointer, _call_with_growing_buffer
from .ffi import chfl_match
class Selection(CxxPointer):
"""
Select atoms in a :py:class:`Frame` with a selection language.
The selection language is built by combining basic operations. Each basic
operation follows the ``<selector>[(<variable>)] <operator> <value>``
structure, where ``<operator>`` is a comparison operator in
``== != < <= > >=``. Refer to the `full documentation
<selections-doc>`_ to know the allowed selectors and how to use them.
.. selections-doc: https://chemfiles.org/chemfiles/latest/selections.html
"""
def __init__(self, selection):
"""
Create a new :py:class:`Selection` from the given ``selection`` string.
"""
ptr = self.ffi.chfl_selection(selection.encode("utf8"))
super(Selection, self).__init__(ptr, is_const=False)
def __copy__(self):
return Selection.from_mutable_ptr(None, self.ffi.chfl_selection_copy(self.ptr))
def __repr__(self):
return "Selection('{}')".format(self.string)
@property
def size(self):
"""
Get the size of this :py:class:`Selection`.
The size of a selection is the number of atoms we are selecting
together. This value is 1 for the 'atom' context, 2 for the 'pair' and
'bond' context, 3 for the 'three' and 'angles' contextes and 4 for the
'four' and 'dihedral' contextes.
"""
size = c_uint64()
self.ffi.chfl_selection_size(self.ptr, size)
return size.value
@property
def string(self):
"""
Get the selection string used to create this :py:class:`Selection`.
"""
return _call_with_growing_buffer(
lambda buffer, size: self.ffi.chfl_selection_string(self.ptr, buffer, size),
initial=128,
)
def evaluate(self, frame):
"""
Evaluate a :py:class:`Selection` for a given :py:class:`Frame`, and
return a list of matching atoms, either as a list of index or a list
of tuples of indexes.
"""
matching = c_uint64()
self.ffi.chfl_selection_evaluate(self.mut_ptr, frame.ptr, matching)
matches = np.zeros(matching.value, chfl_match)
self.ffi.chfl_selection_matches(self.mut_ptr, matches, matching)
size = self.size
result = []
for match in matches:
assert match[0] == size
atoms = match[1]
if size == 1:
result.append(atoms[0])
elif size == 2:
result.append((atoms[0], atoms[1]))
elif size == 3:
result.append((atoms[0], atoms[1], atoms[2]))
elif size == 4:
result.append((atoms[0], atoms[1], atoms[2], atoms[3]))
return result
| 34.390805 | 88 | 0.612634 | [
"BSD-3-Clause"
] | Luthaf/Chemharp-python | chemfiles/selection.py | 2,992 | Python |
from rest_framework import serializers
from .models import Canteen
from accounts.serializers import UserForSerializer
from model_location.serializers import CityViewSerializer
from model_media.serializers import MediaViewSerializer
# Canteen model serializer
class CanteenSerializer(serializers.ModelSerializer):
class Meta:
model = Canteen
fields = '__all__'
# Canteen model serializer to view
class CanteenViewSerializer(serializers.ModelSerializer):
user = UserForSerializer(read_only=True)
city = CityViewSerializer(read_only=True)
images = MediaViewSerializer(read_only=True, many=True)
class Meta:
model = Canteen
fields = '__all__'
| 29 | 59 | 0.777299 | [
"Apache-2.0"
] | SanjarbekSaminjonov/Musofirlar.Backend | models/model_canteen/serializers.py | 696 | Python |
# MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
| 50.318182 | 80 | 0.775971 | [
"MIT"
] | FilipSchad/packit | packit/cli/__init__.py | 1,107 | Python |
from subprocess import Popen as pop
pop('gcc --version', shell=False)
pop('/bin/gcc --version', shell=False)
pop(var, shell=False)
pop(['ls', '-l'], shell=False)
pop(['/bin/ls', '-l'], shell=False)
pop('../ls -l', shell=False)
pop('c:\\hello\\something', shell=False)
pop('c:/hello/something_else', shell=False)
| 22.571429 | 43 | 0.655063 | [
"Apache-2.0"
] | Appleinc123/bandit | examples/partial_path_process.py | 316 | Python |
import os
import random
from riscv_definitions import *
NONE = 0
CF_J = 1
CF_BR = 2
CF_RET = 3
MEM_R = 4
MEM_W = 5
CSR = 6
PREFIX = '_p'
MAIN = '_l'
SUFFIX = '_s'
class Word():
def __init__(self, label: int, insts: list, tpe=NONE, xregs=[], fregs=[], imms=[], symbols=[], populated=False):
self.label = label
self.tpe = tpe
self.insts = insts
self.len_insts = len(insts)
self.xregs = xregs
self.fregs = fregs
self.imms = imms
self.symbols = symbols
self.operands = xregs + fregs + [ imm[0] for imm in imms ] + symbols
self.populated = populated
self.ret_insts = []
def pop_inst(self, inst, opvals):
for (op, val) in opvals.items():
inst = inst.replace(op, val)
return inst
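    # Illustrative behaviour of pop_inst (the register names are placeholders):
    #   pop_inst('add xreg0, xreg1, xreg2', {'xreg0': 'a0', 'xreg1': 'a1', 'xreg2': 'a2'})
    #   returns 'add a0, a1, a2'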
def populate(self, opvals, part=MAIN):
for op in self.operands:
assert op in opvals.keys(), \
'{} is not in label {} Word opvals'.format(op, self.label)
pop_insts = []
for inst in self.insts:
p_inst = self.pop_inst(inst, opvals)
pop_insts.append(p_inst)
ret_insts = [ '{:<8}{:<42}'.format(part + str(self.label) + ':',
pop_insts.pop(0)) ]
for i in range(len(pop_insts)):
ret_insts.append('{:8}{:<42}'.format('', pop_insts.pop(0)))
self.populated = True
self.ret_insts = ret_insts
def reset_label(self, new_label, part):
old_label = self.label
self.label = new_label
if self.populated:
self.ret_insts[0] = '{:8}{:<42}'.format(part + str(self.label) + ':',
self.ret_insts[0][8:])
return (old_label, new_label)
else:
return None
def repop_label(self, label_map, max_label, part):
if self.populated:
for i in range(len(self.ret_insts)):
inst = self.ret_insts[i]
tmps = inst.split(', ' + part)
if len(tmps) > 1:
label = tmps[1].split(' ')[0]
old = int(label)
new = label_map.get(old, random.randint(self.label + 1, max_label))
new_inst = inst[8:].replace(part + '{}'.format(old), part + '{}'.format(new))
inst = '{:<8}{:<50}'.format(inst[0:8], new_inst)
self.ret_insts[i] = inst
else:
return
def get_insts(self):
assert self.populated, \
'Word is not populated'
return self.ret_insts
def word_jal(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_J
insts = [ syntax ]
return (tpe, insts)
def word_jalr(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_J
insts = [ 'la xreg1, symbol', syntax ]
symbols.append('symbol')
return (tpe, insts)
# Need to update
def word_branch(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_BR
insts = [ syntax ]
return (tpe, insts)
def word_ret(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CF_RET
if syntax == 'mret': epc = 'mepc'
elif syntax == 'sret': epc = 'sepc'
else: epc = 'uepc'
insts = [ 'la xreg0, symbol',
'csrrw zero, {}, xreg0'.format(epc),
syntax ]
xregs.append('xreg0')
symbols.append('symbol')
return (tpe, insts)
def word_mem_r(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_R
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol' ] + mask_addr + [ syntax ]
symbols.append('symbol')
return (tpe, insts)
def word_mem_w(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_W
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol' ] + mask_addr + [ syntax ]
symbols.append('symbol')
return (tpe, insts)
def word_atomic(opcode, syntax, xregs, fregs, imms, symbols):
tpe = MEM_W
rand = random.random()
if rand < 0.1:
mask_addr = [ 'lui xreg2, 0xffe00',
'xor xreg1, xreg1, xreg2' ]
xregs.append('xreg2')
else:
mask_addr = []
insts = [ 'la xreg1, symbol',
'addi xreg1, xreg1, imm6' ] + \
mask_addr + \
[ syntax ]
if opcode in rv64.keys():
imms.append(('imm6', 8))
else:
imms.append(('imm6', 4))
symbols.append('symbol')
return (tpe, insts)
def word_csr_r(opcode, syntax, xregs, fregs, imms, symbols):
csr = random.choice(csr_names)
if 'pmpaddr' in csr:
tpe = MEM_R
insts = [ 'la xreg1, symbol',
'srai xreg1, xreg1, 1',
syntax.format(csr) ]
symbols.append('symbol')
else:
tpe = CSR
insts = [ 'xor xreg1, xreg1, xreg1']
for i in range(random.randint(0, 3)):
set_bits = random.choice([1, 3])
offset = random.randint(0, 31)
insts = insts + \
['addi xreg{}, zero, {}'.format(i+2, set_bits),
'slli xreg{}, xreg{}, {}'.format(i+2, i+2, offset),
'add xreg1, xreg1, xreg{}'.format(i+2)
]
xregs.append('xreg{}'.format(i+2))
insts.append(syntax.format(csr))
return (tpe, insts)
def word_csr_i(opcode, syntax, xregs, fregs, imms, symbols):
tpe = CSR
csr = random.choice(csr_names)
insts = [ syntax.format(csr) ]
return (tpe, insts)
def word_sfence(opcode, syntax, xregs, fregs, imms, symbols):
tpe = NONE
pt_symbol = random.choice([ 'pt0', 'pt1', 'pt2', 'pt3' ])
imms += [ ('uimm1', 1), ('uimm6', 8) ]
insts = [ 'li xreg0, uimm1',
'la xreg1, {}'.format(pt_symbol),
'addi xreg1, xreg1, uimm6' ] + \
[ syntax ]
return (tpe, insts)
def word_fp(opcode, syntax, xregs, fregs, imms, symbols):
tpe = NONE
# rm = random.choice([ 'rne', 'rtz', 'rdn',
# 'rup', 'rmm', 'dyn'])
# Unset rounding mode testing
rm = 'rne'
insts = [ syntax.format(rm) ]
return (tpe, insts)
""" Opcodes_words
Dictionary of opcodes - word generation functions
to handle opcodes which need special instructions
"""
opcodes_words = {
'jal': (['jal'], word_jal),
'jalr': (['jalr'], word_jalr),
'branch': (list(rv32i_btype.keys()), word_branch),
'ret': (['mret', 'sret', 'uret'], word_ret),
'mem_r': (['lb', 'lh', 'lw', 'ld', 'lbu', 'lhu', 'lwu', \
'flw', 'fld', 'flq'], word_mem_r),
'mem_w': (['sb', 'sh', 'sw', 'sd', 'fsw', 'fsd', 'fsq'], word_mem_w),
'atomic': (list(rv32a.keys()) + list(rv64a.keys()), word_atomic),
'csr_r': (['csrrw', 'csrrs', 'csrrc'], word_csr_r),
'csr_i': (['csrrwi', 'csrrsi', 'csrrci'], word_csr_i),
'sfence': (['sfence.vma'], word_sfence),
'fp': (list(rv32f.keys()) + list(rv64f.keys()) + list(rv32d.keys()) + \
list(rv64d.keys()) + list(rv32q.keys()) + list(rv64q.keys()),
word_fp)
}
| 28.525097 | 116 | 0.525311 | [
"BSD-3-Clause"
] | compsec-snu/difuzz-rtl | Fuzzer/src/word.py | 7,388 | Python |
"""URLs for the ``django-frequently`` application."""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$',
views.EntryCategoryListView.as_view(),
name='frequently_list'),
url(r'^your-question/$',
views.EntryCreateView.as_view(),
name='frequently_submit_question'),
url(r'^(?P<slug>[a-z-0-9]+)/$',
views.EntryDetailView.as_view(),
name='frequently_entry_detail'),
]
| 22.85 | 53 | 0.625821 | [
"MIT"
] | bitlabstudio/django-frequently | frequently/urls.py | 457 | Python |
# Support various prediction methods for predicting cluster membership
# of new or unseen points. There are several ways to interpret how
# to do this correctly, so we provide several methods for
# the different use cases that may arise.
import numpy as np
from sklearn.neighbors import KDTree, BallTree
from .dist_metrics import DistanceMetric
from ._hdbscan_tree import compute_stability, labelling_at_cut, recurse_leaf_dfs
from ._prediction_utils import (get_tree_row_with_child,
dist_membership_vector,
outlier_membership_vector,
prob_in_some_cluster,
all_points_dist_membership_vector,
all_points_outlier_membership_vector,
all_points_prob_in_some_cluster)
from warnings import warn
class PredictionData(object):
"""
Extra data that allows for faster prediction if cached.
Parameters
----------
data : array (n_samples, n_features)
The original data set that was clustered
condensed_tree : CondensedTree
The condensed tree object created by a clustering
min_samples : int
The min_samples value used in clustering
tree_type : string, optional
Which type of space tree to use for core distance computation.
One of:
* ``kdtree``
* ``balltree``
metric : string, optional
The metric used to determine distance for the clustering.
This is the metric that will be used for the space tree to determine
core distances etc.
**kwargs :
Any further arguments to the metric.
Attributes
----------
raw_data : array (n_samples, n_features)
The original data set that was clustered
tree : KDTree or BallTree
A space partitioning tree that can be queried for nearest neighbors.
core_distances : array (n_samples,)
The core distances for every point in the original data set.
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
cluster_tree : structured array
A version of the condensed tree that only contains clusters, not
individual points.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
"""
_tree_type_map = {'kdtree': KDTree, 'balltree': BallTree}
def _clusters_below(self, cluster):
result = []
to_process = [cluster]
while to_process:
result.extend(to_process)
to_process = \
self.cluster_tree['child'][np.in1d(self.cluster_tree['parent'],
to_process)]
to_process = to_process.tolist()
return result
def _recurse_leaf_dfs(self, current_node):
children = self.cluster_tree[self.cluster_tree['parent'] ==
current_node]['child']
if len(children) == 0:
return [current_node, ]
else:
return sum(
[recurse_leaf_dfs(self.cluster_tree, child) for child in children], [])
def __init__(self, data, condensed_tree, min_samples,
tree_type='kdtree', metric='euclidean', **kwargs):
self.raw_data = data
self.tree = self._tree_type_map[tree_type](self.raw_data,
metric=metric, **kwargs)
self.core_distances = self.tree.query(data, k=min_samples)[0][:, -1]
self.dist_metric = DistanceMetric.get_metric(metric, **kwargs)
selected_clusters = condensed_tree._select_clusters()
# raw_condensed_tree = condensed_tree.to_numpy()
raw_condensed_tree = condensed_tree._raw_tree
self.cluster_map = {c: n for n, c in enumerate(sorted(list(selected_clusters)))}
self.reverse_cluster_map = {n: c for c, n in self.cluster_map.items()}
self.cluster_tree = raw_condensed_tree[raw_condensed_tree['child_size']
> 1]
self.max_lambdas = {}
self.leaf_max_lambdas = {}
self.exemplars = []
all_clusters = set(np.hstack([self.cluster_tree['parent'],
self.cluster_tree['child']]))
for cluster in all_clusters:
self.leaf_max_lambdas[cluster] = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == cluster].max()
for cluster in selected_clusters:
self.max_lambdas[cluster] = \
raw_condensed_tree['lambda_val'][raw_condensed_tree['parent']
== cluster].max()
for sub_cluster in self._clusters_below(cluster):
self.cluster_map[sub_cluster] = self.cluster_map[cluster]
self.max_lambdas[sub_cluster] = self.max_lambdas[cluster]
cluster_exemplars = np.array([], dtype=np.int64)
for leaf in self._recurse_leaf_dfs(cluster):
leaf_max_lambda = raw_condensed_tree['lambda_val'][
raw_condensed_tree['parent'] == leaf].max()
points = raw_condensed_tree['child'][
(raw_condensed_tree['parent'] == leaf) &
(raw_condensed_tree['lambda_val'] == leaf_max_lambda)]
cluster_exemplars = np.hstack([cluster_exemplars, points])
self.exemplars.append(self.raw_data[cluster_exemplars])
def _find_neighbor_and_lambda(neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Find the nearest mutual reachability neighbor of a point, and compute
the associated lambda value for the point, given the mutual reachability
distance to a nearest neighbor.
Parameters
----------
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
neighbor : int
The index into the full raw data set of the nearest mutual reachability
distance neighbor of the point.
lambda_ : float
The lambda value at which this point joins/merges with `neighbor`.
"""
neighbor_core_distances = core_distances[neighbor_indices]
point_core_distances = neighbor_distances[min_samples] * np.ones(
neighbor_indices.shape[0])
mr_distances = np.vstack((
neighbor_core_distances,
point_core_distances,
neighbor_distances
)).max(axis=0)
nn_index = mr_distances.argmin()
nearest_neighbor = neighbor_indices[nn_index]
if mr_distances[nn_index] > 0.0:
lambda_ = 1. / mr_distances[nn_index]
else:
lambda_ = np.finfo(np.double).max
return nearest_neighbor, lambda_
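# Worked example for the helper above (numbers are illustrative): with a raw
# neighbor distance of 0.2, a neighbor core distance of 0.5 and a point core
# distance of 0.3, the mutual reachability distance is max(0.5, 0.3, 0.2) = 0.5,
# so the point joins that neighbor at lambda = 1 / 0.5 = 2.0.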
def _extend_condensed_tree(tree, neighbor_indices, neighbor_distances,
core_distances, min_samples):
"""
Create a new condensed tree with an additional point added, allowing for
computations as if this point had been part of the original tree. Note
that this makes as little change to the tree as possible, with no
re-optimizing/re-condensing so that the selected clusters remain
effectively unchanged.
Parameters
----------
tree : structured array
The raw format condensed tree to update.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
min_samples : int
The min_samples value used to generate core distances.
Returns
-------
new_tree : structured array
The original tree with an extra row providing the parent cluster
and lambda information for a new point given index -1.
"""
tree_root = tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] <= lambda_:
# New point departs with the old
new_tree_row = (potential_cluster, -1, 1,
neighbor_tree_row['lambda_val'])
else:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
tree[tree['child'] ==
potential_cluster]['lambda_val'] >= lambda_:
potential_cluster = tree['parent'][tree['child']
== potential_cluster][0]
new_tree_row = (potential_cluster, -1, 1, lambda_)
return np.append(tree, new_tree_row)
def _find_cluster_and_probability(tree, cluster_tree, neighbor_indices,
neighbor_distances, core_distances,
cluster_map, max_lambdas,
min_samples):
"""
Return the cluster label (of the original clustering) and membership
probability of a new data point.
Parameters
----------
tree : CondensedTree
The condensed tree associated with the clustering.
cluster_tree : structured_array
The raw form of the condensed tree with only cluster information (no
data on individual points). This is significantly more compact.
neighbor_indices : array (2 * min_samples, )
An array of raw distance based nearest neighbor indices.
neighbor_distances : array (2 * min_samples, )
An array of raw distances to the nearest neighbors.
core_distances : array (n_samples, )
An array of core distances for all points
cluster_map : dict
A dictionary mapping cluster numbers in the condensed tree to labels
in the final selected clustering.
max_lambdas : dict
A dictionary mapping cluster numbers in the condensed tree to the
maximum lambda value seen in that cluster.
min_samples : int
The min_samples value used to generate core distances.
"""
raw_tree = tree._raw_tree
tree_root = cluster_tree['parent'].min()
nearest_neighbor, lambda_ = _find_neighbor_and_lambda(neighbor_indices,
neighbor_distances,
core_distances,
min_samples
)
neighbor_tree_row = get_tree_row_with_child(raw_tree, nearest_neighbor)
potential_cluster = neighbor_tree_row['parent']
if neighbor_tree_row['lambda_val'] > lambda_:
# Find appropriate cluster based on lambda of new point
while potential_cluster > tree_root and \
cluster_tree['lambda_val'][cluster_tree['child']
== potential_cluster] >= lambda_:
potential_cluster = cluster_tree['parent'][cluster_tree['child']
== potential_cluster][0]
if potential_cluster in cluster_map:
cluster_label = cluster_map[potential_cluster]
else:
cluster_label = -1
if cluster_label >= 0:
max_lambda = max_lambdas[potential_cluster]
if max_lambda > 0.0:
lambda_ = min(max_lambda, lambda_)
prob = (lambda_ / max_lambda)
else:
prob = 1.0
else:
prob = 0.0
return cluster_label, prob
def approximate_predict(clusterer, points_to_predict):
"""Predict the cluster label of new points. The returned labels
will be those of the original clustering found by ``clusterer``,
and therefore are not (necessarily) the cluster labels that would
be found by clustering the original data combined with
``points_to_predict``, hence the 'approximate' label.
If you simply wish to assign new points to an existing clustering
in the 'best' way possible, this is the function to use. If you
want to predict how ``points_to_predict`` would cluster with
the original data under HDBSCAN the most efficient existing approach
is to simply recluster with the new point(s) added to the original dataset.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
labels : array (n_samples,)
The predicted labels of the ``points_to_predict``
probabilities : array (n_samples,)
The soft cluster scores for each of the ``points_to_predict``
See Also
--------
:py:func:`hdbscan.predict.membership_vector`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
if clusterer.prediction_data_ is None:
raise ValueError('Clusterer does not have prediction data!'
' Try fitting with prediction_data=True set,'
' or run generate_prediction_data on the clusterer')
points_to_predict = np.asarray(points_to_predict)
if points_to_predict.shape[1] != \
clusterer.prediction_data_.raw_data.shape[1]:
raise ValueError('New points dimension does not match fit data!')
if clusterer.prediction_data_.cluster_tree.shape[0] == 0:
warn('Clusterer does not have any defined clusters, new data'
' will be automatically predicted as noise.')
labels = -1 * np.ones(points_to_predict.shape[0], dtype=np.int32)
probabilities = np.zeros(points_to_predict.shape[0], dtype=np.float32)
return labels, probabilities
    labels = np.empty(points_to_predict.shape[0], dtype=np.intp)
probabilities = np.empty(points_to_predict.shape[0], dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
label, prob = _find_cluster_and_probability(
clusterer.condensed_tree_,
clusterer.prediction_data_.cluster_tree,
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
clusterer.prediction_data_.cluster_map,
clusterer.prediction_data_.max_lambdas,
min_samples
)
labels[i] = label
probabilities[i] = prob
return labels, probabilities
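# A minimal usage sketch for `approximate_predict`, assuming `data` and
# `new_points` are numpy arrays with the same number of features:
#
#     import hdbscan
#     clusterer = hdbscan.HDBSCAN(min_cluster_size=15, prediction_data=True).fit(data)
#     new_labels, strengths = approximate_predict(clusterer, new_points)
#
# The original clustering is left untouched; only the new points are assigned
# labels and membership strengths.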
def membership_vector(clusterer, points_to_predict):
"""Predict soft cluster membership. The result produces a vector
for each point in ``points_to_predict`` that gives a probability that
the given point is a member of a cluster for each of the selected clusters
of the ``clusterer``.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
points_to_predict : array, or array-like (n_samples, n_features)
The new data points to predict cluster labels for. They should
have the same dimensionality as the original dataset over which
clusterer was fit.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` is a member of cluster ``j`` is
in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
:py:func:`hdbscan.predict.all_points_membership_vectors`
"""
clusters = np.array(
sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
result = np.empty((points_to_predict.shape[0], clusters.shape[0]),
dtype=np.float64)
min_samples = clusterer.min_samples or clusterer.min_cluster_size
neighbor_distances, neighbor_indices = \
clusterer.prediction_data_.tree.query(points_to_predict,
k=2 * min_samples)
for i in range(points_to_predict.shape[0]):
# We need to find where in the tree the new point would go
# for the purposes of outlier membership approximation
nearest_neighbor, lambda_ = \
_find_neighbor_and_lambda(
neighbor_indices[i],
neighbor_distances[i],
clusterer.prediction_data_.core_distances,
min_samples)
neighbor_tree_row = get_tree_row_with_child(
clusterer.condensed_tree_._raw_tree, nearest_neighbor)
if neighbor_tree_row['lambda_val'] <= lambda_:
lambda_ = neighbor_tree_row['lambda_val']
distance_vec = dist_membership_vector(
points_to_predict[i],
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vec = outlier_membership_vector(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result[i] = distance_vec ** 0.5 * outlier_vec ** 2.0
result[i] /= result[i].sum()
result[i] *= prob_in_some_cluster(
nearest_neighbor,
lambda_,
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
return result
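# A minimal usage sketch for `membership_vector`, assuming `clusterer` was fit
# with `prediction_data=True` and `new_points` matches the training data's
# dimensionality:
#
#     soft_clusters = membership_vector(clusterer, new_points)
#     likely_cluster = soft_clusters.argmax(axis=1)
#
# Each row sums to the probability that the point belongs to any cluster at
# all, so rows for noise-like points remain close to zero.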
def all_points_membership_vectors(clusterer):
"""Predict soft cluster membership vectors for all points in the
original dataset the clusterer was trained on. This function is more
efficient by making use of the fact that all points are already in the
condensed tree, and processing in bulk.
Parameters
----------
clusterer : HDBSCAN
A clustering object that has been fit to the data and
either had ``prediction_data=True`` set, or called the
``generate_prediction_data`` method after the fact.
This method does not work if the clusterer was trained
with ``metric='precomputed'``.
Returns
-------
membership_vectors : array (n_samples, n_clusters)
The probability that point ``i`` of the original dataset is a member of
cluster ``j`` is in ``membership_vectors[i, j]``.
See Also
--------
    :py:func:`hdbscan.predict.approximate_predict`
    :py:func:`hdbscan.predict.membership_vector`
"""
clusters = np.array(sorted(list(clusterer.condensed_tree_._select_clusters()))).astype(np.intp)
all_points = clusterer.prediction_data_.raw_data
# When no clusters found, return array of 0's
if clusters.size == 0:
return np.zeros(all_points.shape[0])
distance_vecs = all_points_dist_membership_vector(
all_points,
clusterer.prediction_data_.exemplars,
clusterer.prediction_data_.dist_metric)
outlier_vecs = all_points_outlier_membership_vector(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
in_cluster_probs = all_points_prob_in_some_cluster(
clusters,
clusterer.condensed_tree_._raw_tree,
clusterer.prediction_data_.leaf_max_lambdas,
clusterer.prediction_data_.cluster_tree)
result = distance_vecs * outlier_vecs
row_sums = result.sum(axis=1)
result = result / row_sums[:, np.newaxis]
result *= in_cluster_probs[:, np.newaxis]
return result
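# A minimal usage sketch for `all_points_membership_vectors`, assuming the
# clusterer was fit with `prediction_data=True` and a non-precomputed metric:
#
#     memberships = all_points_membership_vectors(clusterer)
#     # memberships[i, j] is the soft membership of training point i in cluster j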
| 37.967509 | 99 | 0.636256 | ["BSD-3-Clause"] | CKrawczyk/hdbscan | hdbscan/prediction.py | 21,034 | Python |
"""
Views for PubSite app.
"""
from django.conf import settings
from django.contrib.auth.views import (
PasswordResetView,
PasswordResetDoneView,
PasswordResetConfirmView,
PasswordResetCompleteView,
)
from django.shortcuts import render
import requests
import logging
logger = logging.getLogger(__name__)
def _get_context(page_name):
return {
"pages": settings.PUBLIC_PAGES,
"current_page_name": page_name,
}
# Regular index
# def index(request):
# """
# View for the static index page
# """
# return render(request, 'public/home.html', _get_context('Home'))
def index(request):
"""
View for the static index page
"""
return render(request, "public/home.html", _get_context("Home"))
def about(request):
"""
View for the static chapter history page.
"""
return render(request, "public/about.html", _get_context("About"))
def activities(request):
"""
View for the static chapter service page.
"""
return render(
request,
"public/activities.html",
_get_context("Service & Activities"),
)
def rush(request):
"""
    View for the static chapter rush page.
"""
return render(
request,
"public/rush.html",
_get_context("Rush"),
)
def campaign(request):
"""
    View for the campaign page; pulls recent donations from the GiveButter API.
"""
    # Override requests Session authentication handling
class NoRebuildAuthSession(requests.Session):
def rebuild_auth(self, prepared_request, response):
"""
No code here means requests will always preserve the Authorization
header when redirected.
Be careful not to leak your credentials to untrusted hosts!
"""
url = "https://api.givebutter.com/v1/transactions/"
headers = {"Authorization": f"Bearer {settings.GIVEBUTTER_API_KEY}"}
response = None
# Create custom requests session
session = NoRebuildAuthSession()
# Make GET request to server, timeout in seconds
try:
r = session.get(url, headers=headers, timeout=0.75)
if r.status_code == 200:
response = r.json()
else:
logger.error(f"ERROR in request: {r.status_code}")
except requests.exceptions.Timeout:
logger.warning("Connection to GiveButter API Timed out")
except requests.ConnectionError:
logger.warning("Connection to GiveButter API could not be resolved")
except requests.exceptions.RequestException:
logger.error(
"An unknown issue occurred while trying to retrieve GiveButter Donor List"
)
# Grab context object to use later
ctx = _get_context("Campaign")
# Check for successful response, if so - filter, sort, and format data
if response and "data" in response:
response = response["data"] # Pull data from GET response object
logger.debug(f"GiveButter API Response: {response}")
# Filter by only successful transactions, then sort by amount descending
successful_txs = [tx for tx in response if tx["status"] == "succeeded"]
sorted_txs = sorted(successful_txs, key=lambda tx: tx["amount"], reverse=True)
# Clean data to a list of dictionaries & remove unnecessary data
transactions = [
{
"name": tx["giving_space"]["name"],
"amount": tx["giving_space"]["amount"],
"message": tx["giving_space"]["message"],
}
for tx in sorted_txs[:20]
]
# Attach transaction dictionary & length to context object
ctx["transactions"] = transactions
ctx["num_txs"] = len(successful_txs)
return render(
request,
"public/campaign.html",
ctx,
)
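# For reference, the parsing in campaign() only relies on this subset of the
# GiveButter transaction payload (shape inferred from the code above;
# illustrative values only, the live API returns more fields):
#
#     {
#         "data": [
#             {
#                 "status": "succeeded",
#                 "amount": 50,
#                 "giving_space": {"name": "Jane Doe", "amount": 50, "message": "Go team!"}
#             }
#         ]
#     }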
def permission_denied(request):
"""
View for 403 (Permission Denied) error.
"""
return render(
request,
"common/403.html",
_get_context("Permission Denied"),
)
def handler404(request, exception):
""" """
return render(request, "common/404.html", _get_context("Page Not Found"))
class ResetPassword(PasswordResetView):
template_name = "password_reset/password_reset_form.html"
class ResetPasswordDone(PasswordResetDoneView):
template_name = "password_reset/password_reset_done.html"
class ResetPasswordConfirm(PasswordResetConfirmView):
template_name = "password_reset/password_reset_confirm.html"
class ResetPasswordComplete(PasswordResetCompleteView):
template_name = "password_reset/password_reset_complete.html"
| 27.440476 | 86 | 0.652061 | ["MIT"] | Jacobvs/sigmapi-web | sigmapiweb/apps/PubSite/views.py | 4,610 | Python |
import requests
import datetime
class BearerAuth(requests.auth.AuthBase):
def __init__(self, token):
self.token = token
def __call__(self, r):
r.headers["authorization"] = "Bearer " + self.token
return r
class MintMobile:
def __init__(self, phone_number, password):
self.phone_number = phone_number
self.password = password
self.token=""
self.id=""
self.family_members=[]
self.info={}
def login(self):
#print("Logging Into " + self.phone_number)
r=requests.post('https://w3b-api.ultramobile.com/v1/mint/login?', json = {"msisdn":self.phone_number,"password":self.password})
if r.status_code == 200:
response=r.json()
self.id=response['id']
self.token=response['token']
self.info[self.id]={"phone_number":self.phone_number}
self.master_account_details()
return True
else:
return False
def master_account_details(self):
r=requests.get('https://w3b-api.ultramobile.com/v1/mint/account/'+str(self.id)+'?', auth=BearerAuth(str(self.token)))
response=r.json()
self.info[self.id]['line_name']=response['firstName']
self.info[self.id]['endOfCycle']=self.epoch_days_remaining(response['plan']['endOfCycle'])
self.info[self.id]['months']=response['plan']['months']
self.info[self.id]['exp']=self.epoch_days_remaining(response['plan']['exp'])
def data_remaining(self):
r=requests.get('https://w3b-api.ultramobile.com/v1/mint/account/'+str(self.id)+'/data?', auth=BearerAuth(str(self.token)))
response=r.json()
response['remaining4G_GB']=self.conv_MB_to_GB(response['remaining4G'])
self.info[self.id]['remaining4G']=response['remaining4G_GB']
return self.info
def conv_MB_to_GB(self,input_megabyte):
gigabyte = 1.0/1024
convert_gb = gigabyte * input_megabyte
convert_gb=round(convert_gb, 2)
return convert_gb
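    # For example, conv_MB_to_GB(512) returns 0.5 and conv_MB_to_GB(2048)
    # returns 2.0 (values are rounded to two decimal places).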
def epoch_days_remaining(self,epoch):
dt1 = datetime.datetime.fromtimestamp(epoch)
dt2 = datetime.datetime.now()
delta = dt1 - dt2
return delta.days
def get_family_members(self):
r=requests.get('https://w3b-api.ultramobile.com/v1/mint/account/'+str(self.id)+'/multi-line?', auth=BearerAuth(str(self.token)))
response=r.json()
for activeMembers in response['activeMembers']:
self.family_members.append(activeMembers['id'])
self.info[activeMembers['id']]={}
#self.info[activeMembers['id']]={"phone_number":activeMembers['msisdn'],"line_name":activeMembers['nickName']}
self.info[activeMembers['id']]["phone_number"]=activeMembers['msisdn']
self.info[activeMembers['id']]["line_name"]=activeMembers['nickName']
self.info[activeMembers['id']]["endOfCycle"]=self.epoch_days_remaining(activeMembers['currentPlan']["rechargeDate"])
self.info[activeMembers['id']]["months"]=activeMembers['currentPlan']["duration"]
self.info[activeMembers['id']]["exp"]=self.epoch_days_remaining(activeMembers['nextPlan']["renewalDate"])
self.family_data_remaining()
def family_data_remaining(self):
for member in self.family_members:
r=requests.get('https://w3b-api.ultramobile.com/v1/mint/account/'+self.id+'/multi-line/'+member+'/usage?', auth=BearerAuth(str(self.token)))
response=r.json()
response['remaining4G_GB']=self.conv_MB_to_GB(response['data']['remaining4G'])
self.info[member]['remaining4G']=response['remaining4G_GB']
def get_all_data_remaining(self):
self.login()
self.data_remaining()
self.get_family_members()
return self.info
def lines(self):
self.login()
self.get_family_members()
return self.info.keys()
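# A minimal usage sketch, assuming valid Mint Mobile credentials (the phone
# number and password below are placeholders):
#
#     mint = MintMobile("5551234567", "correct-horse-battery")
#     usage = mint.get_all_data_remaining()  # logs in, then returns per-line info
#     # usage[line_id] -> {"phone_number": ..., "line_name": ..., "remaining4G": <GB>, ...}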
| 41.347368 | 152 | 0.643075 | ["MIT"] | KTibow/HA-Mint-Mobile | custom_components/mintmobile/api.py | 3,928 | Python |
import argparse
import json
import insomniac.__version__ as __version__
from insomniac import network
from insomniac.activation import activation_controller
from insomniac.network import HTTP_OK
from insomniac.params import parse_arguments
from insomniac.utils import *
def run(activation_code="", starter_conf_file_path=None):
if not __version__.__debug_mode__:
print_timeless(COLOR_OKGREEN + __version__.__logo__ + COLOR_ENDC)
print_version()
activation_code_from_args = _get_activation_code_from_args()
if activation_code_from_args is not None:
activation_code = activation_code_from_args
activation_controller.validate(activation_code)
if not activation_controller.is_activated:
from insomniac.session import InsomniacSession
print_timeless("Using insomniac session-manager without extra-features")
insomniac_session = InsomniacSession(starter_conf_file_path)
else:
from insomniac.extra_features.session import ExtendedInsomniacSession
insomniac_session = ExtendedInsomniacSession(starter_conf_file_path)
insomniac_session.run()
def is_newer_version_available():
def versiontuple(v):
return tuple(map(int, (v.split("."))))
current_version = __version__.__version__
latest_version = _get_latest_version('insomniac')
if latest_version is not None and versiontuple(latest_version) > versiontuple(current_version):
return True, latest_version
return False, None
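# For example, versiontuple("3.7.2") == (3, 7, 2), so "3.10.0" compares greater
# than "3.9.1" as a tuple even though a plain string comparison would not.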
def print_version():
print_timeless_ui(COLOR_HEADER + f"Engine v{__version__.__version__}" + COLOR_ENDC)
is_new_version_available, latest_version = is_newer_version_available()
if is_new_version_available and insomniac_globals.is_insomniac():
print_timeless(COLOR_HEADER + f"Newer version is available (v{latest_version}). Please run" + COLOR_ENDC)
print_timeless(COLOR_HEADER + COLOR_BOLD + "python3 -m pip install insomniac --upgrade" + COLOR_ENDC)
print_timeless("")
def _get_latest_version(package):
latest_version = None
code, body, _ = network.get(f"https://pypi.python.org/pypi/{package}/json")
if code == HTTP_OK and body is not None:
json_package = json.loads(body)
latest_version = json_package['info']['version']
return latest_version
def _get_activation_code_from_args():
parser = ArgumentParser(add_help=False)
parser.add_argument('--activation-code')
try:
args, _ = parser.parse_known_args()
except (argparse.ArgumentError, TypeError):
return None
return args.activation_code
class ArgumentParser(argparse.ArgumentParser):
def error(self, message):
pass
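# A minimal usage sketch, assuming the insomniac package is installed and set
# up (device/session configuration is handled elsewhere in the package):
#
#     from insomniac import run
#     run(activation_code="")
#
# An --activation-code command-line flag, if present, overrides the argument
# via _get_activation_code_from_args() before the session starts.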
| 34.922078 | 113 | 0.755299 | ["MIT"] | felipe4334/Insomniac | insomniac/__init__.py | 2,689 | Python |
from marshmallow import fields
from .base import BaseSchema
class ScheduledAnalysisSchema(BaseSchema):
analysis_system_instance = fields.Url(required=True)
sample = fields.Url(required=True)
analysis_scheduled = fields.DateTime(required=True)
priority = fields.Int(required=True)
| 27.181818 | 56 | 0.782609 | ["MIT"] | mass-project/mass_api_client | mass_api_client/schemas/scheduled_analysis.py | 299 | Python |
import re
class Normalizer:
"""Normalizer return the text replaced with 'repl'.
If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
Args:
url_repl (str): replace all urls in text with this
tag_repl (str): replace all tags in text with this
emoji_repl (str): replace all emojis in text with this
email_repl (str): replace all emails in text with this
tel_repl (str): replace all tels in text with this
"""
def __init__(self, url_repl='[URL]', tag_repl='[TAG]', emoji_repl='[EMOJI]', email_repl='[EMAIL]', tel_repl='[TEL]'):
# repls
self.url_repl = url_repl
self.tag_repl = tag_repl
self.emoji_repl = emoji_repl
self.email_repl = email_repl
self.tel_repl = tel_repl
self._normalize = []
self._init_normalize()
def normalize(self, text: str) -> str:
"""Normalize text.
Args:
text (str): text to be normalized
"""
for normalize_fn, repl in self._normalize:
text = normalize_fn(text, repl)
return text
def _init_normalize(self) -> None:
"""Initialize normalize function.
If 'repl' is None, normalization is not applied to the pattern corresponding to 'repl'.
"""
if self.url_repl is not None:
self._normalize.append((self._url_normalize, self.url_repl))
if self.tag_repl is not None:
self._normalize.append((self._tag_normalize, self.tag_repl))
if self.emoji_repl is not None:
self._normalize.append((self._emoji_normalize, self.emoji_repl))
if self.email_repl is not None:
self._normalize.append((self._email_normalize, self.email_repl))
if self.tel_repl is not None:
self._normalize.append((self._tel_normalize, self.tel_repl))
def _url_normalize(self, text: str, repl: str, regex=re.compile(r'(https?|ftp|www)\S+')) -> str:
"""Return the string obtained by replacing all urls in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all urls in text with 'repl'
"""
text = regex.sub(repl, text)
return text
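    # A small illustrative example of the URL substitution above, using the
    # default '[URL]' replacement from __init__:
    #
    #     >>> Normalizer()._url_normalize("docs at https://example.com/page", "[URL]")
    #     'docs at [URL]'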
def _tag_normalize(self, text: str, repl: str, regex=re.compile(r'<[^>]*>')) -> str:
"""Return the string obtained by replacing all HTML tags in 'text' by the replacement 'repl'.
Args:
text (str): text to be replaced
repl (str): replace all HTML tags in text with 'repl'
"""
text = regex.sub(repl, text)
return text
def _emoji_normalize(self, text: str, repl: str, regex=re.compile(r'\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f48b\u200d\U0001f469|\U0001f9d1\U0001f3fb\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff\u200d\U0001f91d\u200d\U0001f9d1\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f469\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fb\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3ff|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f469\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f468\U0001f3fc\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fd\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3fe\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fb|\U0001f468\U0001f3ff\u200d\U
0001f91d\u200d\U0001f468\U0001f3fc|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fd|\U0001f468\U0001f3ff\u200d\U0001f91d\u200d\U0001f468\U0001f3fe|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f48b\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f48b\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f3f4\U000e0067\U000e0062\U000e0065\U000e006e\U000e0067\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0073\U000e0063\U000e0074\U000e007f|\U0001f3f4\U000e0067\U000e0062\U000e0077\U000e006c\U000e0073\U000e007f|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f468\u200d\u2764\ufe0f\u200d\U0001f468|\U0001f469\u200d\u2764\ufe0f\u200d\U0001f469|\U0001f441\ufe0f\u200d\U0001f5e8\ufe0f|\U0001f471\U0001f3fb\u200d\u2642\ufe0f|\U0001f471\U0001f3fc\u200d\u2642\ufe0f|\U0001f471\U0001f3fd\u200d\u2642\ufe0f|\U0001f471\U0001f3fe\u200d\u2642\ufe0f|\U0001f471\U0001f3ff\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2640\ufe0f|\U0001f471\U0001f3fc\u200d\u2640\ufe0f|\U0001f471\U0001f3fd\u200d\u2640\ufe0f|\U0001f471\U0001f3fe\u200d\u2640\ufe0f|\U0001f471\U0001f3ff\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642\ufe0f|\U0001f64d\U0001f3fc\u200d\u2642\ufe0f|\U0001f64d\U0001f3fd\u200d\u2642\ufe0f|\U0001f64d\U0001f3fe\u200d\u2642\ufe0f|\U0001f64d\U0001f3ff\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640\ufe0f|\U0001f64d\U0001f3fc\u200d\u2640\ufe0f|\U0001f64d\U0001f3fd\u200d\u2640\ufe0f|\U0001f64d\U0001f3fe\u200d\u2640\ufe0f|\U0001f64d\U0001f3ff\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642\ufe0f|\U0001f64e\U0001f3fc\u200d\u2642\ufe0f|\U0001f64e\U0001f3fd\u200d\u2642\ufe0f|\U0001f64e\U0001f3fe\u200d\u2642\ufe0f|\U0001f64e\U0001f3ff\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640\ufe0f|\U0001f64e\U0001f3fc\u200d\u2640\ufe0f|\U0001f64e\U0001f3fd\u200d\u2640\ufe0f|\U0001f64e\U0001f3fe\u200d\u2640\ufe0f|\U0001f64e\U0001f3ff\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2642\ufe0f|\U0001f645\U0001f3fc\u200d\u2642\ufe0f|\U0001f645\U0001f3fd\u200d\u2642\ufe0f|\U0001f645\U0001f3fe\u200d\u2642\ufe0f|\U0001f645\U0001f3ff\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2640\ufe0f|\U0001f645\U0001f3fc\u200d\u2640\ufe0f|\U0001f645\U0001f3fd\u200d\u2640\ufe0f|\U0001f645\U0001f3fe\u200d\u2640\ufe0f|\U0001f645\U0001f3ff\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2642\ufe0f|\U0001f646\U0001f3fc\u200d\u2642\ufe0f|\U0001f646\U0001f3fd\u200d\u2642\ufe0f|\U0001f646\U0001f3fe\u200d\u2642\ufe0f|\U0001f646\U0001f3ff\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2640\ufe0f|\U0001f646\U0001f3fc\u200d\u2640\ufe0f|\U0001f646\U0001f3fd\u200d\u2640\ufe0f|\U0001f646\U0001f3fe\u200d\u2640\ufe0f|\U0001f646\U0001f3ff\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2642\ufe0f|\U0001f481\U0001f3fc\u200d\u2642\ufe0f|\U0001f481\U0001f3fd\u200d\u2642\ufe0f|\U0001f481\U0001f3fe\u200d\u2642\ufe0f|\U0001f481\U0001f3ff\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2640\ufe0f|\U0001f481\U0001f3fc\u200d\u2640\ufe0f|\U0001f481\U0001f3fd\u200d\u2640\ufe0f|\U0001f481\U0001f3fe\u200d\u2640\ufe0f|\U0001f481
\U0001f3ff\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642\ufe0f|\U0001f64b\U0001f3fc\u200d\u2642\ufe0f|\U0001f64b\U0001f3fd\u200d\u2642\ufe0f|\U0001f64b\U0001f3fe\u200d\u2642\ufe0f|\U0001f64b\U0001f3ff\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640\ufe0f|\U0001f64b\U0001f3fc\u200d\u2640\ufe0f|\U0001f64b\U0001f3fd\u200d\u2640\ufe0f|\U0001f64b\U0001f3fe\u200d\u2640\ufe0f|\U0001f64b\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cf\U0001f3ff\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2642\ufe0f|\U0001f647\U0001f3fc\u200d\u2642\ufe0f|\U0001f647\U0001f3fd\u200d\u2642\ufe0f|\U0001f647\U0001f3fe\u200d\u2642\ufe0f|\U0001f647\U0001f3ff\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2640\ufe0f|\U0001f647\U0001f3fc\u200d\u2640\ufe0f|\U0001f647\U0001f3fd\u200d\u2640\ufe0f|\U0001f647\U0001f3fe\u200d\u2640\ufe0f|\U0001f647\U0001f3ff\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2642\ufe0f|\U0001f926\U0001f3fc\u200d\u2642\ufe0f|\U0001f926\U0001f3fd\u200d\u2642\ufe0f|\U0001f926\U0001f3fe\u200d\u2642\ufe0f|\U0001f926\U0001f3ff\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2640\ufe0f|\U0001f926\U0001f3fc\u200d\u2640\ufe0f|\U0001f926\U0001f3fd\u200d\u2640\ufe0f|\U0001f926\U0001f3fe\u200d\u2640\ufe0f|\U0001f926\U0001f3ff\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2642\ufe0f|\U0001f937\U0001f3fc\u200d\u2642\ufe0f|\U0001f937\U0001f3fd\u200d\u2642\ufe0f|\U0001f937\U0001f3fe\u200d\u2642\ufe0f|\U0001f937\U0001f3ff\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2640\ufe0f|\U0001f937\U0001f3fc\u200d\u2640\ufe0f|\U0001f937\U0001f3fd\u200d\u2640\ufe0f|\U0001f937\U0001f3fe\u200d\u2640\ufe0f|\U0001f937\U0001f3ff\u200d\u2640\ufe0f|\U0001f468\U0001f3fb\u200d\u2695\ufe0f|\U0001f468\U0001f3fc\u200d\u2695\ufe0f|\U0001f468\U0001f3fd\u200d\u2695\ufe0f|\U0001f468\U0001f3fe\u200d\u2695\ufe0f|\U0001f468\U0001f3ff\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695\ufe0f|\U0001f469\U0001f3fc\u200d\u2695\ufe0f|\U0001f469\U0001f3fd\u200d\u2695\ufe0f|\U0001f469\U0001f3fe\u200d\u2695\ufe0f|\U0001f469\U0001f3ff\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2696\ufe0f|\U0001f468\U0001f3fc\u200d\u2696\ufe0f|\U0001f468\U0001f3fd\u200d\u2696\ufe0f|\U0001f468\U0001f3fe\u200d\u2696\ufe0f|\U0001f468\U0001f3ff\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696\ufe0f|\U0001f469\U0001f3fc\u200d\u2696\ufe0f|\U0001f469\U0001f3fd\u200d\u2696\ufe0f|\U0001f469\U0001f3fe\u200d\u2696\ufe0f|\U0001f469\U0001f3ff\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2708\ufe0f|\U0001f468\U0001f3fc\u200d\u2708\ufe0f|\U0001f468\U0001f3fd\u200d\u2708\ufe0f|\U0001f468\U0001f3fe\u200d\u2708\ufe0f|\U0001f468\U0001f3ff\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708\ufe0f|\U0001f469\U0001f3fc\u200d\u2708\ufe0f|\U0001f469\U0001f3fd\u200d\u2708\ufe0f|\U0001f469\U0001f3fe\u200d\u2708\ufe0f|\U0001f469\U0001f3ff\u200d\u2708\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642\ufe0f|\U0001f46e\U0001f3fc\u200d\u2642\ufe0f|\U0001f46e\U0001f3fd\u200d\u2642\ufe0f|\U0001f46e\U0001f3fe\u200d\u2642\ufe0f|\U0001f46e\U0001f3ff\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640\ufe0f|\U0001f46e\U0001f3fc\u200d\u2640\ufe0f|\U0001f46e\U0001f3fd\u200d\u2640\ufe0f|\U0001f46e\U0001f3fe\u200d\u2640\ufe0f|\U0001f46e\U0001f3ff\u200d\u2640\ufe0f|\U0001f575\ufe0f
\u200d\u2642\ufe0f|\U0001f575\U0001f3fb\u200d\u2642\ufe0f|\U0001f575\U0001f3fc\u200d\u2642\ufe0f|\U0001f575\U0001f3fd\u200d\u2642\ufe0f|\U0001f575\U0001f3fe\u200d\u2642\ufe0f|\U0001f575\U0001f3ff\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2640\ufe0f|\U0001f575\U0001f3fb\u200d\u2640\ufe0f|\U0001f575\U0001f3fc\u200d\u2640\ufe0f|\U0001f575\U0001f3fd\u200d\u2640\ufe0f|\U0001f575\U0001f3fe\u200d\u2640\ufe0f|\U0001f575\U0001f3ff\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2642\ufe0f|\U0001f482\U0001f3fc\u200d\u2642\ufe0f|\U0001f482\U0001f3fd\u200d\u2642\ufe0f|\U0001f482\U0001f3fe\u200d\u2642\ufe0f|\U0001f482\U0001f3ff\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2640\ufe0f|\U0001f482\U0001f3fc\u200d\u2640\ufe0f|\U0001f482\U0001f3fd\u200d\u2640\ufe0f|\U0001f482\U0001f3fe\u200d\u2640\ufe0f|\U0001f482\U0001f3ff\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2642\ufe0f|\U0001f477\U0001f3fc\u200d\u2642\ufe0f|\U0001f477\U0001f3fd\u200d\u2642\ufe0f|\U0001f477\U0001f3fe\u200d\u2642\ufe0f|\U0001f477\U0001f3ff\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2640\ufe0f|\U0001f477\U0001f3fc\u200d\u2640\ufe0f|\U0001f477\U0001f3fd\u200d\u2640\ufe0f|\U0001f477\U0001f3fe\u200d\u2640\ufe0f|\U0001f477\U0001f3ff\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2642\ufe0f|\U0001f473\U0001f3fc\u200d\u2642\ufe0f|\U0001f473\U0001f3fd\u200d\u2642\ufe0f|\U0001f473\U0001f3fe\u200d\u2642\ufe0f|\U0001f473\U0001f3ff\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2640\ufe0f|\U0001f473\U0001f3fc\u200d\u2640\ufe0f|\U0001f473\U0001f3fd\u200d\u2640\ufe0f|\U0001f473\U0001f3fe\u200d\u2640\ufe0f|\U0001f473\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9b9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d9\U0001f3ff\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642\ufe0f|\U0001f9da\U0001f3fc\u200d\u2642\ufe0f|\U0001f9da\U0001f3fd\u200d\u2642\ufe0f|\U0001f9da\U0001f3fe\u200d\u2642\ufe0f|\U0001f9da\U0001f3ff\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640\ufe0f|\U0001f9da\U0001f3fc\u200d\u2640\ufe0f|\U0001f9da\U0001f3fd\u200d\u2640\ufe0f|\U0001f9da\U0001f3fe\u200d\u2640\ufe0f|\U0001f9da\U0001f3ff\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642\ufe0f|\U0001f9db\U0001f3fc\u200d\u2642\ufe0f|\U0001f9db\U0001f3fd\u200d\u2642\ufe0f|\U0001f9db\U0001f3fe\u200d\u2642\ufe0f|\U0001f9db\U0001f3ff\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640\ufe0f|\U0001f9db\U0001f3fc\u200d\u2640\ufe0f|\U0001f9db\U0001f3fd\u200d\u2640\ufe0f|\U0001f9db\U0001f3fe\u200d\u2640\ufe0f|\U0001f9db\U0001f3ff\u200d\u26
40\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dc\U0001f3ff\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9dd\U0001f3ff\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2642\ufe0f|\U0001f486\U0001f3fc\u200d\u2642\ufe0f|\U0001f486\U0001f3fd\u200d\u2642\ufe0f|\U0001f486\U0001f3fe\u200d\u2642\ufe0f|\U0001f486\U0001f3ff\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2640\ufe0f|\U0001f486\U0001f3fc\u200d\u2640\ufe0f|\U0001f486\U0001f3fd\u200d\u2640\ufe0f|\U0001f486\U0001f3fe\u200d\u2640\ufe0f|\U0001f486\U0001f3ff\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2642\ufe0f|\U0001f487\U0001f3fc\u200d\u2642\ufe0f|\U0001f487\U0001f3fd\u200d\u2642\ufe0f|\U0001f487\U0001f3fe\u200d\u2642\ufe0f|\U0001f487\U0001f3ff\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2640\ufe0f|\U0001f487\U0001f3fc\u200d\u2640\ufe0f|\U0001f487\U0001f3fd\u200d\u2640\ufe0f|\U0001f487\U0001f3fe\u200d\u2640\ufe0f|\U0001f487\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2642\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fc\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fe\u200d\u2640\ufe0f|\U0001f9cd\U0001f3ff\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2642\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fc\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fd\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fe\u200d\u2640\ufe0f|\U0001f9ce\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c3\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d6\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642\ufe
0f|\U0001f9d7\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d7\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cc\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cc\U0001f3ff\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2642\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fc\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fd\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fe\u200d\u2640\ufe0f|\U0001f3c4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2642\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fc\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fd\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fe\u200d\u2640\ufe0f|\U0001f6a3\U0001f3ff\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2642\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fc\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fd\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fe\u200d\u2640\ufe0f|\U0001f3ca\U0001f3ff\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2642\ufe0f|\u26f9\U0001f3fb\u200d\u2642\ufe0f|\u26f9\U0001f3fc\u200d\u2642\ufe0f|\u26f9\U0001f3fd\u200d\u2642\ufe0f|\u26f9\U0001f3fe\u200d\u2642\ufe0f|\u26f9\U0001f3ff\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2640\ufe0f|\u26f9\U0001f3fb\u200d\u2640\ufe0f|\u26f9\U0001f3fc\u200d\u2640\ufe0f|\u26f9\U0001f3fd\u200d\u2640\ufe0f|\u26f9\U0001f3fe\u200d\u2640\ufe0f|\u26f9\U0001f3ff\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2642\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2642\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fb\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fc\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fd\u200d\u2640\ufe0f|\U0001f3cb\U0001f3fe\u200d\u2640\ufe0f|\U0001f3cb\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fd\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b4\U0001f3ff\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fe\u200d\u2642\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fc\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fd\u200d\u2640\ufe
0f|\U0001f6b5\U0001f3fe\u200d\u2640\ufe0f|\U0001f6b5\U0001f3ff\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2642\ufe0f|\U0001f938\U0001f3fc\u200d\u2642\ufe0f|\U0001f938\U0001f3fd\u200d\u2642\ufe0f|\U0001f938\U0001f3fe\u200d\u2642\ufe0f|\U0001f938\U0001f3ff\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2640\ufe0f|\U0001f938\U0001f3fc\u200d\u2640\ufe0f|\U0001f938\U0001f3fd\u200d\u2640\ufe0f|\U0001f938\U0001f3fe\u200d\u2640\ufe0f|\U0001f938\U0001f3ff\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642\ufe0f|\U0001f93d\U0001f3fc\u200d\u2642\ufe0f|\U0001f93d\U0001f3fd\u200d\u2642\ufe0f|\U0001f93d\U0001f3fe\u200d\u2642\ufe0f|\U0001f93d\U0001f3ff\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640\ufe0f|\U0001f93d\U0001f3fc\u200d\u2640\ufe0f|\U0001f93d\U0001f3fd\u200d\u2640\ufe0f|\U0001f93d\U0001f3fe\u200d\u2640\ufe0f|\U0001f93d\U0001f3ff\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642\ufe0f|\U0001f93e\U0001f3fc\u200d\u2642\ufe0f|\U0001f93e\U0001f3fd\u200d\u2642\ufe0f|\U0001f93e\U0001f3fe\u200d\u2642\ufe0f|\U0001f93e\U0001f3ff\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640\ufe0f|\U0001f93e\U0001f3fc\u200d\u2640\ufe0f|\U0001f93e\U0001f3fd\u200d\u2640\ufe0f|\U0001f93e\U0001f3fe\u200d\u2640\ufe0f|\U0001f93e\U0001f3ff\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2642\ufe0f|\U0001f939\U0001f3fc\u200d\u2642\ufe0f|\U0001f939\U0001f3fd\u200d\u2642\ufe0f|\U0001f939\U0001f3fe\u200d\u2642\ufe0f|\U0001f939\U0001f3ff\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2640\ufe0f|\U0001f939\U0001f3fc\u200d\u2640\ufe0f|\U0001f939\U0001f3fd\u200d\u2640\ufe0f|\U0001f939\U0001f3fe\u200d\u2640\ufe0f|\U0001f939\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2642\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fc\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fd\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fe\u200d\u2640\ufe0f|\U0001f9d8\U0001f3ff\u200d\u2640\ufe0f|\U0001f9d1\u200d\U0001f91d\u200d\U0001f9d1|\U0001f469\u200d\u2764\u200d\U0001f468|\U0001f468\u200d\u2764\u200d\U0001f468|\U0001f469\u200d\u2764\u200d\U0001f469|\U0001f468\u200d\U0001f469\u200d\U0001f466|\U0001f468\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f469\u200d\U0001f467|\U0001f468\u200d\U0001f466\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f466|\U0001f468\u200d\U0001f467\u200d\U0001f467|\U0001f469\u200d\U0001f466\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f466|\U0001f469\u200d\U0001f467\u200d\U0001f467|\U0001f441\u200d\U0001f5e8\ufe0f|\U0001f441\ufe0f\u200d\U0001f5e8|\U0001f471\u200d\u2642\ufe0f|\U0001f471\U0001f3fb\u200d\u2642|\U0001f471\U0001f3fc\u200d\u2642|\U0001f471\U0001f3fd\u200d\u2642|\U0001f471\U0001f3fe\u200d\u2642|\U0001f471\U0001f3ff\u200d\u2642|\U0001f468\U0001f3fb\u200d\U0001f9b0|\U0001f468\U0001f3fc\u200d\U0001f9b0|\U0001f468\U0001f3fd\u200d\U0001f9b0|\U0001f468\U0001f3fe\u200d\U0001f9b0|\U0001f468\U0001f3ff\u200d\U0001f9b0|\U0001f468\U0001f3fb\u200d\U0001f9b1|\U0001f468\U0001f3fc\u200d\U0001f9b1|\U0001f468\U0001f3fd\u200d\U0001f9b1|\U0001f468\U0001f3fe\u200d\U0001f9b1|\U0001f468\U0001f3ff\u200d\U0001f9b1|\U0001f468\U0001f3fb\u200d\U0001f9b3|\U0001f468\U0001f3fc\u200d\U0001f9b3|\U0001f468\U0001f3fd\u200d\U0001f9b3|\U0001f468\U0001f3fe\u200d\U0001f9b3|\U0001f468\U0001f3ff\u200d\U0001f9b3|\U0001f468\U0001f3fb\u200d\U0001f
9b2|\U0001f468\U0001f3fc\u200d\U0001f9b2|\U0001f468\U0001f3fd\u200d\U0001f9b2|\U0001f468\U0001f3fe\u200d\U0001f9b2|\U0001f468\U0001f3ff\u200d\U0001f9b2|\U0001f471\u200d\u2640\ufe0f|\U0001f471\U0001f3fb\u200d\u2640|\U0001f471\U0001f3fc\u200d\u2640|\U0001f471\U0001f3fd\u200d\u2640|\U0001f471\U0001f3fe\u200d\u2640|\U0001f471\U0001f3ff\u200d\u2640|\U0001f469\U0001f3fb\u200d\U0001f9b0|\U0001f469\U0001f3fc\u200d\U0001f9b0|\U0001f469\U0001f3fd\u200d\U0001f9b0|\U0001f469\U0001f3fe\u200d\U0001f9b0|\U0001f469\U0001f3ff\u200d\U0001f9b0|\U0001f469\U0001f3fb\u200d\U0001f9b1|\U0001f469\U0001f3fc\u200d\U0001f9b1|\U0001f469\U0001f3fd\u200d\U0001f9b1|\U0001f469\U0001f3fe\u200d\U0001f9b1|\U0001f469\U0001f3ff\u200d\U0001f9b1|\U0001f469\U0001f3fb\u200d\U0001f9b3|\U0001f469\U0001f3fc\u200d\U0001f9b3|\U0001f469\U0001f3fd\u200d\U0001f9b3|\U0001f469\U0001f3fe\u200d\U0001f9b3|\U0001f469\U0001f3ff\u200d\U0001f9b3|\U0001f469\U0001f3fb\u200d\U0001f9b2|\U0001f469\U0001f3fc\u200d\U0001f9b2|\U0001f469\U0001f3fd\u200d\U0001f9b2|\U0001f469\U0001f3fe\u200d\U0001f9b2|\U0001f469\U0001f3ff\u200d\U0001f9b2|\U0001f64d\u200d\u2642\ufe0f|\U0001f64d\U0001f3fb\u200d\u2642|\U0001f64d\U0001f3fc\u200d\u2642|\U0001f64d\U0001f3fd\u200d\u2642|\U0001f64d\U0001f3fe\u200d\u2642|\U0001f64d\U0001f3ff\u200d\u2642|\U0001f64d\u200d\u2640\ufe0f|\U0001f64d\U0001f3fb\u200d\u2640|\U0001f64d\U0001f3fc\u200d\u2640|\U0001f64d\U0001f3fd\u200d\u2640|\U0001f64d\U0001f3fe\u200d\u2640|\U0001f64d\U0001f3ff\u200d\u2640|\U0001f64e\u200d\u2642\ufe0f|\U0001f64e\U0001f3fb\u200d\u2642|\U0001f64e\U0001f3fc\u200d\u2642|\U0001f64e\U0001f3fd\u200d\u2642|\U0001f64e\U0001f3fe\u200d\u2642|\U0001f64e\U0001f3ff\u200d\u2642|\U0001f64e\u200d\u2640\ufe0f|\U0001f64e\U0001f3fb\u200d\u2640|\U0001f64e\U0001f3fc\u200d\u2640|\U0001f64e\U0001f3fd\u200d\u2640|\U0001f64e\U0001f3fe\u200d\u2640|\U0001f64e\U0001f3ff\u200d\u2640|\U0001f645\u200d\u2642\ufe0f|\U0001f645\U0001f3fb\u200d\u2642|\U0001f645\U0001f3fc\u200d\u2642|\U0001f645\U0001f3fd\u200d\u2642|\U0001f645\U0001f3fe\u200d\u2642|\U0001f645\U0001f3ff\u200d\u2642|\U0001f645\u200d\u2640\ufe0f|\U0001f645\U0001f3fb\u200d\u2640|\U0001f645\U0001f3fc\u200d\u2640|\U0001f645\U0001f3fd\u200d\u2640|\U0001f645\U0001f3fe\u200d\u2640|\U0001f645\U0001f3ff\u200d\u2640|\U0001f646\u200d\u2642\ufe0f|\U0001f646\U0001f3fb\u200d\u2642|\U0001f646\U0001f3fc\u200d\u2642|\U0001f646\U0001f3fd\u200d\u2642|\U0001f646\U0001f3fe\u200d\u2642|\U0001f646\U0001f3ff\u200d\u2642|\U0001f646\u200d\u2640\ufe0f|\U0001f646\U0001f3fb\u200d\u2640|\U0001f646\U0001f3fc\u200d\u2640|\U0001f646\U0001f3fd\u200d\u2640|\U0001f646\U0001f3fe\u200d\u2640|\U0001f646\U0001f3ff\u200d\u2640|\U0001f481\u200d\u2642\ufe0f|\U0001f481\U0001f3fb\u200d\u2642|\U0001f481\U0001f3fc\u200d\u2642|\U0001f481\U0001f3fd\u200d\u2642|\U0001f481\U0001f3fe\u200d\u2642|\U0001f481\U0001f3ff\u200d\u2642|\U0001f481\u200d\u2640\ufe0f|\U0001f481\U0001f3fb\u200d\u2640|\U0001f481\U0001f3fc\u200d\u2640|\U0001f481\U0001f3fd\u200d\u2640|\U0001f481\U0001f3fe\u200d\u2640|\U0001f481\U0001f3ff\u200d\u2640|\U0001f64b\u200d\u2642\ufe0f|\U0001f64b\U0001f3fb\u200d\u2642|\U0001f64b\U0001f3fc\u200d\u2642|\U0001f64b\U0001f3fd\u200d\u2642|\U0001f64b\U0001f3fe\u200d\u2642|\U0001f64b\U0001f3ff\u200d\u2642|\U0001f64b\u200d\u2640\ufe0f|\U0001f64b\U0001f3fb\u200d\u2640|\U0001f64b\U0001f3fc\u200d\u2640|\U0001f64b\U0001f3fd\u200d\u2640|\U0001f64b\U0001f3fe\u200d\u2640|\U0001f64b\U0001f3ff\u200d\u2640|\U0001f9cf\u200d\u2642\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2642|\U0001f9cf\U0001f3fc\u200d\u2642|\U0001f9cf\U0001f3fd\u200d\u2642|\U0001f9cf\U0
001f3fe\u200d\u2642|\U0001f9cf\U0001f3ff\u200d\u2642|\U0001f9cf\u200d\u2640\ufe0f|\U0001f9cf\U0001f3fb\u200d\u2640|\U0001f9cf\U0001f3fc\u200d\u2640|\U0001f9cf\U0001f3fd\u200d\u2640|\U0001f9cf\U0001f3fe\u200d\u2640|\U0001f9cf\U0001f3ff\u200d\u2640|\U0001f647\u200d\u2642\ufe0f|\U0001f647\U0001f3fb\u200d\u2642|\U0001f647\U0001f3fc\u200d\u2642|\U0001f647\U0001f3fd\u200d\u2642|\U0001f647\U0001f3fe\u200d\u2642|\U0001f647\U0001f3ff\u200d\u2642|\U0001f647\u200d\u2640\ufe0f|\U0001f647\U0001f3fb\u200d\u2640|\U0001f647\U0001f3fc\u200d\u2640|\U0001f647\U0001f3fd\u200d\u2640|\U0001f647\U0001f3fe\u200d\u2640|\U0001f647\U0001f3ff\u200d\u2640|\U0001f926\u200d\u2642\ufe0f|\U0001f926\U0001f3fb\u200d\u2642|\U0001f926\U0001f3fc\u200d\u2642|\U0001f926\U0001f3fd\u200d\u2642|\U0001f926\U0001f3fe\u200d\u2642|\U0001f926\U0001f3ff\u200d\u2642|\U0001f926\u200d\u2640\ufe0f|\U0001f926\U0001f3fb\u200d\u2640|\U0001f926\U0001f3fc\u200d\u2640|\U0001f926\U0001f3fd\u200d\u2640|\U0001f926\U0001f3fe\u200d\u2640|\U0001f926\U0001f3ff\u200d\u2640|\U0001f937\u200d\u2642\ufe0f|\U0001f937\U0001f3fb\u200d\u2642|\U0001f937\U0001f3fc\u200d\u2642|\U0001f937\U0001f3fd\u200d\u2642|\U0001f937\U0001f3fe\u200d\u2642|\U0001f937\U0001f3ff\u200d\u2642|\U0001f937\u200d\u2640\ufe0f|\U0001f937\U0001f3fb\u200d\u2640|\U0001f937\U0001f3fc\u200d\u2640|\U0001f937\U0001f3fd\u200d\u2640|\U0001f937\U0001f3fe\u200d\u2640|\U0001f937\U0001f3ff\u200d\u2640|\U0001f468\u200d\u2695\ufe0f|\U0001f468\U0001f3fb\u200d\u2695|\U0001f468\U0001f3fc\u200d\u2695|\U0001f468\U0001f3fd\u200d\u2695|\U0001f468\U0001f3fe\u200d\u2695|\U0001f468\U0001f3ff\u200d\u2695|\U0001f469\u200d\u2695\ufe0f|\U0001f469\U0001f3fb\u200d\u2695|\U0001f469\U0001f3fc\u200d\u2695|\U0001f469\U0001f3fd\u200d\u2695|\U0001f469\U0001f3fe\u200d\u2695|\U0001f469\U0001f3ff\u200d\u2695|\U0001f468\U0001f3fb\u200d\U0001f393|\U0001f468\U0001f3fc\u200d\U0001f393|\U0001f468\U0001f3fd\u200d\U0001f393|\U0001f468\U0001f3fe\u200d\U0001f393|\U0001f468\U0001f3ff\u200d\U0001f393|\U0001f469\U0001f3fb\u200d\U0001f393|\U0001f469\U0001f3fc\u200d\U0001f393|\U0001f469\U0001f3fd\u200d\U0001f393|\U0001f469\U0001f3fe\u200d\U0001f393|\U0001f469\U0001f3ff\u200d\U0001f393|\U0001f468\U0001f3fb\u200d\U0001f3eb|\U0001f468\U0001f3fc\u200d\U0001f3eb|\U0001f468\U0001f3fd\u200d\U0001f3eb|\U0001f468\U0001f3fe\u200d\U0001f3eb|\U0001f468\U0001f3ff\u200d\U0001f3eb|\U0001f469\U0001f3fb\u200d\U0001f3eb|\U0001f469\U0001f3fc\u200d\U0001f3eb|\U0001f469\U0001f3fd\u200d\U0001f3eb|\U0001f469\U0001f3fe\u200d\U0001f3eb|\U0001f469\U0001f3ff\u200d\U0001f3eb|\U0001f468\u200d\u2696\ufe0f|\U0001f468\U0001f3fb\u200d\u2696|\U0001f468\U0001f3fc\u200d\u2696|\U0001f468\U0001f3fd\u200d\u2696|\U0001f468\U0001f3fe\u200d\u2696|\U0001f468\U0001f3ff\u200d\u2696|\U0001f469\u200d\u2696\ufe0f|\U0001f469\U0001f3fb\u200d\u2696|\U0001f469\U0001f3fc\u200d\u2696|\U0001f469\U0001f3fd\u200d\u2696|\U0001f469\U0001f3fe\u200d\u2696|\U0001f469\U0001f3ff\u200d\u2696|\U0001f468\U0001f3fb\u200d\U0001f33e|\U0001f468\U0001f3fc\u200d\U0001f33e|\U0001f468\U0001f3fd\u200d\U0001f33e|\U0001f468\U0001f3fe\u200d\U0001f33e|\U0001f468\U0001f3ff\u200d\U0001f33e|\U0001f469\U0001f3fb\u200d\U0001f33e|\U0001f469\U0001f3fc\u200d\U0001f33e|\U0001f469\U0001f3fd\u200d\U0001f33e|\U0001f469\U0001f3fe\u200d\U0001f33e|\U0001f469\U0001f3ff\u200d\U0001f33e|\U0001f468\U0001f3fb\u200d\U0001f373|\U0001f468\U0001f3fc\u200d\U0001f373|\U0001f468\U0001f3fd\u200d\U0001f373|\U0001f468\U0001f3fe\u200d\U0001f373|\U0001f468\U0001f3ff\u200d\U0001f373|\U0001f469\U0001f3fb\u200d\U0001f373|\U0001f469\U0001f3fc\u200d\U0001f373
|\U0001f469\U0001f3fd\u200d\U0001f373|\U0001f469\U0001f3fe\u200d\U0001f373|\U0001f469\U0001f3ff\u200d\U0001f373|\U0001f468\U0001f3fb\u200d\U0001f527|\U0001f468\U0001f3fc\u200d\U0001f527|\U0001f468\U0001f3fd\u200d\U0001f527|\U0001f468\U0001f3fe\u200d\U0001f527|\U0001f468\U0001f3ff\u200d\U0001f527|\U0001f469\U0001f3fb\u200d\U0001f527|\U0001f469\U0001f3fc\u200d\U0001f527|\U0001f469\U0001f3fd\u200d\U0001f527|\U0001f469\U0001f3fe\u200d\U0001f527|\U0001f469\U0001f3ff\u200d\U0001f527|\U0001f468\U0001f3fb\u200d\U0001f3ed|\U0001f468\U0001f3fc\u200d\U0001f3ed|\U0001f468\U0001f3fd\u200d\U0001f3ed|\U0001f468\U0001f3fe\u200d\U0001f3ed|\U0001f468\U0001f3ff\u200d\U0001f3ed|\U0001f469\U0001f3fb\u200d\U0001f3ed|\U0001f469\U0001f3fc\u200d\U0001f3ed|\U0001f469\U0001f3fd\u200d\U0001f3ed|\U0001f469\U0001f3fe\u200d\U0001f3ed|\U0001f469\U0001f3ff\u200d\U0001f3ed|\U0001f468\U0001f3fb\u200d\U0001f4bc|\U0001f468\U0001f3fc\u200d\U0001f4bc|\U0001f468\U0001f3fd\u200d\U0001f4bc|\U0001f468\U0001f3fe\u200d\U0001f4bc|\U0001f468\U0001f3ff\u200d\U0001f4bc|\U0001f469\U0001f3fb\u200d\U0001f4bc|\U0001f469\U0001f3fc\u200d\U0001f4bc|\U0001f469\U0001f3fd\u200d\U0001f4bc|\U0001f469\U0001f3fe\u200d\U0001f4bc|\U0001f469\U0001f3ff\u200d\U0001f4bc|\U0001f468\U0001f3fb\u200d\U0001f52c|\U0001f468\U0001f3fc\u200d\U0001f52c|\U0001f468\U0001f3fd\u200d\U0001f52c|\U0001f468\U0001f3fe\u200d\U0001f52c|\U0001f468\U0001f3ff\u200d\U0001f52c|\U0001f469\U0001f3fb\u200d\U0001f52c|\U0001f469\U0001f3fc\u200d\U0001f52c|\U0001f469\U0001f3fd\u200d\U0001f52c|\U0001f469\U0001f3fe\u200d\U0001f52c|\U0001f469\U0001f3ff\u200d\U0001f52c|\U0001f468\U0001f3fb\u200d\U0001f4bb|\U0001f468\U0001f3fc\u200d\U0001f4bb|\U0001f468\U0001f3fd\u200d\U0001f4bb|\U0001f468\U0001f3fe\u200d\U0001f4bb|\U0001f468\U0001f3ff\u200d\U0001f4bb|\U0001f469\U0001f3fb\u200d\U0001f4bb|\U0001f469\U0001f3fc\u200d\U0001f4bb|\U0001f469\U0001f3fd\u200d\U0001f4bb|\U0001f469\U0001f3fe\u200d\U0001f4bb|\U0001f469\U0001f3ff\u200d\U0001f4bb|\U0001f468\U0001f3fb\u200d\U0001f3a4|\U0001f468\U0001f3fc\u200d\U0001f3a4|\U0001f468\U0001f3fd\u200d\U0001f3a4|\U0001f468\U0001f3fe\u200d\U0001f3a4|\U0001f468\U0001f3ff\u200d\U0001f3a4|\U0001f469\U0001f3fb\u200d\U0001f3a4|\U0001f469\U0001f3fc\u200d\U0001f3a4|\U0001f469\U0001f3fd\u200d\U0001f3a4|\U0001f469\U0001f3fe\u200d\U0001f3a4|\U0001f469\U0001f3ff\u200d\U0001f3a4|\U0001f468\U0001f3fb\u200d\U0001f3a8|\U0001f468\U0001f3fc\u200d\U0001f3a8|\U0001f468\U0001f3fd\u200d\U0001f3a8|\U0001f468\U0001f3fe\u200d\U0001f3a8|\U0001f468\U0001f3ff\u200d\U0001f3a8|\U0001f469\U0001f3fb\u200d\U0001f3a8|\U0001f469\U0001f3fc\u200d\U0001f3a8|\U0001f469\U0001f3fd\u200d\U0001f3a8|\U0001f469\U0001f3fe\u200d\U0001f3a8|\U0001f469\U0001f3ff\u200d\U0001f3a8|\U0001f468\u200d\u2708\ufe0f|\U0001f468\U0001f3fb\u200d\u2708|\U0001f468\U0001f3fc\u200d\u2708|\U0001f468\U0001f3fd\u200d\u2708|\U0001f468\U0001f3fe\u200d\u2708|\U0001f468\U0001f3ff\u200d\u2708|\U0001f469\u200d\u2708\ufe0f|\U0001f469\U0001f3fb\u200d\u2708|\U0001f469\U0001f3fc\u200d\u2708|\U0001f469\U0001f3fd\u200d\u2708|\U0001f469\U0001f3fe\u200d\u2708|\U0001f469\U0001f3ff\u200d\u2708|\U0001f468\U0001f3fb\u200d\U0001f680|\U0001f468\U0001f3fc\u200d\U0001f680|\U0001f468\U0001f3fd\u200d\U0001f680|\U0001f468\U0001f3fe\u200d\U0001f680|\U0001f468\U0001f3ff\u200d\U0001f680|\U0001f469\U0001f3fb\u200d\U0001f680|\U0001f469\U0001f3fc\u200d\U0001f680|\U0001f469\U0001f3fd\u200d\U0001f680|\U0001f469\U0001f3fe\u200d\U0001f680|\U0001f469\U0001f3ff\u200d\U0001f680|\U0001f468\U0001f3fb\u200d\U0001f692|\U0001f468\U0001f3fc\u200d\U0001f692|\U0001f468\U0001f3fd\
u200d\U0001f692|\U0001f468\U0001f3fe\u200d\U0001f692|\U0001f468\U0001f3ff\u200d\U0001f692|\U0001f469\U0001f3fb\u200d\U0001f692|\U0001f469\U0001f3fc\u200d\U0001f692|\U0001f469\U0001f3fd\u200d\U0001f692|\U0001f469\U0001f3fe\u200d\U0001f692|\U0001f469\U0001f3ff\u200d\U0001f692|\U0001f46e\u200d\u2642\ufe0f|\U0001f46e\U0001f3fb\u200d\u2642|\U0001f46e\U0001f3fc\u200d\u2642|\U0001f46e\U0001f3fd\u200d\u2642|\U0001f46e\U0001f3fe\u200d\u2642|\U0001f46e\U0001f3ff\u200d\u2642|\U0001f46e\u200d\u2640\ufe0f|\U0001f46e\U0001f3fb\u200d\u2640|\U0001f46e\U0001f3fc\u200d\u2640|\U0001f46e\U0001f3fd\u200d\u2640|\U0001f46e\U0001f3fe\u200d\u2640|\U0001f46e\U0001f3ff\u200d\u2640|\U0001f575\u200d\u2642\ufe0f|\U0001f575\ufe0f\u200d\u2642|\U0001f575\U0001f3fb\u200d\u2642|\U0001f575\U0001f3fc\u200d\u2642|\U0001f575\U0001f3fd\u200d\u2642|\U0001f575\U0001f3fe\u200d\u2642|\U0001f575\U0001f3ff\u200d\u2642|\U0001f575\u200d\u2640\ufe0f|\U0001f575\ufe0f\u200d\u2640|\U0001f575\U0001f3fb\u200d\u2640|\U0001f575\U0001f3fc\u200d\u2640|\U0001f575\U0001f3fd\u200d\u2640|\U0001f575\U0001f3fe\u200d\u2640|\U0001f575\U0001f3ff\u200d\u2640|\U0001f482\u200d\u2642\ufe0f|\U0001f482\U0001f3fb\u200d\u2642|\U0001f482\U0001f3fc\u200d\u2642|\U0001f482\U0001f3fd\u200d\u2642|\U0001f482\U0001f3fe\u200d\u2642|\U0001f482\U0001f3ff\u200d\u2642|\U0001f482\u200d\u2640\ufe0f|\U0001f482\U0001f3fb\u200d\u2640|\U0001f482\U0001f3fc\u200d\u2640|\U0001f482\U0001f3fd\u200d\u2640|\U0001f482\U0001f3fe\u200d\u2640|\U0001f482\U0001f3ff\u200d\u2640|\U0001f477\u200d\u2642\ufe0f|\U0001f477\U0001f3fb\u200d\u2642|\U0001f477\U0001f3fc\u200d\u2642|\U0001f477\U0001f3fd\u200d\u2642|\U0001f477\U0001f3fe\u200d\u2642|\U0001f477\U0001f3ff\u200d\u2642|\U0001f477\u200d\u2640\ufe0f|\U0001f477\U0001f3fb\u200d\u2640|\U0001f477\U0001f3fc\u200d\u2640|\U0001f477\U0001f3fd\u200d\u2640|\U0001f477\U0001f3fe\u200d\u2640|\U0001f477\U0001f3ff\u200d\u2640|\U0001f473\u200d\u2642\ufe0f|\U0001f473\U0001f3fb\u200d\u2642|\U0001f473\U0001f3fc\u200d\u2642|\U0001f473\U0001f3fd\u200d\u2642|\U0001f473\U0001f3fe\u200d\u2642|\U0001f473\U0001f3ff\u200d\u2642|\U0001f473\u200d\u2640\ufe0f|\U0001f473\U0001f3fb\u200d\u2640|\U0001f473\U0001f3fc\u200d\u2640|\U0001f473\U0001f3fd\u200d\u2640|\U0001f473\U0001f3fe\u200d\u2640|\U0001f473\U0001f3ff\u200d\u2640|\U0001f9b8\u200d\u2642\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2642|\U0001f9b8\U0001f3fc\u200d\u2642|\U0001f9b8\U0001f3fd\u200d\u2642|\U0001f9b8\U0001f3fe\u200d\u2642|\U0001f9b8\U0001f3ff\u200d\u2642|\U0001f9b8\u200d\u2640\ufe0f|\U0001f9b8\U0001f3fb\u200d\u2640|\U0001f9b8\U0001f3fc\u200d\u2640|\U0001f9b8\U0001f3fd\u200d\u2640|\U0001f9b8\U0001f3fe\u200d\u2640|\U0001f9b8\U0001f3ff\u200d\u2640|\U0001f9b9\u200d\u2642\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2642|\U0001f9b9\U0001f3fc\u200d\u2642|\U0001f9b9\U0001f3fd\u200d\u2642|\U0001f9b9\U0001f3fe\u200d\u2642|\U0001f9b9\U0001f3ff\u200d\u2642|\U0001f9b9\u200d\u2640\ufe0f|\U0001f9b9\U0001f3fb\u200d\u2640|\U0001f9b9\U0001f3fc\u200d\u2640|\U0001f9b9\U0001f3fd\u200d\u2640|\U0001f9b9\U0001f3fe\u200d\u2640|\U0001f9b9\U0001f3ff\u200d\u2640|\U0001f9d9\u200d\u2642\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2642|\U0001f9d9\U0001f3fc\u200d\u2642|\U0001f9d9\U0001f3fd\u200d\u2642|\U0001f9d9\U0001f3fe\u200d\u2642|\U0001f9d9\U0001f3ff\u200d\u2642|\U0001f9d9\u200d\u2640\ufe0f|\U0001f9d9\U0001f3fb\u200d\u2640|\U0001f9d9\U0001f3fc\u200d\u2640|\U0001f9d9\U0001f3fd\u200d\u2640|\U0001f9d9\U0001f3fe\u200d\u2640|\U0001f9d9\U0001f3ff\u200d\u2640|\U0001f9da\u200d\u2642\ufe0f|\U0001f9da\U0001f3fb\u200d\u2642|\U0001f9da\U0001f3fc\u200d\u2642|\U0001f9da\U0001f3fd\u2
00d\u2642|\U0001f9da\U0001f3fe\u200d\u2642|\U0001f9da\U0001f3ff\u200d\u2642|\U0001f9da\u200d\u2640\ufe0f|\U0001f9da\U0001f3fb\u200d\u2640|\U0001f9da\U0001f3fc\u200d\u2640|\U0001f9da\U0001f3fd\u200d\u2640|\U0001f9da\U0001f3fe\u200d\u2640|\U0001f9da\U0001f3ff\u200d\u2640|\U0001f9db\u200d\u2642\ufe0f|\U0001f9db\U0001f3fb\u200d\u2642|\U0001f9db\U0001f3fc\u200d\u2642|\U0001f9db\U0001f3fd\u200d\u2642|\U0001f9db\U0001f3fe\u200d\u2642|\U0001f9db\U0001f3ff\u200d\u2642|\U0001f9db\u200d\u2640\ufe0f|\U0001f9db\U0001f3fb\u200d\u2640|\U0001f9db\U0001f3fc\u200d\u2640|\U0001f9db\U0001f3fd\u200d\u2640|\U0001f9db\U0001f3fe\u200d\u2640|\U0001f9db\U0001f3ff\u200d\u2640|\U0001f9dc\u200d\u2642\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2642|\U0001f9dc\U0001f3fc\u200d\u2642|\U0001f9dc\U0001f3fd\u200d\u2642|\U0001f9dc\U0001f3fe\u200d\u2642|\U0001f9dc\U0001f3ff\u200d\u2642|\U0001f9dc\u200d\u2640\ufe0f|\U0001f9dc\U0001f3fb\u200d\u2640|\U0001f9dc\U0001f3fc\u200d\u2640|\U0001f9dc\U0001f3fd\u200d\u2640|\U0001f9dc\U0001f3fe\u200d\u2640|\U0001f9dc\U0001f3ff\u200d\u2640|\U0001f9dd\u200d\u2642\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2642|\U0001f9dd\U0001f3fc\u200d\u2642|\U0001f9dd\U0001f3fd\u200d\u2642|\U0001f9dd\U0001f3fe\u200d\u2642|\U0001f9dd\U0001f3ff\u200d\u2642|\U0001f9dd\u200d\u2640\ufe0f|\U0001f9dd\U0001f3fb\u200d\u2640|\U0001f9dd\U0001f3fc\u200d\u2640|\U0001f9dd\U0001f3fd\u200d\u2640|\U0001f9dd\U0001f3fe\u200d\u2640|\U0001f9dd\U0001f3ff\u200d\u2640|\U0001f9de\u200d\u2642\ufe0f|\U0001f9de\u200d\u2640\ufe0f|\U0001f9df\u200d\u2642\ufe0f|\U0001f9df\u200d\u2640\ufe0f|\U0001f486\u200d\u2642\ufe0f|\U0001f486\U0001f3fb\u200d\u2642|\U0001f486\U0001f3fc\u200d\u2642|\U0001f486\U0001f3fd\u200d\u2642|\U0001f486\U0001f3fe\u200d\u2642|\U0001f486\U0001f3ff\u200d\u2642|\U0001f486\u200d\u2640\ufe0f|\U0001f486\U0001f3fb\u200d\u2640|\U0001f486\U0001f3fc\u200d\u2640|\U0001f486\U0001f3fd\u200d\u2640|\U0001f486\U0001f3fe\u200d\u2640|\U0001f486\U0001f3ff\u200d\u2640|\U0001f487\u200d\u2642\ufe0f|\U0001f487\U0001f3fb\u200d\u2642|\U0001f487\U0001f3fc\u200d\u2642|\U0001f487\U0001f3fd\u200d\u2642|\U0001f487\U0001f3fe\u200d\u2642|\U0001f487\U0001f3ff\u200d\u2642|\U0001f487\u200d\u2640\ufe0f|\U0001f487\U0001f3fb\u200d\u2640|\U0001f487\U0001f3fc\u200d\u2640|\U0001f487\U0001f3fd\u200d\u2640|\U0001f487\U0001f3fe\u200d\u2640|\U0001f487\U0001f3ff\u200d\u2640|\U0001f6b6\u200d\u2642\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2642|\U0001f6b6\U0001f3fc\u200d\u2642|\U0001f6b6\U0001f3fd\u200d\u2642|\U0001f6b6\U0001f3fe\u200d\u2642|\U0001f6b6\U0001f3ff\u200d\u2642|\U0001f6b6\u200d\u2640\ufe0f|\U0001f6b6\U0001f3fb\u200d\u2640|\U0001f6b6\U0001f3fc\u200d\u2640|\U0001f6b6\U0001f3fd\u200d\u2640|\U0001f6b6\U0001f3fe\u200d\u2640|\U0001f6b6\U0001f3ff\u200d\u2640|\U0001f9cd\u200d\u2642\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2642|\U0001f9cd\U0001f3fc\u200d\u2642|\U0001f9cd\U0001f3fd\u200d\u2642|\U0001f9cd\U0001f3fe\u200d\u2642|\U0001f9cd\U0001f3ff\u200d\u2642|\U0001f9cd\u200d\u2640\ufe0f|\U0001f9cd\U0001f3fb\u200d\u2640|\U0001f9cd\U0001f3fc\u200d\u2640|\U0001f9cd\U0001f3fd\u200d\u2640|\U0001f9cd\U0001f3fe\u200d\u2640|\U0001f9cd\U0001f3ff\u200d\u2640|\U0001f9ce\u200d\u2642\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2642|\U0001f9ce\U0001f3fc\u200d\u2642|\U0001f9ce\U0001f3fd\u200d\u2642|\U0001f9ce\U0001f3fe\u200d\u2642|\U0001f9ce\U0001f3ff\u200d\u2642|\U0001f9ce\u200d\u2640\ufe0f|\U0001f9ce\U0001f3fb\u200d\u2640|\U0001f9ce\U0001f3fc\u200d\u2640|\U0001f9ce\U0001f3fd\u200d\u2640|\U0001f9ce\U0001f3fe\u200d\u2640|\U0001f9ce\U0001f3ff\u200d\u2640|\U0001f468\U0001f3fb\u200d\U0001f9af|\U0001f468\U0001f3fc\u200d\U
0001f9af|\U0001f468\U0001f3fd\u200d\U0001f9af|\U0001f468\U0001f3fe\u200d\U0001f9af|\U0001f468\U0001f3ff\u200d\U0001f9af|\U0001f469\U0001f3fb\u200d\U0001f9af|\U0001f469\U0001f3fc\u200d\U0001f9af|\U0001f469\U0001f3fd\u200d\U0001f9af|\U0001f469\U0001f3fe\u200d\U0001f9af|\U0001f469\U0001f3ff\u200d\U0001f9af|\U0001f468\U0001f3fb\u200d\U0001f9bc|\U0001f468\U0001f3fc\u200d\U0001f9bc|\U0001f468\U0001f3fd\u200d\U0001f9bc|\U0001f468\U0001f3fe\u200d\U0001f9bc|\U0001f468\U0001f3ff\u200d\U0001f9bc|\U0001f469\U0001f3fb\u200d\U0001f9bc|\U0001f469\U0001f3fc\u200d\U0001f9bc|\U0001f469\U0001f3fd\u200d\U0001f9bc|\U0001f469\U0001f3fe\u200d\U0001f9bc|\U0001f469\U0001f3ff\u200d\U0001f9bc|\U0001f468\U0001f3fb\u200d\U0001f9bd|\U0001f468\U0001f3fc\u200d\U0001f9bd|\U0001f468\U0001f3fd\u200d\U0001f9bd|\U0001f468\U0001f3fe\u200d\U0001f9bd|\U0001f468\U0001f3ff\u200d\U0001f9bd|\U0001f469\U0001f3fb\u200d\U0001f9bd|\U0001f469\U0001f3fc\u200d\U0001f9bd|\U0001f469\U0001f3fd\u200d\U0001f9bd|\U0001f469\U0001f3fe\u200d\U0001f9bd|\U0001f469\U0001f3ff\u200d\U0001f9bd|\U0001f3c3\u200d\u2642\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2642|\U0001f3c3\U0001f3fc\u200d\u2642|\U0001f3c3\U0001f3fd\u200d\u2642|\U0001f3c3\U0001f3fe\u200d\u2642|\U0001f3c3\U0001f3ff\u200d\u2642|\U0001f3c3\u200d\u2640\ufe0f|\U0001f3c3\U0001f3fb\u200d\u2640|\U0001f3c3\U0001f3fc\u200d\u2640|\U0001f3c3\U0001f3fd\u200d\u2640|\U0001f3c3\U0001f3fe\u200d\u2640|\U0001f3c3\U0001f3ff\u200d\u2640|\U0001f46f\u200d\u2642\ufe0f|\U0001f46f\u200d\u2640\ufe0f|\U0001f9d6\u200d\u2642\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2642|\U0001f9d6\U0001f3fc\u200d\u2642|\U0001f9d6\U0001f3fd\u200d\u2642|\U0001f9d6\U0001f3fe\u200d\u2642|\U0001f9d6\U0001f3ff\u200d\u2642|\U0001f9d6\u200d\u2640\ufe0f|\U0001f9d6\U0001f3fb\u200d\u2640|\U0001f9d6\U0001f3fc\u200d\u2640|\U0001f9d6\U0001f3fd\u200d\u2640|\U0001f9d6\U0001f3fe\u200d\u2640|\U0001f9d6\U0001f3ff\u200d\u2640|\U0001f9d7\u200d\u2642\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2642|\U0001f9d7\U0001f3fc\u200d\u2642|\U0001f9d7\U0001f3fd\u200d\u2642|\U0001f9d7\U0001f3fe\u200d\u2642|\U0001f9d7\U0001f3ff\u200d\u2642|\U0001f9d7\u200d\u2640\ufe0f|\U0001f9d7\U0001f3fb\u200d\u2640|\U0001f9d7\U0001f3fc\u200d\u2640|\U0001f9d7\U0001f3fd\u200d\u2640|\U0001f9d7\U0001f3fe\u200d\u2640|\U0001f9d7\U0001f3ff\u200d\u2640|\U0001f3cc\u200d\u2642\ufe0f|\U0001f3cc\ufe0f\u200d\u2642|\U0001f3cc\U0001f3fb\u200d\u2642|\U0001f3cc\U0001f3fc\u200d\u2642|\U0001f3cc\U0001f3fd\u200d\u2642|\U0001f3cc\U0001f3fe\u200d\u2642|\U0001f3cc\U0001f3ff\u200d\u2642|\U0001f3cc\u200d\u2640\ufe0f|\U0001f3cc\ufe0f\u200d\u2640|\U0001f3cc\U0001f3fb\u200d\u2640|\U0001f3cc\U0001f3fc\u200d\u2640|\U0001f3cc\U0001f3fd\u200d\u2640|\U0001f3cc\U0001f3fe\u200d\u2640|\U0001f3cc\U0001f3ff\u200d\u2640|\U0001f3c4\u200d\u2642\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2642|\U0001f3c4\U0001f3fc\u200d\u2642|\U0001f3c4\U0001f3fd\u200d\u2642|\U0001f3c4\U0001f3fe\u200d\u2642|\U0001f3c4\U0001f3ff\u200d\u2642|\U0001f3c4\u200d\u2640\ufe0f|\U0001f3c4\U0001f3fb\u200d\u2640|\U0001f3c4\U0001f3fc\u200d\u2640|\U0001f3c4\U0001f3fd\u200d\u2640|\U0001f3c4\U0001f3fe\u200d\u2640|\U0001f3c4\U0001f3ff\u200d\u2640|\U0001f6a3\u200d\u2642\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2642|\U0001f6a3\U0001f3fc\u200d\u2642|\U0001f6a3\U0001f3fd\u200d\u2642|\U0001f6a3\U0001f3fe\u200d\u2642|\U0001f6a3\U0001f3ff\u200d\u2642|\U0001f6a3\u200d\u2640\ufe0f|\U0001f6a3\U0001f3fb\u200d\u2640|\U0001f6a3\U0001f3fc\u200d\u2640|\U0001f6a3\U0001f3fd\u200d\u2640|\U0001f6a3\U0001f3fe\u200d\u2640|\U0001f6a3\U0001f3ff\u200d\u2640|\U0001f3ca\u200d\u2642\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2642|\U00
01f3ca\U0001f3fc\u200d\u2642|\U0001f3ca\U0001f3fd\u200d\u2642|\U0001f3ca\U0001f3fe\u200d\u2642|\U0001f3ca\U0001f3ff\u200d\u2642|\U0001f3ca\u200d\u2640\ufe0f|\U0001f3ca\U0001f3fb\u200d\u2640|\U0001f3ca\U0001f3fc\u200d\u2640|\U0001f3ca\U0001f3fd\u200d\u2640|\U0001f3ca\U0001f3fe\u200d\u2640|\U0001f3ca\U0001f3ff\u200d\u2640|\u26f9\u200d\u2642\ufe0f|\u26f9\ufe0f\u200d\u2642|\u26f9\U0001f3fb\u200d\u2642|\u26f9\U0001f3fc\u200d\u2642|\u26f9\U0001f3fd\u200d\u2642|\u26f9\U0001f3fe\u200d\u2642|\u26f9\U0001f3ff\u200d\u2642|\u26f9\u200d\u2640\ufe0f|\u26f9\ufe0f\u200d\u2640|\u26f9\U0001f3fb\u200d\u2640|\u26f9\U0001f3fc\u200d\u2640|\u26f9\U0001f3fd\u200d\u2640|\u26f9\U0001f3fe\u200d\u2640|\u26f9\U0001f3ff\u200d\u2640|\U0001f3cb\u200d\u2642\ufe0f|\U0001f3cb\ufe0f\u200d\u2642|\U0001f3cb\U0001f3fb\u200d\u2642|\U0001f3cb\U0001f3fc\u200d\u2642|\U0001f3cb\U0001f3fd\u200d\u2642|\U0001f3cb\U0001f3fe\u200d\u2642|\U0001f3cb\U0001f3ff\u200d\u2642|\U0001f3cb\u200d\u2640\ufe0f|\U0001f3cb\ufe0f\u200d\u2640|\U0001f3cb\U0001f3fb\u200d\u2640|\U0001f3cb\U0001f3fc\u200d\u2640|\U0001f3cb\U0001f3fd\u200d\u2640|\U0001f3cb\U0001f3fe\u200d\u2640|\U0001f3cb\U0001f3ff\u200d\u2640|\U0001f6b4\u200d\u2642\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2642|\U0001f6b4\U0001f3fc\u200d\u2642|\U0001f6b4\U0001f3fd\u200d\u2642|\U0001f6b4\U0001f3fe\u200d\u2642|\U0001f6b4\U0001f3ff\u200d\u2642|\U0001f6b4\u200d\u2640\ufe0f|\U0001f6b4\U0001f3fb\u200d\u2640|\U0001f6b4\U0001f3fc\u200d\u2640|\U0001f6b4\U0001f3fd\u200d\u2640|\U0001f6b4\U0001f3fe\u200d\u2640|\U0001f6b4\U0001f3ff\u200d\u2640|\U0001f6b5\u200d\u2642\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2642|\U0001f6b5\U0001f3fc\u200d\u2642|\U0001f6b5\U0001f3fd\u200d\u2642|\U0001f6b5\U0001f3fe\u200d\u2642|\U0001f6b5\U0001f3ff\u200d\u2642|\U0001f6b5\u200d\u2640\ufe0f|\U0001f6b5\U0001f3fb\u200d\u2640|\U0001f6b5\U0001f3fc\u200d\u2640|\U0001f6b5\U0001f3fd\u200d\u2640|\U0001f6b5\U0001f3fe\u200d\u2640|\U0001f6b5\U0001f3ff\u200d\u2640|\U0001f938\u200d\u2642\ufe0f|\U0001f938\U0001f3fb\u200d\u2642|\U0001f938\U0001f3fc\u200d\u2642|\U0001f938\U0001f3fd\u200d\u2642|\U0001f938\U0001f3fe\u200d\u2642|\U0001f938\U0001f3ff\u200d\u2642|\U0001f938\u200d\u2640\ufe0f|\U0001f938\U0001f3fb\u200d\u2640|\U0001f938\U0001f3fc\u200d\u2640|\U0001f938\U0001f3fd\u200d\u2640|\U0001f938\U0001f3fe\u200d\u2640|\U0001f938\U0001f3ff\u200d\u2640|\U0001f93c\u200d\u2642\ufe0f|\U0001f93c\u200d\u2640\ufe0f|\U0001f93d\u200d\u2642\ufe0f|\U0001f93d\U0001f3fb\u200d\u2642|\U0001f93d\U0001f3fc\u200d\u2642|\U0001f93d\U0001f3fd\u200d\u2642|\U0001f93d\U0001f3fe\u200d\u2642|\U0001f93d\U0001f3ff\u200d\u2642|\U0001f93d\u200d\u2640\ufe0f|\U0001f93d\U0001f3fb\u200d\u2640|\U0001f93d\U0001f3fc\u200d\u2640|\U0001f93d\U0001f3fd\u200d\u2640|\U0001f93d\U0001f3fe\u200d\u2640|\U0001f93d\U0001f3ff\u200d\u2640|\U0001f93e\u200d\u2642\ufe0f|\U0001f93e\U0001f3fb\u200d\u2642|\U0001f93e\U0001f3fc\u200d\u2642|\U0001f93e\U0001f3fd\u200d\u2642|\U0001f93e\U0001f3fe\u200d\u2642|\U0001f93e\U0001f3ff\u200d\u2642|\U0001f93e\u200d\u2640\ufe0f|\U0001f93e\U0001f3fb\u200d\u2640|\U0001f93e\U0001f3fc\u200d\u2640|\U0001f93e\U0001f3fd\u200d\u2640|\U0001f93e\U0001f3fe\u200d\u2640|\U0001f93e\U0001f3ff\u200d\u2640|\U0001f939\u200d\u2642\ufe0f|\U0001f939\U0001f3fb\u200d\u2642|\U0001f939\U0001f3fc\u200d\u2642|\U0001f939\U0001f3fd\u200d\u2642|\U0001f939\U0001f3fe\u200d\u2642|\U0001f939\U0001f3ff\u200d\u2642|\U0001f939\u200d\u2640\ufe0f|\U0001f939\U0001f3fb\u200d\u2640|\U0001f939\U0001f3fc\u200d\u2640|\U0001f939\U0001f3fd\u200d\u2640|\U0001f939\U0001f3fe\u200d\u2640|\U0001f939\U0001f3ff\u200d\u2640|\U0001f9d8\
u200d\u2642\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2642|\U0001f9d8\U0001f3fc\u200d\u2642|\U0001f9d8\U0001f3fd\u200d\u2642|\U0001f9d8\U0001f3fe\u200d\u2642|\U0001f9d8\U0001f3ff\u200d\u2642|\U0001f9d8\u200d\u2640\ufe0f|\U0001f9d8\U0001f3fb\u200d\u2640|\U0001f9d8\U0001f3fc\u200d\u2640|\U0001f9d8\U0001f3fd\u200d\u2640|\U0001f9d8\U0001f3fe\u200d\u2640|\U0001f9d8\U0001f3ff\u200d\u2640|\U0001f3f3\ufe0f\u200d\U0001f308|\U0001f3f4\u200d\u2620\ufe0f|\U0001f441\u200d\U0001f5e8|\U0001f471\u200d\u2642|\U0001f468\u200d\U0001f9b0|\U0001f468\u200d\U0001f9b1|\U0001f468\u200d\U0001f9b3|\U0001f468\u200d\U0001f9b2|\U0001f471\u200d\u2640|\U0001f469\u200d\U0001f9b0|\U0001f469\u200d\U0001f9b1|\U0001f469\u200d\U0001f9b3|\U0001f469\u200d\U0001f9b2|\U0001f64d\u200d\u2642|\U0001f64d\u200d\u2640|\U0001f64e\u200d\u2642|\U0001f64e\u200d\u2640|\U0001f645\u200d\u2642|\U0001f645\u200d\u2640|\U0001f646\u200d\u2642|\U0001f646\u200d\u2640|\U0001f481\u200d\u2642|\U0001f481\u200d\u2640|\U0001f64b\u200d\u2642|\U0001f64b\u200d\u2640|\U0001f9cf\u200d\u2642|\U0001f9cf\u200d\u2640|\U0001f647\u200d\u2642|\U0001f647\u200d\u2640|\U0001f926\u200d\u2642|\U0001f926\u200d\u2640|\U0001f937\u200d\u2642|\U0001f937\u200d\u2640|\U0001f468\u200d\u2695|\U0001f469\u200d\u2695|\U0001f468\u200d\U0001f393|\U0001f469\u200d\U0001f393|\U0001f468\u200d\U0001f3eb|\U0001f469\u200d\U0001f3eb|\U0001f468\u200d\u2696|\U0001f469\u200d\u2696|\U0001f468\u200d\U0001f33e|\U0001f469\u200d\U0001f33e|\U0001f468\u200d\U0001f373|\U0001f469\u200d\U0001f373|\U0001f468\u200d\U0001f527|\U0001f469\u200d\U0001f527|\U0001f468\u200d\U0001f3ed|\U0001f469\u200d\U0001f3ed|\U0001f468\u200d\U0001f4bc|\U0001f469\u200d\U0001f4bc|\U0001f468\u200d\U0001f52c|\U0001f469\u200d\U0001f52c|\U0001f468\u200d\U0001f4bb|\U0001f469\u200d\U0001f4bb|\U0001f468\u200d\U0001f3a4|\U0001f469\u200d\U0001f3a4|\U0001f468\u200d\U0001f3a8|\U0001f469\u200d\U0001f3a8|\U0001f468\u200d\u2708|\U0001f469\u200d\u2708|\U0001f468\u200d\U0001f680|\U0001f469\u200d\U0001f680|\U0001f468\u200d\U0001f692|\U0001f469\u200d\U0001f692|\U0001f46e\u200d\u2642|\U0001f46e\u200d\u2640|\U0001f575\u200d\u2642|\U0001f575\u200d\u2640|\U0001f482\u200d\u2642|\U0001f482\u200d\u2640|\U0001f477\u200d\u2642|\U0001f477\u200d\u2640|\U0001f473\u200d\u2642|\U0001f473\u200d\u2640|\U0001f9b8\u200d\u2642|\U0001f9b8\u200d\u2640|\U0001f9b9\u200d\u2642|\U0001f9b9\u200d\u2640|\U0001f9d9\u200d\u2642|\U0001f9d9\u200d\u2640|\U0001f9da\u200d\u2642|\U0001f9da\u200d\u2640|\U0001f9db\u200d\u2642|\U0001f9db\u200d\u2640|\U0001f9dc\u200d\u2642|\U0001f9dc\u200d\u2640|\U0001f9dd\u200d\u2642|\U0001f9dd\u200d\u2640|\U0001f9de\u200d\u2642|\U0001f9de\u200d\u2640|\U0001f9df\u200d\u2642|\U0001f9df\u200d\u2640|\U0001f486\u200d\u2642|\U0001f486\u200d\u2640|\U0001f487\u200d\u2642|\U0001f487\u200d\u2640|\U0001f6b6\u200d\u2642|\U0001f6b6\u200d\u2640|\U0001f9cd\u200d\u2642|\U0001f9cd\u200d\u2640|\U0001f9ce\u200d\u2642|\U0001f9ce\u200d\u2640|\U0001f468\u200d\U0001f9af|\U0001f469\u200d\U0001f9af|\U0001f468\u200d\U0001f9bc|\U0001f469\u200d\U0001f9bc|\U0001f468\u200d\U0001f9bd|\U0001f469\u200d\U0001f9bd|\U0001f3c3\u200d\u2642|\U0001f3c3\u200d\u2640|\U0001f46f\u200d\u2642|\U0001f46f\u200d\u2640|\U0001f9d6\u200d\u2642|\U0001f9d6\u200d\u2640|\U0001f9d7\u200d\u2642|\U0001f9d7\u200d\u2640|\U0001f3cc\u200d\u2642|\U0001f3cc\u200d\u2640|\U0001f3c4\u200d\u2642|\U0001f3c4\u200d\u2640|\U0001f6a3\u200d\u2642|\U0001f6a3\u200d\u2640|\U0001f3ca\u200d\u2642|\U0001f3ca\u200d\u2640|\u26f9\u200d\u2642|\u26f9\u200d\u2640|\U0001f3cb\u200d\u2642|\U0001f3cb\u200d\u2640|\U0001f6b4\u200d\u2642|\U0001f6b4\u200d
\u2640|\U0001f6b5\u200d\u2642|\U0001f6b5\u200d\u2640|\U0001f938\u200d\u2642|\U0001f938\u200d\u2640|\U0001f93c\u200d\u2642|\U0001f93c\u200d\u2640|\U0001f93d\u200d\u2642|\U0001f93d\u200d\u2640|\U0001f93e\u200d\u2642|\U0001f93e\u200d\u2640|\U0001f939\u200d\u2642|\U0001f939\u200d\u2640|\U0001f9d8\u200d\u2642|\U0001f9d8\u200d\u2640|\U0001f468\u200d\U0001f466|\U0001f468\u200d\U0001f467|\U0001f469\u200d\U0001f466|\U0001f469\u200d\U0001f467|\U0001f415\u200d\U0001f9ba|\\#\ufe0f\u20e3|\\*\ufe0f\u20e3|0\ufe0f\u20e3|1\ufe0f\u20e3|2\ufe0f\u20e3|3\ufe0f\u20e3|4\ufe0f\u20e3|5\ufe0f\u20e3|6\ufe0f\u20e3|7\ufe0f\u20e3|8\ufe0f\u20e3|9\ufe0f\u20e3|\U0001f3f3\u200d\U0001f308|\U0001f3f4\u200d\u2620|\u263a\ufe0f|\u2639\ufe0f|\u2620\ufe0f|\u2763\ufe0f|\u2764\ufe0f|\U0001f573\ufe0f|\U0001f5e8\ufe0f|\U0001f5ef\ufe0f|\U0001f44b\U0001f3fb|\U0001f44b\U0001f3fc|\U0001f44b\U0001f3fd|\U0001f44b\U0001f3fe|\U0001f44b\U0001f3ff|\U0001f91a\U0001f3fb|\U0001f91a\U0001f3fc|\U0001f91a\U0001f3fd|\U0001f91a\U0001f3fe|\U0001f91a\U0001f3ff|\U0001f590\ufe0f|\U0001f590\U0001f3fb|\U0001f590\U0001f3fc|\U0001f590\U0001f3fd|\U0001f590\U0001f3fe|\U0001f590\U0001f3ff|\u270b\U0001f3fb|\u270b\U0001f3fc|\u270b\U0001f3fd|\u270b\U0001f3fe|\u270b\U0001f3ff|\U0001f596\U0001f3fb|\U0001f596\U0001f3fc|\U0001f596\U0001f3fd|\U0001f596\U0001f3fe|\U0001f596\U0001f3ff|\U0001f44c\U0001f3fb|\U0001f44c\U0001f3fc|\U0001f44c\U0001f3fd|\U0001f44c\U0001f3fe|\U0001f44c\U0001f3ff|\U0001f90f\U0001f3fb|\U0001f90f\U0001f3fc|\U0001f90f\U0001f3fd|\U0001f90f\U0001f3fe|\U0001f90f\U0001f3ff|\u270c\ufe0f|\u270c\U0001f3fb|\u270c\U0001f3fc|\u270c\U0001f3fd|\u270c\U0001f3fe|\u270c\U0001f3ff|\U0001f91e\U0001f3fb|\U0001f91e\U0001f3fc|\U0001f91e\U0001f3fd|\U0001f91e\U0001f3fe|\U0001f91e\U0001f3ff|\U0001f91f\U0001f3fb|\U0001f91f\U0001f3fc|\U0001f91f\U0001f3fd|\U0001f91f\U0001f3fe|\U0001f91f\U0001f3ff|\U0001f918\U0001f3fb|\U0001f918\U0001f3fc|\U0001f918\U0001f3fd|\U0001f918\U0001f3fe|\U0001f918\U0001f3ff|\U0001f919\U0001f3fb|\U0001f919\U0001f3fc|\U0001f919\U0001f3fd|\U0001f919\U0001f3fe|\U0001f919\U0001f3ff|\U0001f448\U0001f3fb|\U0001f448\U0001f3fc|\U0001f448\U0001f3fd|\U0001f448\U0001f3fe|\U0001f448\U0001f3ff|\U0001f449\U0001f3fb|\U0001f449\U0001f3fc|\U0001f449\U0001f3fd|\U0001f449\U0001f3fe|\U0001f449\U0001f3ff|\U0001f446\U0001f3fb|\U0001f446\U0001f3fc|\U0001f446\U0001f3fd|\U0001f446\U0001f3fe|\U0001f446\U0001f3ff|\U0001f595\U0001f3fb|\U0001f595\U0001f3fc|\U0001f595\U0001f3fd|\U0001f595\U0001f3fe|\U0001f595\U0001f3ff|\U0001f447\U0001f3fb|\U0001f447\U0001f3fc|\U0001f447\U0001f3fd|\U0001f447\U0001f3fe|\U0001f447\U0001f3ff|\u261d\ufe0f|\u261d\U0001f3fb|\u261d\U0001f3fc|\u261d\U0001f3fd|\u261d\U0001f3fe|\u261d\U0001f3ff|\U0001f44d\U0001f3fb|\U0001f44d\U0001f3fc|\U0001f44d\U0001f3fd|\U0001f44d\U0001f3fe|\U0001f44d\U0001f3ff|\U0001f44e\U0001f3fb|\U0001f44e\U0001f3fc|\U0001f44e\U0001f3fd|\U0001f44e\U0001f3fe|\U0001f44e\U0001f3ff|\u270a\U0001f3fb|\u270a\U0001f3fc|\u270a\U0001f3fd|\u270a\U0001f3fe|\u270a\U0001f3ff|\U0001f44a\U0001f3fb|\U0001f44a\U0001f3fc|\U0001f44a\U0001f3fd|\U0001f44a\U0001f3fe|\U0001f44a\U0001f3ff|\U0001f91b\U0001f3fb|\U0001f91b\U0001f3fc|\U0001f91b\U0001f3fd|\U0001f91b\U0001f3fe|\U0001f91b\U0001f3ff|\U0001f91c\U0001f3fb|\U0001f91c\U0001f3fc|\U0001f91c\U0001f3fd|\U0001f91c\U0001f3fe|\U0001f91c\U0001f3ff|\U0001f44f\U0001f3fb|\U0001f44f\U0001f3fc|\U0001f44f\U0001f3fd|\U0001f44f\U0001f3fe|\U0001f44f\U0001f3ff|\U0001f64c\U0001f3fb|\U0001f64c\U0001f3fc|\U0001f64c\U0001f3fd|\U0001f64c\U0001f3fe|\U0001f64c\U0001f3ff|\U0001f450\U0001f3fb|\U0001f450\U0001f3fc|\U0001f450\U0001f3
fd|\U0001f450\U0001f3fe|\U0001f450\U0001f3ff|\U0001f932\U0001f3fb|\U0001f932\U0001f3fc|\U0001f932\U0001f3fd|\U0001f932\U0001f3fe|\U0001f932\U0001f3ff|\U0001f64f\U0001f3fb|\U0001f64f\U0001f3fc|\U0001f64f\U0001f3fd|\U0001f64f\U0001f3fe|\U0001f64f\U0001f3ff|\u270d\ufe0f|\u270d\U0001f3fb|\u270d\U0001f3fc|\u270d\U0001f3fd|\u270d\U0001f3fe|\u270d\U0001f3ff|\U0001f485\U0001f3fb|\U0001f485\U0001f3fc|\U0001f485\U0001f3fd|\U0001f485\U0001f3fe|\U0001f485\U0001f3ff|\U0001f933\U0001f3fb|\U0001f933\U0001f3fc|\U0001f933\U0001f3fd|\U0001f933\U0001f3fe|\U0001f933\U0001f3ff|\U0001f4aa\U0001f3fb|\U0001f4aa\U0001f3fc|\U0001f4aa\U0001f3fd|\U0001f4aa\U0001f3fe|\U0001f4aa\U0001f3ff|\U0001f9b5\U0001f3fb|\U0001f9b5\U0001f3fc|\U0001f9b5\U0001f3fd|\U0001f9b5\U0001f3fe|\U0001f9b5\U0001f3ff|\U0001f9b6\U0001f3fb|\U0001f9b6\U0001f3fc|\U0001f9b6\U0001f3fd|\U0001f9b6\U0001f3fe|\U0001f9b6\U0001f3ff|\U0001f442\U0001f3fb|\U0001f442\U0001f3fc|\U0001f442\U0001f3fd|\U0001f442\U0001f3fe|\U0001f442\U0001f3ff|\U0001f9bb\U0001f3fb|\U0001f9bb\U0001f3fc|\U0001f9bb\U0001f3fd|\U0001f9bb\U0001f3fe|\U0001f9bb\U0001f3ff|\U0001f443\U0001f3fb|\U0001f443\U0001f3fc|\U0001f443\U0001f3fd|\U0001f443\U0001f3fe|\U0001f443\U0001f3ff|\U0001f441\ufe0f|\U0001f476\U0001f3fb|\U0001f476\U0001f3fc|\U0001f476\U0001f3fd|\U0001f476\U0001f3fe|\U0001f476\U0001f3ff|\U0001f9d2\U0001f3fb|\U0001f9d2\U0001f3fc|\U0001f9d2\U0001f3fd|\U0001f9d2\U0001f3fe|\U0001f9d2\U0001f3ff|\U0001f466\U0001f3fb|\U0001f466\U0001f3fc|\U0001f466\U0001f3fd|\U0001f466\U0001f3fe|\U0001f466\U0001f3ff|\U0001f467\U0001f3fb|\U0001f467\U0001f3fc|\U0001f467\U0001f3fd|\U0001f467\U0001f3fe|\U0001f467\U0001f3ff|\U0001f9d1\U0001f3fb|\U0001f9d1\U0001f3fc|\U0001f9d1\U0001f3fd|\U0001f9d1\U0001f3fe|\U0001f9d1\U0001f3ff|\U0001f471\U0001f3fb|\U0001f471\U0001f3fc|\U0001f471\U0001f3fd|\U0001f471\U0001f3fe|\U0001f471\U0001f3ff|\U0001f468\U0001f3fb|\U0001f468\U0001f3fc|\U0001f468\U0001f3fd|\U0001f468\U0001f3fe|\U0001f468\U0001f3ff|\U0001f9d4\U0001f3fb|\U0001f9d4\U0001f3fc|\U0001f9d4\U0001f3fd|\U0001f9d4\U0001f3fe|\U0001f9d4\U0001f3ff|\U0001f469\U0001f3fb|\U0001f469\U0001f3fc|\U0001f469\U0001f3fd|\U0001f469\U0001f3fe|\U0001f469\U0001f3ff|\U0001f9d3\U0001f3fb|\U0001f9d3\U0001f3fc|\U0001f9d3\U0001f3fd|\U0001f9d3\U0001f3fe|\U0001f9d3\U0001f3ff|\U0001f474\U0001f3fb|\U0001f474\U0001f3fc|\U0001f474\U0001f3fd|\U0001f474\U0001f3fe|\U0001f474\U0001f3ff|\U0001f475\U0001f3fb|\U0001f475\U0001f3fc|\U0001f475\U0001f3fd|\U0001f475\U0001f3fe|\U0001f475\U0001f3ff|\U0001f64d\U0001f3fb|\U0001f64d\U0001f3fc|\U0001f64d\U0001f3fd|\U0001f64d\U0001f3fe|\U0001f64d\U0001f3ff|\U0001f64e\U0001f3fb|\U0001f64e\U0001f3fc|\U0001f64e\U0001f3fd|\U0001f64e\U0001f3fe|\U0001f64e\U0001f3ff|\U0001f645\U0001f3fb|\U0001f645\U0001f3fc|\U0001f645\U0001f3fd|\U0001f645\U0001f3fe|\U0001f645\U0001f3ff|\U0001f646\U0001f3fb|\U0001f646\U0001f3fc|\U0001f646\U0001f3fd|\U0001f646\U0001f3fe|\U0001f646\U0001f3ff|\U0001f481\U0001f3fb|\U0001f481\U0001f3fc|\U0001f481\U0001f3fd|\U0001f481\U0001f3fe|\U0001f481\U0001f3ff|\U0001f64b\U0001f3fb|\U0001f64b\U0001f3fc|\U0001f64b\U0001f3fd|\U0001f64b\U0001f3fe|\U0001f64b\U0001f3ff|\U0001f9cf\U0001f3fb|\U0001f9cf\U0001f3fc|\U0001f9cf\U0001f3fd|\U0001f9cf\U0001f3fe|\U0001f9cf\U0001f3ff|\U0001f647\U0001f3fb|\U0001f647\U0001f3fc|\U0001f647\U0001f3fd|\U0001f647\U0001f3fe|\U0001f647\U0001f3ff|\U0001f926\U0001f3fb|\U0001f926\U0001f3fc|\U0001f926\U0001f3fd|\U0001f926\U0001f3fe|\U0001f926\U0001f3ff|\U0001f937\U0001f3fb|\U0001f937\U0001f3fc|\U0001f937\U0001f3fd|\U0001f937\U0001f3fe|\U0001f937\U0001f3ff|\U0001f46e\U0001f3fb|\U0001f46e\U00
01f3fc|\U0001f46e\U0001f3fd|\U0001f46e\U0001f3fe|\U0001f46e\U0001f3ff|\U0001f575\ufe0f|\U0001f575\U0001f3fb|\U0001f575\U0001f3fc|\U0001f575\U0001f3fd|\U0001f575\U0001f3fe|\U0001f575\U0001f3ff|\U0001f482\U0001f3fb|\U0001f482\U0001f3fc|\U0001f482\U0001f3fd|\U0001f482\U0001f3fe|\U0001f482\U0001f3ff|\U0001f477\U0001f3fb|\U0001f477\U0001f3fc|\U0001f477\U0001f3fd|\U0001f477\U0001f3fe|\U0001f477\U0001f3ff|\U0001f934\U0001f3fb|\U0001f934\U0001f3fc|\U0001f934\U0001f3fd|\U0001f934\U0001f3fe|\U0001f934\U0001f3ff|\U0001f478\U0001f3fb|\U0001f478\U0001f3fc|\U0001f478\U0001f3fd|\U0001f478\U0001f3fe|\U0001f478\U0001f3ff|\U0001f473\U0001f3fb|\U0001f473\U0001f3fc|\U0001f473\U0001f3fd|\U0001f473\U0001f3fe|\U0001f473\U0001f3ff|\U0001f472\U0001f3fb|\U0001f472\U0001f3fc|\U0001f472\U0001f3fd|\U0001f472\U0001f3fe|\U0001f472\U0001f3ff|\U0001f9d5\U0001f3fb|\U0001f9d5\U0001f3fc|\U0001f9d5\U0001f3fd|\U0001f9d5\U0001f3fe|\U0001f9d5\U0001f3ff|\U0001f935\U0001f3fb|\U0001f935\U0001f3fc|\U0001f935\U0001f3fd|\U0001f935\U0001f3fe|\U0001f935\U0001f3ff|\U0001f470\U0001f3fb|\U0001f470\U0001f3fc|\U0001f470\U0001f3fd|\U0001f470\U0001f3fe|\U0001f470\U0001f3ff|\U0001f930\U0001f3fb|\U0001f930\U0001f3fc|\U0001f930\U0001f3fd|\U0001f930\U0001f3fe|\U0001f930\U0001f3ff|\U0001f931\U0001f3fb|\U0001f931\U0001f3fc|\U0001f931\U0001f3fd|\U0001f931\U0001f3fe|\U0001f931\U0001f3ff|\U0001f47c\U0001f3fb|\U0001f47c\U0001f3fc|\U0001f47c\U0001f3fd|\U0001f47c\U0001f3fe|\U0001f47c\U0001f3ff|\U0001f385\U0001f3fb|\U0001f385\U0001f3fc|\U0001f385\U0001f3fd|\U0001f385\U0001f3fe|\U0001f385\U0001f3ff|\U0001f936\U0001f3fb|\U0001f936\U0001f3fc|\U0001f936\U0001f3fd|\U0001f936\U0001f3fe|\U0001f936\U0001f3ff|\U0001f9b8\U0001f3fb|\U0001f9b8\U0001f3fc|\U0001f9b8\U0001f3fd|\U0001f9b8\U0001f3fe|\U0001f9b8\U0001f3ff|\U0001f9b9\U0001f3fb|\U0001f9b9\U0001f3fc|\U0001f9b9\U0001f3fd|\U0001f9b9\U0001f3fe|\U0001f9b9\U0001f3ff|\U0001f9d9\U0001f3fb|\U0001f9d9\U0001f3fc|\U0001f9d9\U0001f3fd|\U0001f9d9\U0001f3fe|\U0001f9d9\U0001f3ff|\U0001f9da\U0001f3fb|\U0001f9da\U0001f3fc|\U0001f9da\U0001f3fd|\U0001f9da\U0001f3fe|\U0001f9da\U0001f3ff|\U0001f9db\U0001f3fb|\U0001f9db\U0001f3fc|\U0001f9db\U0001f3fd|\U0001f9db\U0001f3fe|\U0001f9db\U0001f3ff|\U0001f9dc\U0001f3fb|\U0001f9dc\U0001f3fc|\U0001f9dc\U0001f3fd|\U0001f9dc\U0001f3fe|\U0001f9dc\U0001f3ff|\U0001f9dd\U0001f3fb|\U0001f9dd\U0001f3fc|\U0001f9dd\U0001f3fd|\U0001f9dd\U0001f3fe|\U0001f9dd\U0001f3ff|\U0001f486\U0001f3fb|\U0001f486\U0001f3fc|\U0001f486\U0001f3fd|\U0001f486\U0001f3fe|\U0001f486\U0001f3ff|\U0001f487\U0001f3fb|\U0001f487\U0001f3fc|\U0001f487\U0001f3fd|\U0001f487\U0001f3fe|\U0001f487\U0001f3ff|\U0001f6b6\U0001f3fb|\U0001f6b6\U0001f3fc|\U0001f6b6\U0001f3fd|\U0001f6b6\U0001f3fe|\U0001f6b6\U0001f3ff|\U0001f9cd\U0001f3fb|\U0001f9cd\U0001f3fc|\U0001f9cd\U0001f3fd|\U0001f9cd\U0001f3fe|\U0001f9cd\U0001f3ff|\U0001f9ce\U0001f3fb|\U0001f9ce\U0001f3fc|\U0001f9ce\U0001f3fd|\U0001f9ce\U0001f3fe|\U0001f9ce\U0001f3ff|\U0001f3c3\U0001f3fb|\U0001f3c3\U0001f3fc|\U0001f3c3\U0001f3fd|\U0001f3c3\U0001f3fe|\U0001f3c3\U0001f3ff|\U0001f483\U0001f3fb|\U0001f483\U0001f3fc|\U0001f483\U0001f3fd|\U0001f483\U0001f3fe|\U0001f483\U0001f3ff|\U0001f57a\U0001f3fb|\U0001f57a\U0001f3fc|\U0001f57a\U0001f3fd|\U0001f57a\U0001f3fe|\U0001f57a\U0001f3ff|\U0001f574\ufe0f|\U0001f574\U0001f3fb|\U0001f574\U0001f3fc|\U0001f574\U0001f3fd|\U0001f574\U0001f3fe|\U0001f574\U0001f3ff|\U0001f9d6\U0001f3fb|\U0001f9d6\U0001f3fc|\U0001f9d6\U0001f3fd|\U0001f9d6\U0001f3fe|\U0001f9d6\U0001f3ff|\U0001f9d7\U0001f3fb|\U0001f9d7\U0001f3fc|\U0001f9d7\U0001f3fd|\U0001f9d7\U0001f3fe|\U0001f
9d7\U0001f3ff|\U0001f3c7\U0001f3fb|\U0001f3c7\U0001f3fc|\U0001f3c7\U0001f3fd|\U0001f3c7\U0001f3fe|\U0001f3c7\U0001f3ff|\u26f7\ufe0f|\U0001f3c2\U0001f3fb|\U0001f3c2\U0001f3fc|\U0001f3c2\U0001f3fd|\U0001f3c2\U0001f3fe|\U0001f3c2\U0001f3ff|\U0001f3cc\ufe0f|\U0001f3cc\U0001f3fb|\U0001f3cc\U0001f3fc|\U0001f3cc\U0001f3fd|\U0001f3cc\U0001f3fe|\U0001f3cc\U0001f3ff|\U0001f3c4\U0001f3fb|\U0001f3c4\U0001f3fc|\U0001f3c4\U0001f3fd|\U0001f3c4\U0001f3fe|\U0001f3c4\U0001f3ff|\U0001f6a3\U0001f3fb|\U0001f6a3\U0001f3fc|\U0001f6a3\U0001f3fd|\U0001f6a3\U0001f3fe|\U0001f6a3\U0001f3ff|\U0001f3ca\U0001f3fb|\U0001f3ca\U0001f3fc|\U0001f3ca\U0001f3fd|\U0001f3ca\U0001f3fe|\U0001f3ca\U0001f3ff|\u26f9\ufe0f|\u26f9\U0001f3fb|\u26f9\U0001f3fc|\u26f9\U0001f3fd|\u26f9\U0001f3fe|\u26f9\U0001f3ff|\U0001f3cb\ufe0f|\U0001f3cb\U0001f3fb|\U0001f3cb\U0001f3fc|\U0001f3cb\U0001f3fd|\U0001f3cb\U0001f3fe|\U0001f3cb\U0001f3ff|\U0001f6b4\U0001f3fb|\U0001f6b4\U0001f3fc|\U0001f6b4\U0001f3fd|\U0001f6b4\U0001f3fe|\U0001f6b4\U0001f3ff|\U0001f6b5\U0001f3fb|\U0001f6b5\U0001f3fc|\U0001f6b5\U0001f3fd|\U0001f6b5\U0001f3fe|\U0001f6b5\U0001f3ff|\U0001f938\U0001f3fb|\U0001f938\U0001f3fc|\U0001f938\U0001f3fd|\U0001f938\U0001f3fe|\U0001f938\U0001f3ff|\U0001f93d\U0001f3fb|\U0001f93d\U0001f3fc|\U0001f93d\U0001f3fd|\U0001f93d\U0001f3fe|\U0001f93d\U0001f3ff|\U0001f93e\U0001f3fb|\U0001f93e\U0001f3fc|\U0001f93e\U0001f3fd|\U0001f93e\U0001f3fe|\U0001f93e\U0001f3ff|\U0001f939\U0001f3fb|\U0001f939\U0001f3fc|\U0001f939\U0001f3fd|\U0001f939\U0001f3fe|\U0001f939\U0001f3ff|\U0001f9d8\U0001f3fb|\U0001f9d8\U0001f3fc|\U0001f9d8\U0001f3fd|\U0001f9d8\U0001f3fe|\U0001f9d8\U0001f3ff|\U0001f6c0\U0001f3fb|\U0001f6c0\U0001f3fc|\U0001f6c0\U0001f3fd|\U0001f6c0\U0001f3fe|\U0001f6c0\U0001f3ff|\U0001f6cc\U0001f3fb|\U0001f6cc\U0001f3fc|\U0001f6cc\U0001f3fd|\U0001f6cc\U0001f3fe|\U0001f6cc\U0001f3ff|\U0001f46d\U0001f3fb|\U0001f46d\U0001f3fc|\U0001f46d\U0001f3fd|\U0001f46d\U0001f3fe|\U0001f46d\U0001f3ff|\U0001f46b\U0001f3fb|\U0001f46b\U0001f3fc|\U0001f46b\U0001f3fd|\U0001f46b\U0001f3fe|\U0001f46b\U0001f3ff|\U0001f46c\U0001f3fb|\U0001f46c\U0001f3fc|\U0001f46c\U0001f3fd|\U0001f46c\U0001f3fe|\U0001f46c\U0001f3ff|\U0001f5e3\ufe0f|\U0001f43f\ufe0f|\U0001f54a\ufe0f|\U0001f577\ufe0f|\U0001f578\ufe0f|\U0001f3f5\ufe0f|\u2618\ufe0f|\U0001f336\ufe0f|\U0001f37d\ufe0f|\U0001f5fa\ufe0f|\U0001f3d4\ufe0f|\u26f0\ufe0f|\U0001f3d5\ufe0f|\U0001f3d6\ufe0f|\U0001f3dc\ufe0f|\U0001f3dd\ufe0f|\U0001f3de\ufe0f|\U0001f3df\ufe0f|\U0001f3db\ufe0f|\U0001f3d7\ufe0f|\U0001f3d8\ufe0f|\U0001f3da\ufe0f|\u26e9\ufe0f|\U0001f3d9\ufe0f|\u2668\ufe0f|\U0001f3ce\ufe0f|\U0001f3cd\ufe0f|\U0001f6e3\ufe0f|\U0001f6e4\ufe0f|\U0001f6e2\ufe0f|\U0001f6f3\ufe0f|\u26f4\ufe0f|\U0001f6e5\ufe0f|\u2708\ufe0f|\U0001f6e9\ufe0f|\U0001f6f0\ufe0f|\U0001f6ce\ufe0f|\u23f1\ufe0f|\u23f2\ufe0f|\U0001f570\ufe0f|\U0001f321\ufe0f|\u2600\ufe0f|\u2601\ufe0f|\u26c8\ufe0f|\U0001f324\ufe0f|\U0001f325\ufe0f|\U0001f326\ufe0f|\U0001f327\ufe0f|\U0001f328\ufe0f|\U0001f329\ufe0f|\U0001f32a\ufe0f|\U0001f32b\ufe0f|\U0001f32c\ufe0f|\u2602\ufe0f|\u26f1\ufe0f|\u2744\ufe0f|\u2603\ufe0f|\u2604\ufe0f|\U0001f397\ufe0f|\U0001f39f\ufe0f|\U0001f396\ufe0f|\u26f8\ufe0f|\U0001f579\ufe0f|\u2660\ufe0f|\u2665\ufe0f|\u2666\ufe0f|\u2663\ufe0f|\u265f\ufe0f|\U0001f5bc\ufe0f|\U0001f576\ufe0f|\U0001f6cd\ufe0f|\u26d1\ufe0f|\U0001f399\ufe0f|\U0001f39a\ufe0f|\U0001f39b\ufe0f|\u260e\ufe0f|\U0001f5a5\ufe0f|\U0001f5a8\ufe0f|\u2328\ufe0f|\U0001f5b1\ufe0f|\U0001f5b2\ufe0f|\U0001f39e\ufe0f|\U0001f4fd\ufe0f|\U0001f56f\ufe0f|\U0001f5de\ufe0f|\U0001f3f7\ufe0f|\u2709\ufe0f|\U0001f5f3\ufe0f|\u270f\uf
e0f|\u2712\ufe0f|\U0001f58b\ufe0f|\U0001f58a\ufe0f|\U0001f58c\ufe0f|\U0001f58d\ufe0f|\U0001f5c2\ufe0f|\U0001f5d2\ufe0f|\U0001f5d3\ufe0f|\U0001f587\ufe0f|\u2702\ufe0f|\U0001f5c3\ufe0f|\U0001f5c4\ufe0f|\U0001f5d1\ufe0f|\U0001f5dd\ufe0f|\u26cf\ufe0f|\u2692\ufe0f|\U0001f6e0\ufe0f|\U0001f5e1\ufe0f|\u2694\ufe0f|\U0001f6e1\ufe0f|\u2699\ufe0f|\U0001f5dc\ufe0f|\u2696\ufe0f|\u26d3\ufe0f|\u2697\ufe0f|\U0001f6cf\ufe0f|\U0001f6cb\ufe0f|\u26b0\ufe0f|\u26b1\ufe0f|\u26a0\ufe0f|\u2622\ufe0f|\u2623\ufe0f|\u2b06\ufe0f|\u2197\ufe0f|\u27a1\ufe0f|\u2198\ufe0f|\u2b07\ufe0f|\u2199\ufe0f|\u2b05\ufe0f|\u2196\ufe0f|\u2195\ufe0f|\u2194\ufe0f|\u21a9\ufe0f|\u21aa\ufe0f|\u2934\ufe0f|\u2935\ufe0f|\u269b\ufe0f|\U0001f549\ufe0f|\u2721\ufe0f|\u2638\ufe0f|\u262f\ufe0f|\u271d\ufe0f|\u2626\ufe0f|\u262a\ufe0f|\u262e\ufe0f|\u25b6\ufe0f|\u23ed\ufe0f|\u23ef\ufe0f|\u25c0\ufe0f|\u23ee\ufe0f|\u23f8\ufe0f|\u23f9\ufe0f|\u23fa\ufe0f|\u23cf\ufe0f|\u2640\ufe0f|\u2642\ufe0f|\u2695\ufe0f|\u267e\ufe0f|\u267b\ufe0f|\u269c\ufe0f|\u2611\ufe0f|\u2714\ufe0f|\u2716\ufe0f|\u303d\ufe0f|\u2733\ufe0f|\u2734\ufe0f|\u2747\ufe0f|\u203c\ufe0f|\u2049\ufe0f|\u3030\ufe0f|\xa9\ufe0f|\xae\ufe0f|\u2122\ufe0f|\\#\u20e3|\\*\u20e3|0\u20e3|1\u20e3|2\u20e3|3\u20e3|4\u20e3|5\u20e3|6\u20e3|7\u20e3|8\u20e3|9\u20e3|\U0001f170\ufe0f|\U0001f171\ufe0f|\u2139\ufe0f|\u24c2\ufe0f|\U0001f17e\ufe0f|\U0001f17f\ufe0f|\U0001f202\ufe0f|\U0001f237\ufe0f|\u3297\ufe0f|\u3299\ufe0f|\u25fc\ufe0f|\u25fb\ufe0f|\u25aa\ufe0f|\u25ab\ufe0f|\U0001f3f3\ufe0f|\U0001f1e6\U0001f1e8|\U0001f1e6\U0001f1e9|\U0001f1e6\U0001f1ea|\U0001f1e6\U0001f1eb|\U0001f1e6\U0001f1ec|\U0001f1e6\U0001f1ee|\U0001f1e6\U0001f1f1|\U0001f1e6\U0001f1f2|\U0001f1e6\U0001f1f4|\U0001f1e6\U0001f1f6|\U0001f1e6\U0001f1f7|\U0001f1e6\U0001f1f8|\U0001f1e6\U0001f1f9|\U0001f1e6\U0001f1fa|\U0001f1e6\U0001f1fc|\U0001f1e6\U0001f1fd|\U0001f1e6\U0001f1ff|\U0001f1e7\U0001f1e6|\U0001f1e7\U0001f1e7|\U0001f1e7\U0001f1e9|\U0001f1e7\U0001f1ea|\U0001f1e7\U0001f1eb|\U0001f1e7\U0001f1ec|\U0001f1e7\U0001f1ed|\U0001f1e7\U0001f1ee|\U0001f1e7\U0001f1ef|\U0001f1e7\U0001f1f1|\U0001f1e7\U0001f1f2|\U0001f1e7\U0001f1f3|\U0001f1e7\U0001f1f4|\U0001f1e7\U0001f1f6|\U0001f1e7\U0001f1f7|\U0001f1e7\U0001f1f8|\U0001f1e7\U0001f1f9|\U0001f1e7\U0001f1fb|\U0001f1e7\U0001f1fc|\U0001f1e7\U0001f1fe|\U0001f1e7\U0001f1ff|\U0001f1e8\U0001f1e6|\U0001f1e8\U0001f1e8|\U0001f1e8\U0001f1e9|\U0001f1e8\U0001f1eb|\U0001f1e8\U0001f1ec|\U0001f1e8\U0001f1ed|\U0001f1e8\U0001f1ee|\U0001f1e8\U0001f1f0|\U0001f1e8\U0001f1f1|\U0001f1e8\U0001f1f2|\U0001f1e8\U0001f1f3|\U0001f1e8\U0001f1f4|\U0001f1e8\U0001f1f5|\U0001f1e8\U0001f1f7|\U0001f1e8\U0001f1fa|\U0001f1e8\U0001f1fb|\U0001f1e8\U0001f1fc|\U0001f1e8\U0001f1fd|\U0001f1e8\U0001f1fe|\U0001f1e8\U0001f1ff|\U0001f1e9\U0001f1ea|\U0001f1e9\U0001f1ec|\U0001f1e9\U0001f1ef|\U0001f1e9\U0001f1f0|\U0001f1e9\U0001f1f2|\U0001f1e9\U0001f1f4|\U0001f1e9\U0001f1ff|\U0001f1ea\U0001f1e6|\U0001f1ea\U0001f1e8|\U0001f1ea\U0001f1ea|\U0001f1ea\U0001f1ec|\U0001f1ea\U0001f1ed|\U0001f1ea\U0001f1f7|\U0001f1ea\U0001f1f8|\U0001f1ea\U0001f1f9|\U0001f1ea\U0001f1fa|\U0001f1eb\U0001f1ee|\U0001f1eb\U0001f1ef|\U0001f1eb\U0001f1f0|\U0001f1eb\U0001f1f2|\U0001f1eb\U0001f1f4|\U0001f1eb\U0001f1f7|\U0001f1ec\U0001f1e6|\U0001f1ec\U0001f1e7|\U0001f1ec\U0001f1e9|\U0001f1ec\U0001f1ea|\U0001f1ec\U0001f1eb|\U0001f1ec\U0001f1ec|\U0001f1ec\U0001f1ed|\U0001f1ec\U0001f1ee|\U0001f1ec\U0001f1f1|\U0001f1ec\U0001f1f2|\U0001f1ec\U0001f1f3|\U0001f1ec\U0001f1f5|\U0001f1ec\U0001f1f6|\U0001f1ec\U0001f1f7|\U0001f1ec\U0001f1f8|\U0001f1ec\U0001f1f9|\U0001f1ec\U0001f1fa|\U0001f1ec\U0001f1fc|\U0001f1ec\U0001f1f
e|\U0001f1ed\U0001f1f0|\U0001f1ed\U0001f1f2|\U0001f1ed\U0001f1f3|\U0001f1ed\U0001f1f7|\U0001f1ed\U0001f1f9|\U0001f1ed\U0001f1fa|\U0001f1ee\U0001f1e8|\U0001f1ee\U0001f1e9|\U0001f1ee\U0001f1ea|\U0001f1ee\U0001f1f1|\U0001f1ee\U0001f1f2|\U0001f1ee\U0001f1f3|\U0001f1ee\U0001f1f4|\U0001f1ee\U0001f1f6|\U0001f1ee\U0001f1f7|\U0001f1ee\U0001f1f8|\U0001f1ee\U0001f1f9|\U0001f1ef\U0001f1ea|\U0001f1ef\U0001f1f2|\U0001f1ef\U0001f1f4|\U0001f1ef\U0001f1f5|\U0001f1f0\U0001f1ea|\U0001f1f0\U0001f1ec|\U0001f1f0\U0001f1ed|\U0001f1f0\U0001f1ee|\U0001f1f0\U0001f1f2|\U0001f1f0\U0001f1f3|\U0001f1f0\U0001f1f5|\U0001f1f0\U0001f1f7|\U0001f1f0\U0001f1fc|\U0001f1f0\U0001f1fe|\U0001f1f0\U0001f1ff|\U0001f1f1\U0001f1e6|\U0001f1f1\U0001f1e7|\U0001f1f1\U0001f1e8|\U0001f1f1\U0001f1ee|\U0001f1f1\U0001f1f0|\U0001f1f1\U0001f1f7|\U0001f1f1\U0001f1f8|\U0001f1f1\U0001f1f9|\U0001f1f1\U0001f1fa|\U0001f1f1\U0001f1fb|\U0001f1f1\U0001f1fe|\U0001f1f2\U0001f1e6|\U0001f1f2\U0001f1e8|\U0001f1f2\U0001f1e9|\U0001f1f2\U0001f1ea|\U0001f1f2\U0001f1eb|\U0001f1f2\U0001f1ec|\U0001f1f2\U0001f1ed|\U0001f1f2\U0001f1f0|\U0001f1f2\U0001f1f1|\U0001f1f2\U0001f1f2|\U0001f1f2\U0001f1f3|\U0001f1f2\U0001f1f4|\U0001f1f2\U0001f1f5|\U0001f1f2\U0001f1f6|\U0001f1f2\U0001f1f7|\U0001f1f2\U0001f1f8|\U0001f1f2\U0001f1f9|\U0001f1f2\U0001f1fa|\U0001f1f2\U0001f1fb|\U0001f1f2\U0001f1fc|\U0001f1f2\U0001f1fd|\U0001f1f2\U0001f1fe|\U0001f1f2\U0001f1ff|\U0001f1f3\U0001f1e6|\U0001f1f3\U0001f1e8|\U0001f1f3\U0001f1ea|\U0001f1f3\U0001f1eb|\U0001f1f3\U0001f1ec|\U0001f1f3\U0001f1ee|\U0001f1f3\U0001f1f1|\U0001f1f3\U0001f1f4|\U0001f1f3\U0001f1f5|\U0001f1f3\U0001f1f7|\U0001f1f3\U0001f1fa|\U0001f1f3\U0001f1ff|\U0001f1f4\U0001f1f2|\U0001f1f5\U0001f1e6|\U0001f1f5\U0001f1ea|\U0001f1f5\U0001f1eb|\U0001f1f5\U0001f1ec|\U0001f1f5\U0001f1ed|\U0001f1f5\U0001f1f0|\U0001f1f5\U0001f1f1|\U0001f1f5\U0001f1f2|\U0001f1f5\U0001f1f3|\U0001f1f5\U0001f1f7|\U0001f1f5\U0001f1f8|\U0001f1f5\U0001f1f9|\U0001f1f5\U0001f1fc|\U0001f1f5\U0001f1fe|\U0001f1f6\U0001f1e6|\U0001f1f7\U0001f1ea|\U0001f1f7\U0001f1f4|\U0001f1f7\U0001f1f8|\U0001f1f7\U0001f1fa|\U0001f1f7\U0001f1fc|\U0001f1f8\U0001f1e6|\U0001f1f8\U0001f1e7|\U0001f1f8\U0001f1e8|\U0001f1f8\U0001f1e9|\U0001f1f8\U0001f1ea|\U0001f1f8\U0001f1ec|\U0001f1f8\U0001f1ed|\U0001f1f8\U0001f1ee|\U0001f1f8\U0001f1ef|\U0001f1f8\U0001f1f0|\U0001f1f8\U0001f1f1|\U0001f1f8\U0001f1f2|\U0001f1f8\U0001f1f3|\U0001f1f8\U0001f1f4|\U0001f1f8\U0001f1f7|\U0001f1f8\U0001f1f8|\U0001f1f8\U0001f1f9|\U0001f1f8\U0001f1fb|\U0001f1f8\U0001f1fd|\U0001f1f8\U0001f1fe|\U0001f1f8\U0001f1ff|\U0001f1f9\U0001f1e6|\U0001f1f9\U0001f1e8|\U0001f1f9\U0001f1e9|\U0001f1f9\U0001f1eb|\U0001f1f9\U0001f1ec|\U0001f1f9\U0001f1ed|\U0001f1f9\U0001f1ef|\U0001f1f9\U0001f1f0|\U0001f1f9\U0001f1f1|\U0001f1f9\U0001f1f2|\U0001f1f9\U0001f1f3|\U0001f1f9\U0001f1f4|\U0001f1f9\U0001f1f7|\U0001f1f9\U0001f1f9|\U0001f1f9\U0001f1fb|\U0001f1f9\U0001f1fc|\U0001f1f9\U0001f1ff|\U0001f1fa\U0001f1e6|\U0001f1fa\U0001f1ec|\U0001f1fa\U0001f1f2|\U0001f1fa\U0001f1f3|\U0001f1fa\U0001f1f8|\U0001f1fa\U0001f1fe|\U0001f1fa\U0001f1ff|\U0001f1fb\U0001f1e6|\U0001f1fb\U0001f1e8|\U0001f1fb\U0001f1ea|\U0001f1fb\U0001f1ec|\U0001f1fb\U0001f1ee|\U0001f1fb\U0001f1f3|\U0001f1fb\U0001f1fa|\U0001f1fc\U0001f1eb|\U0001f1fc\U0001f1f8|\U0001f1fd\U0001f1f0|\U0001f1fe\U0001f1ea|\U0001f1fe\U0001f1f9|\U0001f1ff\U0001f1e6|\U0001f1ff\U0001f1f2|\U0001f1ff\U0001f1fc|\U0001f600|\U0001f603|\U0001f604|\U0001f601|\U0001f606|\U0001f605|\U0001f923|\U0001f602|\U0001f642|\U0001f643|\U0001f609|\U0001f60a|\U0001f607|\U0001f970|\U0001f60d|\U0001f929|\U0001f618|\U0001f617|\u263a|\U0001f61
a|\U0001f619|\U0001f60b|\U0001f61b|\U0001f61c|\U0001f92a|\U0001f61d|\U0001f911|\U0001f917|\U0001f92d|\U0001f92b|\U0001f914|\U0001f910|\U0001f928|\U0001f610|\U0001f611|\U0001f636|\U0001f60f|\U0001f612|\U0001f644|\U0001f62c|\U0001f925|\U0001f60c|\U0001f614|\U0001f62a|\U0001f924|\U0001f634|\U0001f637|\U0001f912|\U0001f915|\U0001f922|\U0001f92e|\U0001f927|\U0001f975|\U0001f976|\U0001f974|\U0001f635|\U0001f92f|\U0001f920|\U0001f973|\U0001f60e|\U0001f913|\U0001f9d0|\U0001f615|\U0001f61f|\U0001f641|\u2639|\U0001f62e|\U0001f62f|\U0001f632|\U0001f633|\U0001f97a|\U0001f626|\U0001f627|\U0001f628|\U0001f630|\U0001f625|\U0001f622|\U0001f62d|\U0001f631|\U0001f616|\U0001f623|\U0001f61e|\U0001f613|\U0001f629|\U0001f62b|\U0001f971|\U0001f624|\U0001f621|\U0001f620|\U0001f92c|\U0001f608|\U0001f47f|\U0001f480|\u2620|\U0001f4a9|\U0001f921|\U0001f479|\U0001f47a|\U0001f47b|\U0001f47d|\U0001f47e|\U0001f916|\U0001f63a|\U0001f638|\U0001f639|\U0001f63b|\U0001f63c|\U0001f63d|\U0001f640|\U0001f63f|\U0001f63e|\U0001f648|\U0001f649|\U0001f64a|\U0001f48b|\U0001f48c|\U0001f498|\U0001f49d|\U0001f496|\U0001f497|\U0001f493|\U0001f49e|\U0001f495|\U0001f49f|\u2763|\U0001f494|\u2764|\U0001f9e1|\U0001f49b|\U0001f49a|\U0001f499|\U0001f49c|\U0001f90e|\U0001f5a4|\U0001f90d|\U0001f4af|\U0001f4a2|\U0001f4a5|\U0001f4ab|\U0001f4a6|\U0001f4a8|\U0001f573|\U0001f4a3|\U0001f4ac|\U0001f5e8|\U0001f5ef|\U0001f4ad|\U0001f4a4|\U0001f44b|\U0001f91a|\U0001f590|\u270b|\U0001f596|\U0001f44c|\U0001f90f|\u270c|\U0001f91e|\U0001f91f|\U0001f918|\U0001f919|\U0001f448|\U0001f449|\U0001f446|\U0001f595|\U0001f447|\u261d|\U0001f44d|\U0001f44e|\u270a|\U0001f44a|\U0001f91b|\U0001f91c|\U0001f44f|\U0001f64c|\U0001f450|\U0001f932|\U0001f91d|\U0001f64f|\u270d|\U0001f485|\U0001f933|\U0001f4aa|\U0001f9be|\U0001f9bf|\U0001f9b5|\U0001f9b6|\U0001f442|\U0001f9bb|\U0001f443|\U0001f9e0|\U0001f9b7|\U0001f9b4|\U0001f440|\U0001f441|\U0001f445|\U0001f444|\U0001f476|\U0001f9d2|\U0001f466|\U0001f467|\U0001f9d1|\U0001f471|\U0001f468|\U0001f9d4|\U0001f469|\U0001f9d3|\U0001f474|\U0001f475|\U0001f64d|\U0001f64e|\U0001f645|\U0001f646|\U0001f481|\U0001f64b|\U0001f9cf|\U0001f647|\U0001f926|\U0001f937|\U0001f46e|\U0001f575|\U0001f482|\U0001f477|\U0001f934|\U0001f478|\U0001f473|\U0001f472|\U0001f9d5|\U0001f935|\U0001f470|\U0001f930|\U0001f931|\U0001f47c|\U0001f385|\U0001f936|\U0001f9b8|\U0001f9b9|\U0001f9d9|\U0001f9da|\U0001f9db|\U0001f9dc|\U0001f9dd|\U0001f9de|\U0001f9df|\U0001f486|\U0001f487|\U0001f6b6|\U0001f9cd|\U0001f9ce|\U0001f3c3|\U0001f483|\U0001f57a|\U0001f574|\U0001f46f|\U0001f9d6|\U0001f9d7|\U0001f93a|\U0001f3c7|\u26f7|\U0001f3c2|\U0001f3cc|\U0001f3c4|\U0001f6a3|\U0001f3ca|\u26f9|\U0001f3cb|\U0001f6b4|\U0001f6b5|\U0001f938|\U0001f93c|\U0001f93d|\U0001f93e|\U0001f939|\U0001f9d8|\U0001f6c0|\U0001f6cc|\U0001f46d|\U0001f46b|\U0001f46c|\U0001f48f|\U0001f491|\U0001f46a|\U0001f5e3|\U0001f464|\U0001f465|\U0001f463|\U0001f3fb|\U0001f3fc|\U0001f3fd|\U0001f3fe|\U0001f3ff|\U0001f9b0|\U0001f9b1|\U0001f9b3|\U0001f9b2|\U0001f435|\U0001f412|\U0001f98d|\U0001f9a7|\U0001f436|\U0001f415|\U0001f9ae|\U0001f429|\U0001f43a|\U0001f98a|\U0001f99d|\U0001f431|\U0001f408|\U0001f981|\U0001f42f|\U0001f405|\U0001f406|\U0001f434|\U0001f40e|\U0001f984|\U0001f993|\U0001f98c|\U0001f42e|\U0001f402|\U0001f403|\U0001f404|\U0001f437|\U0001f416|\U0001f417|\U0001f43d|\U0001f40f|\U0001f411|\U0001f410|\U0001f42a|\U0001f42b|\U0001f999|\U0001f992|\U0001f418|\U0001f98f|\U0001f99b|\U0001f42d|\U0001f401|\U0001f400|\U0001f439|\U0001f430|\U0001f407|\U0001f43f|\U0001f994|\U0001f987|\U0001f43b|\U0001f428|\U0001f43c|\U0001f9a5|
\U0001f9a6|\U0001f9a8|\U0001f998|\U0001f9a1|\U0001f43e|\U0001f983|\U0001f414|\U0001f413|\U0001f423|\U0001f424|\U0001f425|\U0001f426|\U0001f427|\U0001f54a|\U0001f985|\U0001f986|\U0001f9a2|\U0001f989|\U0001f9a9|\U0001f99a|\U0001f99c|\U0001f438|\U0001f40a|\U0001f422|\U0001f98e|\U0001f40d|\U0001f432|\U0001f409|\U0001f995|\U0001f996|\U0001f433|\U0001f40b|\U0001f42c|\U0001f41f|\U0001f420|\U0001f421|\U0001f988|\U0001f419|\U0001f41a|\U0001f40c|\U0001f98b|\U0001f41b|\U0001f41c|\U0001f41d|\U0001f41e|\U0001f997|\U0001f577|\U0001f578|\U0001f982|\U0001f99f|\U0001f9a0|\U0001f490|\U0001f338|\U0001f4ae|\U0001f3f5|\U0001f339|\U0001f940|\U0001f33a|\U0001f33b|\U0001f33c|\U0001f337|\U0001f331|\U0001f332|\U0001f333|\U0001f334|\U0001f335|\U0001f33e|\U0001f33f|\u2618|\U0001f340|\U0001f341|\U0001f342|\U0001f343|\U0001f347|\U0001f348|\U0001f349|\U0001f34a|\U0001f34b|\U0001f34c|\U0001f34d|\U0001f96d|\U0001f34e|\U0001f34f|\U0001f350|\U0001f351|\U0001f352|\U0001f353|\U0001f95d|\U0001f345|\U0001f965|\U0001f951|\U0001f346|\U0001f954|\U0001f955|\U0001f33d|\U0001f336|\U0001f952|\U0001f96c|\U0001f966|\U0001f9c4|\U0001f9c5|\U0001f344|\U0001f95c|\U0001f330|\U0001f35e|\U0001f950|\U0001f956|\U0001f968|\U0001f96f|\U0001f95e|\U0001f9c7|\U0001f9c0|\U0001f356|\U0001f357|\U0001f969|\U0001f953|\U0001f354|\U0001f35f|\U0001f355|\U0001f32d|\U0001f96a|\U0001f32e|\U0001f32f|\U0001f959|\U0001f9c6|\U0001f95a|\U0001f373|\U0001f958|\U0001f372|\U0001f963|\U0001f957|\U0001f37f|\U0001f9c8|\U0001f9c2|\U0001f96b|\U0001f371|\U0001f358|\U0001f359|\U0001f35a|\U0001f35b|\U0001f35c|\U0001f35d|\U0001f360|\U0001f362|\U0001f363|\U0001f364|\U0001f365|\U0001f96e|\U0001f361|\U0001f95f|\U0001f960|\U0001f961|\U0001f980|\U0001f99e|\U0001f990|\U0001f991|\U0001f9aa|\U0001f366|\U0001f367|\U0001f368|\U0001f369|\U0001f36a|\U0001f382|\U0001f370|\U0001f9c1|\U0001f967|\U0001f36b|\U0001f36c|\U0001f36d|\U0001f36e|\U0001f36f|\U0001f37c|\U0001f95b|\u2615|\U0001f375|\U0001f376|\U0001f37e|\U0001f377|\U0001f378|\U0001f379|\U0001f37a|\U0001f37b|\U0001f942|\U0001f943|\U0001f964|\U0001f9c3|\U0001f9c9|\U0001f9ca|\U0001f962|\U0001f37d|\U0001f374|\U0001f944|\U0001f52a|\U0001f3fa|\U0001f30d|\U0001f30e|\U0001f30f|\U0001f310|\U0001f5fa|\U0001f5fe|\U0001f9ed|\U0001f3d4|\u26f0|\U0001f30b|\U0001f5fb|\U0001f3d5|\U0001f3d6|\U0001f3dc|\U0001f3dd|\U0001f3de|\U0001f3df|\U0001f3db|\U0001f3d7|\U0001f9f1|\U0001f3d8|\U0001f3da|\U0001f3e0|\U0001f3e1|\U0001f3e2|\U0001f3e3|\U0001f3e4|\U0001f3e5|\U0001f3e6|\U0001f3e8|\U0001f3e9|\U0001f3ea|\U0001f3eb|\U0001f3ec|\U0001f3ed|\U0001f3ef|\U0001f3f0|\U0001f492|\U0001f5fc|\U0001f5fd|\u26ea|\U0001f54c|\U0001f6d5|\U0001f54d|\u26e9|\U0001f54b|\u26f2|\u26fa|\U0001f301|\U0001f303|\U0001f3d9|\U0001f304|\U0001f305|\U0001f306|\U0001f307|\U0001f309|\u2668|\U0001f3a0|\U0001f3a1|\U0001f3a2|\U0001f488|\U0001f3aa|\U0001f682|\U0001f683|\U0001f684|\U0001f685|\U0001f686|\U0001f687|\U0001f688|\U0001f689|\U0001f68a|\U0001f69d|\U0001f69e|\U0001f68b|\U0001f68c|\U0001f68d|\U0001f68e|\U0001f690|\U0001f691|\U0001f692|\U0001f693|\U0001f694|\U0001f695|\U0001f696|\U0001f697|\U0001f698|\U0001f699|\U0001f69a|\U0001f69b|\U0001f69c|\U0001f3ce|\U0001f3cd|\U0001f6f5|\U0001f9bd|\U0001f9bc|\U0001f6fa|\U0001f6b2|\U0001f6f4|\U0001f6f9|\U0001f68f|\U0001f6e3|\U0001f6e4|\U0001f6e2|\u26fd|\U0001f6a8|\U0001f6a5|\U0001f6a6|\U0001f6d1|\U0001f6a7|\u2693|\u26f5|\U0001f6f6|\U0001f6a4|\U0001f6f3|\u26f4|\U0001f6e5|\U0001f6a2|\u2708|\U0001f6e9|\U0001f6eb|\U0001f6ec|\U0001fa82|\U0001f4ba|\U0001f681|\U0001f69f|\U0001f6a0|\U0001f6a1|\U0001f6f0|\U0001f680|\U0001f6f8|\U0001f6ce|\U0001f9f3|\u231b|\u23f3|\u231a|
\u23f0|\u23f1|\u23f2|\U0001f570|\U0001f55b|\U0001f567|\U0001f550|\U0001f55c|\U0001f551|\U0001f55d|\U0001f552|\U0001f55e|\U0001f553|\U0001f55f|\U0001f554|\U0001f560|\U0001f555|\U0001f561|\U0001f556|\U0001f562|\U0001f557|\U0001f563|\U0001f558|\U0001f564|\U0001f559|\U0001f565|\U0001f55a|\U0001f566|\U0001f311|\U0001f312|\U0001f313|\U0001f314|\U0001f315|\U0001f316|\U0001f317|\U0001f318|\U0001f319|\U0001f31a|\U0001f31b|\U0001f31c|\U0001f321|\u2600|\U0001f31d|\U0001f31e|\U0001fa90|\u2b50|\U0001f31f|\U0001f320|\U0001f30c|\u2601|\u26c5|\u26c8|\U0001f324|\U0001f325|\U0001f326|\U0001f327|\U0001f328|\U0001f329|\U0001f32a|\U0001f32b|\U0001f32c|\U0001f300|\U0001f308|\U0001f302|\u2602|\u2614|\u26f1|\u26a1|\u2744|\u2603|\u26c4|\u2604|\U0001f525|\U0001f4a7|\U0001f30a|\U0001f383|\U0001f384|\U0001f386|\U0001f387|\U0001f9e8|\u2728|\U0001f388|\U0001f389|\U0001f38a|\U0001f38b|\U0001f38d|\U0001f38e|\U0001f38f|\U0001f390|\U0001f391|\U0001f9e7|\U0001f380|\U0001f381|\U0001f397|\U0001f39f|\U0001f3ab|\U0001f396|\U0001f3c6|\U0001f3c5|\U0001f947|\U0001f948|\U0001f949|\u26bd|\u26be|\U0001f94e|\U0001f3c0|\U0001f3d0|\U0001f3c8|\U0001f3c9|\U0001f3be|\U0001f94f|\U0001f3b3|\U0001f3cf|\U0001f3d1|\U0001f3d2|\U0001f94d|\U0001f3d3|\U0001f3f8|\U0001f94a|\U0001f94b|\U0001f945|\u26f3|\u26f8|\U0001f3a3|\U0001f93f|\U0001f3bd|\U0001f3bf|\U0001f6f7|\U0001f94c|\U0001f3af|\U0001fa80|\U0001fa81|\U0001f3b1|\U0001f52e|\U0001f9ff|\U0001f3ae|\U0001f579|\U0001f3b0|\U0001f3b2|\U0001f9e9|\U0001f9f8|\u2660|\u2665|\u2666|\u2663|\u265f|\U0001f0cf|\U0001f004|\U0001f3b4|\U0001f3ad|\U0001f5bc|\U0001f3a8|\U0001f9f5|\U0001f9f6|\U0001f453|\U0001f576|\U0001f97d|\U0001f97c|\U0001f9ba|\U0001f454|\U0001f455|\U0001f456|\U0001f9e3|\U0001f9e4|\U0001f9e5|\U0001f9e6|\U0001f457|\U0001f458|\U0001f97b|\U0001fa71|\U0001fa72|\U0001fa73|\U0001f459|\U0001f45a|\U0001f45b|\U0001f45c|\U0001f45d|\U0001f6cd|\U0001f392|\U0001f45e|\U0001f45f|\U0001f97e|\U0001f97f|\U0001f460|\U0001f461|\U0001fa70|\U0001f462|\U0001f451|\U0001f452|\U0001f3a9|\U0001f393|\U0001f9e2|\u26d1|\U0001f4ff|\U0001f484|\U0001f48d|\U0001f48e|\U0001f507|\U0001f508|\U0001f509|\U0001f50a|\U0001f4e2|\U0001f4e3|\U0001f4ef|\U0001f514|\U0001f515|\U0001f3bc|\U0001f3b5|\U0001f3b6|\U0001f399|\U0001f39a|\U0001f39b|\U0001f3a4|\U0001f3a7|\U0001f4fb|\U0001f3b7|\U0001f3b8|\U0001f3b9|\U0001f3ba|\U0001f3bb|\U0001fa95|\U0001f941|\U0001f4f1|\U0001f4f2|\u260e|\U0001f4de|\U0001f4df|\U0001f4e0|\U0001f50b|\U0001f50c|\U0001f4bb|\U0001f5a5|\U0001f5a8|\u2328|\U0001f5b1|\U0001f5b2|\U0001f4bd|\U0001f4be|\U0001f4bf|\U0001f4c0|\U0001f9ee|\U0001f3a5|\U0001f39e|\U0001f4fd|\U0001f3ac|\U0001f4fa|\U0001f4f7|\U0001f4f8|\U0001f4f9|\U0001f4fc|\U0001f50d|\U0001f50e|\U0001f56f|\U0001f4a1|\U0001f526|\U0001f3ee|\U0001fa94|\U0001f4d4|\U0001f4d5|\U0001f4d6|\U0001f4d7|\U0001f4d8|\U0001f4d9|\U0001f4da|\U0001f4d3|\U0001f4d2|\U0001f4c3|\U0001f4dc|\U0001f4c4|\U0001f4f0|\U0001f5de|\U0001f4d1|\U0001f516|\U0001f3f7|\U0001f4b0|\U0001f4b4|\U0001f4b5|\U0001f4b6|\U0001f4b7|\U0001f4b8|\U0001f4b3|\U0001f9fe|\U0001f4b9|\U0001f4b1|\U0001f4b2|\u2709|\U0001f4e7|\U0001f4e8|\U0001f4e9|\U0001f4e4|\U0001f4e5|\U0001f4e6|\U0001f4eb|\U0001f4ea|\U0001f4ec|\U0001f4ed|\U0001f4ee|\U0001f5f3|\u270f|\u2712|\U0001f58b|\U0001f58a|\U0001f58c|\U0001f58d|\U0001f4dd|\U0001f4bc|\U0001f4c1|\U0001f4c2|\U0001f5c2|\U0001f4c5|\U0001f4c6|\U0001f5d2|\U0001f5d3|\U0001f4c7|\U0001f4c8|\U0001f4c9|\U0001f4ca|\U0001f4cb|\U0001f4cc|\U0001f4cd|\U0001f4ce|\U0001f587|\U0001f4cf|\U0001f4d0|\u2702|\U0001f5c3|\U0001f5c4|\U0001f5d1|\U0001f512|\U0001f513|\U0001f50f|\U0001f510|\U0001f511|\U0001f5dd|\U0001f528|\U
0001fa93|\u26cf|\u2692|\U0001f6e0|\U0001f5e1|\u2694|\U0001f52b|\U0001f3f9|\U0001f6e1|\U0001f527|\U0001f529|\u2699|\U0001f5dc|\u2696|\U0001f9af|\U0001f517|\u26d3|\U0001f9f0|\U0001f9f2|\u2697|\U0001f9ea|\U0001f9eb|\U0001f9ec|\U0001f52c|\U0001f52d|\U0001f4e1|\U0001f489|\U0001fa78|\U0001f48a|\U0001fa79|\U0001fa7a|\U0001f6aa|\U0001f6cf|\U0001f6cb|\U0001fa91|\U0001f6bd|\U0001f6bf|\U0001f6c1|\U0001fa92|\U0001f9f4|\U0001f9f7|\U0001f9f9|\U0001f9fa|\U0001f9fb|\U0001f9fc|\U0001f9fd|\U0001f9ef|\U0001f6d2|\U0001f6ac|\u26b0|\u26b1|\U0001f5ff|\U0001f3e7|\U0001f6ae|\U0001f6b0|\u267f|\U0001f6b9|\U0001f6ba|\U0001f6bb|\U0001f6bc|\U0001f6be|\U0001f6c2|\U0001f6c3|\U0001f6c4|\U0001f6c5|\u26a0|\U0001f6b8|\u26d4|\U0001f6ab|\U0001f6b3|\U0001f6ad|\U0001f6af|\U0001f6b1|\U0001f6b7|\U0001f4f5|\U0001f51e|\u2622|\u2623|\u2b06|\u2197|\u27a1|\u2198|\u2b07|\u2199|\u2b05|\u2196|\u2195|\u2194|\u21a9|\u21aa|\u2934|\u2935|\U0001f503|\U0001f504|\U0001f519|\U0001f51a|\U0001f51b|\U0001f51c|\U0001f51d|\U0001f6d0|\u269b|\U0001f549|\u2721|\u2638|\u262f|\u271d|\u2626|\u262a|\u262e|\U0001f54e|\U0001f52f|\u2648|\u2649|\u264a|\u264b|\u264c|\u264d|\u264e|\u264f|\u2650|\u2651|\u2652|\u2653|\u26ce|\U0001f500|\U0001f501|\U0001f502|\u25b6|\u23e9|\u23ed|\u23ef|\u25c0|\u23ea|\u23ee|\U0001f53c|\u23eb|\U0001f53d|\u23ec|\u23f8|\u23f9|\u23fa|\u23cf|\U0001f3a6|\U0001f505|\U0001f506|\U0001f4f6|\U0001f4f3|\U0001f4f4|\u2640|\u2642|\u2695|\u267e|\u267b|\u269c|\U0001f531|\U0001f4db|\U0001f530|\u2b55|\u2705|\u2611|\u2714|\u2716|\u274c|\u274e|\u2795|\u2796|\u2797|\u27b0|\u27bf|\u303d|\u2733|\u2734|\u2747|\u203c|\u2049|\u2753|\u2754|\u2755|\u2757|\u3030|\xa9|\xae|\u2122|\U0001f51f|\U0001f520|\U0001f521|\U0001f522|\U0001f523|\U0001f524|\U0001f170|\U0001f18e|\U0001f171|\U0001f191|\U0001f192|\U0001f193|\u2139|\U0001f194|\u24c2|\U0001f195|\U0001f196|\U0001f17e|\U0001f197|\U0001f17f|\U0001f198|\U0001f199|\U0001f19a|\U0001f201|\U0001f202|\U0001f237|\U0001f236|\U0001f22f|\U0001f250|\U0001f239|\U0001f21a|\U0001f232|\U0001f251|\U0001f238|\U0001f234|\U0001f233|\u3297|\u3299|\U0001f23a|\U0001f235|\U0001f534|\U0001f7e0|\U0001f7e1|\U0001f7e2|\U0001f535|\U0001f7e3|\U0001f7e4|\u26ab|\u26aa|\U0001f7e5|\U0001f7e7|\U0001f7e8|\U0001f7e9|\U0001f7e6|\U0001f7ea|\U0001f7eb|\u2b1b|\u2b1c|\u25fc|\u25fb|\u25fe|\u25fd|\u25aa|\u25ab|\U0001f536|\U0001f537|\U0001f538|\U0001f539|\U0001f53a|\U0001f53b|\U0001f4a0|\U0001f518|\U0001f533|\U0001f532|\U0001f3c1|\U0001f6a9|\U0001f38c|\U0001f3f4|\U0001f3f3')) -> str:
"""Return the string obtained by replacing all emojis in 'text' by the replacement 'repl'.
Args:
            text (str): input text in which to perform the replacement
            repl (str): string that replaces each emoji found in text
Reference:
akkez/emoji.py: Python emoji regexp / python emoji detection
https://gist.github.com/akkez/99ceeae2f13c9d8d9be7df0279e2c438
"""
text = regex.sub(repl, text)
return text
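    # A minimal standalone sketch (not part of the original file) of the idiom used
    # above: the precompiled pattern is passed as a default argument, so the very large
    # emoji regex is compiled once at function-definition time rather than on every call.
    #
    #     import re
    #     demo = re.compile('\U0001f600|\U0001f602')      # tiny subset of the full pattern
    #     assert demo.sub('', 'hi \U0001f600') == 'hi '   # each emoji replaced by ''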
def _email_normalize(self, text: str, repl: str, regex=re.compile(r'[a-zA-Z0-9.!#$%&\'*+/=?^_`{|}~-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9.]+')) -> str:
"""Return the string obtained by replacing all email addresses in 'text' by the replacement 'repl'.
Args:
            text (str): input text in which to perform the replacement
            repl (str): string that replaces each email address found in text
"""
text = regex.sub(repl, text)
return text
def _tel_normalize(self, text: str, repl: str, regex=re.compile(r'[()+\d.\-]*[ ]?\d{2,4}[-. ]+\d{3,4}[-. ]+\d{3,4}')) -> str:
"""Return the string obtained by replacing all phone numbers in 'text' by the replacement 'repl'.
Args:
            text (str): input text in which to perform the replacement
            repl (str): string that replaces each phone number found in text
"""
text = regex.sub(repl, text)
        return text

# --- End of source file: awesome-archive/prenlp, prenlp/data/normalizer.py (Apache-2.0). ---
"""BERT Training Script."""
import functools
from typing import Any, Callable, Dict, Tuple, Optional, Type
from absl import logging
from clu import metric_writers
from clu import periodic_actions
from flax import jax_utils
import flax.linen as nn
import jax
from jax.experimental import optimizers as jax_optimizers
import jax.numpy as jnp
import jax.profiler
import ml_collections
import numpy as np
from scenic.dataset_lib import dataset_utils
from scenic.projects.baselines.bert import bert_base_model
from scenic.projects.baselines.bert import train_utils as bert_train_utils
from scenic.train_lib import lr_schedules
from scenic.train_lib import optimizers
from scenic.train_lib import pretrain_utils
from scenic.train_lib import train_utils
def train_step(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
learning_rate_fn: Callable[[int], float],
loss_fn: bert_base_model.LossFn,
metrics_fn: bert_base_model.MetricFn,
config: ml_collections.ConfigDict,
debug: Optional[bool] = False
) -> Tuple[train_utils.TrainState, Dict[str, Tuple[float, int]], float]:
"""Runs a single step of training.
Given the state of the training and a batch of data, computes
the loss and updates the parameters of the model.
Note that in this code, the buffers of the first (train_state) and second
(batch) arguments are donated to the computation.
Args:
flax_model: A Flax model.
train_state: The state of training including the current global_step,
model_state, rng, and optimizer. The buffer of this argument can be
donated to the computation.
batch: A single batch of data. The buffer of this argument can be donated to
the computation.
learning_rate_fn: Learning rate scheduler which given the global_step
generates the learning rate.
loss_fn: A loss function that given logits, a batch, and parameters of the
model calculates the loss.
metrics_fn: A metrics function that given logits and batch of data,
calculates the metrics as well as the loss.
config: Configurations of the experiment.
debug: Whether the debug mode is enabled during training. `debug=True`
enables model specific logging/storing some values using
jax.host_callback.
Returns:
Updated state of training, computed metrics, and learning rate for logging.
"""
new_rng, rng = jax.random.split(train_state.rng)
# Bind the rng to the host/device we are on.
dropout_rng = train_utils.bind_rng_to_host_device(
rng, axis_name='batch', bind_to='device')
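  # With bind_to='device', the step rng is folded with this device's position along the
  # 'batch' axis, so every device draws an independent dropout stream from the same seed.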
def training_loss_fn(params):
variables = {'params': params, **train_state.model_state}
output, new_model_state = flax_model.apply(
variables,
batch,
mutable=['batch_stats'],
train=True,
rngs={'dropout': dropout_rng},
debug=debug)
loss = loss_fn(output, batch, variables['params'])
return loss, (new_model_state, output)
compute_gradient_fn = jax.value_and_grad(training_loss_fn, has_aux=True)
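  # has_aux=True: `training_loss_fn` returns (loss, aux); gradients are taken w.r.t. the
  # scalar loss only, and aux, i.e. (new_model_state, output), is passed through unchanged.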
step = train_state.global_step
lr = learning_rate_fn(step)
(train_cost,
(new_model_state,
output)), grad = compute_gradient_fn(train_state.optimizer.target)
del train_cost
# We clip gradients before pmean in BERT.
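  # Clipping each device's local gradient here, before the cross-device pmean below, is
  # not equivalent to clipping the averaged gradient: every local gradient is bounded by
  # max_grad_norm on its own.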
if config.get('max_grad_norm', None) is not None:
grad = jax_optimizers.clip_grads(grad, config.max_grad_norm)
# Re-use same axis_name as in the call to `pmap(...train_step...)` below.
grad = jax.lax.pmean(grad, axis_name='batch')
new_optimizer = train_state.optimizer.apply_gradient(grad, learning_rate=lr)
# Explicit weight decay, if necessary.
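  # The decay is applied to the already-updated parameters, after the gradient step, and
  # only to parameters whose name contains 'kernel', so biases and layer-norm parameters
  # are left undecayed.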
if config.get('explicit_weight_decay', None) is not None:
new_optimizer = new_optimizer.replace(
target=optimizers.tree_map_with_names(
functools.partial(
optimizers.decay_weight_fn,
lr=lr,
decay=config.explicit_weight_decay),
new_optimizer.target,
match_name_fn=lambda name: 'kernel' in name))
metrics = metrics_fn(output, batch)
new_train_state = train_state.replace( # pytype: disable=attribute-error
global_step=step + 1,
optimizer=new_optimizer,
model_state=new_model_state,
rng=new_rng)
return new_train_state, metrics, lr
def eval_step(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
metrics_fn: bert_base_model.MetricFn,
all_gather: bool = False,
debug: Optional[bool] = False
) -> Tuple[Dict[str, Tuple[float, int]], Optional[jnp.ndarray],
Optional[jnp.ndarray]]:
"""Runs a single step of training.
Note that in this code, the buffer of the second argument (batch) is donated
to the computation.
Assumed API of metrics_fn is:
```metrics = metrics_fn(logits, batch)
where batch is yielded by the batch iterator, and metrics is a dictionary
mapping metric name to a vector of per example measurements. eval_step will
aggregate (by summing) all per example measurements and divide by the
aggregated normalizers. For each given metric we compute:
1/N sum_{b in batch_iter} metric(b), where N is the sum of normalizer
over all batches.
Args:
flax_model: A Flax model.
train_state: TrainState, the state of training including the current
global_step, model_state, rng, and optimizer. The buffer of this argument
can be donated to the computation.
    batch: A single batch of data. The buffer of this argument can be donated
      to the computation.
metrics_fn: A metrics function, that given logits and batch of data,
calculates the metrics as well as the loss.
    all_gather: If True, the function gathers the batch and the model output
      from all hosts, using `jax.lax.all_gather`, and returns them, e.g., for
      computing global metrics on CPU.
debug: Whether the debug mode is enabled during evaluation. `debug=True`
enables model specific logging/storing some values using
jax.host_callback.
Returns:
Calculated metrics and optionally output, and batch after all_gather.
"""
variables = {
'params': train_state.optimizer.target,
**train_state.model_state
}
output = flax_model.apply(
variables, batch, train=False, mutable=False, debug=debug)
metrics = metrics_fn(output, batch)
if all_gather:
output = jax.lax.all_gather(output, 'batch')
batch = jax.lax.all_gather(batch, 'batch')
return metrics, output, batch
else:
return metrics, None, None
def representation_fn(
*,
flax_model: nn.Module,
train_state: train_utils.TrainState,
batch: bert_base_model.Batch,
representation_layer: str,
gather_to_host: bool = True
) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Feeds the inputs to the model and returns their representations.
Args:
flax_model: A Flax model.
train_state: TrainState, the state of training including the current
global_step, model_state, rng, and optimizer. The buffer of this argument
can be donated to the computation.
batch: A single batch of data from the dataset.
representation_layer: The name of the layer to use as the representation.
gather_to_host: Whether to gather results from all devices to the host,
rather than leaving them distributed.
Returns:
Representation learned by the model for the given inputs and the labels and
masks. If `gather_to_host` is True, these are collected from all hosts.
"""
variables = {
'params': train_state.optimizer.target,
**train_state.model_state
}
representation_layer_parts = representation_layer.split('/')
filter_rep = lambda mdl, _: mdl.name == representation_layer_parts[-1]
_, model_state = flax_model.apply(
variables,
batch,
train=False,
capture_intermediates=filter_rep,
mutable=['intermediates'],
transfer_mode=True,
debug=False)
if 'intermediates' not in model_state:
raise ValueError(f'Layer with name "{representation_layer}"'
' does not exist in your model.')
representation = model_state['intermediates']
for rep_layer in representation_layer_parts:
if rep_layer:
representation = representation[rep_layer]
representation = representation['__call__'][0]
if gather_to_host:
representation = jax.lax.all_gather(representation, 'batch')
batch = jax.lax.all_gather(batch, 'batch')
return representation, batch['label'], batch['batch_mask']
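# Illustrative note (not part of the original file): `representation_layer` is a
# '/'-separated path into the captured intermediates. For a hypothetical layer
# named 'pre_logits', the lookup loop in `representation_fn` above reduces to
# roughly:
#
#   representation = model_state['intermediates']['pre_logits']['__call__'][0]
#
# i.e. the first positional output captured for that module's `__call__`.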
def train(
*,
rng: jnp.ndarray,
config: ml_collections.ConfigDict,
model_cls: Type[bert_base_model.BERTBaseModel],
dataset: dataset_utils.Dataset,
workdir: str,
writer: metric_writers.MetricWriter,
) -> Tuple[train_utils.TrainState, Dict[str, Any], Dict[str, Any]]:
"""Main training loop lives in this function.
Given the model class and dataset, it prepares the items needed to run the
training, including the TrainState.
Args:
rng: Jax rng key.
config: Configurations of the experiment.
model_cls: Model class; A model has a flax_module, a loss_fn, and a
metrics_fn associated with it.
dataset: The dataset that has train_iter, eval_iter, meta_data, and
optionally, test_iter.
workdir: Directory for checkpointing.
writer: CLU metrics writer instance.
Returns:
train_state that has the state of training (including current
global_step, model_state, rng, and the optimizer), train_summary
and eval_summary which are dict of metrics. These outputs are used for
regression testing.
"""
lead_host = jax.process_index() == 0
# Build the loss_fn, metrics, and flax_model.
model = model_cls(config, dataset.meta_data)
# Initialize model.
rng, init_rng = jax.random.split(rng)
(params, model_state, num_trainable_params,
gflops) = bert_train_utils.initialize_bert_model(
model_def=model.flax_model,
input_spec=dataset.meta_data['input_spec'],
config=config,
rngs=init_rng)
# Create optimizer.
  # We jit this so that the arrays it creates are placed on the same device as
  # the input, in this case the CPU. Otherwise they would end up on device[0].
optimizer = jax.jit(
optimizers.get_optimizer(config).create, backend='cpu')(
params)
rng, train_rng = jax.random.split(rng)
train_state = train_utils.TrainState(
global_step=0,
optimizer=optimizer,
model_state=model_state,
rng=train_rng,
accum_train_time=0)
start_step = train_state.global_step
if config.checkpoint:
train_state, start_step = train_utils.restore_checkpoint(
workdir, train_state)
if (start_step == 0 # Which means "no" checkpoint is restored!
and config.get('init_from') is not None):
restored_model_cfg = config.init_from.get('model_config')
init_checkpoint_path = config.init_from.get('checkpoint_path')
restored_train_state = pretrain_utils.restore_pretrained_checkpoint(
init_checkpoint_path, train_state, assert_exist=True)
# Load params from the init_model.
train_state = model.init_from_train_state( # pytype: disable=attribute-error
train_state, restored_train_state, restored_model_cfg)
del restored_train_state
  # Replicate the optimizer, state, and rng.
train_state = jax_utils.replicate(train_state)
del params # Do not keep a copy of the initial params.
# Calculate the total number of training steps.
total_steps, steps_per_epoch = train_utils.get_num_training_steps(
config, dataset.meta_data)
# Get learning rate scheduler.
learning_rate_fn = lr_schedules.get_learning_rate_fn(config)
train_step_pmapped = jax.pmap(
functools.partial(
train_step,
flax_model=model.flax_model,
learning_rate_fn=learning_rate_fn,
loss_fn=model.loss_function,
metrics_fn=model.get_metrics_fn('train'),
config=config,
debug=config.debug_train),
axis_name='batch',
# We can donate both buffers of train_state and train_batch.
donate_argnums=(0, 1),
)
eval_step_pmapped = jax.pmap(
functools.partial(
eval_step,
flax_model=model.flax_model,
metrics_fn=model.get_metrics_fn('validation'),
all_gather=config.get('global_metrics', False),
debug=config.debug_eval),
axis_name='batch',
# We can donate the eval_batch's buffer.
donate_argnums=(1,),
)
if 'fewshot' in config:
representation_fn_pmaped = jax.pmap(
functools.partial(
representation_fn,
flax_model=model.flax_model,
representation_layer=config.fewshot.representation_layer),
# We can donate the batch's buffer.
donate_argnums=(1,),
axis_name='batch')
fewshotter = bert_train_utils.BERTFewShotEvaluator(representation_fn_pmaped,
config.fewshot)
log_eval_steps = config.get('log_eval_steps') or steps_per_epoch
if not log_eval_steps:
raise ValueError("'log_eval_steps' should be specified in the config.")
checkpoint_steps = config.get('checkpoint_steps') or log_eval_steps
log_summary_steps = config.get('log_summary_steps') or log_eval_steps
# Ceil rounding such that we include the last incomplete batch.
total_eval_steps = int(
np.ceil(dataset.meta_data['num_eval_examples'] / config.batch_size))
steps_per_eval = config.get('steps_per_eval') or total_eval_steps
  # Compute global metrics only if `global_metrics` is set in the config and
  # we are the lead host.
compute_global_metrics = False
if config.get('global_metrics', False) and lead_host:
compute_global_metrics = True
if compute_global_metrics:
global_metrics_evaluator = bert_train_utils.BERTGlobalEvaluator(
config.global_metrics)
train_metrics, extra_training_logs = [], []
train_summary, eval_summary = None, None
chrono = train_utils.Chrono(
first_step=start_step,
total_steps=total_steps,
steps_per_epoch=steps_per_epoch,
global_bs=config.batch_size,
accum_train_time=int(jax_utils.unreplicate(train_state.accum_train_time)),
example_type='example')
logging.info('Starting training loop at step %d.', start_step + 1)
report_progress = periodic_actions.ReportProgress(
num_train_steps=total_steps, writer=writer)
hooks = [report_progress]
if config.get('xprof', True) and lead_host:
hooks.append(periodic_actions.Profile(num_profile_steps=5, logdir=workdir))
if start_step == 0:
step0_log = {'num_trainable_params': num_trainable_params}
if gflops:
step0_log['gflops'] = gflops
writer.write_scalars(1, step0_log)
for step in range(start_step + 1, total_steps + 1):
with jax.profiler.StepTraceContext('train', step_num=step):
train_batch = next(dataset.train_iter)
train_state, t_metrics, lr = train_step_pmapped(
train_state=train_state, batch=train_batch)
# This will accumulate metrics in TPU memory up to the point that we log
# them. This is no problem for small metrics but may be a problem for
# large (e.g. segmentation) metrics. An alternative is to set
# `log_summary_steps` to a small number, or to use
# `train_utils.unreplicate_and_get` here instead of right before writing
# summaries, but that means in each step, we have data transfer between
# tpu and host, which might slow down the training.
train_metrics.append(t_metrics)
# Additional training logs: learning rate:
extra_training_logs.append({'learning_rate': lr})
for h in hooks:
h(step)
chrono.pause() # Below are once-in-a-while ops -> pause.
###################### LOG TRAIN SUMMARY ########################
if (step % log_summary_steps == 1) or (step == total_steps):
if lead_host:
chrono.tick(step, writer=writer)
      # train_metrics is a list of dictionaries of metrics, where the shape of
# the metrics[key] is [n_local_devices]. However, because metric functions
# have a psum, we have already summed across the whole sharded batch, and
# what's returned is n_local_devices copies of the same summed metric.
# So we do unreplicate and fetch them to host using `unreplicate_and_get`.
train_summary = train_utils.log_train_summary(
step=step,
train_metrics=jax.tree_map(train_utils.unreplicate_and_get,
train_metrics),
extra_training_logs=jax.tree_map(train_utils.unreplicate_and_get,
extra_training_logs),
writer=writer)
# Reset metric accumulation for next evaluation cycle.
train_metrics, extra_training_logs = [], []
################### EVALUATION #######################
if (step % log_eval_steps == 1) or (step == total_steps):
with report_progress.timed('eval'):
eval_metrics = []
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(
train_state)
for _ in range(steps_per_eval):
eval_batch = next(dataset.valid_iter)
e_metrics, e_output, e_batch = eval_step_pmapped(
train_state=train_state, batch=eval_batch)
eval_metrics.append(train_utils.unreplicate_and_get(e_metrics))
if compute_global_metrics:
# Unreplicate outputs of eval_step_pmapped that are coming from
# `lax.all_gather`, fetch to the host and add to the Evaluator:
e_batch_mask = train_utils.unreplicate_and_get(
e_batch['batch_mask']).astype(bool)
          # Classification: 'label', regression: 'targets'
t_key = 'label' if 'label' in e_batch else 'targets'
global_metrics_evaluator.add_batch_of_examples(
target=train_utils.unreplicate_and_get(
e_batch[t_key])[e_batch_mask],
output=train_utils.unreplicate_and_get(e_output)
[e_batch_mask])
del e_batch, e_output, e_batch_mask
eval_global_metrics_summary = None
if compute_global_metrics:
if (len(global_metrics_evaluator) !=
dataset.meta_data['num_eval_examples']):
# Make sure no example is lost (specially in multi-host setup).
raise ValueError(f'Number of eval examples should be '
f'{dataset.meta_data["num_eval_examples"]}, '
f'but it is {len(global_metrics_evaluator)}.')
eval_global_metrics_summary = (
global_metrics_evaluator.compute_metrics(
clear_annotations=True))
eval_summary = train_utils.log_eval_summary(
step=step,
eval_metrics=eval_metrics,
extra_eval_summary=eval_global_metrics_summary,
writer=writer)
writer.flush()
del eval_metrics, eval_global_metrics_summary
##################### CHECKPOINTING ###################
if ((step % checkpoint_steps == 0 and step > 0) or
(step == total_steps)) and config.checkpoint:
with report_progress.timed('checkpoint'):
# Sync model state across replicas.
train_state = train_utils.sync_model_state_across_replicas(train_state)
if lead_host:
train_state.replace( # pytype: disable=attribute-error
accum_train_time=chrono.accum_train_time)
train_utils.save_checkpoint(workdir, train_state)
##################### FEWSHOT EVALUATION ############################
if 'fewshot' in config:
# Compute few-shot on-the-fly evaluation.
if (step % config.fewshot.log_eval_steps == 1) or (step == total_steps):
with report_progress.timed('fewshot'):
results = fewshotter.run_all(train_state, config.fewshot.datasets)
fewshotter.log_fewshot_summary(
writer=writer, step=step, results=results)
del results
writer.write_scalars(step, {'zz/epoch': step / steps_per_epoch})
writer.flush()
chrono.resume() # un-pause now
# Wait until computations are done before exiting.
jax.random.normal(jax.random.PRNGKey(0), ()).block_until_ready()
  # Return the train and eval summary after last step for regression testing.
return train_state, train_summary, eval_summary
| 41.412475 | 81 | 0.694782 | [
"Apache-2.0"
] | YYCOCO/scenic | scenic/projects/baselines/bert/trainer.py | 20,582 | Python |
__source__ = 'https://leetcode.com/problems/delete-node-in-a-linked-list/description/'
# https://github.com/kamyu104/LeetCode/blob/master/Python/delete-node-in-a-linked-list.py
# Time: O(1)
# Space: O(1)
#
# Description: Leetcode # 237. Delete Node in a Linked List
#
# Write a function to delete a node (except the tail) in a singly linked list,
# given only access to that node.
#
# Suppose the linked list is 1 -> 2 -> 3 -> 4 and you are given the third node
# with value 3, the linked list should become 1 -> 2 -> 4 after calling your function.
#
# Companies
# Adobe Apple Microsoft
# Related Topics
# Linked List
# Similar Questions
# Remove Linked List Elements
#
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
import unittest
class Solution:
# @param {ListNode} node
# @return {void} Do not return anything, modify node in-place instead.
def deleteNode(self, node):
if node and node.next:
node_to_delete = node.next
node.val = node_to_delete.val
node.next = node_to_delete.next
del node_to_delete
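# Illustrative example (not in the original notes), assuming the in-place copy
# trick above: for 1 -> 2 -> 3 -> 4, deleting the node holding 3 yields 1 -> 2 -> 4.
def _example_delete_middle_node():
    head = ListNode(1)
    head.next = ListNode(2)
    head.next.next = ListNode(3)
    head.next.next.next = ListNode(4)
    Solution().deleteNode(head.next.next)  # "delete" the node with value 3 in place
    values = []
    node = head
    while node:
        values.append(node.val)
        node = node.next
    assert values == [1, 2, 4]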
class TestMethods(unittest.TestCase):
def test_Local(self):
self.assertEqual(1, 1)
if __name__ == '__main__':
unittest.main()
Java = '''
# Thought: https://leetcode.com/problems/delete-node-in-a-linked-list/solution/
Thought: We can't really delete the given node, but we can achieve the same effect
by copying the next node's data into it and then removing the next node.
/**
* Definition for singly-linked list.
* public class ListNode {
* int val;
* ListNode next;
* ListNode(int x) { val = x; }
* }
*/
# 0ms 100%
class Solution {
public void deleteNode(ListNode node) {
node.val = node.next.val;
node.next = node.next.next;
}
}
# 0ms 100%
class Solution {
public void deleteNode(ListNode node) {
if (node == null || node.next == null) {
return;
}
while (node.next.next != null) {
node.val = node.next.val;
node = node.next;
}
node.val = node.next.val;
node.next = null;
}
}
''' | 27.085366 | 100 | 0.642954 | [
"Apache-2.0"
] | JulyKikuAkita/PythonPrac | cs15211/DeleteNodrinaLinkedList.py | 2,221 | Python |
from unittest import TestCase
from btcmagic import transaction, convert
import os
import json
class TestTransaction(TestCase):
def setUp(self):
self.tx_bin = convert.hex_to_bytes(
'0100000001637aaf20d708fcff67bb688af6e41d1807e6883f736c50eacb6042bf6e6c829c010000008c493046022100da1e59d78bb88ca7c3e13a4a6f4e259d5dd8cb177d5f79199bf024b1f57121d50221008d1d9838606a62ed4bd011a6ce8a2042ae2dc38fd05381b50aa388a1c8bd9150014104d3b615c609e48ae81389f6617b50473bf4c93f63c9853cd038aa4f00a989ebd62ae8253555e24c88b939817da18cd4e7263fda6a0e815097589bb90a5a6b3ff1ffffffff03b9000000000000001976a9149fe14d50c95abd6ecddc5d61255cfe5aebeba7e988ac57300f00000000001976a914c0492db5f283a22274ef378cdffbe5ecbe29862b88ac00000000000000000a6a0810e2cdc1af05180100000000')
self.tx_obj = {
'ins': [
{
'sequence': 4294967295,
'script': b'I0F\x02!\x00\xda\x1eY\xd7\x8b\xb8\x8c\xa7\xc3\xe1:JoN%\x9d]\xd8\xcb\x17}_y\x19\x9b\xf0$\xb1\xf5q!\xd5\x02!\x00\x8d\x1d\x988`jb\xedK\xd0\x11\xa6\xce\x8a B\xae-\xc3\x8f\xd0S\x81\xb5\n\xa3\x88\xa1\xc8\xbd\x91P\x01A\x04\xd3\xb6\x15\xc6\t\xe4\x8a\xe8\x13\x89\xf6a{PG;\xf4\xc9?c\xc9\x85<\xd08\xaaO\x00\xa9\x89\xeb\xd6*\xe8%5U\xe2L\x88\xb99\x81}\xa1\x8c\xd4\xe7&?\xdaj\x0e\x81P\x97X\x9b\xb9\nZk?\xf1',
'outpoint': {'index': 1, 'hash': b'\x9c\x82ln\xbfB`\xcb\xeaPls?\x88\xe6\x07\x18\x1d\xe4\xf6\x8ah\xbbg\xff\xfc\x08\xd7 \xafzc'}
}
],
'locktime': 0,
'version': 1,
'outs': [
{
'value': 185,
'script': b'v\xa9\x14\x9f\xe1MP\xc9Z\xbdn\xcd\xdc]a%\\\xfeZ\xeb\xeb\xa7\xe9\x88\xac'
},
{
'value': 995415,
'script': b'v\xa9\x14\xc0I-\xb5\xf2\x83\xa2"t\xef7\x8c\xdf\xfb\xe5\xec\xbe)\x86+\x88\xac'
},
{
'value': 0,
'script': b'j\x08\x10\xe2\xcd\xc1\xaf\x05\x18\x01'
}
]
}
def test_deserialization(self):
tx_obj = transaction.deserialize(self.tx_bin)
self.assertEqual(tx_obj, self.tx_obj)
def test_serialization(self):
tx_bin = transaction.serialize(self.tx_obj)
self.assertEqual(tx_bin, self.tx_bin)
class TestSighash(TestCase):
def setUp(self):
loc = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
with open(os.path.join(loc, 'sighash.json')) as f:
self.data = json.load(f)
def test_sighash(self):
first = True
for vector in self.data:
# Ignore first header row in the JSON.
if first:
first = False
continue
tx = transaction.deserialize(convert.hex_to_bytes(vector[0]))
script = convert.hex_to_bytes(vector[1])
index = int(vector[2])
hashtype = int(vector[3]) & 0xffffffff # This must be unsigned int
            sighash = convert.hex_to_bytes(vector[4])[::-1]  # Vectors give the sighash in reversed (uint256 display) byte order
my_sighash = transaction.sighash(tx, index, script, hashtype)
self.assertEqual(
sighash,
my_sighash,
'hashtype = {:x}'.format(hashtype)
)
| 44.733333 | 571 | 0.613115 | [
"MIT"
] | Dirbaio/btcmagic | btcmagic/test_transaction.py | 3,355 | Python |
from os import environ
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent.parent
DEBUG = True
CRON_ENABLED = False
if 'SIMONE_DB_NAME' in environ:
DATABASES = {
'default': {
'ENGINE': 'mysql.connector.django',
'NAME': environ['SIMONE_DB_NAME'],
'USER': environ['SIMONE_DB_USER'],
'PASSWORD': environ['SIMONE_DB_PASSWORD'],
'HOST': environ['SIMONE_DB_HOST'],
'PORT': environ.get('SIMONE_DB_PORT', '3306'),
'CONN_MAX_AGE': 300,
}
}
else:
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db' / 'db.sqlite3',
}
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'simple': {
'format': '%(asctime)s %(levelname)-5s %(name)s %(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%SZ',
}
},
'handlers': {
'console': {
'class': 'logging.StreamHandler',
'level': 'DEBUG',
'formatter': 'simple',
},
'file': {
'class': 'logging.handlers.WatchedFileHandler',
'level': 'DEBUG',
'formatter': 'simple',
'filename': 'django.log',
},
},
'root': {'level': 'DEBUG', 'handlers': ('console', 'file')},
'loggers': {
'django.db.backends': {
# comment out to see db queries
'level': 'INFO'
},
'slack_bolt': {
# super noisy
'level': 'INFO'
},
},
}
| 25.727273 | 73 | 0.490577 | [
"MIT"
] | ross/simone | simone/settings/dev.py | 1,698 | Python |
"""Nyamuk event."""
import socket
import nyamuk_const as NC
#mqtt event
EV_CONNACK = NC.CMD_CONNACK
EV_PUBLISH = NC.CMD_PUBLISH
EV_SUBACK = NC.CMD_SUBACK
#non mqtt event
EV_NET_ERR = 1000
class BaseEvent:
"""Event Base Class."""
def __init__(self, tipe):
self.type = tipe
class EventConnack(BaseEvent):
"""CONNACK received."""
def __init__(self, ret_code, session_present = 0):
BaseEvent.__init__(self, NC.CMD_CONNACK)
self.ret_code = ret_code
# v3.1.1 only
self.session_present = session_present
class EventPublish(BaseEvent):
"""PUBLISH received."""
def __init__(self, msg):
BaseEvent.__init__(self, NC.CMD_PUBLISH)
self.msg = msg
class EventSuback(BaseEvent):
"""SUBACK received."""
def __init__(self, mid, granted_qos):
BaseEvent.__init__(self, NC.CMD_SUBACK)
self.mid = mid
self.granted_qos = granted_qos
class EventUnsuback(BaseEvent):
"""UNSUBACK received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_UNSUBACK)
self.mid = mid
class EventPuback(BaseEvent):
"""PUBACK received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBACK)
self.mid = mid
class EventPubrec(BaseEvent):
"""PUBREC received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBREC)
self.mid = mid
class EventPubrel(BaseEvent):
"""PUBREL received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBREL)
self.mid = mid
class EventPubcomp(BaseEvent):
"""PUBCOMP received."""
def __init__(self, mid):
BaseEvent.__init__(self, NC.CMD_PUBCOMP)
self.mid = mid
class EventNeterr(BaseEvent):
"""Network error event."""
def __init__(self, errnum, msg):
BaseEvent.__init__(self, EV_NET_ERR)
self.errnum = errnum
self.msg = msg
class EventPingResp(BaseEvent):
"""PINGRESP received."""
def __init__(self):
BaseEvent.__init__(self, NC.CMD_PINGRESP)
| 25.432099 | 54 | 0.653883 | [
"BSD-2-Clause"
] | MasterScott/nyamuk | nyamuk/event.py | 2,060 | Python |
# Generated by Django 2.2.7 on 2019-11-20 17:08
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('quiz', '0002_question_image'),
]
operations = [
migrations.RemoveField(
model_name='question',
name='answer',
),
migrations.AddField(
model_name='choice',
name='is_correct',
field=models.BooleanField(default=False, help_text='Mark right if this is the right choice'),
),
]
| 23.521739 | 105 | 0.5878 | [
"MIT"
] | chgo19/KnowledgeAppOne | quiz/migrations/0003_auto_20191120_2238.py | 541 | Python |
import uuid
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django_countries.fields import CountryField
from django.contrib.auth.models import AbstractUser
class User(AbstractUser):
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
slug = models.CharField(max_length=256, unique=True, null=True)
country = CountryField(_('Country'), blank=True, blank_label=_('Country'))
email = models.EmailField(_('email address'), blank=True, unique=True)
preferred_language = models.CharField(_('Preferred Language'), null=True, blank=True, max_length=100, choices=settings.LANGUAGES)
| 42.5 | 133 | 0.780882 | [
"MIT"
] | BeDjango/bedjango-starter | project_name/users/models.py | 680 | Python |
#!/usr/bin/python
# coding: utf-8
"""A simple webserver."""
# python 2.7 compatibility
from __future__ import print_function, unicode_literals
# based on tornado
import tornado.ioloop
import tornado.web
import tornado.websocket
import sys
import json
def make_app():
"""Create and return the main Tornado web application.
It will listen on the port assigned via `app.listen(port)`,
and will run on Tornado's main ioloop,
which can be started with `tornado.ioloop.IOLoop.current().start()`.
"""
return tornado.web.Application([
(r"/connect", ClientSocket),
(r"/(.*)", tornado.web.StaticFileHandler, {
"path": "client",
"default_filename": "index.html"
}),
], debug=True)
class ClientSocket(tornado.websocket.WebSocketHandler):
"""ClientSocket represents an active websocket connection to a client.
"""
def open(self):
"""Called when a websocket connection is initiated."""
# print some info about the opened connection
print("WebSocket opened",
"from user at {}".format(self.request.remote_ip))
def on_message(self, message):
"""Called when a websocket client sends a message."""
# print the message to the console
print("client sent: {!r}".format(message))
# try to parse the message
try:
parsed_message = json.loads(message)
except ValueError:
print("Failed to parse message: {!r}".format(message))
return
# if there's a "message" in the message, echo it
if "message" in parsed_message:
response = {
"client" : str(self.request.remote_ip),
"message" : parsed_message["message"]
}
# respond to the message
m = json.dumps(response)
self.write_message(m)
else:
print("message unhandled.")
def on_close(self):
"""Called when a client connection is closed for any reason."""
# print some info about the closed connection
print("WebSocket closed",
"by user at {}".format(self.request.remote_ip))
print("close code: {}".format(self.close_code))
print("close reason: {!r}".format(self.close_reason))
if __name__ == "__main__":
# print some basic info about the system
print("Running Tornado Web Server {}".format(tornado.version))
print("Using Python {}".format(sys.version))
# start the webapp on port 8888
app = make_app()
app.listen(8888)
print("webapp started on port 8888")
tornado.ioloop.IOLoop.current().start()
| 31.627907 | 74 | 0.606985 | [
"CC0-1.0"
] | mesilliac/multitude | 6/server.py | 2,720 | Python |
"""Dataset, producer, and config metadata."""
import logging
import warnings
import sqlalchemy as sa
from .._globals import REGISTRY as registry
from .. import _tools
from .. import backend as _backend
__all__ = ['Dataset', 'Producer', 'Config']
log = logging.getLogger(__name__)
@registry.mapped
class Dataset:
"""Git commit loaded into the database."""
__tablename__ = '__dataset__'
id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
title = sa.Column(sa.Text, sa.CheckConstraint("title != ''"), nullable=False)
git_commit = sa.Column(sa.String(40), sa.CheckConstraint('length(git_commit) = 40'),
nullable=False, unique=True)
git_describe = sa.Column(sa.Text, sa.CheckConstraint("git_describe != ''"),
nullable=False, unique=True)
clean = sa.Column(sa.Boolean(create_constraint=True), nullable=False)
version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"))
exclude_raw = sa.Column(sa.Boolean(create_constraint=True), nullable=False)
@classmethod
def get_dataset(cls, *, bind, strict, fallback=None):
table = cls.__tablename__
log.debug('read %r from %r', table, bind)
try:
result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
except sa.exc.OperationalError as e:
if 'no such table' in e.orig.args[0]:
pass
else:
log.exception('error selecting %r', table)
if strict: # pragma: no cover
raise RuntimeError('failed to select %r from %r', table, bind) from e
return fallback
except ValueError as e:
log.exception('error selecting %r', table)
if 'not enough values to unpack' in e.args[0] and not strict:
return fallback
else: # pragma: no cover
raise RuntimeError('failed to select %r from %r', table, bind) from e
except Exception as e: # pragma: no cover
log.exception('error selecting %r', table)
raise RuntimeError('failed to select %r from %r', table, bind) from e
else:
return result
@classmethod
def log_dataset(cls, params, *,
ignore_dirty: bool = False,
also_print: bool = False, print_file=None):
name = cls.__tablename__
log.info('git describe %(git_describe)r clean: %(clean)r', params)
log.debug('%s.title: %r', name, params['title'])
log.info('%s.git_commit: %r', name, params['git_commit'])
if 'version' in params:
log.info('%s.version: %r', name, params['version'])
log.debug('%s.exclude_raw: %r', name, params['exclude_raw'])
if also_print or print_file is not None:
print('git describe {git_describe!r}'
' clean: {clean!r}'.format_map(params),
file=print_file)
print(f"{name}.title: {params['title']!r}'",
file=print_file)
print(f"{name}.git_commit: {params['git_commit']!r}",
file=print_file)
if 'version' in params:
print(f"{name}.version: {params['version']!r}",
file=print_file)
print(f"{name}.exclude_raw: {params['exclude_raw']!r}",
file=print_file)
if not params['clean'] and not ignore_dirty:
warnings.warn(f'{name} not clean,'
' pass ignore_dirty=True to disable') # pragma: no cover
@registry.mapped
class Producer:
"""Name and version of the package that created a __dataset__."""
__tablename__ = '__producer__'
id = sa.Column(sa.Integer, sa.CheckConstraint('id = 1'), primary_key=True)
name = sa.Column(sa.Text, sa.CheckConstraint("name != ''"),
unique=True, nullable=False)
version = sa.Column(sa.Text, sa.CheckConstraint("version != ''"),
nullable=False)
@classmethod
def get_producer(cls, *, bind):
result, = _backend.iterrows(sa.select(cls), mappings=True, bind=bind)
return result
@classmethod
def log_producer(cls, params, *, also_print=False, print_file=None):
name = cls.__tablename__
log.info('%s.name: %s', name, params['name'])
log.info('%s.version: %s', name, params['version'])
if also_print or print_file is not None:
print(f"{name}.name: {params['name']}", file=print_file)
print(f"{name}.version: {params['version']}", file=print_file)
@registry.mapped
class Config:
"""Configuration setting from ``glottolog/config/*.ini``."""
__tablename__ = '_config'
filename = sa.Column(sa.String, sa.CheckConstraint("filename != ''"),
primary_key=True)
section = sa.Column(sa.String, sa.CheckConstraint("section != ''"),
primary_key=True)
option = sa.Column(sa.String, sa.CheckConstraint("option != ''"),
primary_key=True)
value = sa.Column(sa.Text, sa.CheckConstraint("value != ''"),
nullable=False)
line = sa.Column(sa.Integer, sa.CheckConstraint('line > 0'),
nullable=False)
__table_args__ = (sa.UniqueConstraint(filename, line),
{'info': {'without_rowid': True}})
@classmethod
def load(cls, filename: str, *, bind,
_groupby_section=_tools.groupby_itemgetter(0)):
select_values = (sa.select(Config.section, Config.option, Config.value)
.filter_by(filename=filename)
.order_by('section', 'option'))
result = _backend.iterrows(select_values, bind=bind)
return {section: {option: value for _, option, value in grp}
for section, grp in _groupby_section(result)}
| 38.083333 | 89 | 0.586265 | [
"MIT"
] | glottolog/treedb | treedb/backend/models.py | 5,941 | Python |
import logging
import multiprocessing
import os
import signal
import sys
import time
from typing import Any
from datastore.reader.app import register_services
from gunicorn.app.base import BaseApplication
from .shared.env import is_dev_mode
from .shared.interfaces.logging import LoggingModule
from .shared.interfaces.wsgi import WSGIApplication
register_services()
# ATTENTION: We use the Python builtin logging module. To change this use
# something like "import custom_logging as logging".
DEFAULT_ADDRESSES = {
"ActionView": "0.0.0.0:9002",
"PresenterView": "0.0.0.0:9003",
}
class OpenSlidesBackendGunicornApplication(BaseApplication): # pragma: no cover
"""
Standalone application class for Gunicorn. It prepares Gunicorn for using
OpenSlidesBackendWSGIApplication via OpenSlidesBackendWSGIContainer either
with action component or with presenter component.
"""
def __init__(self, view_name: str, *args: Any, **kwargs: Any) -> None:
# Setup global loglevel.
if is_dev_mode():
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
self.view_name = view_name
if self.view_name not in ("ActionView", "PresenterView"):
raise ValueError(
f"View name has to be ActionView or PresenterView, not {self.view_name}."
)
logger.debug(f"Create gunicorn application for {self.view_name}.")
super().__init__(*args, **kwargs)
def load_config(self) -> None:
dev_mode = is_dev_mode()
options = {
"bind": DEFAULT_ADDRESSES[self.view_name],
"worker_tmp_dir": "/dev/shm", # See https://pythonspeed.com/articles/gunicorn-in-docker/
"timeout": int(os.environ.get("OPENSLIDES_BACKEND_WORKER_TIMEOUT", "30")),
"loglevel": "debug" if dev_mode else "info",
"reload": dev_mode,
"reload_engine": "auto", # This is the default however.
}
for key, value in options.items():
self.cfg.set(key, value)
def load(self) -> WSGIApplication:
# We import this here so Gunicorn can use its reload feature properly.
from .wsgi import create_wsgi_application
# TODO: Fix this typing problem.
logging_module: LoggingModule = logging # type: ignore
return create_wsgi_application(logging_module, self.view_name)
def start_action_server() -> None: # pragma: no cover
OpenSlidesBackendGunicornApplication(view_name="ActionView").run()
def start_presenter_server() -> None: # pragma: no cover
OpenSlidesBackendGunicornApplication(view_name="PresenterView").run()
def start_them_all() -> None: # pragma: no cover
print(
f"Start all components in child processes. Parent process id is {os.getpid()}."
)
processes = {
"action": multiprocessing.Process(target=start_action_server),
"presenter": multiprocessing.Process(target=start_presenter_server),
}
for process in processes.values():
process.start()
def sigterm_handler(signalnum: int, current_stack_frame: Any) -> None:
strsignal = signal.strsignal # type: ignore
print(
f"Parent process {os.getpid()} received {strsignal(signalnum)} "
"signal. Terminate all child processes first."
)
for child in multiprocessing.active_children():
child.terminate()
child.join()
print(f"Parent process {os.getpid()} terminated successfully.")
sys.exit(0)
signal.signal(signal.SIGTERM, sigterm_handler)
signal.signal(signal.SIGINT, sigterm_handler)
while True:
for name, process in processes.items():
if not process.is_alive():
process.join()
print(
f"Component {name} terminated. Terminate all other components now."
)
for other_name, other_process in processes.items():
if name != other_name:
other_process.terminate()
other_process.join()
print("Parent process terminated.")
sys.exit(1)
time.sleep(0.1)
def main() -> None: # pragma: no cover
component = os.environ.get("OPENSLIDES_BACKEND_COMPONENT", "all")
if component == "action":
start_action_server()
elif component == "presenter":
start_presenter_server()
elif component == "all":
start_them_all()
else:
print(
f"Error: OPENSLIDES_BACKEND_COMPONENT must not be {component}.",
file=sys.stderr,
)
sys.stderr.flush()
sys.exit(1)
sys.exit(0)
| 34.591241 | 101 | 0.643807 | [
"MIT"
] | GabrielInTheWorld/openslides-backend | openslides_backend/main.py | 4,739 | Python |
import logging
from datetime import datetime
from dateutil import parser as DatetimeParser
def dicom_name(names: list) -> str:
s = "^".join(names).upper()
return s
def dicom_date(dt: datetime) -> str:
s = dt.strftime("%Y%m%d")
return s
def dicom_time(dt: datetime) -> str:
s = dt.strftime("%H%M%S")
return s
def dicom_datetime(dt: datetime) -> (str, str):
d = dicom_date(dt)
t = dicom_time(dt)
return d, t
def parse_dicom_datetime(dts: str, tms: str = None) -> datetime:
if tms:
dts = dts + tms
# GE Scanner dt format
try:
ts = datetime.strptime( dts, "%Y%m%d%H%M%S")
return ts
except ValueError:
# Wrong format
pass
# Siemens scanners use fractional seconds
try:
ts = datetime.strptime( dts, "%Y%m%d%H%M%S.%f")
return ts
except ValueError:
# Wrong format
pass
# Unknown format, fall back on guessing
try:
# Parser does _not_ like fractional seconds
dts = dts.split(".")[0]
ts = DatetimeParser.parse(dts)
return ts
except ValueError:
# Wrong format
pass
logger = logging.getLogger("DcmStrings")
logger.error(f"Failed to parse date time string: {dts}")
def date_str_to_dicom(dstr):
dt = DatetimeParser.parse(dstr)
dcm_dt = dicom_date(dt)
return dcm_dt
| 20.969697 | 64 | 0.604769 | [
"MIT"
] | derekmerck/diana2 | package/diana/utils/dicom/strings.py | 1,384 | Python |
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2016 Wind River Systems, Inc.
#
from cgtsclient.common import base
from cgtsclient import exc
CREATION_ATTRIBUTES = ['servicename', 'state']
class SmNodes(base.Resource):
def __repr__(self):
return "<SmNodes %s>" % self._info
class SmNodesManager(base.Manager):
resource_class = SmNodes
@staticmethod
def _path(id=None):
return '/v1/servicenodes/%s' % id if id else '/v1/servicenodes'
def list(self):
return self._list(self._path(), "nodes")
def get(self, nodes_id):
try:
return self._list(self._path(nodes_id))[0]
except IndexError:
return None
def create(self, **kwargs):
new = {}
for (key, value) in kwargs.items():
if key in CREATION_ATTRIBUTES:
new[key] = value
else:
raise exc.InvalidAttribute()
return self._create(self._path(), new)
def delete(self, nodes_id):
return self._delete(self._path(nodes_id))
def update(self, nodes_id, patch):
return self._update(self._path(nodes_id), patch)
| 27.968254 | 78 | 0.646425 | [
"Apache-2.0"
] | ChantYuCN/cgts-client | cgtsclient/v1/sm_service_nodes.py | 1,763 | Python |
import codecs
import csv
import datetime
import logging
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from todo.models import Task, TaskList
log = logging.getLogger(__name__)
class CSVImporter:
"""Core upsert functionality for CSV import, for re-use by `import_csv` management command, web UI and tests.
Supplies a detailed log of what was and was not imported at the end. See README for usage notes.
"""
def __init__(self):
self.errors = []
self.upserts = []
self.summaries = []
self.line_count = 0
self.upsert_count = 0
def upsert(self, fileobj, as_string_obj=False):
"""Expects a file *object*, not a file path. This is important because this has to work for both
the management command and the web uploader; the web uploader will pass in in-memory file
with no path!
Header row is:
        Title, Group, Task List, Created By, Created Date, Due Date, Completed, Assigned To, Note, Priority
"""
if as_string_obj:
# fileobj comes from mgmt command
csv_reader = csv.DictReader(fileobj)
else:
# fileobj comes from browser upload (in-memory)
csv_reader = csv.DictReader(codecs.iterdecode(fileobj, "utf-8"))
# DI check: Do we have expected header row?
header = csv_reader.fieldnames
expected = [
"Title",
"Group",
"Task List",
"Created By",
"Created Date",
"Due Date",
"Completed",
"Assigned To",
"Note",
"Priority",
]
if header != expected:
self.errors.append(
f"Inbound data does not have expected columns.\nShould be: {expected}"
)
return
for row in csv_reader:
self.line_count += 1
newrow = self.validate_row(row)
if newrow:
# newrow at this point is fully validated, and all FK relations exist,
# e.g. `newrow.get("Assigned To")`, is a Django User instance.
assignee = newrow.get("Assigned To") if newrow.get("Assigned To") else None
created_at = (
newrow.get("Created Date")
if newrow.get("Created Date")
else datetime.datetime.today()
)
due_date = newrow.get("Due Date") if newrow.get("Due Date") else None
priority = newrow.get("Priority") if newrow.get("Priority") else None
obj, created = Task.objects.update_or_create(
created_by=newrow.get("Created By"),
task_list=newrow.get("Task List"),
title=newrow.get("Title"),
defaults={
"assigned_to": assignee,
"completed": newrow.get("Completed"),
"created_at": created_at,
"due_date": due_date,
"note": newrow.get("Note"),
"priority": priority,
},
)
self.upsert_count += 1
msg = (
f'Upserted task {obj.id}: "{obj.title}"'
f' in list "{obj.task_list}" (group "{obj.task_list.group}")'
)
self.upserts.append(msg)
self.summaries.append(f"Processed {self.line_count} CSV rows")
self.summaries.append(f"Upserted {self.upsert_count} rows")
self.summaries.append(f"Skipped {self.line_count - self.upsert_count} rows")
return {"summaries": self.summaries, "upserts": self.upserts, "errors": self.errors}
def validate_row(self, row):
"""Perform data integrity checks and set default values. Returns a valid object for insertion, or False.
Errors are stored for later display. Intentionally not broken up into separate validator functions because
        there are interdependencies, such as checking for existing `creator` in one place and then using
        that creator for group membership checks in others."""
row_errors = []
# #######################
# Task creator must exist
if not row.get("Created By"):
msg = f"Missing required task creator."
row_errors.append(msg)
creator = get_user_model().objects.filter(username=row.get("Created By")).first()
if not creator:
msg = f"Invalid task creator {row.get('Created By')}"
row_errors.append(msg)
# #######################
# If specified, Assignee must exist
assignee = None # Perfectly valid
if row.get("Assigned To"):
assigned = get_user_model().objects.filter(username=row.get("Assigned To"))
if assigned.exists():
assignee = assigned.first()
else:
msg = f"Missing or invalid task assignee {row.get('Assigned To')}"
row_errors.append(msg)
# #######################
# Group must exist
try:
target_group = Group.objects.get(name=row.get("Group"))
except Group.DoesNotExist:
msg = f"Could not find group {row.get('Group')}."
row_errors.append(msg)
target_group = None
# #######################
# Task creator must be in the target group
if creator and target_group not in creator.groups.all():
msg = f"{creator} is not in group {target_group}"
row_errors.append(msg)
# #######################
# Assignee must be in the target group
if assignee and target_group not in assignee.groups.all():
msg = f"{assignee} is not in group {target_group}"
row_errors.append(msg)
# #######################
# Task list must exist in the target group
try:
tasklist = TaskList.objects.get(name=row.get("Task List"), group=target_group)
row["Task List"] = tasklist
except TaskList.DoesNotExist:
msg = f"Task list {row.get('Task List')} in group {target_group} does not exist"
row_errors.append(msg)
# #######################
# Validate Dates
datefields = ["Due Date", "Created Date"]
for datefield in datefields:
datestring = row.get(datefield)
if datestring:
valid_date = self.validate_date(datestring)
if valid_date:
row[datefield] = valid_date
else:
msg = f"Could not convert {datefield} {datestring} to valid date instance"
row_errors.append(msg)
# #######################
# Group membership checks have passed
row["Created By"] = creator
row["Group"] = target_group
if assignee:
row["Assigned To"] = assignee
# Set Completed
row["Completed"] = row["Completed"] == "Yes"
# #######################
if row_errors:
self.errors.append({self.line_count: row_errors})
return False
# No errors:
return row
def validate_date(self, datestring):
"""Inbound date string from CSV translates to a valid python date."""
try:
date_obj = datetime.datetime.strptime(datestring, "%Y-%m-%d")
return date_obj
except ValueError:
return False
| 37.737624 | 114 | 0.543093 | [
"BSD-3-Clause"
] | paiuolo/django-todo | todo/operations/csv_importer.py | 7,623 | Python |