id | text | dataset_id
---|---|---
/js.html5shiv-3.7.3.tar.gz/js.html5shiv-3.7.3/README.txt | js.html5shiv
************
Introduction
============
This library packages `html5shiv`_ for `fanstatic`_.
.. _`fanstatic`: http://fanstatic.org
.. _`html5shiv`: https://github.com/aFarkas/html5shiv
This requires integration between your web framework and ``fanstatic``,
and making sure that the original resources (shipped in the ``resources``
directory in ``js.html5shiv``) are published to some URL.
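A typical integration is to ``need()`` the resource from your view code so that
fanstatic injects the script tag for you (a minimal sketch; the exact resource
name exported by ``js.html5shiv`` is an assumption here)::

    from js.html5shiv import html5shiv

    def my_view(request):
        html5shiv.need()  # fanstatic includes the html5shiv script in the rendered page
        ...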
Updating
========
The latest version of the library can be downloaded using the following
commands::
cd js/html5shiv/resources
wget https://github.com/aFarkas/html5shiv/raw/master/src/html5shiv.js -O html5shiv.js
wget https://github.com/aFarkas/html5shiv/raw/master/src/html5shiv-printshiv.js -O html5shiv-printshiv.js
wget https://github.com/aFarkas/html5shiv/raw/master/dist/html5shiv.js -O html5shiv.min.js
wget https://github.com/aFarkas/html5shiv/raw/master/dist/html5shiv-printshiv.js -O html5shiv-printshiv.min.js
The explicit ``-O`` output names ensure the downloaded files get distinct names,
since the upstream repository uses the same filenames for both the source and
the minified builds.
| PypiClean |
/WINGECHR_PACKAGE_TEMPLATE-0.0.0.tar.gz/WINGECHR_PACKAGE_TEMPLATE-0.0.0/server_django/project/settings.py |
import logging
import os
import sys
from urllib.parse import urljoin
from _local.settings import * # noqa: F403
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"main.apps.AppConfig",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
]
},
}
]
ROOT_URLCONF = "project.urls"
WSGI_APPLICATION = "project.wsgi.application"
if "test" in sys.argv:
DATABASES = {"default": TEST_DATABASE} # noqa: F405
else:
DATABASES = {"default": DEFAULT_DATABASE} # noqa: F405
USE_I18N = True
USE_L10N = True
USE_TZ = True
STATIC_URL = urljoin(BASE_URL, "static/") # noqa: F405
STATIC_ROOT = os.path.join(BASE_DIR, "_static/")
MEDIA_URL = urljoin(BASE_URL, "media/") # noqa: F405
MEDIA_ROOT = os.path.join(BASE_DIR, "_local/media/")
AUTH_PASSWORD_VALIDATORS = []
AUTHENTICATION_BACKENDS = ("django.contrib.auth.backends.ModelBackend",)
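# Logging: the root logger writes to _local/log.txt and to the console;
# the level follows the DEBUG setting (INFO when DEBUG, WARNING otherwise).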
LOGLEVEL = logging.INFO if DEBUG else logging.WARNING # noqa: F405
logger = logging.getLogger()
logFormatter = logging.Formatter("%(asctime)s [%(levelname)s %(funcName)s] %(message)s")
fileHandler = logging.FileHandler(os.path.join(BASE_DIR, "_local", "log.txt"))
fileHandler.setFormatter(logFormatter)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
logger.setLevel(LOGLEVEL)
logger.addHandler(fileHandler)
logger.addHandler(consoleHandler)
IMAGE = {
"SIZE": 1024,
"EXTENSION": "jpg",
"QUALITY": 75,
"TMP_DIR": "image_tmp",
"DIR": "img",
}
DOWNLOAD_XLSX_FILENAME = "Projekte.xlsx" | PypiClean |
/v2/model/list_tag_req.py |
import pprint
import re
import six
class ListTagReq:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'tags': 'list[TagValues]',
'tags_any': 'list[object]',
'not_tags': 'list[object]',
'not_tags_any': 'list[object]',
'limit': 'int',
'offset': 'int',
'action': 'str',
'matches': 'list[Tag]'
}
attribute_map = {
'tags': 'tags',
'tags_any': 'tags_any',
'not_tags': 'not_tags',
'not_tags_any': 'not_tags_any',
'limit': 'limit',
'offset': 'offset',
'action': 'action',
'matches': 'matches'
}
def __init__(self, tags=None, tags_any=None, not_tags=None, not_tags_any=None, limit=None, offset=None, action=None, matches=None):
"""ListTagReq - a model defined in huaweicloud sdk"""
self._tags = None
self._tags_any = None
self._not_tags = None
self._not_tags_any = None
self._limit = None
self._offset = None
self._action = None
self._matches = None
self.discriminator = None
if tags is not None:
self.tags = tags
if tags_any is not None:
self.tags_any = tags_any
if not_tags is not None:
self.not_tags = not_tags
if not_tags_any is not None:
self.not_tags_any = not_tags_any
if limit is not None:
self.limit = limit
if offset is not None:
self.offset = offset
self.action = action
if matches is not None:
self.matches = matches
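# A minimal usage sketch (illustrative only; the TagValues constructor
# arguments shown here are assumptions -- see the TagValues model in this SDK):
#
#   req = ListTagReq(action="filter", limit=100, offset=0,
#                    tags=[TagValues(key="env", values=["prod"])])
#   body = req.to_dict()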
@property
def tags(self):
"""Gets the tags of this ListTagReq.
Included tags. At most 10 keys are allowed, and each key may have at most 10 values. The structure must not be missing, and a key cannot be null or an empty string. Keys must not be duplicated, and values under the same key must not be duplicated.
:return: The tags of this ListTagReq.
:rtype: list[TagValues]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this ListTagReq.
Included tags. At most 10 keys are allowed, and each key may have at most 10 values. The structure must not be missing, and a key cannot be null or an empty string. Keys must not be duplicated, and values under the same key must not be duplicated.
:param tags: The tags of this ListTagReq.
:type: list[TagValues]
"""
self._tags = tags
@property
def tags_any(self):
"""Gets the tags_any of this ListTagReq.
At most 10 keys are allowed, and each key may have at most 10 values. The structure must not be missing, and a key cannot be null or an empty string. Keys must not be duplicated, and values under the same key must not be duplicated.
:return: The tags_any of this ListTagReq.
:rtype: list[object]
"""
return self._tags_any
@tags_any.setter
def tags_any(self, tags_any):
"""Sets the tags_any of this ListTagReq.
At most 10 keys are allowed, and each key may have at most 10 values. The structure must not be missing, and a key cannot be null or an empty string. Keys must not be duplicated, and values under the same key must not be duplicated.
:param tags_any: The tags_any of this ListTagReq.
:type: list[object]
"""
self._tags_any = tags_any
@property
def not_tags(self):
"""Gets the not_tags of this ListTagReq.
At most 10 keys are allowed, and each key may have at most 10 values. The structure must not be missing, and a key cannot be null or an empty string. Keys must not be duplicated, and values under the same key must not be duplicated.
:return: The not_tags of this ListTagReq.
:rtype: list[object]
"""
return self._not_tags
@not_tags.setter
def not_tags(self, not_tags):
"""Sets the not_tags of this ListTagReq.
At most 10 keys are allowed, and each key may have at most 10 values. The structure must not be missing, and a key cannot be null or an empty string. Keys must not be duplicated, and values under the same key must not be duplicated.
:param not_tags: The not_tags of this ListTagReq.
:type: list[object]
"""
self._not_tags = not_tags
@property
def not_tags_any(self):
"""Gets the not_tags_any of this ListTagReq.
At most 10 keys are allowed, and each key may have at most 10 values. The structure must not be missing, and a key cannot be null or an empty string. Keys must not be duplicated, and values under the same key must not be duplicated.
:return: The not_tags_any of this ListTagReq.
:rtype: list[object]
"""
return self._not_tags_any
@not_tags_any.setter
def not_tags_any(self, not_tags_any):
"""Sets the not_tags_any of this ListTagReq.
At most 10 keys are allowed, and each key may have at most 10 values. The structure must not be missing, and a key cannot be null or an empty string. Keys must not be duplicated, and values under the same key must not be duplicated.
:param not_tags_any: The not_tags_any of this ListTagReq.
:type: list[object]
"""
self._not_tags_any = not_tags_any
@property
def limit(self):
"""Gets the limit of this ListTagReq.
Number of resources returned per page. Value range: 1-1000. Notes: if action is filter, the default is 1000; if action is count, this parameter is not used.
:return: The limit of this ListTagReq.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListTagReq.
Number of resources returned per page. Value range: 1-1000. Notes: if action is filter, the default is 1000; if action is count, this parameter is not used.
:param limit: The limit of this ListTagReq.
:type: int
"""
self._limit = limit
@property
def offset(self):
"""Gets the offset of this ListTagReq.
Pagination start offset; the query starts from the resource after the offset. Value range: 0-2147483647. Default: 0. Notes: this parameter is not needed when querying the first page. For subsequent pages, pass the value returned in the previous page's response body. If action is filter, the default is 0 and the value must be a non-negative number; if action is count, this parameter is not used.
:return: The offset of this ListTagReq.
:rtype: int
"""
return self._offset
@offset.setter
def offset(self, offset):
"""Sets the offset of this ListTagReq.
Pagination start offset; the query starts from the resource after the offset. Value range: 0-2147483647. Default: 0. Notes: this parameter is not needed when querying the first page. For subsequent pages, pass the value returned in the previous page's response body. If action is filter, the default is 0 and the value must be a non-negative number; if action is count, this parameter is not used.
:param offset: The offset of this ListTagReq.
:type: int
"""
self._offset = offset
@property
def action(self):
"""Gets the action of this ListTagReq.
Operation identifier (case sensitive). Valid values: filter - paginated filter query; count - query the total count only.
:return: The action of this ListTagReq.
:rtype: str
"""
return self._action
@action.setter
def action(self, action):
"""Sets the action of this ListTagReq.
Operation identifier (case sensitive). Valid values: filter - paginated filter query; count - query the total count only.
:param action: The action of this ListTagReq.
:type: str
"""
self._action = action
@property
def matches(self):
"""Gets the matches of this ListTagReq.
key is the field to match and value is the value to match against. If value is an empty string the match is exact; otherwise it is a fuzzy match.
:return: The matches of this ListTagReq.
:rtype: list[Tag]
"""
return self._matches
@matches.setter
def matches(self, matches):
"""Sets the matches of this ListTagReq.
key is the field to match and value is the value to match against. If value is an empty string the match is exact; otherwise it is a fuzzy match.
:param matches: The matches of this ListTagReq.
:type: list[Tag]
"""
self._matches = matches
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListTagReq):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/enough-2.2.35.tar.gz/enough-2.2.35/playbooks/debops/docs/ansible/roles/ferm/index.rst | .. Copyright (C) 2013-2017 Maciej Delmanowski <[email protected]>
.. Copyright (C) 2015-2017 Robin Schneider <[email protected]>
.. Copyright (C) 2016 Reto Gantenbein <[email protected]>
.. Copyright (C) 2014-2017 DebOps <https://debops.org/>
.. SPDX-License-Identifier: GPL-3.0-only
.. _debops.ferm:
debops.ferm
===========
.. include:: man_description.rst
:start-line: 9
.. toctree::
:maxdepth: 2
getting-started
rules
guides
defaults/main
defaults-detailed
Copyright
---------
.. literalinclude:: ../../../../ansible/roles/ferm/COPYRIGHT
..
Local Variables:
mode: rst
ispell-local-dictionary: "american"
End:
| PypiClean |
/taas_api_client-1.1.1-py3-none-any.whl/taas_api/client.py | import requests
import logging
from urllib.parse import urljoin
from taas_api import data
logger = logging.getLogger(__name__)
class BaseClient:
def __init__(self, url: str, auth_token: str = None):
# TAAS URL is used in development; TAAS_IP is used for the real pipeline
self.taas_url = url
self.auth_token = auth_token
def post(self, path: str, data: dict):
logger.info(f"POST {path}")
return self._handle_response(
requests.post(
urljoin(self.taas_url, path), headers=self._common_headers(), json=data
)
)
def get(self, path: str, params: dict ={}):
logger.info(f"GET {path}")
return self._handle_response(
requests.get(
urljoin(self.taas_url, path), headers=self._common_headers(), params=params
)
)
def delete(self, path: str):
logger.info(f"DELETE {path}")
return self._handle_response(
requests.delete(
urljoin(self.taas_url, path), headers=self._common_headers()
)
)
def _handle_response(self, response):
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
logger.warning(response.content)
raise
return response.json()
def _common_headers(self):
return {
"Authorization": f"Token {self.auth_token}",
}
class Client(BaseClient):
def get_order(self, order_id):
return self.get(path=f"/api/order/{order_id}")
def get_balances(self):
return self.get(path=f"/api/balances/")
def get_orders(self):
return self.get(path=f"/api/orders/")
def place_multi_order(self, request: data.PlaceMultiOrderRequest):
if not isinstance(request, data.PlaceMultiOrderRequest):
raise ValueError(f"Expecting request to be of type {data.PlaceMultiOrderRequest}")
validate_success, errors = request.validate()
if not validate_success:
raise ValueError(str(errors))
return self.post(path=f"/api/multi_orders/", data=request.to_post_body())
def place_order(self, request: data.PlaceOrderRequest):
if not isinstance(request, data.PlaceOrderRequest):
raise ValueError(f"Expecting request to be of type {data.PlaceOrderRequest}")
validate_success, error = request.validate()
if not validate_success:
raise ValueError(error)
return self.post(path="/api/orders/", data=request.to_post_body())
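# A minimal usage sketch (illustrative; the URL and token are placeholders and
# the PlaceOrderRequest fields depend on your strategy -- see taas_api.data):
#
#   client = Client(url="https://taas.example.com", auth_token="<token>")
#   order_request = data.PlaceOrderRequest(...)  # fill in the required fields
#   response = client.place_order(order_request)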
def cancel_order(self, order_id):
return self.delete(path=f"/api/order/{order_id}") | PypiClean |
/contentai_metadata_flatten-1.4.1.tar.gz/contentai_metadata_flatten-1.4.1/contentai_metadata_flatten/parsers/__init__.py |
import pkgutil
import importlib
import json
import re
import math
import gzip
from os import path
from pathlib import Path
import logging
import warnings
from sys import stdout as STDOUT
import pandas as pd
import contentaiextractor as contentai
class Flatten():
# https://cloud.google.com/video-intelligence/docs/reference/rest/Shared.Types/Likelihood
GCP_LIKELIHOOD_MAP = { "LIKELIHOOD_UNSPECIFIED": 0.0, "VERY_UNLIKELY": 0.1, "UNLIKELY": 0.25,
"POSSIBLE": 0.5, "LIKELY": 0.75, "VERY_LIKELY": 0.9 }
TAG_TRANSCRIPT = "_transcript_"
ROUND_DIGITS = 5
SCORE_DEFAULT = 0.5
def __init__(self, path_content, logger=None):
super().__init__()
self.extractor_keys = []
self.extractor_name = None
self.path_content = path_content
if logger is None:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(STDOUT)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
self.logger = logger
@staticmethod
def known_types():
"""Return the output types for this generator
:return: list. List of output types (file types) for this generator
"""
return None
@staticmethod
def default_config():
"""Return default configuration dictionary for parsing..."""
return {"verbose": True}
def json_load(self, path_file):
"""Helper to read dict object from JSON
:param path_file: (str): Path for source file (can be gzipped)
:return: dict. The loaded dict or an empty dict (`{}`) on error
"""
if path.exists(path_file):
if path_file.endswith(".gz"):
infile = gzip.open(path_file, 'rt')
else:
infile = open(path_file, 'rt')
try:
return json.load(infile)
except json.decoder.JSONDecodeError as e:
return {}
except UnicodeDecodeError as e:
return {}
return {}
def text_load(self, path_file):
"""Helper to read text object
:param path_file: (str): Path for source file (can be gzipped)
:return: str. The loaded text or an empty string (`""`) on error
"""
if path.exists(path_file):
if path_file.endswith(".gz"):
infile = gzip.open(path_file, 'rt')
else:
infile = open(path_file, 'rt')
try:
return infile.read()
except UnicodeDecodeError as e:
return ""
return ""
def get_extractor_results(self, extractor_name, path, force_retrieve=False, is_json=True):
"""Get results from remote or local location. Return a dictionary or string (depending on is_json), empty if not found"""
result_data = {} if is_json else ""
if force_retrieve or (len(self.extractor_keys) < 1 or self.extractor_name != extractor_name): # safe way to request without 404/500 error
self.extractor_name = extractor_name
try:
self.extractor_keys = self.get_extractor_keys(extractor_name)
self.logger.info(f"Retrieved available keys {self.extractor_keys} for extractor {self.extractor_name} ")
if self.extractor_keys is None:
self.extractor_keys = []
except Exception as e:
self.logger.info(f"Failed to get extractor keys for extractor {self.extractor_name} (error: '{e}')")
if self.extractor_keys is not None and path in self.extractor_keys: # have the keys, check for presence
try:
if is_json:
_local_data = contentai.get_json(extractor_name, path)
else:
_local_data = contentai.get(extractor_name, path)
result_data = _local_data
except Exception as e:
self.logger.warning(f"Failed to get key data '{path}' for extractor '{extractor_name}'")
if not result_data: # do we need to load it locally?
for dir_search in self.recursive_search(self.path_content, extractor_name):
path_file = dir_search.joinpath(path)
if is_json:
result_data = self.json_load(str(path_file))
if not result_data:
result_data = self.json_load(str(path_file)+".gz")
else: # not JSON, just return string?
result_data = self.text_load(str(path_file))
if not result_data:
result_data = self.text_load(str(path_file)+".gz")
return result_data
def get_extractor_keys(self, extractor_name):
return contentai.keys(extractor_name)
def recursive_search(self, path_root, extractor_name):
"""Attempt to find a specific extractor directory under the desired path"""
list_dirs = []
for path_search in Path(path_root).rglob(extractor_name):
if path_search.is_dir():
list_dirs.append(path_search)
return list_dirs
# import other modules
_modules = []
for module_finder, extractor_name, _ in pkgutil.iter_modules(__path__):
parser_module = module_finder.find_module(extractor_name).load_module()
parser_obj = getattr(parser_module, "Parser") # get class template
if parser_obj is not None:
_modules.append({'obj':parser_obj, 'types':parser_obj.known_types(), 'name':extractor_name})
def get_by_type(type_list=None):
"""Get parsers with a specific filter for type.
:param type_list: (list) list of tag types required in output (e.g. ['shot', 'tag']) (default=None or all available)
:return list: list of raw "Parser()" classes that are instantiated with input file paths
"""
local_list = []
if type_list is None:
local_list = [local_obj for local_obj in _modules]
else:
if type(type_list) != list:
type_list = [type_list]
type_list = set(type_list) # convert to set
local_list = [local_obj for local_obj in _modules if local_obj['types'] is None or len(type_list.intersection(set(local_obj['types']))) > 0]
return local_list
def get_by_name(name_limit=None):
"""Get parsers with a specific filter for name.
:param name_limit: (str) list of tag type required in output (e.g. 'dsai_metadata', 'azure') (default=None or all available)
:return list: list of raw "Parser()" classes that are instantiated with input file paths
"""
local_list = []
if name_limit is None:
local_list = [local_obj for local_obj in _modules]
else: # update from "in" to "==" for name compare in v 1.2.2 to avoid partial match (e.g. metadata -> dsai_metadata)
local_list = [local_obj for local_obj in _modules if name_limit == local_obj['name']]
return local_list
def empty_dataframe():
return pd.DataFrame([], columns=["time_begin", "time_end", "source_event", "tag_type",
"time_event", "tag", "score", "details", "extractor"]) | PypiClean |
/AIfES_Converter-1.0.0-py3-none-any.whl/aifes/keras_extractor/keras_extractor.py | import warnings
from ..support.aifes_model import *
try:
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import ELU, LeakyReLU, ReLU, Softmax
from tensorflow.keras.activations import sigmoid, softsign, tanh
except ImportError as err:
raise ImportError("Tensorflow is not installed. Please make sure that you install Tensorflow in the right version "
"(>= 2.4) to convert your model from Keras to AIfES.")
from packaging import version
try:
assert version.parse(tf.version.VERSION) >= version.parse('2.4.0')
except AssertionError as err:
raise ImportError("Tensorflow is not installed in the required version. Please install version 2.4 and above.")
class KerasExtractor:
"""Keras Extractor Class. Provides interface functions for the AifesCodeGenerator to extract values from a
Keras model"""
# Activation Functions available in Dense Layer
ACT_FUNCTIONS = ['elu', 'leakyrelu', 'leaky_relu', 'relu', 'softsign', 'softmax', 'sigmoid', 'tanh']
# Separate Activation Functions as Keras.Layers
ACT_LAYER = [ELU, LeakyReLU, ReLU, softsign, Softmax, sigmoid, tanh]
def __init__(self, model: keras.Model, use_transposed_layers=False):
"""
Initialize the KerasExtractor
:param model: Keras Model which should be converted
:param use_transposed_layers: If transposed layers should be used for the dense layers
"""
self._model = model
self._aifes_model = None
self._use_transposed_layers = use_transposed_layers
self._has_bias = True
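# A minimal usage sketch (illustrative; assumes `model` is a trained keras.Model
# built only from the supported Dense/activation layers):
#
#   extractor = KerasExtractor(model, use_transposed_layers=False)
#   aifes_structure = extractor.extractor_structure()
#   aifes_weights = extractor.extractor_values()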
def extractor_structure(self) -> AifesModel:
"""
Extracts the Keras model and saves it as an AIfES Model representation
:return: Returns a representation of the Keras model as AIfES Model
"""
# Local variables
# Contains the AIfES structure after extraction
aifes_fnn_structure = []
# Get layer count
layer_count = len(self._model.layers)
aifes_layer_count = layer_count + 1
# Go through each layer and extract values from it
for x in range(0, layer_count, 1):
curr_layer = self._model.layers[x]
# Check if current layer is a dense layer
if self._is_dense_layer(curr_layer):
# Check if first layer, then we need to add an input layer
if x == 0:
aifes_fnn_structure.append(AifesLayer_Input(self._model.layers[x].input_shape[1],
self._model.layers[x].input_shape[1]))
# Add corresponding dense layer depending on if transposed layers should be used
if not self._use_transposed_layers:
aifes_fnn_structure.append(AifesLayer_Dense(self._model.layers[x].units,
self._model.layers[x].units))
else:
aifes_fnn_structure.append(AifesLayer_DenseTranspose(self._model.layers[x].units,
self._model.layers[x].units))
# Check if dense layer contains activation, if not, no activation is added
if self._is_dense_layer_with_activation(curr_layer):
aifes_fnn_structure.append(self._get_activation_function(curr_layer))
else:
if self._is_unsupported_activation_function(curr_layer):
raise ValueError(f"Unsupported activation function in layer {x}. See "
f"https://fraunhofer-ims.github.io/AIfES_for_Arduino/#OverviewFeatures "
f"for available activation functions.")
# Check if current layer is an activation layer and is after the first layer
elif self._is_activation_layer(curr_layer) and x > 0:
# Add activation layer to AIfES model
aifes_fnn_structure.append(self._get_activation_layer(curr_layer))
# Layer is neither a dense nor activation layer, raise error
else:
if x == 0:
raise ValueError(f"First layer needs to be a dense layer. Got '{curr_layer}' instead.")
else:
raise ValueError(f"Unsupported layer chosen. Got '{curr_layer}', but must be one of "
"Dense, ELU, LeakyReLU, linear, relu, sigmoid, softmax, softsign or "
"tanh")
# Create AIfES Model and return it
self._aifes_model = AifesModel(aifes_fnn_structure, aifes_layer_count, None)
return self._aifes_model
def extractor_values(self):
"""
Extracts the values of a Keras model and returns them
:return: Extracted weights
"""
if not self._has_bias:
raise ValueError("Your model needs dense layer with bias for a conversion to AIfES with weights. Please "
"ensure that your layers have bias.")
weights = self._model.get_weights()
return weights
def get_transpose_status(self) -> bool:
"""
Returns status, if transposed layers should be used
:return: Bool, True if transposed layers are used, otherwise False
"""
return self._use_transposed_layers
def _is_dense_layer(self, curr_layer) -> bool:
"""
Checks if current layer is a correctly formated dense layer
:param curr_layer: Layer of the model, which should be checked
:return: True, if current layer is dense layer, otherwise False
"""
if curr_layer.__class__.__name__ == 'Dense':
if self._is_correctly_configured_dense_layer(curr_layer):
return True
else:
return False
else:
return False
def _is_dense_layer_with_activation(self, curr_layer) -> bool:
"""
Checks whether the layer's activation function is part of self.ACT_FUNCTIONS, i.e. whether it has a supported activation function. The linear activation is the default and is therefore not considered an activation function.
:param curr_layer: Current layer, which should be checked
:return: True, if activation function is set and supported, otherwise False
"""
# Get activation function
layer_config = curr_layer.get_config()
acti = layer_config["activation"]
# When configurable activation function, acti is of type dict. We need only the name, so we extract it here
if type(acti) is dict:
acti = acti['class_name'].lower()
# Check if acti is part of ACT_FUNCTIONS
if acti in self.ACT_FUNCTIONS:
return True
else:
return False
def _get_activation_function(self, curr_layer) -> AifesLayer:
"""
Returns the activation layer for AIfES of the curr_layer. Extracts the value from a dense layer with set activation function.
:param curr_layer: Current layer, from which the activation function should be extracted
:return: AifesLayer with the initialized AIfES activation layer
"""
# Get activation function
layer_config = curr_layer.get_config()
acti = layer_config["activation"]
# When configurable activation function, acti is of type dict. We need only the name, so we extract it here
if type(acti) is dict:
acti = acti['class_name'].lower()
# Return corresponding activation layer
if acti == 'elu':
if type(layer_config["activation"]) is dict:
return AifesLayer_Elu(layer_config["activation"]["config"]["alpha"])
else:
warnings.warn("Elu layer was not customized. The default alpha value of 1.0 is used. ")
return AifesLayer_Elu(alpha_value=1.0)
elif acti == 'leakyrelu':
if type(layer_config["activation"]) is dict:
return AifesLayer_Leaky_ReLU(layer_config["activation"]["config"]["alpha"])
else:
warnings.warn("LeakyRelu was not customized. The default alpha value of 0.3 is used. ")
return AifesLayer_Leaky_ReLU(alpha_value=0.3)
elif acti == 'leaky_relu':
warnings.warn("LeakyRelu was not customized. The default alpha value of 0.3 is used. ")
return AifesLayer_Leaky_ReLU(alpha_value=0.3)
elif acti == 'linear':
return AifesLayer_Linear()
elif acti == 'relu':
return AifesLayer_ReLU()
elif acti == 'sigmoid':
return AifesLayer_Sigmoid()
elif acti == 'softmax':
return AifesLayer_Softmax()
elif acti == 'softsign':
return AifesLayer_Softsign()
elif acti == 'tanh':
return AifesLayer_Tanh()
else:
raise ValueError(
"Unsupported activation in layer. Got " + str(acti) + ", but must be part of"
"ELU, LeakyReLU, linear, relu, sigmoid, softmax, softsign or tanh")
def _is_activation_layer(self, curr_layer) -> bool:
"""
Check if current layer is an activation layer. Checks self.ACT_LAYER if curr_layer is included.
:param curr_layer: Current layer
:return: True, if current layer is activation layer, otherwise False
"""
if type(curr_layer) in self.ACT_LAYER:
return True
else:
return False
def _get_activation_layer(self, curr_layer) -> AifesLayer:
"""
Returns the activation layer for AIfES of the curr_layer. Checks the type of the curr_layer. (Independent activation function, not set with Dense layer)
:param curr_layer: Current layer
:return: AifesLayer with the initialized AIfES activation layer
"""
layer_type = type(curr_layer)
if layer_type == ELU:
return AifesLayer_Elu(curr_layer.alpha)
elif layer_type == LeakyReLU:
return AifesLayer_Leaky_ReLU(curr_layer.alpha)
elif layer_type == ReLU:
return AifesLayer_ReLU()
elif layer_type == sigmoid:
return AifesLayer_Sigmoid()
elif layer_type == Softmax:
return AifesLayer_Softmax()
elif layer_type == softsign:
return AifesLayer_Softsign()
elif layer_type == tanh:
return AifesLayer_Tanh()
else:
raise ValueError("Unsupported activation layer " + str(layer_type) + ". Activation Layer needs to be of"
" type ELU, LeakyReLU, ReLU, Sigmoid, Softmax, Softsign or Tanh")
def _is_unsupported_activation_function(self, curr_layer):
# Get activation function
layer_config = curr_layer.get_config()
acti = layer_config["activation"]
# When configurable activation function, acti is of type dict. We need only the name, so we extract it here
if type(acti) is dict:
acti = acti['class_name'].lower()
if acti == 'linear':
return False
else:
return True
def _is_correctly_configured_dense_layer(self, curr_layer):
if str(curr_layer.dtype) != 'float32':
raise ValueError(f"A dense layer has not the data type 'float32', but instead {curr_layer.dtype}. Please "
f"change it to 'float32'")
if str(curr_layer.use_bias) != 'True':
self._has_bias = False
return True | PypiClean |
/django-planet-0.10.1.tar.gz/django-planet-0.10.1/planet/feeds.py |
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import linebreaks, escape
from django.utils.translation import ugettext_lazy as _
from planet.models import Post, Author
from tagging.models import Tag, TaggedItem
ITEMS_PER_FEED = getattr(settings, 'PLANET_ITEMS_PER_FEED', 50)
class PostFeed(Feed):
def __init__(self, *args, **kwargs):
super(PostFeed, self).__init__(*args, **kwargs)
self.site = Site.objects.get_current()
def title(self):
return _(u"%s latest posts") % (self.site.name, )
def link(self):
return reverse("planet_rss_feed")
def items(self):
return Post.objects.order_by('-date_modified')
def item_title(self, post):
return post.title
def item_description(self, post):
return post.content
def item_id(self, post):
return post.guid
def item_updated(self, post):
return post.date_modified
def item_published(self, post):
return post.date_created
def item_content(self, post):
return {"type" : "html", }, linebreaks(escape(post.content))
def item_links(self, post):
return [{"href" : reverse("planet_post_detail", args=(post.pk, post.get_slug()))}]
def item_authors(self, post):
return [{"name" : post.author}]
class AuthorFeed(PostFeed):
def get_object(self, request, author_id):
return get_object_or_404(Author, pk=author_id)
def title(self, author):
return _("Posts by %(author_name)s - %(site_name)s") %\
{'author_name': author.name, 'site_name': self.site.name}
def links(self, author):
return ({'href': reverse("planet_author_show", args=(author.pk, author.get_slug()))},)
def items(self, author):
return Post.objects.filter(authors=author,
).distinct().order_by("-date_created")[:ITEMS_PER_FEED]
class TagFeed(PostFeed):
def get_object(self, request, tag):
return get_object_or_404(Tag, name=tag)
def title(self, tag):
return _("Posts under %(tag)s tag - %(site_name)s") %\
{'tag': tag, 'site_name': self.site.name}
def links(self, tag):
return ({'href': reverse("planet_tag_detail", kwargs={"tag": tag.name})},)
def items(self, tag):
return TaggedItem.objects.get_by_model(
Post.objects.filter(feed__site=self.site), tag)\
.distinct().order_by("-date_created")[:ITEMS_PER_FEED]
class AuthorTagFeed(PostFeed):
def get_object(self, request, author_id, tag):
self.tag = tag
return get_object_or_404(Author, pk=author_id)
def title(self, author):
return _("Posts by %(author_name)s under %(tag)s tag - %(site_name)s")\
% {'author_name': author.name, 'tag': self.tag, 'site_name': self.site.name}
def links(self, author):
return ({'href': reverse("planet_by_tag_author_show", args=(author.pk, self.tag))},)
def items(self, author):
return TaggedItem.objects.get_by_model(
Post.objects.filter(feed__site=self.site, authors=author), self.tag)\
.distinct().order_by("-date_created")[:ITEMS_PER_FEED] | PypiClean |
/wapiti3-3.1.8-py3-none-any.whl/wapitiCore/attack/mod_wp_enum.py | import json
import re
import xml
import xml.etree.ElementTree as ET
from os.path import join as path_join
from typing import Match, Optional
from wapitiCore.attack.attack import Attack
from wapitiCore.definitions.fingerprint import NAME as TECHNO_DETECTED
from wapitiCore.definitions.fingerprint import WSTG_CODE as TECHNO_DETECTED_WSTG_CODE
from wapitiCore.definitions.fingerprint_webapp import NAME as WEB_APP_VERSIONED
from wapitiCore.main.log import log_blue, log_orange, logging
from wapitiCore.net.response import Response
from wapitiCore.net import Request
MSG_TECHNO_VERSIONED = "{0} {1} detected"
MSG_NO_WP = "No WordPress Detected"
MSG_WP_VERSION = "WordPress Version : {0}"
class ModuleWpEnum(Attack):
"""Detect WordPress Plugins with version."""
name = "wp_enum"
PAYLOADS_FILE_PLUGINS = "wordpress_plugins.txt"
PAYLOADS_FILE_THEMES = "wordpress_themes.txt"
def get_plugin(self):
with open(
path_join(self.DATA_DIR, self.PAYLOADS_FILE_PLUGINS),
errors="ignore",
encoding='utf-8'
) as plugin_list:
for line in plugin_list:
plugin = line.strip()
if plugin:
yield plugin
def get_theme(self):
with open(
path_join(self.DATA_DIR, self.PAYLOADS_FILE_THEMES),
errors="ignore",
encoding='utf-8'
) as theme_list:
for line in theme_list:
theme = line.strip()
if theme:
yield theme
async def detect_version(self, url: str):
rss_urls = ["feed/", "comments/feed/", "feed/rss/", "feed/rss2/"]
detected_version = None
for rss_url in rss_urls:
request = Request(f"{url}{'' if url.endswith('/') else '/'}{rss_url}", "GET")
response: Response = await self.crawler.async_send(request, follow_redirects=True)
if not response.content or response.is_error or "content-type" not in response.headers:
continue
if "xml" not in response.headers["content-type"]:
log_orange(f"Response content-type for {rss_url} is not XML")
continue
root = ET.fromstring(response.content)
if root is None:
continue
try:
generator_text = root.findtext('./channel/generator')
except xml.etree.ElementTree.ParseError:
continue
version: Match = re.search(r"\Ahttps?:\/\/wordpress\.(?:[a-z]+)\/\?v=(.*)\Z", generator_text)
if version is None:
continue
detected_version = version.group(1)
break
log_blue(
MSG_WP_VERSION,
detected_version or "N/A"
)
info_content = {"name": "WordPress", "versions": [], "categories": ["CMS", "Blogs"], "groups": ["Content"]}
if detected_version:
info_content["versions"].append(detected_version)
await self.add_vuln_info(
category=WEB_APP_VERSIONED,
request=request,
info=json.dumps(info_content)
)
await self.add_addition(
category=TECHNO_DETECTED,
request=request,
info=json.dumps(info_content)
)
async def detect_plugin(self, url):
for plugin in self.get_plugin():
if self._stop_event.is_set():
break
request = Request(f'{url}/wp-content/plugins/{plugin}/readme.txt', 'GET')
response = await self.crawler.async_send(request)
if response.is_success:
version = re.search(r'tag:\s*([\d.]+)', response.content)
# This check was added to detect an invalid "readme.txt" format, which can cause a crash
if version:
version = version.group(1)
else:
logging.warning("Readme.txt is not in a valid format")
version = ""
plugin_detected = {
"name": plugin,
"versions": [version],
"categories": ["WordPress plugins"],
"groups": ['Add-ons']
}
log_blue(
MSG_TECHNO_VERSIONED,
plugin,
version
)
await self.add_addition(
category=TECHNO_DETECTED,
request=request,
info=json.dumps(plugin_detected),
wstg=TECHNO_DETECTED_WSTG_CODE,
response=response
)
elif response.status == 403:
plugin_detected = {
"name": plugin,
"versions": [""],
"categories": ["WordPress plugins"],
"groups": ['Add-ons']
}
log_blue(
MSG_TECHNO_VERSIONED,
plugin,
[""]
)
await self.add_addition(
category=TECHNO_DETECTED,
request=request,
info=json.dumps(plugin_detected),
wstg=TECHNO_DETECTED_WSTG_CODE,
response=response
)
async def detect_theme(self, url):
for theme in self.get_theme():
if self._stop_event.is_set():
break
request = Request(f'{url}/wp-content/themes/{theme}/readme.txt', 'GET')
response = await self.crawler.async_send(request)
if response.is_success:
version = re.search(r'tag:\s*([\d.]+)', response.content)
# This check was added to detect an invalid "readme.txt" format, which can cause a crash
if version:
version = version.group(1)
else:
version = ""
theme_detected = {
"name": theme,
"versions": [version],
"categories": ["WordPress themes"],
"groups": ['Add-ons']
}
log_blue(
MSG_TECHNO_VERSIONED,
theme,
version
)
await self.add_addition(
category=TECHNO_DETECTED,
request=request,
info=json.dumps(theme_detected),
wstg=TECHNO_DETECTED_WSTG_CODE,
response=response
)
elif response.status == 403:
theme_detected = {
"name": theme,
"versions": [""],
"categories": ["WordPress themes"],
"groups": ['Add-ons']
}
log_blue(
MSG_TECHNO_VERSIONED,
theme,
[""]
)
await self.add_addition(
category=TECHNO_DETECTED,
request=request,
info=json.dumps(theme_detected),
wstg=TECHNO_DETECTED_WSTG_CODE,
response=response
)
@staticmethod
def check_wordpress(response: Response):
if re.findall('WordPress.*', response.content):
return True
return False
async def must_attack(self, request: Request, response: Optional[Response] = None):
if self.finished:
return False
if request.method == "POST":
return False
return request.url == await self.persister.get_root_url()
async def attack(self, request: Request, response: Optional[Response] = None):
self.finished = True
request_to_root = Request(request.url)
response = await self.crawler.async_send(request_to_root, follow_redirects=True)
if self.check_wordpress(response):
await self.detect_version(request_to_root.url)
log_blue("----")
log_blue("Enumeration of WordPress Plugins :")
await self.detect_plugin(request_to_root.url)
log_blue("----")
log_blue("Enumeration of WordPress Themes :")
await self.detect_theme(request_to_root.url)
else:
log_blue(MSG_NO_WP) | PypiClean |
/airtest-sd-1.0.tar.gz/airtest-sd-1.0/airtest/core/android/touch_methods/touch_proxy.py | import warnings
from collections import OrderedDict
from airtest.core.android.touch_methods.minitouch import Minitouch
from airtest.core.android.touch_methods.maxtouch import Maxtouch
from airtest.core.android.touch_methods.base_touch import BaseTouch
from airtest.core.android.constant import TOUCH_METHOD
from airtest.utils.logger import get_logger
LOGGING = get_logger(__name__)
class TouchProxy(object):
"""
Perform touch operation according to the specified method
"""
TOUCH_METHODS = OrderedDict()
def __init__(self, touch_method):
self.touch_method = touch_method
def __getattr__(self, name):
if name == "method_name":
return self.touch_method.METHOD_NAME
method = getattr(self.touch_method, name, getattr(self.touch_method.base_touch, name, None))
if method:
return method
else:
raise NotImplementedError("%s does not support %s method" %
(getattr(self.touch_method, "METHOD_NAME", ""), name))
@classmethod
def check_touch(cls, touch_impl):
try:
touch_impl.base_touch.install_and_setup()
except Exception as e:
LOGGING.error(e)
LOGGING.warning("%s setup up failed!" % touch_impl.METHOD_NAME)
return False
else:
return True
@classmethod
def auto_setup(cls, adb, default_method=None, ori_transformer=None, size_info=None, input_event=None):
"""
Args:
adb: :py:mod:`airtest.core.android.adb.ADB`
default_method: The default click method, such as "MINITOUCH"
ori_transformer: dev._touch_point_by_orientation
size_info: the result of dev.get_display_info()
input_event: dev.input_event
*args:
**kwargs:
Returns: TouchProxy object
Examples:
>>> dev = Android()
>>> touch_proxy = TouchProxy.auto_setup(dev.adb, ori_transformer=dev._touch_point_by_orientation)
>>> touch_proxy.touch((100, 100))
"""
if default_method and default_method in cls.TOUCH_METHODS:
touch_method = cls.TOUCH_METHODS[default_method].METHOD_CLASS(adb, size_info=size_info,
input_event=input_event)
impl = cls.TOUCH_METHODS[default_method](touch_method, ori_transformer)
if cls.check_touch(impl):
return TouchProxy(impl)
if not default_method:
for name, touch_impl in cls.TOUCH_METHODS.items():
touch_method = touch_impl.METHOD_CLASS(adb, size_info=size_info, input_event=input_event)
impl = touch_impl(touch_method, ori_transformer)
if cls.check_touch(impl):
return TouchProxy(impl)
# If both minitouch and maxtouch fail to initialize, fall back to adb touch
adb_touch = AdbTouchImplementation(adb)
warnings.warn("Currently using ADB touch, the efficiency may be very low.")
return TouchProxy(adb_touch)
def register_touch(cls):
TouchProxy.TOUCH_METHODS[cls.METHOD_NAME] = cls
return cls
class AdbTouchImplementation(object):
METHOD_NAME = TOUCH_METHOD.ADBTOUCH
def __init__(self, base_touch):
"""
:param base_touch: :py:mod:`airtest.core.android.adb.ADB`
"""
self.base_touch = base_touch
def touch(self, pos, duration=0.01):
if duration <= 0.01:
self.base_touch.touch(pos)
else:
self.swipe(pos, pos, duration=duration)
def swipe(self, p1, p2, duration=0.5, *args, **kwargs):
duration *= 1000
self.base_touch.swipe(p1, p2, duration=duration)
def teardown(self):
pass
@register_touch
class MinitouchImplementation(AdbTouchImplementation):
METHOD_NAME = TOUCH_METHOD.MINITOUCH
METHOD_CLASS = Minitouch
def __init__(self, minitouch, ori_transformer):
"""
:param minitouch: :py:mod:`airtest.core.android.touch_methods.minitouch.Minitouch`
:param ori_transformer: Android._touch_point_by_orientation()
"""
super(MinitouchImplementation, self).__init__(minitouch)
self.ori_transformer = ori_transformer
def touch(self, pos, duration=0.01):
pos = self.ori_transformer(pos)
self.base_touch.touch(pos, duration=duration)
def swipe(self, p1, p2, duration=0.5, steps=5, fingers=1):
p1 = self.ori_transformer(p1)
p2 = self.ori_transformer(p2)
if fingers == 1:
self.base_touch.swipe(p1, p2, duration=duration, steps=steps)
elif fingers == 2:
self.base_touch.two_finger_swipe(p1, p2, duration=duration, steps=steps)
else:
raise Exception("param fingers should be 1 or 2")
def pinch(self, center=None, percent=0.5, duration=0.5, steps=5, in_or_out='in'):
if center:
center = self.ori_transformer(center)
self.base_touch.pinch(center=center, percent=percent, duration=duration, steps=steps, in_or_out=in_or_out)
def swipe_along(self, coordinates_list, duration=0.8, steps=5):
pos_list = [self.ori_transformer(xy) for xy in coordinates_list]
self.base_touch.swipe_along(pos_list, duration=duration, steps=steps)
def two_finger_swipe(self, tuple_from_xy, tuple_to_xy, duration=0.8, steps=5, offset=(0, 50)):
tuple_from_xy = self.ori_transformer(tuple_from_xy)
tuple_to_xy = self.ori_transformer(tuple_to_xy)
self.base_touch.two_finger_swipe(tuple_from_xy, tuple_to_xy, duration=duration, steps=steps, offset=offset)
def perform(self, motion_events, interval=0.01):
self.base_touch.perform(motion_events, interval)
@register_touch
class MaxtouchImplementation(MinitouchImplementation):
METHOD_NAME = TOUCH_METHOD.MAXTOUCH
METHOD_CLASS = Maxtouch
def __init__(self, maxtouch, ori_transformer):
"""
New screen tap implementation that supports Android 10 and above
:param maxtouch: :py:mod:`airtest.core.android.touch_methods.maxtouch.Maxtouch`
:param ori_transformer: Android._touch_point_by_orientation()
"""
super(MaxtouchImplementation, self).__init__(maxtouch, ori_transformer)
def perform(self, motion_events, interval=0.01):
self.base_touch.perform(motion_events, interval) | PypiClean |
/nats-py-2.3.1.tar.gz/nats-py-2.3.1/nats/js/kv.py |
from __future__ import annotations
import asyncio
import datetime
from dataclasses import dataclass
from typing import TYPE_CHECKING, List, Optional
import nats.errors
import nats.js.errors
from nats.js import api
if TYPE_CHECKING:
from nats.js import JetStreamContext
KV_OP = "KV-Operation"
KV_DEL = "DEL"
KV_PURGE = "PURGE"
MSG_ROLLUP_SUBJECT = "sub"
class KeyValue:
"""
KeyValue uses the JetStream KeyValue functionality.
.. note::
This functionality is EXPERIMENTAL and may be changed in later releases.
::
import asyncio
import nats
async def main():
nc = await nats.connect()
js = nc.jetstream()
# Create a KV
kv = await js.create_key_value(bucket='MY_KV')
# Set and retrieve a value
await kv.put('hello', b'world')
entry = await kv.get('hello')
print(f'KeyValue.Entry: key={entry.key}, value={entry.value}')
# KeyValue.Entry: key=hello, value=world
await nc.close()
if __name__ == '__main__':
asyncio.run(main())
"""
@dataclass
class Entry:
"""
An entry from a KeyValue store in JetStream.
"""
bucket: str
key: str
value: Optional[bytes]
revision: Optional[int]
delta: Optional[int]
created: Optional[int]
operation: Optional[str]
@dataclass(frozen=True)
class BucketStatus:
"""
BucketStatus is the status of a KeyValue bucket.
"""
stream_info: api.StreamInfo
bucket: str
@property
def values(self) -> int:
"""
values returns the number of stored messages in the stream.
"""
return self.stream_info.state.messages
@property
def history(self) -> int:
"""
history returns the max msgs per subject.
"""
return self.stream_info.config.max_msgs_per_subject
@property
def ttl(self) -> Optional[float]:
"""
ttl returns the max age in seconds.
"""
if self.stream_info.config.max_age is None:
return None
return self.stream_info.config.max_age
def __init__(
self,
name: str,
stream: str,
pre: str,
js: JetStreamContext,
direct: bool,
) -> None:
self._name = name
self._stream = stream
self._pre = pre
self._js = js
self._direct = direct
async def get(self, key: str, revision: Optional[int] = None) -> Entry:
"""
get returns the latest value for the key.
"""
entry = None
try:
entry = await self._get(key, revision)
except nats.js.errors.KeyDeletedError as err:
raise nats.js.errors.KeyNotFoundError(err.entry, err.op)
return entry
async def _get(self, key: str, revision: Optional[int] = None) -> Entry:
msg = None
subject = f"{self._pre}{key}"
try:
if revision:
msg = await self._js.get_msg(
self._stream,
seq=revision,
direct=self._direct,
)
else:
msg = await self._js.get_msg(
self._stream,
subject=subject,
seq=revision,
direct=self._direct,
)
except nats.js.errors.NotFoundError:
raise nats.js.errors.KeyNotFoundError
# Check that the message retrieved from the stream actually belongs to the requested key.
if subject != msg.subject:
raise nats.js.errors.KeyNotFoundError(
message=f"expected '{subject}', but got '{msg.subject}'"
)
entry = KeyValue.Entry(
bucket=self._name,
key=key,
value=msg.data,
revision=msg.seq,
delta=None,
created=None,
operation=None,
)
# Check headers to see if deleted or purged.
if msg.headers:
op = msg.headers.get(KV_OP, None)
if op == KV_DEL or op == KV_PURGE:
raise nats.js.errors.KeyDeletedError(entry, op)
return entry
async def put(self, key: str, value: bytes) -> int:
"""
put will place the new value for the key into the store
and return the revision number.
"""
pa = await self._js.publish(f"{self._pre}{key}", value)
return pa.seq
async def create(self, key: str, value: bytes) -> int:
"""
create will add the key/value pair iff it does not exist.
"""
pa = None
try:
pa = await self.update(key, value, last=0)
except nats.js.errors.KeyWrongLastSequenceError as err:
# In case of attempting to recreate an already deleted key,
# the client would get a KeyWrongLastSequenceError. When this happens,
# we need to fetch the latest revision number and attempt the update again.
try:
# NOTE: This reimplements the following behavior from Go client.
#
# Since we have tombstones for DEL ops for watchers, this could be from that
# so we need to double check.
#
# Get latest revision to update in case it was deleted but if it was not
await self._get(key)
# No exception so not a deleted key, so reraise the original KeyWrongLastSequenceError.
# If it was deleted then the error exception will contain metadata
# to recreate using the last revision.
raise err
except nats.js.errors.KeyDeletedError as err:
pa = await self.update(key, value, last=err.entry.revision)
return pa
async def update(
self, key: str, value: bytes, last: Optional[int] = None
) -> int:
"""
update will update the value iff the latest revision matches.
"""
hdrs = {}
if not last:
last = 0
hdrs[api.Header.EXPECTED_LAST_SUBJECT_SEQUENCE] = str(last)
pa = None
try:
pa = await self._js.publish(
f"{self._pre}{key}", value, headers=hdrs
)
except nats.js.errors.APIError as err:
# Check for a BadRequest::KeyWrongLastSequenceError error code.
if err.err_code == 10071:
raise nats.js.errors.KeyWrongLastSequenceError(
description=err.description
)
else:
raise err
return pa.seq
async def delete(self, key: str, last: Optional[int] = None) -> bool:
"""
delete will place a delete marker and remove all previous revisions.
"""
hdrs = {}
hdrs[KV_OP] = KV_DEL
if last and last > 0:
hdrs[api.Header.EXPECTED_LAST_SUBJECT_SEQUENCE] = str(last)
await self._js.publish(f"{self._pre}{key}", headers=hdrs)
return True
async def purge(self, key: str) -> bool:
"""
purge will remove the key and all revisions.
"""
hdrs = {}
hdrs[KV_OP] = KV_PURGE
hdrs[api.Header.ROLLUP] = MSG_ROLLUP_SUBJECT
await self._js.publish(f"{self._pre}{key}", headers=hdrs)
return True
async def purge_deletes(self, olderthan: int = 30 * 60) -> bool:
"""
purge will remove all current delete markers older.
:param olderthan: time in seconds
"""
watcher = await self.watchall()
delete_markers = []
async for update in watcher:
if update.operation == KV_DEL or update.operation == KV_PURGE:
delete_markers.append(update)
for entry in delete_markers:
keep = 0
subject = f"{self._pre}{entry.key}"
duration = datetime.datetime.now() - entry.created
if olderthan > 0 and olderthan > duration.total_seconds():
keep = 1
await self._js.purge_stream(
self._stream, subject=subject, keep=keep
)
return True
async def status(self) -> BucketStatus:
"""
status retrieves the status and configuration of a bucket.
"""
info = await self._js.stream_info(self._stream)
return KeyValue.BucketStatus(stream_info=info, bucket=self._name)
class KeyWatcher:
def __init__(self, js):
self._js = js
self._updates = asyncio.Queue(maxsize=256)
self._sub = None
self._pending: Optional[int] = None
# init done means that the nil marker has been sent,
# once this is sent it won't be sent anymore.
self._init_done = False
async def stop(self):
"""
stop will stop this watcher.
"""
await self._sub.unsubscribe()
async def updates(self, timeout=5):
"""
updates fetches the next update from a watcher.
"""
try:
return await asyncio.wait_for(self._updates.get(), timeout)
except asyncio.TimeoutError:
raise nats.errors.TimeoutError
def __aiter__(self):
return self
async def __anext__(self):
entry = await self._updates.get()
if not entry:
raise StopAsyncIteration
else:
return entry
async def watchall(self, **kwargs) -> KeyWatcher:
"""
watchall returns a KeyValue watcher that matches all the keys.
"""
return await self.watch(">", **kwargs)
async def keys(self, **kwargs) -> List[str]:
"""
keys will return a list of the keys from a KeyValue store.
"""
watcher = await self.watchall(
ignore_deletes=True,
meta_only=True,
)
keys = []
async for key in watcher:
# None entry is used to signal that there is no more info.
if not key:
break
keys.append(key.key)
await watcher.stop()
if not keys:
raise nats.js.errors.NoKeysError
return keys
async def history(self, key: str) -> List[Entry]:
"""
history retrieves a list of the entries so far.
"""
watcher = await self.watch(key, include_history=True)
entries = []
async for entry in watcher:
# None entry is used to signal that there is no more info.
if not entry:
break
entries.append(entry)
await watcher.stop()
if not entries:
raise nats.js.errors.NoKeysError
return entries
async def watch(
self,
keys,
headers_only=False,
include_history=False,
ignore_deletes=False,
meta_only=False,
) -> KeyWatcher:
"""
watch will fire a callback when a key that matches the keys
pattern is updated.
The first update after starting the watch is None in case
there are no pending updates.
"""
subject = f"{self._pre}{keys}"
watcher = KeyValue.KeyWatcher(self)
init_setup: asyncio.Future[bool] = asyncio.Future()
async def watch_updates(msg):
if not init_setup.done():
await asyncio.wait_for(init_setup, timeout=self._js._timeout)
meta = msg.metadata
op = None
if msg.header and KV_OP in msg.header:
op = msg.header.get(KV_OP)
# keys() uses this
if ignore_deletes:
if (op == KV_PURGE or op == KV_DEL):
if meta.num_pending == 0 and not watcher._init_done:
await watcher._updates.put(None)
watcher._init_done = True
return
entry = KeyValue.Entry(
bucket=self._name,
key=msg.subject[len(self._pre):],
value=msg.data,
revision=meta.sequence.stream,
delta=meta.num_pending,
created=meta.timestamp,
operation=op,
)
await watcher._updates.put(entry)
# When there are no more updates send an empty marker
# to signal that it is done, this will unblock iterators
if meta.num_pending == 0 and (not watcher._init_done):
await watcher._updates.put(None)
watcher._init_done = True
deliver_policy = None
if not include_history:
deliver_policy = api.DeliverPolicy.LAST_PER_SUBJECT
watcher._sub = await self._js.subscribe(
subject,
cb=watch_updates,
ordered_consumer=True,
deliver_policy=deliver_policy,
headers_only=meta_only,
)
await asyncio.sleep(0)
# Check from consumer info what is the number of messages
# awaiting to be consumed to send the initial signal marker.
try:
cinfo = await watcher._sub.consumer_info()
watcher._pending = cinfo.num_pending
# If no delivered and/or pending messages, then signal
# that this is the start.
# The consumer subscription will start receiving messages
# so need to check those that have already made it.
received = watcher._sub.delivered
init_setup.set_result(True)
if cinfo.num_pending == 0 and received == 0:
await watcher._updates.put(None)
watcher._init_done = True
except Exception as err:
init_setup.cancel()
await watcher._sub.unsubscribe()
raise err
return watcher | PypiClean |
/django3-dashing-0.5.2.tar.gz/django3-dashing-0.5.2/dashing/static/dashing-config.js |
var dashboard = new Dashboard();
dashboard.addWidget('clock_widget', 'Clock');
dashboard.addWidget('current_valuation_widget', 'Number', {
getData: function () {
$.extend(this.scope, {
title: 'Current Valuation',
moreInfo: 'In billions',
updatedAt: 'Last updated at 14:10',
detail: '64%',
value: '$35',
icon: 'fa fa-arrow-up'
});
}
});
dashboard.addWidget('buzzwords_widget', 'List', {
getData: function () {
$.extend(this.scope, {
title: 'Buzzwords',
moreInfo: '# of times said around the office',
updatedAt: 'Last updated at 18:58',
data: [{ label: 'Exit strategy', value: 24 },
{ label: 'Web 2.0', value: 12 },
{ label: 'Turn-key', value: 2 },
{ label: 'Enterprise', value: 12 },
{ label: 'Pivoting', value: 3 },
{ label: 'Leverage', value: 10 },
{ label: 'Streamlininess', value: 4 },
{ label: 'Paradigm shift', value: 6 },
{ label: 'Synergy', value: 7 }]
});
}
});
dashboard.addWidget('convergence_widget', 'Graph', {
getData: function () {
$.extend(this.scope, {
title: 'Convergence',
value: '41',
moreInfo: 'Above is total number',
xAxisType: 'time', // 'time' for a time axis, otherwise the default; the Y axis uses the default number format
properties: { renderer: 'area', color: '#fff' },
data: [
{ x: 0, y: 40 },
{ x: 1, y: 49 },
{ x: 2, y: 38 },
{ x: 3, y: 30 },
{ x: 4, y: 32 }
]
});
},
interval: 120000 // every two minutes
});
dashboard.addWidget('completion_widget', 'Knob', {
getData: function () {
$.extend(this.scope, {
title: 'Completion',
updatedAt: 'Last updated at 14:10',
detail: 'today 10',
value: '35',
data: {
angleArc: 250,
angleOffset: -125,
displayInput: true,
displayPrevious: true,
step: 1,
min: 1,
max: 99,
readOnly: true,
format: function (value) { return value + '%'; }
}
});
}
}); | PypiClean |
/boto_brimley-2.61.0.tar.gz/boto_brimley-2.61.0/boto/route53/connection.py |
from boto.route53 import exception
import random
import uuid
import xml.sax
import boto
from boto.connection import AWSAuthConnection
from boto import handler
import boto.jsonresponse
from boto.route53.record import ResourceRecordSets
from boto.route53.zone import Zone
from boto.compat import six, urllib
HZXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
HZPXML = """<?xml version="1.0" encoding="UTF-8"?>
<CreateHostedZoneRequest xmlns="%(xmlns)s">
<Name>%(name)s</Name>
<VPC>
<VPCId>%(vpc_id)s</VPCId>
<VPCRegion>%(vpc_region)s</VPCRegion>
</VPC>
<CallerReference>%(caller_ref)s</CallerReference>
<HostedZoneConfig>
<Comment>%(comment)s</Comment>
</HostedZoneConfig>
</CreateHostedZoneRequest>"""
# boto.set_stream_logger('dns')
class Route53Connection(AWSAuthConnection):
DefaultHost = 'route53.amazonaws.com'
"""The default Route53 API endpoint to connect to."""
Version = '2013-04-01'
"""Route53 API version."""
XMLNameSpace = 'https://route53.amazonaws.com/doc/2013-04-01/'
"""XML schema for this Route53 API version."""
def __init__(self, aws_access_key_id=None, aws_secret_access_key=None,
port=None, proxy=None, proxy_port=None,
host=DefaultHost, debug=0, security_token=None,
validate_certs=True, https_connection_factory=None,
profile_name=None):
super(Route53Connection, self).__init__(
host,
aws_access_key_id, aws_secret_access_key,
True, port, proxy, proxy_port, debug=debug,
security_token=security_token,
validate_certs=validate_certs,
https_connection_factory=https_connection_factory,
profile_name=profile_name)
def _required_auth_capability(self):
return ['route53']
def make_request(self, action, path, headers=None, data='', params=None):
if params:
pairs = []
for key, val in six.iteritems(params):
if val is None:
continue
pairs.append(key + '=' + urllib.parse.quote(str(val)))
path += '?' + '&'.join(pairs)
return super(Route53Connection, self).make_request(
action, path, headers, data,
retry_handler=self._retry_handler)
# Hosted Zones
def get_all_hosted_zones(self, start_marker=None, zone_list=None):
"""
Returns a Python data structure with information about all
Hosted Zones defined for the AWS account.
:param int start_marker: start marker to pass when fetching additional
results after a truncated list
:param list zone_list: a HostedZones list to prepend to results
"""
params = {}
if start_marker:
params = {'marker': start_marker}
response = self.make_request('GET', '/%s/hostedzone' % self.Version,
params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HostedZones',
item_marker=('HostedZone',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
if zone_list:
e['ListHostedZonesResponse']['HostedZones'].extend(zone_list)
while 'NextMarker' in e['ListHostedZonesResponse']:
next_marker = e['ListHostedZonesResponse']['NextMarker']
zone_list = e['ListHostedZonesResponse']['HostedZones']
e = self.get_all_hosted_zones(next_marker, zone_list)
return e
def get_hosted_zone(self, hosted_zone_id):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
"""
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_hosted_zone_by_name(self, hosted_zone_name):
"""
Get detailed information about a particular Hosted Zone.
:type hosted_zone_name: str
:param hosted_zone_name: The fully qualified domain name for the Hosted
Zone
"""
if hosted_zone_name[-1] != '.':
hosted_zone_name += '.'
all_hosted_zones = self.get_all_hosted_zones()
for zone in all_hosted_zones['ListHostedZonesResponse']['HostedZones']:
# check that they gave us the FQDN for their zone
if zone['Name'] == hosted_zone_name:
return self.get_hosted_zone(zone['Id'].split('/')[-1])
def create_hosted_zone(self, domain_name, caller_ref=None, comment='',
private_zone=False, vpc_id=None, vpc_region=None):
"""
Create a new Hosted Zone. Returns a Python data structure with
information about the newly created Hosted Zone.
:type domain_name: str
:param domain_name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication. If you omit the final period,
Amazon Route 53 assumes the domain is relative to the root.
This is the name you have registered with your DNS registrar.
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
            response to this request.
:type caller_ref: str
:param caller_ref: A unique string that identifies the request
and that allows failed CreateHostedZone requests to be retried
without the risk of executing the operation twice. If you don't
provide a value for this, boto will generate a Type 4 UUID and
use that.
:type comment: str
:param comment: Any comments you want to include about the hosted
zone.
:type private_zone: bool
:param private_zone: Set True if creating a private hosted zone.
:type vpc_id: str
:param vpc_id: When creating a private hosted zone, the VPC Id to
associate to is required.
:type vpc_region: str
:param vpc_region: When creating a private hosted zone, the region
of the associated VPC is required.
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
if private_zone:
params = {'name': domain_name,
'caller_ref': caller_ref,
'comment': comment,
'vpc_id': vpc_id,
'vpc_region': vpc_region,
'xmlns': self.XMLNameSpace}
xml_body = HZPXML % params
else:
params = {'name': domain_name,
'caller_ref': caller_ref,
'comment': comment,
'xmlns': self.XMLNameSpace}
xml_body = HZXML % params
uri = '/%s/hostedzone' % self.Version
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'}, xml_body)
body = response.read()
boto.log.debug(body)
if response.status == 201:
e = boto.jsonresponse.Element(list_marker='NameServers',
item_marker=('NameServer',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
raise exception.DNSServerError(response.status,
response.reason,
body)
def delete_hosted_zone(self, hosted_zone_id):
"""
Delete the hosted zone specified by the given id.
:type hosted_zone_id: str
:param hosted_zone_id: The hosted zone's id
"""
uri = '/%s/hostedzone/%s' % (self.Version, hosted_zone_id)
response = self.make_request('DELETE', uri)
body = response.read()
boto.log.debug(body)
if response.status not in (200, 204):
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
# Health checks
POSTHCXMLBody = """<CreateHealthCheckRequest xmlns="%(xmlns)s">
<CallerReference>%(caller_ref)s</CallerReference>
%(health_check)s
</CreateHealthCheckRequest>"""
def create_health_check(self, health_check, caller_ref=None):
"""
Create a new Health Check
:type health_check: HealthCheck
:param health_check: HealthCheck object
:type caller_ref: str
:param caller_ref: A unique string that identifies the request
and that allows failed CreateHealthCheckRequest requests to be retried
without the risk of executing the operation twice. If you don't
provide a value for this, boto will generate a Type 4 UUID and
use that.
"""
if caller_ref is None:
caller_ref = str(uuid.uuid4())
uri = '/%s/healthcheck' % self.Version
params = {'xmlns': self.XMLNameSpace,
'caller_ref': caller_ref,
'health_check': health_check.to_xml()
}
xml_body = self.POSTHCXMLBody % params
response = self.make_request('POST', uri, {'Content-Type': 'text/xml'}, xml_body)
body = response.read()
boto.log.debug(body)
if response.status == 201:
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
else:
raise exception.DNSServerError(response.status, response.reason, body)
def get_list_health_checks(self, maxitems=None, marker=None):
"""
Return a list of health checks
:type maxitems: int
:param maxitems: Maximum number of items to return
:type marker: str
:param marker: marker to get next set of items to list
"""
params = {}
if maxitems is not None:
params['maxitems'] = maxitems
if marker is not None:
params['marker'] = marker
uri = '/%s/healthcheck' % (self.Version, )
response = self.make_request('GET', uri, params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='HealthChecks',
item_marker=('HealthCheck',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_checker_ip_ranges(self):
"""
Return a list of Route53 healthcheck IP ranges
"""
uri = '/%s/checkeripranges' % self.Version
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element(list_marker='CheckerIpRanges', item_marker=('member',))
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def delete_health_check(self, health_check_id):
"""
Delete a health check
:type health_check_id: str
:param health_check_id: ID of the health check to delete
"""
uri = '/%s/healthcheck/%s' % (self.Version, health_check_id)
response = self.make_request('DELETE', uri)
body = response.read()
boto.log.debug(body)
if response.status not in (200, 204):
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
# Resource Record Sets
def get_all_rrsets(self, hosted_zone_id, type=None,
name=None, identifier=None, maxitems=None):
"""
Retrieve the Resource Record Sets defined for this Hosted Zone.
Returns the raw XML data returned by the Route53 call.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type type: str
:param type: The type of resource record set to begin the record
listing from. Valid choices are:
* A
* AAAA
* CNAME
* MX
* NS
* PTR
* SOA
* SPF
* SRV
* TXT
Valid values for weighted resource record sets:
* A
* AAAA
* CNAME
* TXT
Valid values for Zone Apex Aliases:
* A
* AAAA
:type name: str
:param name: The first name in the lexicographic ordering of domain
names to be retrieved
:type identifier: str
:param identifier: In a hosted zone that includes weighted resource
record sets (multiple resource record sets with the same DNS
name and type that are differentiated only by SetIdentifier),
if results were truncated for a given DNS name and type,
the value of SetIdentifier for the next resource record
set that has the current DNS name and type
:type maxitems: int
:param maxitems: The maximum number of records
"""
params = {'type': type, 'name': name,
'identifier': identifier, 'maxitems': maxitems}
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('GET', uri, params=params)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
rs = ResourceRecordSets(connection=self, hosted_zone_id=hosted_zone_id)
h = handler.XmlHandler(rs, self)
xml.sax.parseString(body, h)
return rs
def change_rrsets(self, hosted_zone_id, xml_body):
"""
Create or change the authoritative DNS information for this
Hosted Zone.
Returns a Python data structure with information about the set of
changes, including the Change ID.
:type hosted_zone_id: str
:param hosted_zone_id: The unique identifier for the Hosted Zone
:type xml_body: str
:param xml_body: The list of changes to be made, defined in the
XML schema defined by the Route53 service.
"""
uri = '/%s/hostedzone/%s/rrset' % (self.Version, hosted_zone_id)
response = self.make_request('POST', uri,
{'Content-Type': 'text/xml'},
xml_body)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def get_change(self, change_id):
"""
Get information about a proposed set of changes, as submitted
by the change_rrsets method.
Returns a Python data structure with status information about the
changes.
:type change_id: str
:param change_id: The unique identifier for the set of changes.
This ID is returned in the response to the change_rrsets method.
"""
uri = '/%s/change/%s' % (self.Version, change_id)
response = self.make_request('GET', uri)
body = response.read()
boto.log.debug(body)
if response.status >= 300:
raise exception.DNSServerError(response.status,
response.reason,
body)
e = boto.jsonresponse.Element()
h = boto.jsonresponse.XmlHandler(e, None)
h.parse(body)
return e
def create_zone(self, name, private_zone=False,
vpc_id=None, vpc_region=None):
"""
Create a new Hosted Zone. Returns a Zone object for the newly
created Hosted Zone.
:type name: str
:param name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication. If you omit the final period,
Amazon Route 53 assumes the domain is relative to the root.
This is the name you have registered with your DNS registrar.
It is also the name you will delegate from your registrar to
the Amazon Route 53 delegation servers returned in
response to this request.
:type private_zone: bool
:param private_zone: Set True if creating a private hosted zone.
:type vpc_id: str
:param vpc_id: When creating a private hosted zone, the VPC Id to
associate to is required.
:type vpc_region: str
:param vpc_region: When creating a private hosted zone, the region
of the associated VPC is required.
"""
zone = self.create_hosted_zone(name, private_zone=private_zone,
vpc_id=vpc_id, vpc_region=vpc_region)
return Zone(self, zone['CreateHostedZoneResponse']['HostedZone'])
def get_zone(self, name):
"""
Returns a Zone object for the specified Hosted Zone.
:param name: The name of the domain. This should be a
fully-specified domain, and should end with a final period
as the last label indication.
"""
name = self._make_qualified(name)
for zone in self.get_zones():
if name == zone.name:
return zone
def get_zones(self):
"""
Returns a list of Zone objects, one for each of the Hosted
Zones defined for the AWS account.
:rtype: list
:returns: A list of Zone objects.
"""
zones = self.get_all_hosted_zones()
return [Zone(self, zone) for zone in
zones['ListHostedZonesResponse']['HostedZones']]
def _make_qualified(self, value):
"""
Ensure passed domain names end in a period (.) character.
This will usually make a domain fully qualified.
"""
if type(value) in [list, tuple, set]:
new_list = []
for record in value:
if record and not record[-1] == '.':
new_list.append("%s." % record)
else:
new_list.append(record)
return new_list
else:
value = value.strip()
if value and not value[-1] == '.':
value = "%s." % value
return value
def _retry_handler(self, response, i, next_sleep):
status = None
boto.log.debug("Saw HTTP status: %s" % response.status)
if response.status == 400:
body = response.read()
# We need to parse the error first
err = exception.DNSServerError(
response.status,
response.reason,
body)
if err.error_code:
# This is a case where we need to ignore a 400 error, as
# Route53 returns this. See
# http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/DNSLimitations.html
                if err.error_code not in (
'PriorRequestNotComplete',
'Throttling',
'ServiceUnavailable',
'RequestExpired'):
return status
msg = "%s, retry attempt %s" % (
err.error_code,
i
)
next_sleep = min(random.random() * (2 ** i),
boto.config.get('Boto', 'max_retry_delay', 60))
i += 1
status = (msg, i, next_sleep)
return status | PypiClean |
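# Illustrative usage sketch for the Route53Connection class above, assuming boto 2.x with
# ambient AWS credentials; the zone name and record values below are placeholders.
import boto
from boto.route53.record import ResourceRecordSets
conn = boto.connect_route53()
# Create a zone and pull the bare zone id out of the '/hostedzone/<id>' path.
zone = conn.create_hosted_zone('example.com.')
zone_id = zone['CreateHostedZoneResponse']['HostedZone']['Id'].split('/')[-1]
# Queue a change set; commit() posts it through change_rrsets() defined above.
changes = ResourceRecordSets(conn, zone_id)
record = changes.add_change('CREATE', 'www.example.com.', 'A', ttl=300)
record.add_value('192.0.2.10')
changes.commit()
# List the records back with get_all_rrsets().
for rrset in conn.get_all_rrsets(zone_id):
    print('%s %s %s' % (rrset.name, rrset.type, rrset.resource_records))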
/minitage.paste-1.4.6.zip/minitage.paste-1.4.6/src/minitage/paste/projects/plone3/template/+category_dir+/+project_dir+/README.rst_tmpl | ==============================================================
BUILDOUT FOR $project DOCUMENTATION
==============================================================
INSTALLING THIS PROJECT WITHOUT MINITAGE
-----------------------------------------
::
source /minitage/bin/activate
$scm_type clone $uri ${project}
cd ${project}
python bootstrap.py -dc buildout-(dev/prod).cfg
bin/buildout -vvvvvNc -dc buildout-(dev/prod).cfg
INSTALLING THIS PROJECT WITH MINITAGE
--------------------------------------
ALWAYS USE THE MINITAGE ENVIRONMENT FILE INSIDE A MINITAGE
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Right after the project has been installed, before doing anything else, source the environment file in your current shell::
source \$MT/zope/${project}/sys/share/minitage/minitage.env # env file is generated with \$MT/bin/paster create -t minitage.instances.env ${project}
THE MINITAGE DANCE
~~~~~~~~~~~~~~~~~~~~~~~~
::
export MT=/minitage
virtualenv --no-site-packages --distribute \$MT
source /minitage/bin/activate
easy_install -U minitage.core minitage.paste
#if $scm_type == 'svn'
svn co $uri/minilays/${project} \$MT/minilays/${project}
#elif $scm_type in ['hg', 'git']
#set $minilay = $uri.replace('buildout', 'minilay')
$scm_type clone $minilay \$MT/minilays/${project}
#end if
minimerge -v $project
#minimerge -v ${project}-prod
source \$MT/zope/${project}/sys/share/minitage/minitage.env
cd \$INS #enjoy !
CREATE A FIRST PLONESITE OBJECT
---------------------------------
Just run your Plone instance and install ${project}.
PLAYING WITH DATAFS & PROJECT DATABASES
-------------------------------------------
- Upload the latest datafs from production to staging server::
bin/buildout -vNc <CONFIG>-prod.cfg install upload-datafs
- Get the latest datafs from production to staging server::
bin/buildout -vNc <CONFIG> install get-datafs
DEVELOP MODE
---------------
To develop your application, run the ``(minitage.)buildout-dev.cfg`` buildout; it extends this one, but:
* it comes with development tools.
* it configures the instance to be more verbose (debug mode & verbose security)
* it has only one instance and not all the hassles from production.
PRODUCTION MODE
---------------
To make your application safe for production, run the ``(minitage.)buildout-prod.cfg`` buildout.
It extends this one with additional crontabs, backup scripts and the creation of some additional instances.
BASE BUILDOUTS WHICH ONLY SCHEDULE PARTS FROM HERE & THERE
-------------------------------------------------------------------
Note that Minitage support includes XML libs, LDAP, DBs, Python, dependencies & a common eggs cache (for things like lxml or Pillow), subversion & much more.
::
|-- etc/base.cfg -> The base buildout
|-- buildout-prod.cfg -> buildout for production
|-- buildout-dev.cfg -> buildout for development
|-- etc/minitage/minitage.cfg -> some buildout tweaks to run in the best of the world with minitage
|-- minitage.buildout-prod.cfg -> buildout for production with minitage support
|-- minitage.buildout-dev.cfg -> buildout for development with minitage support
PLONE OFFICIAL BUILDOUTS INTEGRATION
--------------------------------------
In ``etc/base.cfg``, we extend directly the Plone release versions & sources files.
PROJECT SETTINGS
~~~~~~~~~~~~~~~~~~~~~~~~
- The most important sections of this buildout configuration live in etc/${project}.cfg.
  Set the project development-specific settings there.
::
etc/project/
|-- ${project}.cfg -> your project needs (packages, sources, products)
|-- sources.cfg -> external sources of your project:
| - Sources not packaged as python eggs.
| - Eggs grabbed from svn; add your development eggs here.
| - Links to find distributions.
|-- patches.cfg -> patches used on the project
|-- cluster.cfg -> define new zope instances here & also their FileSystemStorage if any.
|-- newsletter.cfg -> singing & dancing integration (new instance with clockserver, version pinning, fss if any)
|-- ${project}-kgs.cfg -> Generated KGS for your project (minitage's printer or buildout.dumppickledversion)
`-- versions.cfg -> minimal version pinning for installing your project
SYSTEM ADMINISTRATORS RELATED FILES
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
etc/init.d/ -> various init script (eg supervisor)
etc/logrotate.d/ -> various logrotate configuration files
etc/sys/
|-- high-availability.cfg -> Project production settings like supervision, loadbalancer and so on
|-- maintenance.cfg -> Project maintenance settings (crons, logs)
`-- settings.cfg -> various settings (crons hours, hosts, installation paths, ports, passwords)
#if $with_ploneproduct_fss:
CRONS
~~~~~~
::
|-- etc/cron_scripts/fss_daily.sh -> backup script for fss
#end if
REVERSE PROXY
--------------
We generate two virtualhosts for a classical apache setup, mostly ready, but feel free to copy/adapt them.
::
etc/apache/
|-- ${apache_vhost_number}-${project}.reverseproxy.conf -> a vhost for use with a standalone Plone (even with haproxy in front of it)
`-- apache.cfg
etc/templates/apache/
|-- ${apache_vhost_number}-${project}.reverseproxy.conf.in -> Template for a vhost for use with a standalone Plone (even with haproxy in front of it)
In settings.cfg you now have some settings declaring which host is your reverse proxy backend & how the vhost is mounted:
* hosts:zope-front / ports:zope-front -> zope front backend
* reverseproxy:host / reverseproxy:port / reverseproxy:mount-point -> host / port / mountpoint on the reverse proxy)
CONFIGURATION TEMPLATES
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
etc/templates/
|-- balancer.conf.template -> haproxy template.
| Copy or ln the generated file 'etc/loadbalancing/balancer.conf' to your haproxy installation if any.
#if $with_ploneproduct_fss
|-- fss_daily.sh.in -> FSS daily backup script template
#end if
`-- logrotate.conf.template -> logrotate configuration file template for your Zope logs
`-- supervisor.initd -> template for supervisor init script
BACKENDS
~~~~~~~~~~~
::
etc/backends/
#if $with_ploneproduct_fss:
|-- etc/backends/fss.cfg -> Filestorage configuration if any
#end if
|-- etc/backends/relstorage.cfg -> relstorage configuration if any
|-- etc/backends/zeo.cfg -> zeoserver configuration if any
`-- etc/backends/zodb.cfg -> zodb configuration if any
OS SPECIFIC SYSTEM INSTALLERS
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Those are popular tools around zope/plone dev (not supported, just here for your convenience, READ BEFORE USING THEM).
You had better learn how to bootstrap a minitage environment instead; it is fun and more secure & reproducible!
::
|-- etc/os
|-- debian.sh -> debian specific
|-- opensuse-dev.sh -> opensuse/dev specific
|-- opensuse.sh -> suse specific
|-- osx.sh -> osx specific
`-- ubuntu.sh -> ubuntu specific
CONTINUOUS INTEGRATION
~~~~~~~~~~~~~~~~~~~~~~~~~
Here are the files needed for our hudson integration.
For hudson we provide some shell helpers, more or less generated, to run a build:
- a helper which sets some variables in the current environment for the other helpers
- a helper which updates the project
- a helper which updates the associated sources grabbed via mr.developer
- a helper which runs all the tests
This is described in detail in the related configuration files you will find in the layout below.
::
|-- etc/hudson/
| `-- $project
| |-- build
| |-- build.sh -> the project build helper
| |-- test.sh -> the project test executor helper (launch all tests needed)
| |-- update_mrdeveloper.sh -> update sources grabbed via mrdeveloper
| `-- update_project.sh -> update this layout
|
|-- etc/templates/hudson/
`-- ${project}
|-- build
| `-- activate_env.sh.in -> buildout template to generate etc/hudson/${project}/build/activate.env.sh
`-- config.xml.in -> buildout template to generate etc/hudson/${project}/config.xml (hudson job/build file)
$instances_description.replace('# ', '')
.. vim:set ft=rst: | PypiClean |
/564bff00ff_strawberry_graphql-0.168.2-py3-none-any.whl/strawberry/union.py | from __future__ import annotations
import itertools
from itertools import chain
from typing import (
TYPE_CHECKING,
Any,
Collection,
Iterable,
List,
Mapping,
NoReturn,
Optional,
Tuple,
Type,
TypeVar,
Union,
cast,
)
from typing_extensions import Annotated, get_origin
from graphql import GraphQLNamedType, GraphQLUnionType
from strawberry.annotation import StrawberryAnnotation
from strawberry.exceptions import (
InvalidTypeForUnionMergeError,
InvalidUnionTypeError,
UnallowedReturnTypeForUnion,
WrongReturnTypeForUnion,
)
from strawberry.lazy_type import LazyType
from strawberry.type import StrawberryOptional, StrawberryType
if TYPE_CHECKING:
from graphql import (
GraphQLAbstractType,
GraphQLResolveInfo,
GraphQLType,
GraphQLTypeResolver,
)
from strawberry.schema.types.concrete_type import TypeMap
from strawberry.types.types import TypeDefinition
class StrawberryUnion(StrawberryType):
def __init__(
self,
name: Optional[str] = None,
type_annotations: Tuple[StrawberryAnnotation, ...] = tuple(),
description: Optional[str] = None,
directives: Iterable[object] = (),
):
self.graphql_name = name
self.type_annotations = type_annotations
self.description = description
self.directives = directives
def __eq__(self, other: object) -> bool:
if isinstance(other, StrawberryType):
if isinstance(other, StrawberryUnion):
return (
self.graphql_name == other.graphql_name
and self.type_annotations == other.type_annotations
and self.description == other.description
)
return False
return super().__eq__(other)
def __hash__(self) -> int:
# TODO: Is this a bad idea? __eq__ objects are supposed to have the same hash
return id(self)
def __or__(self, other: Union[StrawberryType, type]) -> StrawberryType:
if other is None:
# Return the correct notation when using `StrawberryUnion | None`.
return StrawberryOptional(of_type=self)
# Raise an error in any other case.
# There is Work in progress to deal with more merging cases, see:
# https://github.com/strawberry-graphql/strawberry/pull/1455
raise InvalidTypeForUnionMergeError(self, other)
@property
def types(self) -> Tuple[StrawberryType, ...]:
return tuple(
cast(StrawberryType, annotation.resolve())
for annotation in self.type_annotations
)
@property
def type_params(self) -> List[TypeVar]:
def _get_type_params(type_: StrawberryType):
if isinstance(type_, LazyType):
type_ = cast("StrawberryType", type_.resolve_type())
if hasattr(type_, "_type_definition"):
parameters = getattr(type_, "__parameters__", None)
return list(parameters) if parameters else []
return type_.type_params
# TODO: check if order is important:
# https://github.com/strawberry-graphql/strawberry/issues/445
return list(
set(itertools.chain(*(_get_type_params(type_) for type_ in self.types)))
)
@property
def is_generic(self) -> bool:
def _is_generic(type_: object) -> bool:
if hasattr(type_, "_type_definition"):
type_ = type_._type_definition
if isinstance(type_, StrawberryType):
return type_.is_generic
return False
return any(map(_is_generic, self.types))
def copy_with(
self, type_var_map: Mapping[TypeVar, Union[StrawberryType, type]]
) -> StrawberryType:
if not self.is_generic:
return self
new_types = []
for type_ in self.types:
            # Default to the original member type; replace it with a copy when the member
            # is generic (either a @strawberry.type class or a StrawberryType instance).
            new_type: Union[StrawberryType, type] = type_
            if hasattr(type_, "_type_definition"):
                type_definition: TypeDefinition = type_._type_definition
                if type_definition.is_generic:
                    new_type = type_definition.copy_with(type_var_map)
            elif isinstance(type_, StrawberryType) and type_.is_generic:
                new_type = type_.copy_with(type_var_map)
            new_types.append(new_type)
return StrawberryUnion(
type_annotations=tuple(map(StrawberryAnnotation, new_types)),
description=self.description,
)
def __call__(self, *_args, **_kwargs) -> NoReturn:
"""Do not use.
Used to bypass
https://github.com/python/cpython/blob/5efb1a77e75648012f8b52960c8637fc296a5c6d/Lib/typing.py#L148-L149
"""
raise ValueError("Cannot use union type directly")
def get_type_resolver(self, type_map: TypeMap) -> GraphQLTypeResolver:
def _resolve_union_type(
root: Any, info: GraphQLResolveInfo, type_: GraphQLAbstractType
) -> str:
assert isinstance(type_, GraphQLUnionType)
from strawberry.types.types import TypeDefinition
# If the type given is not an Object type, try resolving using `is_type_of`
# defined on the union's inner types
if not hasattr(root, "_type_definition"):
for inner_type in type_.types:
if inner_type.is_type_of is not None and inner_type.is_type_of(
root, info
):
return inner_type.name
# Couldn't resolve using `is_type_of`
raise WrongReturnTypeForUnion(info.field_name, str(type(root)))
return_type: Optional[GraphQLType]
# Iterate over all of our known types and find the first concrete
# type that implements the type. We prioritise checking types named in the
# Union in case a nested generic object matches against more than one type.
concrete_types_for_union = (type_map[x.name] for x in type_.types)
# TODO: do we still need to iterate over all types in `type_map`?
for possible_concrete_type in chain(
concrete_types_for_union, type_map.values()
):
possible_type = possible_concrete_type.definition
if not isinstance(possible_type, TypeDefinition):
continue
if possible_type.is_implemented_by(root):
return_type = possible_concrete_type.implementation
break
else:
return_type = None
# Make sure the found type is expected by the Union
if return_type is None or return_type not in type_.types:
raise UnallowedReturnTypeForUnion(
info.field_name, str(type(root)), set(type_.types)
)
# Return the name of the type. Returning the actual type is now deprecated
if isinstance(return_type, GraphQLNamedType):
# TODO: Can return_type ever _not_ be a GraphQLNamedType?
return return_type.name
else:
# todo: check if this is correct
return return_type.__name__ # type: ignore
return _resolve_union_type
@staticmethod
def is_valid_union_type(type_: object) -> bool:
# Usual case: Union made of @strawberry.types
if hasattr(type_, "_type_definition"):
return True
# Can't confidently assert that these types are valid/invalid within Unions
# until full type resolving stage is complete
ignored_types = (LazyType, TypeVar)
if isinstance(type_, ignored_types):
return True
if get_origin(type_) is Annotated:
return True
return False
Types = TypeVar("Types", bound=Type)
# We return a Union type here in order to allow to use the union type as type
# annotation.
# For the `types` argument we'd ideally use a TypeVarTuple, but that's not
# yet supported in any python implementation (or in typing_extensions).
# See https://www.python.org/dev/peps/pep-0646/ for more information
def union(
name: str,
types: Collection[Types],
*,
description: Optional[str] = None,
directives: Iterable[object] = (),
) -> Union[Types]:
"""Creates a new named Union type.
Example usages:
>>> @strawberry.type
... class A: ...
>>> @strawberry.type
... class B: ...
>>> strawberry.union("Name", (A, Optional[B]))
"""
# Validate types
if not types:
raise TypeError("No types passed to `union`")
for type_ in types:
# Due to TypeVars, Annotations, LazyTypes, etc., this does not perfectly detect
# issues. This check also occurs in the Schema conversion stage as a backup.
if not StrawberryUnion.is_valid_union_type(type_):
raise InvalidUnionTypeError(union_name=name, invalid_type=type_)
union_definition = StrawberryUnion(
name=name,
type_annotations=tuple(StrawberryAnnotation(type_) for type_ in types),
description=description,
directives=directives,
)
return union_definition # type: ignore | PypiClean |
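# Illustrative usage sketch for the union() helper above, using the public strawberry API;
# the Dog/Cat/Query types and field values are made-up placeholders.
import strawberry
@strawberry.type
class Dog:
    name: str
@strawberry.type
class Cat:
    name: str
# A named union over two object types, as accepted by union() above.
Pet = strawberry.union("Pet", (Dog, Cat))
@strawberry.type
class Query:
    @strawberry.field
    def pet(self) -> Pet:
        # Resolvers return a member type; StrawberryUnion.get_type_resolver()
        # maps the instance back to the concrete GraphQL type at runtime.
        return Dog(name="Rex")
schema = strawberry.Schema(query=Query)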
/gamification-engine-0.4.0.tar.gz/gamification-engine-0.4.0/gengine/app/jsscripts/node_modules/intl-relativeformat/dist/locale-data/sq.js | IntlRelativeFormat.__addLocaleData({"locale":"sq","pluralRuleFunction":function (n,ord){var s=String(n).split("."),t0=Number(s[0])==n,n10=t0&&s[0].slice(-1),n100=t0&&s[0].slice(-2);if(ord)return n==1?"one":n10==4&&n100!=14?"many":"other";return n==1?"one":"other"},"fields":{"year":{"displayName":"vit","relative":{"0":"këtë vit","1":"vitin e ardhshëm","-1":"vitin e kaluar"},"relativeTime":{"future":{"one":"pas {0} viti","other":"pas {0} vjetësh"},"past":{"one":"{0} vit më parë","other":"{0} vjet më parë"}}},"year-short":{"displayName":"vit","relative":{"0":"këtë vit","1":"vitin e ardhshëm","-1":"vitin e kaluar"},"relativeTime":{"future":{"one":"pas {0} viti","other":"pas {0} vjetësh"},"past":{"one":"{0} vit më parë","other":"{0} vjet më parë"}}},"month":{"displayName":"muaj","relative":{"0":"këtë muaj","1":"muajin e ardhshëm","-1":"muajin e kaluar"},"relativeTime":{"future":{"one":"pas {0} muaji","other":"pas {0} muajsh"},"past":{"one":"{0} muaj më parë","other":"{0} muaj më parë"}}},"month-short":{"displayName":"muaj","relative":{"0":"këtë muaj","1":"muajin e ardhshëm","-1":"muajin e kaluar"},"relativeTime":{"future":{"one":"pas {0} muaji","other":"pas {0} muajsh"},"past":{"one":"{0} muaj më parë","other":"{0} muaj më parë"}}},"day":{"displayName":"ditë","relative":{"0":"sot","1":"nesër","-1":"dje"},"relativeTime":{"future":{"one":"pas {0} dite","other":"pas {0} ditësh"},"past":{"one":"{0} ditë më parë","other":"{0} ditë më parë"}}},"day-short":{"displayName":"ditë","relative":{"0":"sot","1":"nesër","-1":"dje"},"relativeTime":{"future":{"one":"pas {0} dite","other":"pas {0} ditësh"},"past":{"one":"{0} ditë më parë","other":"{0} ditë më parë"}}},"hour":{"displayName":"orë","relative":{"0":"këtë orë"},"relativeTime":{"future":{"one":"pas {0} ore","other":"pas {0} orësh"},"past":{"one":"{0} orë më parë","other":"{0} orë më parë"}}},"hour-short":{"displayName":"orë","relative":{"0":"këtë orë"},"relativeTime":{"future":{"one":"pas {0} ore","other":"pas {0} orësh"},"past":{"one":"{0} orë më parë","other":"{0} orë më parë"}}},"minute":{"displayName":"minutë","relative":{"0":"këtë minutë"},"relativeTime":{"future":{"one":"pas {0} minute","other":"pas {0} minutash"},"past":{"one":"{0} minutë më parë","other":"{0} minuta më parë"}}},"minute-short":{"displayName":"min.","relative":{"0":"këtë minutë"},"relativeTime":{"future":{"one":"pas {0} min.","other":"pas {0} min."},"past":{"one":"{0} min. më parë","other":"{0} min. më parë"}}},"second":{"displayName":"sekondë","relative":{"0":"tani"},"relativeTime":{"future":{"one":"pas {0} sekonde","other":"pas {0} sekondash"},"past":{"one":"{0} sekondë më parë","other":"{0} sekonda më parë"}}},"second-short":{"displayName":"sek.","relative":{"0":"tani"},"relativeTime":{"future":{"one":"pas {0} sek.","other":"pas {0} sek."},"past":{"one":"{0} sek. më parë","other":"{0} sek. më parë"}}}}});
IntlRelativeFormat.__addLocaleData({"locale":"sq-MK","parentLocale":"sq"});
IntlRelativeFormat.__addLocaleData({"locale":"sq-XK","parentLocale":"sq"}); | PypiClean |
/XMLCheck-0.7.1.zip/XMLCheck-0.7.1/README.txt | XCheck
XCheck provides XMLChecker objects that can validate text, xml-formatted text,
or an elementtree.ElementTree node. XCheck objects can also
return checked data in a normalized form (BoolCheck, for example,
returns Boolean values; IntCheck returns an integer).
XCheck objects can also manipulate Element objects.
You can define an XCheck object using XML-formatted text:
checker_text = """<xcheck name="contact">
    <children>
        <text name="name" />
        <email name="email" max_occurs="4" />
    </children>
</xcheck>"""
checker = xcheck.load_checker(checker_text)
checker("<contact><name>Josh</name><email>[email protected]</email></contact>")
# returns True
| PypiClean |
/django-configurations-celery-2020.10.26.tar.gz/django-configurations-celery-2020.10.26/django_configurations_celery/__init__.py | from configurations import Configuration, values
class CeleryMixin:
CELERY_ACCEPT_CONTENT = values.ListValue(['application/json'])
CELERY_ENABLE_UTC = values.BooleanValue(True)
CELERY_IMPORTS = values.ListValue([])
CELERY_INCLUDE = values.ListValue([])
CELERY_TIMEZONE = values.Value('UTC')
CELERYBEAT_MAX_LOOP_INTERVAL = values.Value(0)
CELERYBEAT_SCHEDULE = {}
CELERYBEAT_SCHEDULER = values.Value('celery.beat:PersistentScheduler')
CELERYBEAT_SCHEDULE_FILENAME = values.Value('celerybeat-schedule')
CELERYBEAT_SYNC_EVERY = values.PositiveIntegerValue(0)
BROKER_URL = values.Value(None)
# BROKER_TRANSPORT
BROKER_TRANSPORT_OPTIONS = {}
BROKER_CONNECTION_TIMEOUT = values.FloatValue(4.0)
BROKER_CONNECTION_RETRY = values.BooleanValue(True)
BROKER_CONNECTION_MAX_RETRIES = values.PositiveIntegerValue(100)
BROKER_FAILOVER_STRATEGY = values.Value('round-robin')
BROKER_HEARTBEAT = values.FloatValue(120.0)
BROKER_LOGIN_METHOD = values.Value('AMQPLAIN')
BROKER_POOL_LIMIT = values.PositiveIntegerValue(10)
BROKER_USE_SSL = values.BooleanValue(False)
# CELERY_CACHE_BACKEND no longer used
CELERY_CACHE_BACKEND_OPTIONS = {}
CASSANDRA_COLUMN_FAMILY = values.Value(None)
CASSANDRA_ENTRY_TTL = values.PositiveIntegerValue(None)
CASSANDRA_KEYSPACE = values.Value(None)
CASSANDRA_PORT = values.PositiveIntegerValue(9042)
CASSANDRA_READ_CONSISTENCY = values.Value(None)
CASSANDRA_OPTIONS = {}
S3_ACCESS_KEY_ID = values.Value(None)
S3_SECRET_ACCESS_KEY = values.Value(None)
S3_BUCKET = values.Value(None)
S3_BASE_PATH = values.Value(None)
S3_ENDPOINT_URL = values.Value(None)
S3_REGION = values.Value(None)
CELERY_COUCHBASE_BACKEND_SETTINGS = {}
CELERY_ARANGODB_BACKEND_SETTINGS = {}
CELERY_MONGODB_BACKEND_SETTINGS = {}
CELERY_EVENT_QUEUE_EXPIRES = values.FloatValue(60.0)
CELERY_EVENT_QUEUE_TTL = values.FloatValue(5.0)
CELERY_EVENT_QUEUE_PREFIX = values.Value('celeryev')
CELERY_EVENT_SERIALIZER = values.Value('json')
CELERY_REDIS_DB = values.Value(None)
CELERY_REDIS_HOST = values.Value(None)
CELERY_REDIS_MAX_CONNECTIONS = values.PositiveIntegerValue(None)
CELERY_REDIS_PASSWORD = values.Value(None)
CELERY_REDIS_PORT = values.PositiveIntegerValue(None)
CELERY_REDIS_BACKEND_USE_SSL = values.BooleanValue(False)
CELERY_RESULT_BACKEND = values.Value(None)
CELERY_MAX_CACHED_RESULTS = values.BooleanValue(False)
CELERY_MESSAGE_COMPRESSION = values.Value(None)
CELERY_RESULT_EXCHANGE = values.Value(None)
CELERY_RESULT_EXCHANGE_TYPE = values.Value(None)
# CELERY_RESULT_EXPIRES timedelta 1 day.
CELERY_RESULT_PERSISTENT = values.BooleanValue(False)
CELERY_RESULT_SERIALIZER = values.Value('json')
CELERY_RESULT_DBURI = values.Value(None)
CELERY_RESULT_ENGINE_OPTIONS = {}
# _DB_SHORT_LIVED_SESSIONS
CELERY_RESULT_DB_TABLE_NAMES = values.ListValue([])
CELERY_SECURITY_CERTIFICATE = values.Value(None)
CELERY_SECURITY_CERT_STORE = values.Value(None)
CELERY_SECURITY_KEY = values.Value(None)
CELERY_ACKS_LATE = values.BooleanValue(False)
CELERY_ACKS_ON_FAILURE_OR_TIMEOUT = values.BooleanValue(True)
CELERY_ALWAYS_EAGER = values.BooleanValue(False)
CELERY_ANNOTATIONS = None # dict/list
CELERY_COMPRESSION = values.Value(None)
CELERY_CREATE_MISSING_QUEUES = values.BooleanValue(True)
CELERY_DEFAULT_DELIVERY_MODE = values.Value('persistent')
# CELERY_DEFAULT_EXCHANGE
CELERY_DEFAULT_EXCHANGE_TYPE = values.Value('direct')
CELERY_DEFAULT_QUEUE = values.Value('celery')
CELERY_DEFAULT_RATE_LIMIT = values.Value(None)
# CELERY_DEFAULT_ROUTING_KEY str
CELERY_EAGER_PROPAGATES = values.BooleanValue(False)
CELERY_IGNORE_RESULT = values.BooleanValue(False)
CELERY_PUBLISH_RETRY = values.BooleanValue(True)
# CELERY_PUBLISH_RETRY_POLICY
CELERY_QUEUES = None
CELERY_ROUTES = None
CELERY_SEND_SENT_EVENT = values.BooleanValue(False)
CELERY_SERIALIZER = values.Value('json')
CELERYD_SOFT_TIME_LIMIT = values.PositiveIntegerValue(None)
CELERYD_TIME_LIMIT = values.PositiveIntegerValue(None)
CELERY_TRACK_STARTED = values.BooleanValue(False)
CELERYD_AGENT = values.Value(None)
CELERYD_AUTOSCALER = values.Value('celery.worker.autoscale:Autoscaler')
CELERYD_CONCURRENCY = values.PositiveIntegerValue(None)
CELERYD_CONSUMER = values.Value('celery.worker.consumer:Consumer')
CELERY_WORKER_DIRECT = values.BooleanValue(False)
CELERY_DISABLE_RATE_LIMITS = values.BooleanValue(False)
CELERY_ENABLE_REMOTE_CONTROL = values.BooleanValue(True)
CELERYD_HIJACK_ROOT_LOGGER = values.BooleanValue(True)
# CELERYD_LOG_COLOR
CELERYD_LOG_FORMAT = values.Value(
'[%(asctime)s: %(levelname)s/%(processName)s] %(message)s')
CELERYD_WORKER_LOST_WAIT = values.FloatValue(10.0)
CELERYD_MAX_TASKS_PER_CHILD = values.PositiveIntegerValue(None)
CELERYD_POOL = values.Value('prefork')
# CELERYD_POOL_PUTLOCKS ?
CELERYD_POOL_RESTARTS = values.BooleanValue(False)
CELERYD_PREFETCH_MULTIPLIER = values.PositiveIntegerValue(4)
CELERYD_REDIRECT_STDOUTS = values.BooleanValue(True)
CELERYD_REDIRECT_STDOUTS_LEVEL = values.Value('WARNING')
CELERY_SEND_EVENTS = values.BooleanValue(False)
CELERYD_STATE_DB = values.Value(None)
CELERYD_TASK_LOG_FORMAT = values.Value("""[%(asctime)s: %(levelname)s/%(processName)s]
[%(task_name)s(%(task_id)s)] %(message)s""")
CELERYD_TIMER = values.Value(None)
CELERYD_TIMER_PRECISION = values.FloatValue(1.0)
class CeleryConfiguration(CeleryMixin, Configuration):
pass | PypiClean |
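# Illustrative usage sketch for the mixin/configuration above with django-configurations;
# the project/module names, app list and broker URLs below are placeholders.
from django_configurations_celery import CeleryConfiguration
class Production(CeleryConfiguration):
    DEBUG = False
    INSTALLED_APPS = ['django.contrib.contenttypes', 'django.contrib.auth', 'myapp']
    BROKER_URL = 'redis://localhost:6379/0'
    CELERY_RESULT_BACKEND = 'redis://localhost:6379/1'
# Select the class at runtime the usual django-configurations way, e.g.
#   DJANGO_SETTINGS_MODULE=myproject.settings DJANGO_CONFIGURATION=Production
# together with the configurations.management / configurations.wsgi entry points.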
/csle_common-0.3.8.tar.gz/csle_common-0.3.8/src/csle_common/dao/emulation_config/topology_config.py | from typing import List, Dict, Any
from csle_common.dao.emulation_config.node_firewall_config import NodeFirewallConfig
from csle_common.util.general_util import GeneralUtil
from csle_base.json_serializable import JSONSerializable
class TopologyConfig(JSONSerializable):
"""
A DTO representing the topology configuration of an emulation environment
"""
def __init__(self, node_configs: List[NodeFirewallConfig], subnetwork_masks: List[str]):
"""
Initializes the DTO
:param node_configs: the list of node configurations
:param subnetwork: the subnetwork
"""
self.node_configs = node_configs
self.subnetwork_masks = subnetwork_masks
@staticmethod
def from_dict(d: Dict[str, Any]) -> "TopologyConfig":
"""
Converts a dict representation to an instance
:param d: the dict to convert
:return: the created instance
"""
obj = TopologyConfig(
node_configs=list(map(lambda x: NodeFirewallConfig.from_dict(x), d["node_configs"])),
subnetwork_masks=d["subnetwork_masks"]
)
return obj
def to_dict(self) -> Dict[str, Any]:
"""
Converts the object to a dict representation
:return: a dict representation of the object
"""
d: Dict[str, Any] = {}
d["subnetwork_masks"] = self.subnetwork_masks
d["node_configs"] = list(map(lambda x: x.to_dict(), self.node_configs))
return d
def __str__(self) -> str:
"""
:return: a string representation of the object
"""
return f"node configs:{','.join(list(map(lambda x: str(x), self.node_configs)))}, " \
f"subnetwork_masks:{','.join(self.subnetwork_masks)}"
@staticmethod
def from_json_file(json_file_path: str) -> "TopologyConfig":
"""
Reads a json file and converts it to a DTO
:param json_file_path: the json file path
:return: the converted DTO
"""
import io
import json
with io.open(json_file_path, 'r') as f:
json_str = f.read()
return TopologyConfig.from_dict(json.loads(json_str))
def copy(self) -> "TopologyConfig":
"""
:return: a copy of the DTO
"""
return TopologyConfig.from_dict(self.to_dict())
def create_execution_config(self, ip_first_octet: int) -> "TopologyConfig":
"""
Creates a new config for an execution
:param ip_first_octet: the first octet of the IP of the new execution
:return: the new config
"""
config = self.copy()
config.subnetwork_masks = list(map(lambda x: GeneralUtil.replace_first_octet_of_ip(
ip=x, ip_first_octet=ip_first_octet), config.subnetwork_masks))
config.node_configs = list(map(lambda x: x.create_execution_config(ip_first_octet=ip_first_octet),
config.node_configs))
return config | PypiClean |
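# Illustrative usage sketch for TopologyConfig above; the subnet mask is a placeholder and
# node_configs is left empty so no NodeFirewallConfig details are assumed.
from csle_common.dao.emulation_config.topology_config import TopologyConfig
topology = TopologyConfig(node_configs=[], subnetwork_masks=["55.1.2.0/24"])
d = topology.to_dict()
restored = TopologyConfig.from_dict(d)
assert restored.subnetwork_masks == topology.subnetwork_masks
# create_execution_config() rewrites the first octet of each mask (and of every node
# config) so that multiple executions of the same emulation can run side by side.
execution_topology = topology.create_execution_config(ip_first_octet=15)
print(execution_topology.subnetwork_masks)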
/tw2.ckeditor-2.0.1.tar.gz/tw2.ckeditor-2.0.1/tw2/ckeditor/static/4.0.1/lang/km.js | /*
Copyright (c) 2003-2013, CKSource - Frederico Knabben. All rights reserved.
For licensing, see LICENSE.html or http://ckeditor.com/license
*/
CKEDITOR.lang['km']={"dir":"ltr","editor":"Rich Text Editor","common":{"editorHelp":"Press ALT 0 for help","browseServer":"មើល","url":"URL","protocol":"ប្រូតូកូល","upload":"ទាញយក","uploadSubmit":"បញ្ជូនទៅកាន់ម៉ាស៊ីនផ្តល់សេវា","image":"រូបភាព","flash":"Flash","form":"បែបបទ","checkbox":"ប្រអប់ជ្រើសរើស","radio":"ប៉ូតុនរង្វង់មូល","textField":"ជួរសរសេរអត្ថបទ","textarea":"តំបន់សរសេរអត្ថបទ","hiddenField":"ជួរលាក់","button":"ប៉ូតុន","select":"ជួរជ្រើសរើស","imageButton":"ប៉ូតុនរូបភាព","notSet":"<មិនមែន>","id":"Id","name":"ឈ្មោះ","langDir":"ទិសដៅភាសា","langDirLtr":"ពីឆ្វេងទៅស្តាំ(LTR)","langDirRtl":"ពីស្តាំទៅឆ្វេង(RTL)","langCode":"លេខកូតភាសា","longDescr":"អធិប្បាយ URL វែង","cssClass":"Stylesheet Classes","advisoryTitle":"ចំណងជើង ប្រឹក្សា","cssStyle":"ម៉ូត","ok":"យល់ព្រម","cancel":"មិនយល់ព្រម","close":"Close","preview":"មើលសាកល្បង","resize":"Resize","generalTab":"General","advancedTab":"កំរិតខ្ពស់","validateNumberFailed":"This value is not a number.","confirmNewPage":"Any unsaved changes to this content will be lost. Are you sure you want to load new page?","confirmCancel":"Some of the options have been changed. Are you sure to close the dialog?","options":"Options","target":"គោលដៅ","targetNew":"New Window (_blank)","targetTop":"Topmost Window (_top)","targetSelf":"Same Window (_self)","targetParent":"Parent Window (_parent)","langDirLTR":"ពីឆ្វេងទៅស្តាំ(LTR)","langDirRTL":"ពីស្តាំទៅឆ្វេង(RTL)","styles":"ម៉ូត","cssClasses":"Stylesheet Classes","width":"ទទឹង","height":"កំពស់","align":"កំណត់ទីតាំង","alignLeft":"ខាងឆ្វង","alignRight":"ខាងស្តាំ","alignCenter":"កណ្តាល","alignTop":"ខាងលើ","alignMiddle":"កណ្តាល","alignBottom":"ខាងក្រោម","invalidValue":"Invalid value.","invalidHeight":"Height must be a number.","invalidWidth":"Width must be a number.","invalidCssLength":"Value specified for the \"%1\" field must be a positive number with or without a valid CSS measurement unit (px, %, in, cm, mm, em, ex, pt, or pc).","invalidHtmlLength":"Value specified for the \"%1\" field must be a positive number with or without a valid HTML measurement unit (px or %).","invalidInlineStyle":"Value specified for the inline style must consist of one or more tuples with the format of \"name : value\", separated by semi-colons.","cssLengthTooltip":"Enter a number for a value in pixels or a number with a valid CSS unit (px, %, in, cm, mm, em, ex, pt, or pc).","unavailable":"%1<span class=\"cke_accessibility\">, unavailable</span>"},"about":{"copy":"Copyright © $1. 
All rights reserved.","dlgTitle":"About CKEditor","help":"Check $1 for help.","moreInfo":"For licensing information please visit our web site:","title":"About CKEditor","userGuide":"CKEditor User's Guide"},"basicstyles":{"bold":"អក្សរដិតធំ","italic":"អក្សរផ្តេក","strike":"ដិតបន្ទាត់ពាក់កណ្តាលអក្សរ","subscript":"អក្សរតូចក្រោម","superscript":"អក្សរតូចលើ","underline":"ដិតបន្ទាត់ពីក្រោមអក្សរ"},"blockquote":{"toolbar":"Block Quote"},"clipboard":{"copy":"ចំលងយក","copyError":"ការកំណត់សុវត្ថភាពរបស់កម្មវិធីរុករករបស់លោកអ្នក នេះមិនអាចធ្វើកម្មវិធីតាក់តែងអត្ថបទ ចំលងអត្ថបទយកដោយស្វ័យប្រវត្តបានឡើយ ។ សូមប្រើប្រាស់បន្សំ ឃីដូចនេះ (Ctrl/Cmd+C)។","cut":"កាត់យក","cutError":"ការកំណត់សុវត្ថភាពរបស់កម្មវិធីរុករករបស់លោកអ្នក នេះមិនអាចធ្វើកម្មវិធីតាក់តែងអត្ថបទ កាត់អត្ថបទយកដោយស្វ័យប្រវត្តបានឡើយ ។ សូមប្រើប្រាស់បន្សំ ឃីដូចនេះ (Ctrl/Cmd+X) ។","paste":"ចំលងដាក់","pasteArea":"Paste Area","pasteMsg":"សូមចំលងអត្ថបទទៅដាក់ក្នុងប្រអប់ដូចខាងក្រោមដោយប្រើប្រាស់ ឃី (<STRONG>Ctrl/Cmd+V</STRONG>) ហើយចុច <STRONG>OK</STRONG> ។","securityMsg":"Because of your browser security settings, the editor is not able to access your clipboard data directly. You are required to paste it again in this window.","title":"ចំលងដាក់"},"contextmenu":{"options":"Context Menu Options"},"toolbar":{"toolbarCollapse":"Collapse Toolbar","toolbarExpand":"Expand Toolbar","toolbarGroups":{"document":"Document","clipboard":"Clipboard/Undo","editing":"Editing","forms":"Forms","basicstyles":"Basic Styles","paragraph":"Paragraph","links":"Links","insert":"Insert","styles":"Styles","colors":"Colors","tools":"Tools"},"toolbars":"Editor toolbars"},"elementspath":{"eleLabel":"Elements path","eleTitle":"%1 element"},"list":{"bulletedlist":"បញ្ជីជារង្វង់មូល","numberedlist":"បញ្ជីជាអក្សរ"},"indent":{"indent":"បន្ថែមការចូលបន្ទាត់","outdent":"បន្ថយការចូលបន្ទាត់"},"format":{"label":"រចនា","panelTitle":"រចនា","tag_address":"Address","tag_div":"Normal (DIV)","tag_h1":"Heading 1","tag_h2":"Heading 2","tag_h3":"Heading 3","tag_h4":"Heading 4","tag_h5":"Heading 5","tag_h6":"Heading 6","tag_p":"Normal","tag_pre":"Formatted"},"horizontalrule":{"toolbar":"បន្ថែមបន្ទាត់ផ្តេក"},"image":{"alertUrl":"សូមសរសេរងាស័យដ្ឋានរបស់រូបភាព","alt":"អត្ថបទជំនួស","border":"ស៊ុម","btnUpload":"បញ្ជូនទៅកាន់ម៉ាស៊ីនផ្តល់សេវា","button2Img":"Do you want to transform the selected image button on a simple image?","hSpace":"គំលាតទទឹង","img2Button":"Do you want to transform the selected image on a image button?","infoTab":"ពត៌មានអំពីរូបភាព","linkTab":"ឈ្នាប់","lockRatio":"អត្រាឡុក","menu":"ការកំណត់រូបភាព","resetSize":"កំណត់ទំហំឡើងវិញ","title":"ការកំណត់រូបភាព","titleButton":"ការកំណត់ប៉ូតុនរូបភាព","upload":"ទាញយក","urlMissing":"Image source URL is missing.","vSpace":"គំលាតបណ្តោយ","validateBorder":"Border must be a whole number.","validateHSpace":"HSpace must be a whole number.","validateVSpace":"VSpace must be a whole number."},"fakeobjects":{"anchor":"Anchor","flash":"Flash Animation","hiddenfield":"Hidden Field","iframe":"IFrame","unknown":"Unknown Object"},"link":{"acccessKey":"ឃី សំរាប់ចូល","advanced":"កំរិតខ្ពស់","advisoryContentType":"ប្រភេទអត្ថបទ ប្រឹក្សា","advisoryTitle":"ចំណងជើង ប្រឹក្សា","anchor":{"toolbar":"បន្ថែម/កែប្រែ យុថ្កា","menu":"ការកំណត់យុថ្កា","title":"ការកំណត់យុថ្កា","name":"ឈ្មោះយុទ្ធថ្កា","errorName":"សូមសរសេរ ឈ្មោះយុទ្ធថ្កា","remove":"Remove Anchor"},"anchorId":"តាម Id","anchorName":"តាមឈ្មោះរបស់យុថ្កា","charset":"លេខកូតអក្សររបស់ឈ្នាប់","cssClasses":"Stylesheet 
Classes","emailAddress":"អ៊ីមែល","emailBody":"អត្ថបទ","emailSubject":"ចំណងជើងអត្ថបទ","id":"Id","info":"ពត៌មានអំពីឈ្នាប់","langCode":"ទិសដៅភាសា","langDir":"ទិសដៅភាសា","langDirLTR":"ពីឆ្វេងទៅស្តាំ(LTR)","langDirRTL":"ពីស្តាំទៅឆ្វេង(RTL)","menu":"កែប្រែឈ្នាប់","name":"ឈ្មោះ","noAnchors":"(No anchors available in the document)","noEmail":"សូមសរសេរ អាស័យដ្ឋាន អ៊ីមែល","noUrl":"សូមសរសេរ អាស័យដ្ឋាន URL","other":"<other>","popupDependent":"អាស្រ័យលើ (Netscape)","popupFeatures":"លក្ខណះរបស់វីនដូលលោត","popupFullScreen":"អេក្រុងពេញ(IE)","popupLeft":"ទីតាំងខាងឆ្វេង","popupLocationBar":"របា ទីតាំង","popupMenuBar":"របា មឺនុយ","popupResizable":"Resizable","popupScrollBars":"របា ទាញ","popupStatusBar":"របា ពត៌មាន","popupToolbar":"របា ឩបករណ៍","popupTop":"ទីតាំងខាងលើ","rel":"Relationship","selectAnchor":"ជ្រើសរើសយុថ្កា","styles":"ម៉ូត","tabIndex":"លេខ Tab","target":"គោលដៅ","targetFrame":"<ហ្វ្រេម>","targetFrameName":"ឈ្មោះហ្រ្វេមដែលជាគោលដៅ","targetPopup":"<វីនដូវ លោត>","targetPopupName":"ឈ្មោះវីនដូវលោត","title":"ឈ្នាប់","toAnchor":"យុថ្កានៅក្នុងទំព័រនេះ","toEmail":"អ៊ីមែល","toUrl":"URL","toolbar":"បន្ថែម/កែប្រែ ឈ្នាប់","type":"ប្រភេទឈ្នាប់","unlink":"លប់ឈ្នាប់","upload":"ទាញយក"},"magicline":{"title":"Insert paragraph here"},"maximize":{"maximize":"Maximize","minimize":"Minimize"},"pastetext":{"button":"ចំលងដាក់អត្ថបទធម្មតា","title":"ចំលងដាក់អត្ថបទធម្មតា"},"pastefromword":{"confirmCleanup":"The text you want to paste seems to be copied from Word. Do you want to clean it before pasting?","error":"It was not possible to clean up the pasted data due to an internal error","title":"ចំលងដាក់ពី Word","toolbar":"ចំលងដាក់ពី Word"},"removeformat":{"toolbar":"លប់ចោល ការរចនា"},"sourcearea":{"toolbar":"កូត"},"specialchar":{"options":"Special Character Options","title":"តូអក្សរពិសេស","toolbar":"បន្ថែមអក្សរពិសេស"},"scayt":{"about":"About SCAYT","aboutTab":"About","addWord":"Add Word","allCaps":"Ignore All-Caps Words","dic_create":"Create","dic_delete":"Delete","dic_field_name":"Dictionary name","dic_info":"Initially the User Dictionary is stored in a Cookie. However, Cookies are limited in size. When the User Dictionary grows to a point where it cannot be stored in a Cookie, then the dictionary may be stored on our server. To store your personal dictionary on our server you should specify a name for your dictionary. 
If you already have a stored dictionary, please type its name and click the Restore button.","dic_rename":"Rename","dic_restore":"Restore","dictionariesTab":"Dictionaries","disable":"Disable SCAYT","emptyDic":"Dictionary name should not be empty.","enable":"Enable SCAYT","ignore":"Ignore","ignoreAll":"Ignore All","ignoreDomainNames":"Ignore Domain Names","langs":"Languages","languagesTab":"Languages","mixedCase":"Ignore Words with Mixed Case","mixedWithDigits":"Ignore Words with Numbers","moreSuggestions":"More suggestions","opera_title":"Not supported by Opera","options":"Options","optionsTab":"Options","title":"Spell Check As You Type","toggle":"Toggle SCAYT","noSuggestions":"No suggestion"},"stylescombo":{"label":"ម៉ូត","panelTitle":"Formatting Styles","panelTitle1":"Block Styles","panelTitle2":"Inline Styles","panelTitle3":"Object Styles"},"table":{"border":"ទំហំស៊ុម","caption":"ចំណងជើង","cell":{"menu":"Cell","insertBefore":"Insert Cell Before","insertAfter":"Insert Cell After","deleteCell":"លប់សែល","merge":"បញ្ជូលសែល","mergeRight":"Merge Right","mergeDown":"Merge Down","splitHorizontal":"Split Cell Horizontally","splitVertical":"Split Cell Vertically","title":"Cell Properties","cellType":"Cell Type","rowSpan":"Rows Span","colSpan":"Columns Span","wordWrap":"Word Wrap","hAlign":"Horizontal Alignment","vAlign":"Vertical Alignment","alignBaseline":"Baseline","bgColor":"Background Color","borderColor":"Border Color","data":"Data","header":"Header","yes":"Yes","no":"No","invalidWidth":"Cell width must be a number.","invalidHeight":"Cell height must be a number.","invalidRowSpan":"Rows span must be a whole number.","invalidColSpan":"Columns span must be a whole number.","chooseColor":"Choose"},"cellPad":"គែមសែល","cellSpace":"គំលាតសែល","column":{"menu":"Column","insertBefore":"Insert Column Before","insertAfter":"Insert Column After","deleteColumn":"លប់ជួរឈរ"},"columns":"ជួរឈរ","deleteTable":"លប់តារាង","headers":"Headers","headersBoth":"Both","headersColumn":"First column","headersNone":"None","headersRow":"First Row","invalidBorder":"Border size must be a number.","invalidCellPadding":"Cell padding must be a positive number.","invalidCellSpacing":"Cell spacing must be a positive number.","invalidCols":"Number of columns must be a number greater than 0.","invalidHeight":"Table height must be a number.","invalidRows":"Number of rows must be a number greater than 0.","invalidWidth":"Table width must be a number.","menu":"ការកំណត់ តារាង","row":{"menu":"Row","insertBefore":"Insert Row Before","insertAfter":"Insert Row After","deleteRow":"លប់ជួរផ្តេក"},"rows":"ជួរផ្តេក","summary":"សេចក្តីសង្ខេប","title":"ការកំណត់ តារាង","toolbar":"តារាង","widthPc":"ភាគរយ","widthPx":"ភីកសែល","widthUnit":"width unit"},"undo":{"redo":"ធ្វើឡើងវិញ","undo":"សារឡើងវិញ"},"wsc":{"btnIgnore":"មិនផ្លាស់ប្តូរ","btnIgnoreAll":"មិនផ្លាស់ប្តូរ ទាំងអស់","btnReplace":"ជំនួស","btnReplaceAll":"ជំនួសទាំងអស់","btnUndo":"សារឡើងវិញ","changeTo":"ផ្លាស់ប្តូរទៅ","errorLoading":"Error loading application service host: %s.","ieSpellDownload":"ពុំមានកម្មវិធីពិនិត្យអក្ខរាវិរុទ្ធ ។ តើចង់ទាញយកពីណា?","manyChanges":"ការពិនិត្យអក្ខរាវិរុទ្ធបានចប់: %1 ពាក្យបានផ្លាស់ប្តូរ","noChanges":"ការពិនិត្យអក្ខរាវិរុទ្ធបានចប់: ពុំមានផ្លាស់ប្តូរ","noMispell":"ការពិនិត្យអក្ខរាវិរុទ្ធបានចប់: គ្មានកំហុស","noSuggestions":"- គ្មានសំណើរ -","notAvailable":"Sorry, but service is unavailable now.","notInDic":"គ្មានក្នុងវចនានុក្រម","oneChange":"ការពិនិត្យអក្ខរាវិរុទ្ធបានចប់: ពាក្យមួយត្រូចបានផ្លាស់ប្តូរ","progress":"កំពុងពិនិត្យអក្ខរាវិរុទ្ធ...","title":"Spell 
Check","toolbar":"ពិនិត្យអក្ខរាវិរុទ្ធ"}}; | PypiClean |
/django-tequila-4.0.0.tar.gz/django-tequila-4.0.0/sample_app/python3-8-django-2/django_tequila_app/settings.py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+jnvf=z69!l@j(^*^h*+fdlt_61!1v446znccb)9%@tjiw1@x-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
'django_extensions',
'sslserver',
'django_tequila',
'django_tequila_app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django_tequila.middleware.TequilaMiddleware',
]
ROOT_URLCONF = 'django_tequila_app.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_tequila_app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'django_tequila_app', 'database.db'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# Django-tequila specifics
AUTH_USER_MODEL = 'django_tequila_app.User'
AUTHENTICATION_BACKENDS = ('django_tequila.django_backend.TequilaBackend',)
TEQUILA_SERVICE_NAME = "django_tequila_service"
TEQUILA_SERVER_URL = "https://tequila.epfl.ch"
TEQUILA_NEW_USER_INACTIVE = False
TEQUILA_CLEAN_URL = True
TEQUILA_STRONG_AUTHENTICATION = False
TEQUILA_ALLOWED_REQUEST_HOSTS = None
TEQUILA_ALLOW_GUESTS = False
TEQUILA_CUSTOM_USERNAME_ATTRIBUTE = 'uniqueid'
LOGIN_URL = "/login"
LOGIN_REDIRECT_URL = "/"
LOGOUT_URL = "/"
LOGIN_REDIRECT_IF_NOT_ALLOWED = "/not_allowed"
LOGIN_REDIRECT_TEXT_IF_NOT_ALLOWED = "Not allowed : please contact your admin"
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
},
'loggers': {
'django_tequila': {
'handlers': ['console'],
'level': 'DEBUG',
},
},
}
REMOTE_SELENIUM_SERVER = 'http://selenium:4444/wd/hub' | PypiClean |
/bci-essentials-0.0.6.tar.gz/bci-essentials-0.0.6/bci_essentials/classification/mi_classifier.py | import os
import sys
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from pyriemann.preprocessing import Whitening
from pyriemann.estimation import Covariances
from pyriemann.classification import MDM, TSclassifier
from pyriemann.channelselection import FlatChannelRemover, ElectrodeSelection
# Custom libraries
# - Append higher directory to import bci_essentials
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),os.pardir))
from classification.generic_classifier import Generic_classifier
from bci_essentials.visuals import *
from bci_essentials.signal_processing import *
from bci_essentials.channel_selection import *
class MI_classifier(Generic_classifier):
def set_mi_classifier_settings(self, n_splits=5, type="TS", remove_flats=False, whitening=False, covariance_estimator="scm", artifact_rejection="none", channel_selection="none", pred_threshold=0.5, random_seed = 42, n_jobs=1):
# Build the cross-validation split
self.n_splits = n_splits
self.cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=random_seed)
self.covariance_estimator = covariance_estimator
# Shrinkage LDA
if type == "sLDA":
slda = LinearDiscriminantAnalysis(solver='eigen',shrinkage='auto')
self.clf_model = Pipeline([("Shrinkage LDA", slda)])
self.clf = Pipeline([("Shrinkage LDA", slda)])
# Random Forest
elif type == "RandomForest":
rf = RandomForestClassifier()
self.clf_model = Pipeline([("Random Forest", rf)])
self.clf = Pipeline([("Random Forest", rf)])
# Tangent Space Logistic Regression
elif type == "TS":
ts = TSclassifier()
self.clf_model = Pipeline([("Tangent Space", ts)])
self.clf = Pipeline([("Tangent Space", ts)])
# Minimum Distance to Mean
elif type == "MDM":
mdm = MDM(metric=dict(mean='riemann', distance='riemann'), n_jobs = n_jobs)
self.clf_model = Pipeline([("MDM", mdm)])
self.clf = Pipeline([("MDM", mdm)])
# CSP + Logistic Regression (REQUIRES MNE CSP)
# elif type == "CSP-LR":
# lr = LogisticRegression()
# self.clf_model = Pipeline([('CSP', csp), ('LogisticRegression', lr)])
# self.clf = Pipeline([('CSP', csp), ('LogisticRegression', lr)])
else:
print("Classifier type not defined")
if artifact_rejection == "potato":
print("Potato not implemented")
# self.clf_model.steps.insert(0, ["Riemannian Potato", Potato()])
# self.clf.steps.insert(0, ["Riemannian Potato", Potato()])
if whitening == True:
self.clf_model.steps.insert(0, ["Whitening", Whitening()])
self.clf.steps.insert(0, ["Whitening", Whitening()])
if channel_selection == "riemann":
rcs = ElectrodeSelection()
self.clf_model.steps.insert(0, ["Channel Selection", rcs])
self.clf.steps.insert(0, ["Channel Selection", rcs])
if remove_flats:
rf = FlatChannelRemover()
self.clf_model.steps.insert(0, ["Remove Flat Channels", rf])
self.clf.steps.insert(0, ["Remove Flat Channels", rf])
# Threshold
self.pred_threshold = pred_threshold
# Rebuild from scratch with each training
self.rebuild = True
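    # Illustrative call (not part of the original module); `clf` stands for an
    # already-constructed MI_classifier instance and the chosen values are examples:
    #
    #   clf.set_mi_classifier_settings(n_splits=5, type="TS",
    #                                  remove_flats=True,
    #                                  channel_selection="riemann")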
def fit(self, print_fit=True, print_performance=True):
# get dimensions
nwindows, nchannels, nsamples = self.X.shape
# do the rest of the training if train_free is false
self.X = np.array(self.X)
# Try rebuilding the classifier each time
if self.rebuild == True:
self.next_fit_window = 0
self.clf = self.clf_model
# get temporal subset
subX = self.X[self.next_fit_window:,:,:]
suby = self.y[self.next_fit_window:]
self.next_fit_window = nwindows
# Init predictions to all false
preds = np.zeros(nwindows)
def mi_kernel(subX, suby):
for train_idx, test_idx in self.cv.split(subX,suby):
self.clf = self.clf_model
X_train, X_test = subX[train_idx], subX[test_idx]
y_train, y_test = suby[train_idx], suby[test_idx]
# get the covariance matrices for the training set
X_train_cov = Covariances(estimator=self.covariance_estimator).transform(X_train)
X_test_cov = Covariances(estimator=self.covariance_estimator).transform(X_test)
                # fit the classifier
self.clf.fit(X_train_cov, y_train)
preds[test_idx] = self.clf.predict(X_test_cov)
accuracy = sum(preds == self.y)/len(preds)
precision = precision_score(self.y,preds, average = 'micro')
recall = recall_score(self.y, preds, average = 'micro')
model = self.clf
return model, preds, accuracy, precision, recall
# Check if channel selection is true
if self.channel_selection_setup:
print("Doing channel selection")
updated_subset, updated_model, preds, accuracy, precision, recall = channel_selection_by_method(mi_kernel, self.X, self.y, self.channel_labels, # kernel setup
self.chs_method, self.chs_metric, self.chs_initial_subset, # wrapper setup
self.chs_max_time, self.chs_min_channels, self.chs_max_channels, self.chs_performance_delta, # stopping criterion
self.chs_n_jobs, self.chs_output)
# channel_selection_by_method(mi_kernel, subX, suby, self.channel_labels, method=self.chs_method, max_time=self.chs_max_time, metric="accuracy", n_jobs=-1)
print("The optimal subset is ", updated_subset)
self.subset = updated_subset
self.clf = updated_model
else:
print("Not doing channel selection")
self.clf, preds, accuracy, precision, recall = mi_kernel(subX, suby)
# Print performance stats
self.offline_window_count = nwindows
self.offline_window_counts.append(self.offline_window_count)
# accuracy
accuracy = sum(preds == self.y)/len(preds)
self.offline_accuracy.append(accuracy)
if print_performance:
print("accuracy = {}".format(accuracy))
# precision
precision = precision_score(self.y, preds, average = 'micro')
self.offline_precision.append(precision)
if print_performance:
print("precision = {}".format(precision))
# recall
recall = recall_score(self.y, preds, average = 'micro')
self.offline_recall.append(recall)
if print_performance:
print("recall = {}".format(recall))
# confusion matrix in command line
cm = confusion_matrix(self.y, preds)
self.offline_cm = cm
if print_performance:
print("confusion matrix")
print(cm)
def predict(self, X, print_predict=True):
# if X is 2D, make it 3D with one as first dimension
if len(X.shape) < 3:
X = X[np.newaxis, ...]
X = self.get_subset(X)
# Troubleshooting
#X = self.X[-6:,:,:]
if print_predict:
print("the shape of X is", X.shape)
X_cov = Covariances(estimator=self.covariance_estimator).transform(X)
#X_cov = X_cov[0,:,:]
pred = self.clf.predict(X_cov)
pred_proba = self.clf.predict_proba(X_cov)
if print_predict:
print(pred)
print(pred_proba)
for i in range(len(pred)):
self.predictions.append(pred[i])
self.pred_probas.append(pred_proba[i])
        # add a threshold
#pred = (pred_proba[:] >= self.pred_threshold).astype(int) # set threshold as 0.3
#print(pred.shape)
# print(pred)
# for p in pred:
# p = int(p)
# print(p)
# print(pred)
# pred = str(pred).replace(".", ",")
return pred | PypiClean |
/python-rpm-spec-0.14.1.tar.gz/python-rpm-spec-0.14.1/pyrpm/spec.py | import os
import re
import sys
from warnings import warn
from abc import ABCMeta, abstractmethod
from typing import Any, AnyStr, Dict, List, Optional, Union, Tuple, Type, cast
if sys.version_info < (3, 7):
re.Pattern = Any
re.Match = Any
__all__ = ["Spec", "replace_macros", "Package", "warnings_enabled"]
# Set this to True if you want the library to issue warnings during parsing.
warnings_enabled: bool = False
class _Tag(metaclass=ABCMeta):
def __init__(self, name: str, pattern_obj: re.Pattern, attr_type: Type[Any]) -> None:
self.name = name
self.pattern_obj = pattern_obj
self.attr_type = attr_type
def test(self, line: str) -> Optional[re.Match]:
return re.search(self.pattern_obj, line)
def update(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Any:
"""Update given spec object and parse context and return them again.
:param spec_obj: An instance of Spec class
:param context: The parse context
:param match_obj: The re.match object
:param line: The original line
:return: Given updated Spec instance and parse context dictionary.
"""
assert spec_obj
assert context
assert match_obj
assert line
return self.update_impl(spec_obj, context, match_obj, line)
@abstractmethod
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
pass
@staticmethod
def current_target(spec_obj: "Spec", context: Dict[str, Any]) -> Union["Spec", "Package"]:
target_obj = spec_obj
if context["current_subpackage"] is not None:
target_obj = context["current_subpackage"]
return target_obj
class _NameValue(_Tag):
"""Parse a simple name → value tag."""
def __init__(self, name: str, pattern_obj: re.Pattern, attr_type: Optional[Type[Any]] = None) -> None:
super().__init__(name, pattern_obj, cast(Type[Any], attr_type if attr_type else str))
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
if self.name == "changelog":
context["current_subpackage"] = None
target_obj = _Tag.current_target(spec_obj, context)
value = match_obj.group(1)
# Sub-packages
if self.name == "name":
spec_obj.packages = []
spec_obj.packages.append(Package(value))
if self.name in ["description", "changelog"]:
context["multiline"] = self.name
else:
setattr(target_obj, self.name, self.attr_type(value))
return spec_obj, context
class _SetterMacroDef(_Tag):
"""Parse global macro definitions."""
def __init__(self, name: str, pattern_obj: re.Pattern) -> None:
super().__init__(name, pattern_obj, str)
@abstractmethod
def get_namespace(self, spec_obj: "Spec", context: Dict[str, Any]) -> "Spec":
raise NotImplementedError()
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
name, value = match_obj.groups()
setattr(self.get_namespace(spec_obj, context), name, str(value))
return spec_obj, context
class _GlobalMacroDef(_SetterMacroDef):
"""Parse global macro definitions."""
def get_namespace(self, spec_obj: "Spec", context: Dict[str, Any]) -> "Spec":
return spec_obj
class _LocalMacroDef(_SetterMacroDef):
"""Parse define macro definitions."""
def get_namespace(self, spec_obj: "Spec", context: Dict[str, Any]) -> "Spec":
return context["current_subpackage"]
class _MacroDef(_Tag):
"""Parse global macro definitions."""
def __init__(self, name: str, pattern_obj: re.Pattern) -> None:
super().__init__(name, pattern_obj, str)
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
name, value = match_obj.groups()
spec_obj.macros[name] = str(value)
if name not in _tag_names:
# Also make available as attribute of spec object
setattr(spec_obj, name, str(value))
return spec_obj, context
class _List(_Tag):
"""Parse a tag that expands to a list."""
def __init__(self, name: str, pattern_obj: re.Pattern) -> None:
super().__init__(name, pattern_obj, list)
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
target_obj = _Tag.current_target(spec_obj, context)
if not hasattr(target_obj, self.name):
setattr(target_obj, self.name, [])
value = match_obj.group(1)
if self.name == "packages":
if value == "-n":
subpackage_name = re.split(r"\s+", line)[-1].rstrip()
else:
subpackage_name = f"{spec_obj.name}-{value}"
package = Package(subpackage_name)
context["current_subpackage"] = package
package.is_subpackage = True
spec_obj.packages.append(package)
elif self.name in [
"build_requires",
"requires",
"conflicts",
"obsoletes",
"provides",
]:
# Remove comments on same line
value = value.split("#", 2)[0].rstrip()
# Macros are valid in requirements
value = replace_macros(value, spec=spec_obj)
# It's also legal to do:
# Requires: a b c
# Requires: b >= 3.1
# Requires: a, b >= 3.1, c
# 1. Tokenize
tokens = [val for val in re.split("[\t\n, ]", value) if val != ""]
values: List[str] = []
# 2. Join
add = False
for val in tokens:
if add:
add = False
val = values.pop() + " " + val
elif val in [">=", "!=", ">", "<", "<=", "==", "="]:
add = True # Add next value to this one
val = values.pop() + " " + val
values.append(val)
for val in values:
requirement = Requirement(val)
getattr(target_obj, self.name).append(requirement)
else:
getattr(target_obj, self.name).append(value)
return spec_obj, context
class _ListAndDict(_Tag):
"""Parse a tag that expands to a list and to a dict."""
def __init__(self, name: str, pattern_obj: re.Pattern) -> None:
super().__init__(name, pattern_obj, list)
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
source_name, value = match_obj.groups()
dictionary = getattr(spec_obj, f"{self.name}_dict")
dictionary[source_name] = value
target_obj = _Tag.current_target(spec_obj, context)
# If we are in a subpackage, add sources and patches to the subpackage dicts as well
if hasattr(target_obj, "is_subpackage") and target_obj.is_subpackage:
dictionary = getattr(target_obj, f"{self.name}_dict")
dictionary[source_name] = value
getattr(target_obj, self.name).append(value)
getattr(spec_obj, self.name).append(value)
return spec_obj, context
class _SplitValue(_NameValue):
"""Parse a (name->value) tag, and at the same time split the tag to a list."""
def __init__(self, name: str, pattern_obj: re.Pattern, sep: Optional[None] = None) -> None:
super().__init__(name, pattern_obj)
self.name_list = f"{name}_list"
self.sep = sep
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], match_obj: re.Match, line: str) -> Tuple["Spec", dict]:
super().update_impl(spec_obj, context, match_obj, line)
target_obj = _Tag.current_target(spec_obj, context)
value: str = getattr(target_obj, self.name)
values = value.split(self.sep)
setattr(target_obj, self.name_list, values)
return spec_obj, context
def re_tag_compile(tag: AnyStr) -> re.Pattern:
return re.compile(tag, re.IGNORECASE)
class _DummyMacroDef(_Tag):
"""Parse global macro definitions."""
def __init__(self, name: str, pattern_obj: re.Pattern) -> None:
super().__init__(name, pattern_obj, str)
def update_impl(self, spec_obj: "Spec", context: Dict[str, Any], _: re.Match, line: str) -> Tuple["Spec", dict]:
context["line_processor"] = None
if warnings_enabled:
warn("Unknown macro: " + line)
return spec_obj, context
_tags = [
_NameValue("name", re_tag_compile(r"^Name\s*:\s*(\S+)")),
_NameValue("version", re_tag_compile(r"^Version\s*:\s*(\S+)")),
_NameValue("epoch", re_tag_compile(r"^Epoch\s*:\s*(\S+)")),
_NameValue("release", re_tag_compile(r"^Release\s*:\s*(\S+)")),
_NameValue("summary", re_tag_compile(r"^Summary\s*:\s*(.+)")),
_NameValue("description", re_tag_compile(r"^%description\s*(\S*)")),
_NameValue("changelog", re_tag_compile(r"^%changelog\s*(\S*)")),
_NameValue("license", re_tag_compile(r"^License\s*:\s*(.+)")),
_NameValue("group", re_tag_compile(r"^Group\s*:\s*(.+)")),
_NameValue("url", re_tag_compile(r"^URL\s*:\s*(\S+)")),
_NameValue("buildroot", re_tag_compile(r"^BuildRoot\s*:\s*(\S+)")),
_SplitValue("buildarch", re_tag_compile(r"^BuildArch\s*:\s*(\S+)")),
_SplitValue("excludearch", re_tag_compile(r"^ExcludeArch\s*:\s*(.+)")),
_SplitValue("exclusivearch", re_tag_compile(r"^ExclusiveArch\s*:\s*(.+)")),
_ListAndDict("sources", re_tag_compile(r"^(Source\d*\s*):\s*(.+)")),
_ListAndDict("patches", re_tag_compile(r"^(Patch\d*\s*):\s*(\S+)")),
_List("build_requires", re_tag_compile(r"^BuildRequires\s*:\s*(.+)")),
_List("requires", re_tag_compile(r"^Requires\s*:\s*(.+)")),
_List("conflicts", re_tag_compile(r"^Conflicts\s*:\s*(.+)")),
_List("obsoletes", re_tag_compile(r"^Obsoletes\s*:\s*(.+)")),
_List("provides", re_tag_compile(r"^Provides\s*:\s*(.+)")),
_List("packages", re_tag_compile(r"^%package\s+(\S+)")),
_MacroDef("define", re_tag_compile(r"^%define\s+(\S+)\s+(\S+)")),
_MacroDef("global", re_tag_compile(r"^%global\s+(\S+)\s+(\S+)")),
_DummyMacroDef("dummy", re_tag_compile(r"^%[a-z_]+\b.*$")),
]
_tag_names = [tag.name for tag in _tags]
_macro_pattern = re.compile(r"%{(\S+?)\}|%(\w+?)\b")
def _parse(spec_obj: "Spec", context: Dict[str, Any], line: str) -> Any:
for tag in _tags:
match = tag.test(line)
if match:
if "multiline" in context:
context.pop("multiline", None)
return tag.update(spec_obj, context, match, line)
if "multiline" in context:
target_obj = _Tag.current_target(spec_obj, context)
previous_txt = getattr(target_obj, context["multiline"], "")
if previous_txt is None:
previous_txt = ""
setattr(target_obj, context["multiline"], str(previous_txt) + line + os.linesep)
return spec_obj, context
class Requirement:
"""Represents a single requirement or build requirement in an RPM spec file.
Each spec file contains one or more requirements or build requirements.
For example, consider following spec file::
Name: foo
Version: 0.1
%description
%{name} is the library that everyone needs.
%package devel
Summary: Header files, libraries and development documentation for %{name}
Group: Development/Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
BuildRequires: gstreamer%{?_isa} >= 0.1.0
%description devel
This package contains the header files, static libraries, and development
documentation for %{name}. If you like to develop programs using %{name}, you
will need to install %{name}-devel.
This spec file's requirements have a name and either a required or minimum
version.
"""
expr = re.compile(r"(.*?)\s+([<>]=?|=)\s+(\S+)")
def __init__(self, name: str) -> None:
assert isinstance(name, str)
self.line = name
self.name: str
self.operator: Optional[str]
self.version: Optional[str]
match = Requirement.expr.match(name)
if match:
self.name = match.group(1)
self.operator = match.group(2)
self.version = match.group(3)
else:
self.name = name
self.operator = None
self.version = None
def __eq__(self, o: object) -> bool:
if isinstance(o, str):
return self.line == o
if isinstance(o, Requirement):
return self.name == o.name and self.operator == o.operator and self.version == o.version
return False
def __repr__(self) -> str:
return f"Requirement('{self.line}')"
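# Illustrative usage of Requirement (not part of the original module): parsing a
# requirement string with a version constraint into its components.
#
#   req = Requirement("foo >= 1.2")
#   (req.name, req.operator, req.version)   # -> ('foo', '>=', '1.2')
#   Requirement("bar") == "bar"              # -> True; plain names keep operator/version as None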
class Package:
"""Represents a single package in a RPM spec file.
Each spec file describes at least one package and can contain one or more subpackages (described
by the %package directive). For example, consider following spec file::
Name: foo
Version: 0.1
%description
%{name} is the library that everyone needs.
%package devel
Summary: Header files, libraries and development documentation for %{name}
Group: Development/Libraries
Requires: %{name}%{?_isa} = %{version}-%{release}
%description devel
This package contains the header files, static libraries, and development
documentation for %{name}. If you like to develop programs using %{name}, you
will need to install %{name}-devel.
%package -n bar
Summary: A command line client for foo.
License: GPLv2+
%description -n bar
This package contains a command line client for foo.
This spec file will create three packages:
* A package named foo, the base package.
* A package named foo-devel, a subpackage.
* A package named bar, also a subpackage, but without the foo- prefix.
As you can see above, the name of a subpackage normally includes the main package name. When the
-n option is added to the %package directive, the prefix of the base package name is omitted and
a completely new name is used.
"""
def __init__(self, name: str) -> None:
assert isinstance(name, str)
for tag in _tags:
if tag.attr_type is list and tag.name in [
"build_requires",
"requires",
"conflicts",
"obsoletes",
"provides",
"sources",
"patches",
]:
setattr(self, tag.name, tag.attr_type())
elif tag.name in [
"description",
]:
setattr(self, tag.name, None)
self.sources_dict: Dict[str, str] = {}
self.patches_dict: Dict[str, str] = {}
self.name = name
self.is_subpackage = False
def __repr__(self) -> str:
return f"Package('{self.name}')"
class Spec:
"""Represents a single spec file."""
def __init__(self) -> None:
for tag in _tags:
if tag.attr_type is list:
setattr(self, tag.name, tag.attr_type())
else:
setattr(self, tag.name, None)
self.sources_dict: Dict[str, str] = {}
self.patches_dict: Dict[str, str] = {}
self.macros: Dict[str, str] = {}
self.name: Optional[str]
self.packages: List[Package] = []
@property
def packages_dict(self) -> Dict[str, Package]:
"""All packages in this RPM spec as a dictionary.
You can access the individual packages by their package name, e.g.,
git_spec.packages_dict['git-doc']
"""
assert self.packages
return dict(zip([package.name for package in self.packages], self.packages))
@classmethod
def from_file(cls, filename: str) -> "Spec":
"""Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object.
"""
spec = cls()
with open(filename, "r", encoding="utf-8") as f:
parse_context = {"current_subpackage": None}
for line in f:
spec, parse_context = _parse(spec, parse_context, line.rstrip())
return spec
@classmethod
def from_string(cls, string: str) -> "Spec":
"""Creates a new Spec object from a given string.
:param string: The contents of a spec file.
:return: A new Spec object.
"""
spec = cls()
parse_context = {"current_subpackage": None}
for line in string.splitlines():
spec, parse_context = _parse(spec, parse_context, line)
return spec
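# Illustrative usage of Spec (not part of the original module): parsing a minimal
# spec from a string and reading the resulting tags; replace_macros is defined below.
#
#   spec = Spec.from_string("Name: foo\nVersion: 2.0\nLicense: MIT")
#   (spec.name, spec.version, spec.license)             # -> ('foo', '2.0', 'MIT')
#   replace_macros("%{name}-%{version}.tar.gz", spec)   # -> 'foo-2.0.tar.gz'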
def replace_macros(string: str, spec: Spec) -> str:
"""Replace all macros in given string with corresponding values.
For example, a string '%{name}-%{version}.tar.gz' will be transformed to 'foo-2.0.tar.gz'.
    :param string: A string containing macros that you want to be replaced.
    :param spec: A Spec object. Definitions in that spec file will be used to replace macros.
    :return: A string where all macros in the given input are substituted as well as possible.
"""
assert isinstance(spec, Spec)
def get_first_non_none_value(values: Tuple[Any, ...]) -> Any:
return next((v for v in values if v is not None), None)
def is_conditional_macro(macro: str) -> bool:
return macro.startswith(("?", "!"))
def is_optional_macro(macro: str) -> bool:
return macro.startswith("?")
def is_negation_macro(macro: str) -> bool:
return macro.startswith("!")
def get_replacement_string(match: re.Match) -> str:
# pylint: disable=too-many-return-statements
groups = match.groups()
macro_name: str = get_first_non_none_value(groups)
assert macro_name, "Expected a non None value"
if is_conditional_macro(macro_name) and spec:
parts = macro_name[1:].split(sep=":", maxsplit=1)
            assert parts, "Expected a ':' in macro name"
macro = parts[0]
if is_optional_macro(macro_name):
if hasattr(spec, macro) or macro in spec.macros:
if len(parts) == 2:
return parts[1]
if macro in spec.macros:
return spec.macros[macro]
if hasattr(spec, macro):
return getattr(spec, macro)
assert False, "Unreachable"
return ""
if is_negation_macro(macro_name):
if len(parts) == 2:
return parts[1]
return spec.macros.get(macro, getattr(spec, macro))
if spec:
value = spec.macros.get(macro_name, getattr(spec, macro_name, None))
if value:
return str(value)
return match.string[match.start() : match.end()]
# Recursively expand macros
# Note: If macros are not defined in the spec file, this won't try to
# expand them.
while True:
ret = re.sub(_macro_pattern, get_replacement_string, string)
if ret != string:
string = ret
continue
return ret | PypiClean |
/redomino.odttransforms-0.4.zip/redomino.odttransforms-0.4/README.rst | redomino.odttransforms
======================
It registers a new portal transform that lets you generate odt files from a given .odt template.
This product is very simple: it just interpolates odt variables with the values you pass when calling the transformer.
Therefore it performs only variable substitution; it is not a real odt templating processor.
No external binaries are needed; it depends on http://ooopy.sourceforge.net/.
Tested with ooopy version 1.8.10901.
This plugin is meant for developers; it can be used for generating odt files, writing a custom PloneFormGen adapter, etc.
Usage
-----
Example::
>>> from zope.component import getUtility
>>> from Products.PortalTransforms.interfaces import IPortalTransformsTool
>>> file_contents = open('your odt file with variables').read() # see redomino/odttransforms/tests/input.odt
>>> portal_transforms = getUtility(IPortalTransformsTool)
    >>> converter = portal_transforms.convertTo(target_mimetype='application/vnd.oasis.opendocument.text.transformed',
    ...                                          orig=file_contents,
    ...                                          mimetype='application/vnd.oasis.opendocument.text',
    ...                                          mapper=dict(plone_version='4.3.2-sunny-day-beta'),
    ...                                          )
>>> transformed_odt_contents = converter.getData()
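The transformed data can then be written out as a new odt file, for example (the output filename below is just illustrative)::
    >>> with open('transformed.odt', 'wb') as output_file:
    ...     output_file.write(transformed_odt_contents)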
Tests
-----
Test status:
.. image:: https://secure.travis-ci.org/redomino/redomino.odttransforms.png
:target: https://travis-ci.org/redomino/redomino.odttransforms
How to launch tests::
$ ./bin/test -m redomino.odttransforms
Authors
-------
* Davide Moro <[email protected]>
| PypiClean |
/nnisgf-0.4-py3-none-manylinux1_x86_64.whl/nnisgf-0.4.data/data/nni/node_modules/azure-storage/lib/common/services/storageserviceclient.js | //
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Module dependencies.
var request = require('../request-wrapper');
var url = require('url');
var qs = require('querystring');
var util = require('util');
var xml2js = require('xml2js');
var events = require('events');
var _ = require('underscore');
var guid = require('uuid');
var os = require('os');
var extend = require('extend');
var Parser = require('json-edm-parser');
var Md5Wrapper = require('../md5-wrapper');
var azureutil = require('../util/util');
var validate = require('../util/validate');
var SR = require('../util/sr');
var WebResource = require('../http/webresource');
var BufferStream = require('../streams/bufferstream.js');
var ServiceSettings = require('./servicesettings');
var StorageServiceSettings = require('./storageservicesettings');
var Constants = require('../util/constants');
var StorageUtilities = require('../util/storageutilities');
var ServicePropertiesResult = require('../models/servicepropertiesresult');
var TableUtilities = require('../../services/table/tableutilities');
var SharedKey = require('../signing/sharedkey');
var SharedAccessSignature = require('../signing/sharedaccesssignature');
var TokenSigner = require('../signing/tokensigner');
var HeaderConstants = Constants.HeaderConstants;
var QueryStringConstants = Constants.QueryStringConstants;
var HttpResponseCodes = Constants.HttpConstants.HttpResponseCodes;
var StorageServiceClientConstants = Constants.StorageServiceClientConstants;
var defaultRequestLocationMode = Constants.RequestLocationMode.PRIMARY_ONLY;
var RequestLocationMode = Constants.RequestLocationMode;
var Logger = require('../diagnostics/logger');
var errors = require('../errors/errors');
var ArgumentError = errors.ArgumentError;
var ArgumentNullError = errors.ArgumentNullError;
var TimeoutError = errors.TimeoutError;
var StorageError = errors.StorageError;
/**
* Creates a new StorageServiceClient object.
*
* @class
* The StorageServiceClient class is the base class of all the service classes.
* @constructor
* @param {string} storageAccount The storage account.
* @param {string} storageAccessKey The storage access key.
* @param {object} host The host for the service.
* @param {bool} usePathStyleUri Boolean value indicating whether to use path style uris.
* @param {string} sas The Shared Access Signature string.
* @param {TokenCredential} [token] The {@link TokenCredential} object, which can be created with an OAuth access token string.
*/
function StorageServiceClient(storageAccount, storageAccessKey, host, usePathStyleUri, sas, token) {
StorageServiceClient['super_'].call(this);
if(storageAccount && storageAccessKey) {
// account and key
this.storageAccount = storageAccount;
this.storageAccessKey = storageAccessKey;
this.storageCredentials = new SharedKey(this.storageAccount, this.storageAccessKey, usePathStyleUri);
} else if (sas) {
// sas
this.sasToken = sas;
this.storageCredentials = new SharedAccessSignature(sas);
} else if (token) {
// access token
this.token = token;
this.storageCredentials = new TokenSigner(token);
} else {
// anonymous
this.anonymous = true;
this.storageCredentials = {
signRequest: function(webResource, callback){
// no op, anonymous access
callback(null);
}
};
}
if(host){
this.setHost(host);
}
this.apiVersion = HeaderConstants.TARGET_STORAGE_VERSION;
this.usePathStyleUri = usePathStyleUri;
this._initDefaultFilter();
/**
* The logger of the service. To change the log level of the services, set the `[logger.level]{@link Logger#level}`.
* @name StorageServiceClient#logger
* @type Logger
* */
this.logger = new Logger(Logger.LogLevels.INFO);
this._setDefaultProxy();
this.xml2jsSettings = StorageServiceClient._getDefaultXml2jsSettings();
this.defaultLocationMode = StorageUtilities.LocationMode.PRIMARY_ONLY;
}
util.inherits(StorageServiceClient, events.EventEmitter);
/**
* Gets the default xml2js settings.
* @ignore
* @return {object} The default settings
*/
StorageServiceClient._getDefaultXml2jsSettings = function() {
var xml2jsSettings = _.clone(xml2js.defaults['0.2']);
// these determine what happens if the xml contains attributes
xml2jsSettings.attrkey = Constants.TableConstants.XML_METADATA_MARKER;
xml2jsSettings.charkey = Constants.TableConstants.XML_VALUE_MARKER;
// from xml2js guide: always put child nodes in an array if true; otherwise an array is created only if there is more than one.
xml2jsSettings.explicitArray = false;
return xml2jsSettings;
};
/**
* Sets a host for the service.
* @ignore
* @param {string} host The host for the service.
*/
StorageServiceClient.prototype.setHost = function (host) {
var parseHost = function(hostUri){
var parsedHost;
if(!azureutil.objectIsNull(hostUri)) {
if(hostUri.indexOf('http') === -1 && hostUri.indexOf('//') !== 0){
hostUri = '//' + hostUri;
}
parsedHost = url.parse(hostUri, false, true);
if(!parsedHost.protocol){
parsedHost.protocol = ServiceSettings.DEFAULT_PROTOCOL;
}
if (!parsedHost.port) {
if (parsedHost.protocol === Constants.HTTPS) {
parsedHost.port = Constants.DEFAULT_HTTPS_PORT;
} else {
parsedHost.port = Constants.DEFAULT_HTTP_PORT;
}
}
parsedHost = url.format({
protocol: parsedHost.protocol,
port: parsedHost.port,
hostname: parsedHost.hostname,
pathname: parsedHost.pathname
});
}
return parsedHost;
};
validate.isValidHost(host);
this.host = {
primaryHost: parseHost(host.primaryHost),
secondaryHost: parseHost(host.secondaryHost)
};
};
/**
* Performs a REST service request through HTTP with the request body given as a raw string.
* @ignore
*
* @param {WebResource} webResource The webresource on which to perform the request.
* @param {string} outputData The outgoing request data as a raw string.
* @param {object} [options] The request options.
* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {function} callback The response callback function.
*/
StorageServiceClient.prototype.performRequest = function (webResource, outputData, options, callback) {
this._performRequest(webResource, { outputData: outputData }, options, callback);
};
/**
* Performs a REST service request through HTTP with the request body given as a stream.
* @ignore
*
* @param {WebResource} webResource The webresource on which to perform the request.
* @param {Stream} outputStream The outgoing request data as a stream.
* @param {object} [options] The request options.
* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {function} callback The response callback function.
*/
StorageServiceClient.prototype.performRequestOutputStream = function (webResource, outputStream, options, callback) {
this._performRequest(webResource, { outputStream: outputStream }, options, callback);
};
/**
* Performs a REST service request through HTTP expecting an input stream.
* @ignore
*
* @param {WebResource} webResource The webresource on which to perform the request.
* @param {string} outputData The outgoing request data as a raw string.
* @param {Stream} inputStream The ingoing response data as a stream.
* @param {object} [options] The request options.
* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {function} callback The response callback function.
*/
StorageServiceClient.prototype.performRequestInputStream = function (webResource, outputData, inputStream, options, callback) {
this._performRequest(webResource, { outputData: outputData, inputStream: inputStream }, options, callback);
};
/**
* Performs a REST service request through HTTP.
* @ignore
*
* @param {WebResource} webResource The webresource on which to perform the request.
* @param {object} body The request body.
* @param {string} [body.outputData] The outgoing request data as a raw string.
* @param {Stream} [body.outputStream] The outgoing request data as a stream.
* @param {Stream} [body.inputStream] The ingoing response data as a stream.
* @param {object} [options] The request options.
* @param {string} [options.clientRequestId] A string that represents the client request ID with a 1KB character limit.
* @param {int} [options.timeoutIntervalInMs] The timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {function} callback The response callback function.
*/
StorageServiceClient.prototype._performRequest = function (webResource, body, options, callback) {
var self = this;
// Sets a requestId on the webResource
if(!options.clientRequestId) {
options.clientRequestId = guid.v1();
}
webResource.withHeader(HeaderConstants.CLIENT_REQUEST_ID, options.clientRequestId);
// Sets the user-agent string if the process is not started by the browser
if(!process.browser) {
var userAgentComment = util.format('(NODE-VERSION %s; %s %s)', process.version, os.type(), os.release());
webResource.withHeader(HeaderConstants.USER_AGENT, Constants.USER_AGENT_PRODUCT_NAME + '/' + Constants.USER_AGENT_PRODUCT_VERSION + ' ' + userAgentComment);
}
// Initialize the location that the request is going to be sent to.
if(azureutil.objectIsNull(options.locationMode)) {
options.locationMode = this.defaultLocationMode;
}
// Initialize the location that the request can be sent to.
if(azureutil.objectIsNull(options.requestLocationMode)) {
options.requestLocationMode = defaultRequestLocationMode;
}
// Initialize whether nagling is used or not.
if(azureutil.objectIsNull(options.useNagleAlgorithm)) {
options.useNagleAlgorithm = this.useNagleAlgorithm;
}
this._initializeLocation(options);
// Initialize the operationExpiryTime
this._setOperationExpiryTime(options);
// If the output stream already got sent to server and got error back,
// we should NOT retry within the SDK as the stream data is not valid anymore if we retry directly.
// And it's very hard for SDK to re-wind the stream.
//
// If users want to retry on this kind of error, they can implement their own logic to parse the response and
// determine if they need to re-prepare a stream and call our SDK API to retry.
//
// Currently for blobs/files with size greater than 32MB (DEFAULT_SINGLE_BLOB_PUT_THRESHOLD_IN_BYTES),
  // we'll send the stream as chunked buffers, which doesn't have this issue.
var outputStreamSent = false;
var operation = function (options, next) {
self._validateLocation(options);
var currentLocation = options.currentLocation;
self._buildRequestOptions(webResource, body, options, function (err, finalRequestOptions) {
if (err) {
callback({ error: err, response: null }, function (finalRequestOptions, finalCallback) {
finalCallback(finalRequestOptions);
});
} else {
self.logger.log(Logger.LogLevels.DEBUG, 'FINAL REQUEST OPTIONS:\n' + util.inspect(finalRequestOptions));
if(self._maximumExecutionTimeExceeded(Date.now(), options.operationExpiryTime)) {
callback({ error: new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION), response: null }, function (finalRequestOptions, finalCallback) {
finalCallback(finalRequestOptions);
});
} else {
var processResponseCallback = function (error, response) {
var responseObject;
if (error) {
responseObject = { error: error, response: null };
} else {
responseObject = self._processResponse(webResource, response, options);
responseObject.contentMD5 = response.contentMD5;
responseObject.length = response.length;
}
responseObject.operationEndTime = new Date();
// Required for listing operations to make sure successive operations go to the same location.
responseObject.targetLocation = currentLocation;
responseObject.outputStreamSent = outputStreamSent;
callback(responseObject, next);
};
var endResponse;
var buildRequest = function (headersOnly, inputStream) {
// Build request (if body was set before, request will process immediately, if not it'll wait for the piping to happen
var requestStream;
var requestWithDefaults;
if(self.proxy) {
if(requestWithDefaults === undefined) {
requestWithDefaults = request.defaults({'proxy':self.proxy});
}
} else {
requestWithDefaults = request;
}
if (headersOnly) {
requestStream = requestWithDefaults(finalRequestOptions);
requestStream.on('error', processResponseCallback);
requestStream.on('response', function (response) {
var isValid = WebResource.validResponse(response.statusCode);
if (!isValid) {
// When getting invalid response, try to get the error message for future steps to extract the detailed error information
var contentLength = parseInt(response.headers['content-length']);
var errorMessageBuffer;
var index = 0;
if (contentLength !== undefined) {
errorMessageBuffer = Buffer.alloc(contentLength);
}
requestStream.on('data', function (data) {
if (contentLength !== undefined) {
data.copy(errorMessageBuffer, index);
index += data.length;
} else {
if (!errorMessageBuffer) {
errorMessageBuffer = data;
} else {
errorMessageBuffer = Buffer.concat([errorMessageBuffer, data]);
}
}
});
requestStream.on('end', function () {
if (errorMessageBuffer) {
// Strip the UTF8 BOM following the same ways as 'request' module
if (errorMessageBuffer.length > 3 &&
errorMessageBuffer[0] === 239 &&
errorMessageBuffer[1] === 187 &&
errorMessageBuffer[2] === 191) {
response.body = errorMessageBuffer.toString('utf8', 3);
} else {
response.body = errorMessageBuffer.toString('utf8');
}
}
processResponseCallback(null, response);
});
} else {
// Only pipe to the destination stream when we get a valid response from service
// Error message should NOT be piped to the destination stream
if (inputStream) {
requestStream.pipe(inputStream);
}
var responseLength = 0;
var internalHash = new Md5Wrapper().createMd5Hash();
response.on('data', function(data) {
responseLength += data.length;
internalHash.update(data);
});
response.on('end', function () {
// Calculate and set MD5 here
if(azureutil.objectIsNull(options.disableContentMD5Validation) || options.disableContentMD5Validation === false) {
response.contentMD5 = internalHash.digest('base64');
}
response.length = responseLength;
endResponse = response;
});
}
});
} else {
requestStream = requestWithDefaults(finalRequestOptions, processResponseCallback);
}
//If useNagleAlgorithm is not set or the value is set and is false, setNoDelay is set to true.
if (azureutil.objectIsNull(options.useNagleAlgorithm) || options.useNagleAlgorithm === false) {
requestStream.on('request', function(httpRequest) {
httpRequest.setNoDelay(true);
});
}
// Workaround to avoid request from potentially setting unwanted (rejected) headers by the service
var oldEnd = requestStream.end;
requestStream.end = function () {
if (finalRequestOptions.headers['content-length']) {
requestStream.headers['content-length'] = finalRequestOptions.headers['content-length'];
} else if (requestStream.headers['content-length']) {
delete requestStream.headers['content-length'];
}
oldEnd.call(requestStream);
};
// Bubble events up -- This is when the request is going to be made.
requestStream.on('response', function (response) {
self.emit('receivedResponseEvent', response);
});
return requestStream;
};
if (body && body.outputData) {
if (!azureutil.isBrowser() && Buffer.isBuffer(body.outputData)) {
// Request module will take 200MB additional memory when we pass a 100MB buffer as body
// Transfer buffer to stream will highly reduce the memory used by request module
finalRequestOptions.body = new BufferStream(body.outputData);
} else {
finalRequestOptions.body = body.outputData;
}
}
// Pipe any input / output streams
if (body && body.inputStream) {
body.inputStream.on('close', function () {
if (endResponse) {
processResponseCallback(null, endResponse);
endResponse = null;
}
});
body.inputStream.on('end', function () {
if (endResponse) {
processResponseCallback(null, endResponse);
endResponse = null;
}
});
body.inputStream.on('finish', function () {
if (endResponse) {
processResponseCallback(null, endResponse);
endResponse = null;
}
});
buildRequest(true, body.inputStream);
} else if (body && body.outputStream) {
var sendUnchunked = function () {
var size = finalRequestOptions.headers['content-length'] ?
finalRequestOptions.headers['content-length'] :
Constants.BlobConstants.MAX_SINGLE_UPLOAD_BLOB_SIZE_IN_BYTES;
var concatBuf = Buffer.alloc(parseInt(size));
var index = 0;
body.outputStream.on('data', function (d) {
outputStreamSent = true;
if(self._maximumExecutionTimeExceeded(Date.now(), options.operationExpiryTime)) {
processResponseCallback(new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION));
} else {
d.copy(concatBuf, index);
index += d.length;
}
}).on('end', function () {
var requestStream = buildRequest();
requestStream.write(concatBuf);
requestStream.end();
});
if (azureutil.isStreamPaused(body.outputStream)) {
body.outputStream.resume();
}
};
var sendStream = function () {
// NOTE: workaround for an unexpected EPIPE exception when piping streams larger than 29 MB
if (!azureutil.objectIsNull(finalRequestOptions.headers['content-length']) && finalRequestOptions.headers['content-length'] < 29 * 1024 * 1024) {
body.outputStream.pipe(buildRequest());
outputStreamSent = true;
if (azureutil.isStreamPaused(body.outputStream)) {
body.outputStream.resume();
}
} else {
sendUnchunked();
}
};
if (!body.outputStream.readable) {
// if the content length is zero, build the request and don't send a body
if (finalRequestOptions.headers['content-length'] === 0) {
buildRequest();
} else {
// otherwise, wait until we know the readable stream is actually valid before piping
body.outputStream.on('open', function () {
sendStream();
});
}
} else {
sendStream();
}
// This catches any errors that happen while creating the readable stream (usually invalid names)
body.outputStream.on('error', function (error) {
processResponseCallback(error);
});
} else {
buildRequest();
}
}
}
});
};
// The filter will do what it needs to the requestOptions and will provide a
// function to be handled after the reply
self.filter(options, function (postFiltersRequestOptions, nextPostCallback) {
if(self._maximumExecutionTimeExceeded(Date.now() + postFiltersRequestOptions.retryInterval, postFiltersRequestOptions.operationExpiryTime)) {
callback({ error: new TimeoutError(SR.MAXIMUM_EXECUTION_TIMEOUT_EXCEPTION), response: null}, function (postFiltersRequestOptions, finalCallback) {
finalCallback(postFiltersRequestOptions);
});
} else {
// If there is a filter, flow is:
// filter -> operation -> process response
if(postFiltersRequestOptions.retryContext) {
var func = function() {
operation(postFiltersRequestOptions, nextPostCallback);
};
// Sleep for retryInterval before making the request
setTimeout(func, postFiltersRequestOptions.retryInterval);
} else {
// No retry policy filter specified
operation(postFiltersRequestOptions, nextPostCallback);
}
}
});
};
/**
* Builds the request options to be passed to the http.request method.
* @ignore
* @param {WebResource} webResource The webresource where to build the options from.
* @param {object} options The request options.
* @param {function(error, requestOptions)} callback The callback function.
*/
StorageServiceClient.prototype._buildRequestOptions = function (webResource, body, options, callback) {
webResource.withHeader(HeaderConstants.STORAGE_VERSION, this.apiVersion);
webResource.withHeader(HeaderConstants.MS_DATE, new Date().toUTCString());
if (!webResource.headers[HeaderConstants.ACCEPT]) {
webResource.withHeader(HeaderConstants.ACCEPT, 'application/atom+xml,application/xml');
}
webResource.withHeader(HeaderConstants.ACCEPT_CHARSET, 'UTF-8');
// Browsers cache GET/HEAD requests by adding conditional headers such as 'IF_MODIFIED_SINCE' after Azure Storage 'Authorization header' calculation,
  // which may result in a 403 authorization error. So add a timestamp to GET/HEAD request URLs to avoid the browser cache.
if (azureutil.isBrowser() && (
webResource.method === Constants.HttpConstants.HttpVerbs.GET ||
webResource.method === Constants.HttpConstants.HttpVerbs.HEAD)) {
webResource.withQueryOption(HeaderConstants.FORCE_NO_CACHE_IN_BROWSER, new Date().getTime());
}
if(azureutil.objectIsNull(options.timeoutIntervalInMs)) {
options.timeoutIntervalInMs = this.defaultTimeoutIntervalInMs;
}
if(azureutil.objectIsNull(options.clientRequestTimeoutInMs)) {
options.clientRequestTimeoutInMs = this.defaultClientRequestTimeoutInMs;
}
if(!azureutil.objectIsNull(options.timeoutIntervalInMs) && options.timeoutIntervalInMs > 0) {
webResource.withQueryOption(QueryStringConstants.TIMEOUT, Math.ceil(options.timeoutIntervalInMs / 1000));
}
if(options.accessConditions) {
webResource.withHeader(HeaderConstants.IF_MATCH, options.accessConditions.EtagMatch);
webResource.withHeader(HeaderConstants.IF_MODIFIED_SINCE, options.accessConditions.DateModifedSince);
webResource.withHeader(HeaderConstants.IF_NONE_MATCH, options.accessConditions.EtagNonMatch);
webResource.withHeader(HeaderConstants.IF_UNMODIFIED_SINCE, options.accessConditions.DateUnModifiedSince);
webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_EQUAL, options.accessConditions.SequenceNumberEqual);
webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_LESS_THAN, options.accessConditions.SequenceNumberLessThan);
webResource.withHeader(HeaderConstants.SEQUENCE_NUMBER_LESS_THAN_OR_EQUAL, options.accessConditions.SequenceNumberLessThanOrEqual);
webResource.withHeader(HeaderConstants.BLOB_CONDITION_MAX_SIZE, options.accessConditions.MaxBlobSize);
webResource.withHeader(HeaderConstants.BLOB_CONDITION_APPEND_POSITION, options.accessConditions.MaxAppendPosition);
}
if(options.sourceAccessConditions) {
webResource.withHeader(HeaderConstants.SOURCE_IF_MATCH, options.sourceAccessConditions.EtagMatch);
webResource.withHeader(HeaderConstants.SOURCE_IF_MODIFIED_SINCE, options.sourceAccessConditions.DateModifedSince);
webResource.withHeader(HeaderConstants.SOURCE_IF_NONE_MATCH, options.sourceAccessConditions.EtagNonMatch);
webResource.withHeader(HeaderConstants.SOURCE_IF_UNMODIFIED_SINCE, options.sourceAccessConditions.DateUnModifiedSince);
}
if (!webResource.headers || webResource.headers[HeaderConstants.CONTENT_TYPE] === undefined) {
// work around to add an empty content type header to prevent the request module from magically adding a content type.
webResource.headers[HeaderConstants.CONTENT_TYPE] = '';
} else if (webResource.headers && webResource.headers[HeaderConstants.CONTENT_TYPE] === null) {
delete webResource.headers[HeaderConstants.CONTENT_TYPE];
}
if (!webResource.headers || webResource.headers[HeaderConstants.CONTENT_LENGTH] === undefined) {
if (body && body.outputData) {
webResource.withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(body.outputData, 'UTF8'));
} else if (webResource.headers[HeaderConstants.CONTENT_LENGTH] === undefined) {
webResource.withHeader(HeaderConstants.CONTENT_LENGTH, 0);
}
} else if (webResource.headers && webResource.headers[HeaderConstants.CONTENT_LENGTH] === null) {
delete webResource.headers[HeaderConstants.CONTENT_LENGTH];
}
var enableGlobalHttpAgent = this.enableGlobalHttpAgent;
// Sets the request url in the web resource.
this._setRequestUrl(webResource, options);
this.emit('sendingRequestEvent', webResource);
// Now that the web request is finalized, sign it
this.storageCredentials.signRequest(webResource, function (error) {
var requestOptions = null;
if (!error) {
var targetUrl = webResource.uri;
requestOptions = {
uri: url.format(targetUrl),
method: webResource.method,
headers: webResource.headers,
mode: 'disable-fetch'
};
if (options) {
//set encoding of response data. If set to null, the body is returned as a Buffer
requestOptions.encoding = options.responseEncoding;
}
if (options && options.clientRequestTimeoutInMs) {
requestOptions.timeout = options.clientRequestTimeoutInMs;
} else {
requestOptions.timeout = Constants.DEFAULT_CLIENT_REQUEST_TIMEOUT_IN_MS; // 2 minutes
}
// If global HTTP agent is not enabled, use forever agent.
if (enableGlobalHttpAgent !== true) {
requestOptions.forever = true;
}
}
callback(error, requestOptions);
});
};
/**
* Process the response.
* @ignore
*
* @param {WebResource} webResource The web resource that made the request.
* @param {Response} response The response object.
* @param {Options} options The response parsing options.
* @param {String} options.payloadFormat The payload format.
* @return The normalized responseObject.
*/
StorageServiceClient.prototype._processResponse = function (webResource, response, options) {
var self = this;
function convertRawHeadersToHeaders(rawHeaders) {
var headers = {};
if(!rawHeaders) {
return undefined;
}
for(var i = 0; i < rawHeaders.length; i++) {
var headerName;
if (rawHeaders[i].indexOf(HeaderConstants.PREFIX_FOR_STORAGE_METADATA) === 0) {
headerName = rawHeaders[i];
} else {
headerName = rawHeaders[i].toLowerCase();
}
headers[headerName] = rawHeaders[++i];
}
return headers;
}
var validResponse = WebResource.validResponse(response.statusCode);
var rsp = StorageServiceClient._buildResponse(validResponse, response.body, convertRawHeadersToHeaders(response.rawHeaders) || response.headers, response.statusCode, response.md5);
var responseObject;
if (validResponse && webResource.rawResponse) {
responseObject = { error: null, response: rsp };
} else {
// attempt to parse the response body, errors will be returned in rsp.error without modifying the body
rsp = StorageServiceClient._parseResponse(rsp, self.xml2jsSettings, options);
if (validResponse && !rsp.error) {
responseObject = { error: null, response: rsp };
} else {
rsp.isSuccessful = false;
if (response.statusCode < 400 || response.statusCode >= 500) {
this.logger.log(Logger.LogLevels.DEBUG,
'ERROR code = ' + response.statusCode + ' :\n' + util.inspect(rsp.body));
}
      // responseObject.error should contain normalized parser errors if they occurred in _parseResponse
// responseObject.response.body should contain the raw response body in that case
var errorBody = rsp.body;
if(rsp.error) {
errorBody = rsp.error;
delete rsp.error;
}
if (!errorBody) {
var code = Object.keys(HttpResponseCodes).filter(function (name) {
if (HttpResponseCodes[name] === rsp.statusCode) {
return name;
}
});
errorBody = { error: { code: code[0] } };
}
var normalizedError = StorageServiceClient._normalizeError(errorBody, response);
responseObject = { error: normalizedError, response: rsp };
}
}
this.logger.log(Logger.LogLevels.DEBUG, 'RESPONSE:\n' + util.inspect(responseObject));
return responseObject;
};
/**
* Associate a filtering operation with this StorageServiceClient. Filtering operations
* can include logging, automatically retrying, etc. Filter operations are objects
* that implement a method with the signature:
*
* "function handle (requestOptions, next)".
*
* After doing its preprocessing on the request options, the method needs to call
* "next" passing a callback with the following signature:
* signature:
*
* "function (returnObject, finalCallback, next)"
*
* In this callback, and after processing the returnObject (the response from the
* request to the server), the callback needs to either invoke next if it exists to
* continue processing other filters or simply invoke finalCallback otherwise to end
* up the service invocation.
*
* @param {Object} filter The new filter object.
* @return {StorageServiceClient} A new service client with the filter applied.
*/
StorageServiceClient.prototype.withFilter = function (newFilter) {
// Create a new object with the same members as the current service
var derived = _.clone(this);
// If the current service has a filter, merge it with the new filter
// (allowing us to effectively pipeline a series of filters)
var parentFilter = this.filter;
var mergedFilter = newFilter;
if (parentFilter !== undefined) {
// The parentFilterNext is either the operation or the nextPipe function generated on a previous merge
// Ordering is [f3 pre] -> [f2 pre] -> [f1 pre] -> operation -> [f1 post] -> [f2 post] -> [f3 post]
mergedFilter = function (originalRequestOptions, parentFilterNext) {
newFilter.handle(originalRequestOptions, function (postRequestOptions, newFilterCallback) {
// handle parent filter pre and get Parent filter post
var next = function (postPostRequestOptions, parentFilterCallback) {
// The parentFilterNext is the filter next to the merged filter.
// For 2 filters, that'd be the actual operation.
parentFilterNext(postPostRequestOptions, function (responseObject, responseCallback, finalCallback) {
parentFilterCallback(responseObject, finalCallback, function (postResponseObject) {
newFilterCallback(postResponseObject, responseCallback, finalCallback);
});
});
};
parentFilter(postRequestOptions, next);
});
};
}
// Store the filter so it can be applied in performRequest
derived.filter = mergedFilter;
return derived;
};
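// Illustrative example (not part of the original module): a minimal pass-through
// filter following the handle/next contract documented for withFilter above.
// The `service` variable stands for any service client instance.
//
//   var passThroughFilter = {
//     handle: function (requestOptions, next) {
//       // pre-processing of the request options goes here
//       next(requestOptions, function (returnObject, finalCallback, nextFilter) {
//         // post-processing of the response object goes here
//         if (nextFilter) {
//           nextFilter(returnObject);
//         } else {
//           finalCallback(returnObject);
//         }
//       });
//     }
//   };
//   var serviceWithFilter = service.withFilter(passThroughFilter);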
/*
* Builds a response object with normalized key names.
* @ignore
*
* @param {Bool} isSuccessful Boolean value indicating if the request was successful
* @param {Object} body The response body.
* @param {Object} headers The response headers.
* @param {int} statusCode The response status code.
* @param {string} md5 The response's content md5 hash.
* @return {Object} A response object.
*/
StorageServiceClient._buildResponse = function (isSuccessful, body, headers, statusCode, md5) {
var response = {
isSuccessful: isSuccessful,
statusCode: statusCode,
body: body,
headers: headers,
md5: md5
};
if (!azureutil.objectIsNull(headers)) {
if (headers[HeaderConstants.REQUEST_SERVER_ENCRYPTED] !== undefined) {
response.requestServerEncrypted = (headers[HeaderConstants.REQUEST_SERVER_ENCRYPTED] === 'true');
}
}
return response;
};
/**
* Parses a server response body from XML or JSON into a JS object.
* This is done using the xml2js library.
* @ignore
*
* @param {object} response The response object with a property "body" with a XML or JSON string content.
* @param {object} xml2jsSettings The XML to json settings.
* @param {Options} options The response parsing options.
* @param {String} options.payloadFormat The payload format.
* @return {object} The same response object with the body part as a JS object instead of a XML or JSON string.
*/
StorageServiceClient._parseResponse = function (response, xml2jsSettings, options) {
function parseXml(body) {
var parsed;
var parser = new xml2js.Parser(xml2jsSettings);
parser.parseString(azureutil.removeBOM(body.toString()), function (err, parsedBody) {
if (err) {
var xmlError = new SyntaxError('EXMLFORMAT');
xmlError.innerError = err;
throw xmlError;
} else { parsed = parsedBody; }
});
return parsed;
}
if (response.body && Buffer.byteLength(response.body.toString()) > 0) {
var contentType = '';
if (response.headers && response.headers['content-type']) {
contentType = response.headers['content-type'].toLowerCase();
}
try {
if (contentType.indexOf('application/json') !== -1) {
if (options && options.payloadFormat && options.payloadFormat !== TableUtilities.PayloadFormat.NO_METADATA) {
var parser = new Parser();
parser.onValue = function (value) {
response.body = value;
};
parser.write(response.body);
} else {
response.body = JSON.parse(response.body);
}
} else if (contentType.indexOf('application/xml') !== -1 || contentType.indexOf('application/atom+xml') !== -1) {
response.body = parseXml(response.body);
} else if (contentType.indexOf('text/html') !== -1) {
response.body = response.body;
} else {
response.body = parseXml(response.body);
// throw new SyntaxError(SR.CONTENT_TYPE_MISSING, null);
}
} catch (e) {
response.error = e;
}
}
return response;
};
/**
* Gets the storage settings.
*
* @param {string} [storageAccountOrConnectionString] The storage account or the connection string.
* @param {string} [storageAccessKey] The storage access key.
* @param {string} [host] The host address.
* @param {object} [sas] The Shared Access Signature string.
* @param {string}             [endpointSuffix]                    The endpoint suffix.
* @param {TokenCredential}    [token]                             The {@link TokenCredential} object.
*
* @return {StorageServiceSettings}
*/
StorageServiceClient.getStorageSettings = function (storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token) {
var storageServiceSettings;
if (storageAccountOrConnectionString && !storageAccessKey && !sas) {
// If storageAccountOrConnectionString was passed and no accessKey was passed, assume connection string
storageServiceSettings = StorageServiceSettings.createFromConnectionString(storageAccountOrConnectionString);
} else if ((storageAccountOrConnectionString && storageAccessKey) || sas || token || host) {
// Account and key or credentials or anonymous
storageServiceSettings = StorageServiceSettings.createExplicitly(storageAccountOrConnectionString, storageAccessKey, host, sas, endpointSuffix, token);
} else {
// Use environment variables
storageServiceSettings = StorageServiceSettings.createFromEnvironment();
}
return storageServiceSettings;
};
/**
* Sets the webResource's requestUrl based on the service client settings.
* @ignore
*
* @param {WebResource} webResource The web resource where to set the request url.
*/
StorageServiceClient.prototype._setRequestUrl = function (webResource, options) {
// Normalize the path
// Backup the original path of the webResource to make sure it works fine even this function get executed multiple times - like RetryFilter
webResource.originalPath = webResource.originalPath || webResource.path;
webResource.path = this._getPath(webResource.originalPath);
if(!this.host){
throw new ArgumentNullError('this.host', SR.STORAGE_HOST_LOCATION_REQUIRED);
}
var host = this.host.primaryHost;
if(!azureutil.objectIsNull(options) && options.currentLocation === Constants.StorageLocation.SECONDARY) {
host = this.host.secondaryHost;
}
if(host && host.lastIndexOf('/') !== (host.length - 1)){
host = host + '/';
}
var fullPath = url.format({pathname: webResource.path, query: webResource.queryString});
webResource.uri = url.resolve(host, fullPath);
webResource.path = url.parse(webResource.uri).pathname;
};
/**
* Retrieves the normalized path to be used in a request.
* It also removes any leading "/" of the path in case
* it's there before.
* @ignore
* @param {string} path The path to be normalized.
* @return {string} The normalized path.
*/
StorageServiceClient.prototype._getPath = function (path) {
if (path === null || path === undefined) {
path = '';
} else if (path.indexOf('/') === 0) {
path = path.substring(1);
}
return path;
};
/**
* Get the url of a given path
*/
StorageServiceClient.prototype._getUrl = function (path, sasToken, primary) {
var host;
if (!azureutil.objectIsNull(primary) && primary === false) {
host = this.host.secondaryHost;
} else {
host = this.host.primaryHost;
}
host = azureutil.trimPortFromUri(host);
if(host && host.lastIndexOf('/') !== (host.length - 1)){
host = host + '/';
}
var query = qs.parse(sasToken);
var fullPath = url.format({ pathname: this._getPath(path), query: query });
return url.resolve(host, fullPath);
};
/**
* Initializes the default filter.
* This filter is responsible for chaining the pre filters request into the operation and, after processing the response,
* pass it to the post processing filters. This method should only be invoked by the StorageServiceClient constructor.
* @ignore
*
*/
StorageServiceClient.prototype._initDefaultFilter = function () {
this.filter = function (requestOptions, nextPreCallback) {
if (nextPreCallback) {
// Handle the next pre callback and pass the function to be handled as post call back.
nextPreCallback(requestOptions, function (returnObject, finalCallback, nextPostCallback) {
if (nextPostCallback) {
nextPostCallback(returnObject);
} else if (finalCallback) {
finalCallback(returnObject);
}
});
}
};
};
/**
* Retrieves the metadata headers from the response headers.
* @ignore
*
* @param {object} headers The metadata headers.
* @return {object} An object with the metadata headers (without the "x-ms-" prefix).
*/
StorageServiceClient.prototype.parseMetadataHeaders = function (headers) {
var metadata = {};
if (!headers) {
return metadata;
}
for (var header in headers) {
if (header.indexOf(HeaderConstants.PREFIX_FOR_STORAGE_METADATA) === 0) {
var key = header.substr(HeaderConstants.PREFIX_FOR_STORAGE_METADATA.length, header.length - HeaderConstants.PREFIX_FOR_STORAGE_METADATA.length);
metadata[key] = headers[header];
}
}
return metadata;
};
/**
* Gets the properties of a storage account’s service, including Azure Storage Analytics.
* @ignore
*
* @this {StorageServiceClient}
* @param {object} [options] The request options.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResult} callback `error` will contain information if an error occurs; otherwise, `result` will contain the properties
* and `response` will contain information related to this operation.
*/
StorageServiceClient.prototype.getAccountServiceProperties = function (optionsOrCallback, callback) {
var userOptions;
azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
validate.validateArgs('getServiceProperties', function (v) {
v.callback(callback);
});
var options = extend(true, {}, userOptions);
var webResource = WebResource.get()
.withQueryOption(QueryStringConstants.COMP, 'properties')
.withQueryOption(QueryStringConstants.RESTYPE, 'service');
options.requestLocationMode = RequestLocationMode.PRIMARY_OR_SECONDARY;
var processResponseCallback = function (responseObject, next) {
responseObject.servicePropertiesResult = null;
if (!responseObject.error) {
responseObject.servicePropertiesResult = ServicePropertiesResult.parse(responseObject.response.body.StorageServiceProperties);
}
// function to be called after all filters
var finalCallback = function (returnObject) {
callback(returnObject.error, returnObject.servicePropertiesResult, returnObject.response);
};
// call the first filter
next(responseObject, finalCallback);
};
this.performRequest(webResource, null, options, processResponseCallback);
};
/**
* Sets the properties of a storage account’s service, including Azure Storage Analytics.
* You can also use this operation to set the default request version for all incoming requests that do not have a version specified.
*
* @this {StorageServiceClient}
* @param {object} serviceProperties The service properties.
* @param {object} [options] The request options.
* @param {LocationMode} [options.locationMode] Specifies the location mode used to decide which location the request should be sent to.
* Please see StorageUtilities.LocationMode for the possible values.
* @param {int} [options.timeoutIntervalInMs] The server timeout interval, in milliseconds, to use for the request.
* @param {int} [options.clientRequestTimeoutInMs] The timeout of client requests, in milliseconds, to use for the request.
* @param {int} [options.maximumExecutionTimeInMs] The maximum execution time, in milliseconds, across all potential retries, to use when making this request.
* The maximum execution time interval begins at the time that the client begins building the request. The maximum
* execution time is checked intermittently while performing requests, and before executing retries.
* @param {bool} [options.useNagleAlgorithm] Determines whether the Nagle algorithm is used; true to use the Nagle algorithm; otherwise, false.
* The default value is false.
* @param {errorOrResponse} callback `error` will contain information
* if an error occurs; otherwise, `response`
* will contain information related to this operation.
*/
StorageServiceClient.prototype.setAccountServiceProperties = function (serviceProperties, optionsOrCallback, callback) {
var userOptions;
azureutil.normalizeArgs(optionsOrCallback, callback, function (o, c) { userOptions = o; callback = c; });
validate.validateArgs('setServiceProperties', function (v) {
v.object(serviceProperties, 'serviceProperties');
v.callback(callback);
});
var options = extend(true, {}, userOptions);
var servicePropertiesXml = ServicePropertiesResult.serialize(serviceProperties);
var webResource = WebResource.put()
.withQueryOption(QueryStringConstants.COMP, 'properties')
.withQueryOption(QueryStringConstants.RESTYPE, 'service')
.withHeader(HeaderConstants.CONTENT_TYPE, 'application/xml;charset="utf-8"')
.withHeader(HeaderConstants.CONTENT_LENGTH, Buffer.byteLength(servicePropertiesXml))
.withBody(servicePropertiesXml);
var processResponseCallback = function (responseObject, next) {
var finalCallback = function (returnObject) {
callback(returnObject.error, returnObject.response);
};
next(responseObject, finalCallback);
};
this.performRequest(webResource, webResource.body, options, processResponseCallback);
};
// Other functions
/**
* Processes the error body into a normalized error object with all the properties lowercased.
*
* Error information may be returned by a service call with additional debugging information:
* http://msdn.microsoft.com/en-us/library/windowsazure/dd179382.aspx
*
* Table services returns these properties lowercased, example, "code" instead of "Code". So that the user
* can always expect the same format, this method lower cases everything.
*
* @ignore
*
* @param {Object} error The error object as returned by the service and parsed to JSON by the xml2json.
* @return {Object} The normalized error object with all properties lower cased.
*/
StorageServiceClient._normalizeError = function (error, response) {
if (azureutil.objectIsString(error)) {
return new StorageError(error, null);
} else if (error) {
var normalizedError = {};
// blob/queue errors should have error.Error, table errors should have error['odata.error']
var errorProperties = error.Error || error.error || error['odata.error'] || error['m:error'] || error;
normalizedError.code = errorProperties.message; // The message exists when there is error.Error.
for (var property in errorProperties) {
if (errorProperties.hasOwnProperty(property)) {
var key = property.toLowerCase();
if(key.indexOf('m:') === 0) {
key = key.substring(2);
}
normalizedError[key] = errorProperties[property];
// if this is a table error, message is an object - flatten it to normalize with blob/queue errors
// ex: "message":{"lang":"en-US","value":"The specified resource does not exist."} becomes message: "The specified resource does not exist."
if (key === 'message' && _.isObject(errorProperties[property])) {
if (errorProperties[property]['value']) {
normalizedError[key] = errorProperties[property]['value'];
}
}
}
}
// add status code and server request id if available
if (response) {
if (response.statusCode) {
normalizedError.statusCode = response.statusCode;
}
if (response.headers && response.headers['x-ms-request-id']) {
normalizedError.requestId = response.headers['x-ms-request-id'];
}
}
var errorObject = new StorageError(normalizedError.code, normalizedError);
return errorObject;
}
return null;
};
/**
* Sets proxy object specified by caller.
*
* @param {object} proxy proxy to use for tunneling
* {
* host: hostname
* port: port number
* proxyAuth: 'user:password' for basic auth
* headers: {...} headers for proxy server
* key: key for proxy server
* ca: ca for proxy server
* cert: cert for proxy server
* }
* if null or undefined, clears proxy
*/
StorageServiceClient.prototype.setProxy = function (proxy) {
if (proxy) {
this.proxy = proxy;
} else {
this.proxy = null;
}
};
/**
* Sets the service host default proxy from the environment.
* Can be overridden by calling _setProxyUrl or _setProxy
*
*/
StorageServiceClient.prototype._setDefaultProxy = function () {
var proxyUrl = StorageServiceClient._loadEnvironmentProxyValue();
if (proxyUrl) {
var parsedUrl = url.parse(proxyUrl);
if (!parsedUrl.port) {
parsedUrl.port = 80;
}
this.setProxy(parsedUrl);
} else {
this.setProxy(null);
}
};
/*
* Loads the fields "useProxy" and respective protocol, port and url
* from the environment values HTTPS_PROXY and HTTP_PROXY
* in case those are set.
* @ignore
*
* @return {string} or null
*/
StorageServiceClient._loadEnvironmentProxyValue = function () {
var proxyUrl = null;
if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY]) {
proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY];
} else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY.toLowerCase()]) {
proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTPS_PROXY.toLowerCase()];
} else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY]) {
proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY];
} else if (process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY.toLowerCase()]) {
proxyUrl = process.env[StorageServiceClientConstants.EnvironmentVariables.HTTP_PROXY.toLowerCase()];
}
return proxyUrl;
};
/**
* Initializes the location to which the operation is being sent to.
*/
StorageServiceClient.prototype._initializeLocation = function (options) {
if(!azureutil.objectIsNull(options.locationMode)) {
switch(options.locationMode) {
case StorageUtilities.LocationMode.PRIMARY_ONLY:
case StorageUtilities.LocationMode.PRIMARY_THEN_SECONDARY:
options.currentLocation = Constants.StorageLocation.PRIMARY;
break;
case StorageUtilities.LocationMode.SECONDARY_ONLY:
case StorageUtilities.LocationMode.SECONDARY_THEN_PRIMARY:
options.currentLocation = Constants.StorageLocation.SECONDARY;
break;
default:
throw new RangeError(util.format(SR.ARGUMENT_OUT_OF_RANGE_ERROR, 'locationMode', options.locationMode));
}
} else {
options.locationMode = StorageUtilities.LocationMode.PRIMARY_ONLY;
options.currentLocation = Constants.StorageLocation.PRIMARY;
}
};
/**
* Validates the location to which the operation is being sent to.
*/
StorageServiceClient.prototype._validateLocation = function (options) {
if(this._invalidLocationMode(options.locationMode)) {
throw new ArgumentNullError('host', SR.STORAGE_HOST_MISSING_LOCATION);
}
switch(options.requestLocationMode) {
case Constants.RequestLocationMode.PRIMARY_ONLY:
if(options.locationMode === StorageUtilities.LocationMode.SECONDARY_ONLY) {
throw new ArgumentError('host.primaryHost', SR.PRIMARY_ONLY_COMMAND);
}
options.currentLocation = Constants.StorageLocation.PRIMARY;
options.locationMode = StorageUtilities.LocationMode.PRIMARY_ONLY;
break;
case Constants.RequestLocationMode.SECONDARY_ONLY:
if(options.locationMode === StorageUtilities.LocationMode.PRIMARY_ONLY) {
throw new ArgumentError('host.secondaryHost', SR.SECONDARY_ONLY_COMMAND);
}
options.currentLocation = Constants.StorageLocation.SECONDARY;
options.locationMode = StorageUtilities.LocationMode.SECONDARY_ONLY;
break;
default:
// no op
}
};
/**
* Checks whether we have the relevant host information based on the locationMode.
*/
StorageServiceClient.prototype._invalidLocationMode = function (locationMode) {
switch(locationMode) {
case StorageUtilities.LocationMode.PRIMARY_ONLY:
return azureutil.objectIsNull(this.host.primaryHost);
case StorageUtilities.LocationMode.SECONDARY_ONLY:
return azureutil.objectIsNull(this.host.secondaryHost);
default:
return (azureutil.objectIsNull(this.host.primaryHost) || azureutil.objectIsNull(this.host.secondaryHost));
}
};
/**
* Checks to see if the maximum execution timeout provided has been exceeded.
*/
StorageServiceClient.prototype._maximumExecutionTimeExceeded = function (currentTime, expiryTime) {
if(!azureutil.objectIsNull(expiryTime) && currentTime > expiryTime) {
return true;
} else {
return false;
}
};
/**
* Sets the operation expiry time.
*/
StorageServiceClient.prototype._setOperationExpiryTime = function (options) {
if(azureutil.objectIsNull(options.operationExpiryTime)) {
if(!azureutil.objectIsNull(options.maximumExecutionTimeInMs)) {
options.operationExpiryTime = Date.now() + options.maximumExecutionTimeInMs;
} else if(this.defaultMaximumExecutionTimeInMs) {
options.operationExpiryTime = Date.now() + this.defaultMaximumExecutionTimeInMs;
}
}
};
module.exports = StorageServiceClient;

/py_viptela-0.2.9.8.5.tar.gz/py_viptela-0.2.9.8.5/py_viptela/api/config/builder/lists/datafqdnall.py

def getAllDataPrefixAndFQDNLists(vmanage):
"""
Get all data-prefix (IPv4) and FQDN lists
Parameters:
Returns
response (dict)
"""
endpoint = f"dataservice/template/policy/list/dataprefixfqdn"
response = vmanage.apiCall("GET", endpoint)
return response
def create(vmanage, policylist):
"""
Create policy list
Parameters:
policylist: Policy list
Returns
response (dict)
"""
endpoint = f"dataservice/template/policy/list/dataprefixfqdn"
response = vmanage.apiCall("POST", endpoint, policylist)
return response
def preview(vmanage, policylist):
"""
Preview a policy list based on the policy list type
Parameters:
policylist: Policy list
Returns
response (dict)
"""
endpoint = f"dataservice/template/policy/list/dataprefixfqdn/preview"
response = vmanage.apiCall("POST", endpoint, policylist)
return response
def previewById(vmanage, id):
"""
Preview a specific policy list entry based on id provided
Parameters:
id (string): Policy Id
Returns
response (dict)
"""
endpoint = f"dataservice/template/policy/list/dataprefixfqdn/preview/{id}"
response = vmanage.apiCall("GET", endpoint)
return response
def getListsById(vmanage, id):
"""
Get a specific policy list based on the id
Parameters:
id (string): Policy Id
Returns
response (dict)
"""
endpoint = f"dataservice/template/policy/list/dataprefixfqdn/{id}"
response = vmanage.apiCall("GET", endpoint)
return response
def edit(vmanage, policylist, id):
"""
Edit policy list entries for a specific type of policy list
Parameters:
policylist: Policy list
id (string): Policy Id
Returns
response (dict)
"""
endpoint = f"dataservice/template/policy/list/dataprefixfqdn/{id}"
response = vmanage.apiCall("PUT", endpoint, policylist)
return response
def delete(vmanage, id):
"""
Delete policy list entry for a specific type of policy list
Parameters:
id (string): Policy Id
Returns
response (dict)
"""
endpoint = f"dataservice/template/policy/list/dataprefixfqdn/{id}"
response = vmanage.apiCall("DELETE", endpoint)
return response
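

# Example usage (a minimal sketch, not part of the published module): it assumes a
# `vmanage` session object exposing `apiCall(method, endpoint, payload=None)` exactly
# as the functions above call it, and the payload keys below are illustrative
# assumptions rather than the documented vManage schema.
if __name__ == "__main__":
    class _EchoSession:
        """Stand-in for an authenticated vManage session, used only for illustration."""

        def apiCall(self, method, endpoint, payload=None):
            print(method, endpoint, payload)
            return {"data": []}

    vmanage = _EchoSession()

    # List every existing data-prefix/FQDN list
    print(getAllDataPrefixAndFQDNLists(vmanage))

    # Create and then preview a new list (hypothetical payload structure)
    new_list = {"name": "example-list", "type": "dataprefixfqdn", "entries": []}
    create(vmanage, new_list)
    preview(vmanage, new_list)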

/pypsexec-0.3.0.tar.gz/pypsexec-0.3.0/README.md

# Python PsExec Library
[![Test workflow](https://github.com/jborean93/pypsexec/actions/workflows/ci.yml/badge.svg)](https://github.com/jborean93/pypsexec/actions/workflows/ci.yml)
[![codecov](https://codecov.io/gh/jborean93/pypsexec/branch/master/graph/badge.svg?token=Hi2Nk4RfMF)](https://codecov.io/gh/jborean93/pypsexec)
[![PyPI version](https://badge.fury.io/py/pypsexec.svg)](https://badge.fury.io/py/pypsexec)
[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/jborean93/pypsexec/blob/master/LICENSE)
This library can run commands on a remote Windows host through Python. This
means that it can be run on any host with Python and does not require any
binaries to be present or a specific OS. It uses SMB/RPC to execute commands
in a similar fashion to the popular PsExec tool. More details on this tool
can be read on
[this blog post](https://www.bloggingforlogging.com/2018/03/12/introducing-psexec-for-python/).
The executable wrapper that is sent to the service is based on the
[PAExec](https://github.com/poweradminllc/PAExec) library. PAExec is a free,
redistributable and open source equivalent to Microsoft's
[PsExec](https://docs.microsoft.com/en-us/sysinternals/downloads/psexec)
application. This program is stored as a binary in this package and is used
to run the remote service and start the process execution.
I would like to thank the developers of Power Admin for creating this library
as it has made this library a lot less complex than what it would have been.
## Features
With pypsexec you can run commands on a remote Windows host like you would with
PsExec. Currently you can use pypsexec to do the following;
* Run as a specific local or domain user or the user that authenticated the SMB session
* Run as the local SYSTEM account
* Run as an interactive process
* Specify the session the interactive process should run on
* Specify the run level of the user token, `highest` or `limited`
* Set the priority of the process
* Set a timeout for the remote process
* Send input through the stdin pipe to the running process
* Set the processors the process can run on
## Further Info
While this info is not necessary for you to use this library it can help people
understand what is happening under the hood. This library runs the following
steps when running a command;
* Create an SMB connection to the host
* Copies across the PAExec binary to the `ADMIN$` share of the remote host
* Binds the Windows Service Manager to the opened `IPC$` tree using RPC
* Creates and starts a Windows service as the `SYSTEM` account to run the copied binary
* Connect to the PAExec named pipe the service creates
* Sends the process details to the PAExec service through the pipe
* Send a request to the PAExec service to start the process based on the settings sent
* Connect to the newly spawned process's stdout, stderr, stdin pipe (if not interactive or async)
* Read the stdout/stderr pipe until the process is complete
* Get the return code of the new process
* Stop and remove the PAExec service
* Remove the PAExec binary from the `ADMIN$` share
* Disconnects from the SMB connection
In the case of a failed process, the PAExec service and binary may not be
removed from the host and may need to be done manually. This is only the case
for a critical error or the cleanup functions not being called.
By default the data being sent to and from the server is encrypted to stop
people listening in on the network from snooping your data. Unfortunately this
uses SMB encryption which was added in the SMB 3.x dialects so hosts running
Windows 7, Server 2008, or Server 2008 R2 will not work with encryption.
This means that any data sent over the wire on these older versions of Windows
is viewable by anyone reading those packets. Any input or output of the process
comes through these packets so any secrets sent over the network won't be
encrypted. PAExec tries to reduce this risk by doing a simple XOR scramble of
the settings set in `run_executable` so it isn't plaintext but it can be
decoded by someone who knows the protocol.
## Requirements
* Python 3.6+
* [smbprotocol](https://github.com/jborean93/smbprotocol)
To install pypsexec, simply run
```bash
pip install pypsexec
```
This will download the packages that are required and get your
Python environment ready to go.
Out of the box, pypsexec supports authenticating to a Windows host with NTLM
authentication but users in a domain environment can take advantage of Kerberos
authentication as well for added security. The Kerberos libraries are an
optional install which can be installed with;
```bash
# for Debian/Ubuntu/etc:
sudo apt-get install gcc python-dev libkrb5-dev
pip install smbprotocol[kerberos]
# for RHEL/CentOS/etc:
sudo yum install gcc python-devel krb5-devel krb5-workstation python-devel
pip install smbprotocol[kerberos]
```
## Remote Host Requirements
The goal of this package to be able to run executables on a vanilla remote
Windows host with as little setup as possible. Unfortunately there is still
some setup required to get working depending on the OS version and type
that is being used. What pypsexec requires on the host is;
* SMB to be up and running on the Windows port and readable from the Python host
* The `ADMIN$` share to be enabled with read/write access of the user configured
* The above usually means the configured user is an administrator of the Windows host
* At least SMB 2 on the host (Server 2008 and newer)
* The connection user has a full logon token that is not filtered by UAC
* If connecting to localhost and `pywin32` is installed, the script must be run as a user with Administrator privileges
### Firewall Setup
By default, Windows blocks the SMB port 445 and it needs to be opened up before
pypsexec can connect to the host. To do this run either one of the following
commands;
```powershell
# PowerShell (Windows 8 and Server 2012 or Newer)
Set-NetFirewallRule -Name FPS-SMB-In-TCP -Enabled True
# CMD (All OS's)
netsh advfirewall firewall set rule name="File and Printer Sharing (SMB-In)" dir=in new enable=Yes
```
This will open up inbound traffic to port `445` which is used by SMB.
### User Account Control
In some circumstances, UAC will filter any remote logon token and limit the
rights that are available to it. This causes issues with pypsexec and it will
fail with an `ACCESS_IS_DENIED` error message when trying to interact with the
remote SCMR API. This restriction is enforced in various different scenarios
and to get it working with pypsexec you can either;
* In a domain environment, use any domain account that is a member of the local `Administrators` group
* Use any local account that is a member of the local `Administrators` group if [LocalAccountTokenFilterPolicy](https://support.microsoft.com/en-us/help/951016/description-of-user-account-control-and-remote-restrictions-in-windows) is set to `1`
* This means any remote logon token will not be filtered and will have the full rights of that user
* By default this is not defined and needs to be created
* This only affects remote tokens, any local tokens/processes will still be limited as per usual
* Use the builtin local Administrator account (SID `S-1-5-21-*-500`) that is created when Windows was installed
* The builtin Administrator account for English installs is typically called `Administrator` but it can be renamed
* This account is typically disabled by default on the desktop variants of Windows, e.g. Windows 7, 8.1, 10
* When [AdminApprovalMode](https://docs.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2008-R2-and-2008/dd835564(v=ws.10)#BKMK_BuiltInAdmin) is `Enabled` this will not work. `AdminApprovalMode` is not `Enabled` by default
* Use any local account that is a member of the local `Administrators` group if [EnableLUA](https://docs.microsoft.com/en-us/windows-hardware/customize/desktop/unattend/microsoft-windows-lua-settings-enablelua) is `Disabled`
* Unlike the `LocalAccountTokenFilterPolicy` option, this affects local tokens and processes spawned locally
* This effectively disables UAC for any Administrator accounts and should be avoided
To set `LocalAccountTokenFilterPolicy` to allow a full token on a remote logon,
run the following PowerShell commands;
```powershell
$reg_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System"
$reg_prop_name = "LocalAccountTokenFilterPolicy"
$reg_key = Get-Item -Path $reg_path
$reg_prop = $reg_key.GetValue($reg_prop_name)
if ($null -ne $reg_prop) {
Remove-ItemProperty -Path $reg_path -Name $reg_prop_name
}
New-ItemProperty -Path $reg_path -Name $reg_prop_name -Value 1 -PropertyType DWord
```
To get the name of the builtin Administrator (SID `S-1-5-21-*-500`), you can
run the following PowerShell commands;
```powershell
Add-Type -AssemblyName System.DirectoryServices.AccountManagement
$principal_context = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalContext([System.DirectoryServices.AccountManagement.ContextType]::Machine)
$user_principal = New-Object -TypeName System.DirectoryServices.AccountManagement.UserPrincipal($principal_context)
$searcher = New-Object -TypeName System.DirectoryServices.AccountManagement.PrincipalSearcher($user_principal)
$users = $searcher.FindAll() | Where-Object { $_.Sid -like "*-500" }
$users[0].Name
```
The last resort would be to disable UAC for any local Administrator account.
Once again this should be avoided as there are other options available and this
will reduce the security of your Windows host, but to do so you can run the
following PowerShell commands;
```powershell
$reg_path = "HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Policies\System"
$reg_prop_name = "EnableLUA"
$reg_key = Get-Item -Path $reg_path
$reg_prop = $reg_key.GetValue($reg_prop_name)
if ($null -ne $reg_prop) {
Remove-ItemProperty -Path $reg_path -Name $reg_prop_name
}
New-ItemProperty -Path $reg_path -Name $reg_prop_name -Value 0 -PropertyType DWord
```
After changing the `EnableLUA` setting, the Windows host needs to be rebooted
before the policies are enacted.
## Examples
Here is an example of how to run a command with this library
```python
from pypsexec.client import Client
# creates an encrypted connection to the host with the username and password
c = Client("hostname", username="username", password="password")
# set encrypt=False for Windows 7, Server 2008
c = Client("hostname", username="username", password="password", encrypt=False)
# if Kerberos is available, this will use the default credentials in the
# credential cache
c = Client("hostname")
# you can also tell it to use a specific Kerberos principal in the cache
# without a password
c = Client("hostname", username="[email protected]")
c.connect()
try:
c.create_service()
# After creating the service, you can run multiple exe's without
# reconnecting
# run a simple cmd.exe program with arguments
stdout, stderr, rc = c.run_executable("cmd.exe",
arguments="/c echo Hello World")
# run whoami.exe as the SYSTEM account
stdout, stderr, rc = c.run_executable("whoami.exe", use_system_account=True)
# run command asynchronously (in background), the rc is the PID of the spawned service
stdout, stderr, rc = c.run_executable("longrunning.exe",
arguments="/s other args",
asynchronous=True)
# run whoami.exe as a specific user
stdout, stderr, rc = c.run_executable("whoami",
arguments="/all",
username="local-user",
password="password",
run_elevated=True)
finally:
c.remove_service()
c.disconnect()
```
In the case of a fatal failure, this project may leave behind some of the PAExec
payload in `C:\Windows` or leave the service still installed. As these are uniquely
named they can build up over time. They can be manually removed but you can
also use pypsexec to clean them all up at once. To do this run
```python
from pypsexec.client import Client
c = Client("server", username="username", password="password")
c.connect()
c.cleanup() # this is where the magic happens
c.disconnect()
```
The script will delete any files that match `C:\Windows\PAExec-*` and any
services that match `PAExec-*`. For an individual run, the `remove_service()`
function should still be used.
### Client Options
When creating the main pypsexec `Client` object there are some configuration
options that can be set to control the process. These args are;
* `server`: This needs to be set and is the host or IP address of the server to connect to
* `username`: The username to connect with. Can be `None` if `python-gssapi` is installed and a ticket has been granted in the local credential cache
* `password`: The password for `username`. Can be `None` if `python-gssapi` is installed and a ticket has been granted for the user specified
* `port`: Override the default port of `445` when connecting to the server
* `encrypt`: Whether to encrypt the messages or not, default is `True`. Server 2008, 2008 R2 and Windows 7 hosts do not support SMB Encryption and need this to be set to `False` (see the example below)
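
As a minimal sketch of how these options combine (the hostname and the non-standard port below are placeholders, not library defaults):

```python
from pypsexec.client import Client

# Older hosts (Windows 7 / Server 2008 R2) do not support SMB encryption, so
# encrypt=False is needed there; 4455 is a hypothetical forwarded SMB port.
c = Client("legacy-host", username="username", password="password",
           port=4455, encrypt=False)
c.connect()
c.disconnect()
```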
### Run Executable Options
When calling `run_executable`, there are multiple kwargs that can define
how the remote process will work. These args are;
* `executable`: (string) The path to the executable to be run
* `arguments`: (string) Arguments for the executable
* `processors`: (list<int>) A list of processor numbers that the process can run on
* `asynchronous`: (bool) Doesn't wait until the process is complete before returning. The `rc` returned by the function is the `PID` of the async process, default is `False`
* `load_profile`: (bool) Load the user's profile, default is `True`
* `interactive_session`: (int) The session ID to display the interactive process when `interactive=True`, default is `0`
* `interactive`: (bool) Runs the process as an interactive process. The stdout and stderr buffers will be `None` if `True`, default `False`
* `run_elevated`: (bool) When `username` is defined, will run the process with elevated permissions, default `False`
* `run_limited`: (bool) When `username` is defined, will run the process under limited permissions, default `False`
* `username`: (string) Used to run the process under a different user than the one that authenticated the SMB session
* `password`: (string) The password for `username`
* `use_system_account`: (bool) Run the process as `NT AUTHORITY\SYSTEM`
* `working_dir`: (string) The working directory of the process, default `C:\Windows\System32`
* `show_ui_on_win_logon`: (bool) Displays the UI on the Winlogon secure desktop when `use_system_account=True`, default `False`
* `priority`: (pypsexec.ProcessPriority) The priority level of the process, default `NORMAL_PRIORITY_CLASS`
* `remote_log_path`: (string) A path on the remote host to log the PAExec service details
* `timeout_seconds`: (int) The maximum time the process can run for, default is `0` (no timeout)
* `stdout`: (pipe.OutputPipe) A class that implements pipe.OutputPipe that controls how the stdout output is processed and returned, will default to returning the byte string of the stdout. Is ignored when `interactive=True` and `asynchronous=True`
* `stderr`: (pipe.OutputPipe) A class that implements pipe.OutputPipe that controls how the stderr output is processed and returned, will default to returning the byte string of the stderr. Is ignored when `interactive=True` and `asynchronous=True`
* `stdin`: (bytes/generator) A byte string or generator that yields a byte string to send over the stdin pipe, does not work with `interactive=True` and `asynchronous=True` (see the sketch after this list)
* `wow64`: (bool) Set to `True` to run the executable in 32-bit mode on 64-bit systems. This does nothing on 32-bit systems, default `False`
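
For example, the `stdin`, `timeout_seconds` and `working_dir` options can be combined
as in the sketch below. The PowerShell invocation is only an assumption about how you
might feed commands over the stdin pipe; substitute the executable you actually need:

```python
from pypsexec.client import Client

c = Client("hostname", username="username", password="password")
c.connect()
try:
    c.create_service()
    # Feed two commands over the stdin pipe, cap the runtime at 60 seconds and
    # run from a custom working directory.
    stdout, stderr, rc = c.run_executable(
        "powershell.exe",
        arguments="-NoProfile -Command -",
        stdin=b"Get-Date\r\nexit\r\n",
        timeout_seconds=60,
        working_dir="C:\\Windows\\Temp",
    )
finally:
    c.remove_service()
    c.disconnect()
```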
## Logging
This library uses the builtin Python logging library and can be used to find
out what is happening in the pypsexec process. Log messages are logged to the
`pypsexec` named logger as well as `pypsexec.*` where `*` is each python script
in the `pypsexec` directory.
A way to enable the logging in your scripts through code is to add the
following to the top of the script being used;
```python
import logging
logger = logging.getLogger("pypsexec")
logger.setLevel(logging.DEBUG) # set to logging.INFO if you don't want DEBUG logs
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - '
'%(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
```
These logs are generally useful when debugging issues as they give you a more
step by step snapshot of what it is doing and what may be going wrong. The
debug level will also print out a human readable string of each SMB packet that
is sent out from the client but this level can get really verbose.
## Testing
To test this module, you need to install some prerequisites first. This can be
done by running;
```bash
pip install -r requirements-test.txt
# you can also run tox by installing tox
pip install tox
```
From there, run the basic tests with;
```bash
py.test -v --cov pypsexec --cov-report term-missing
# or with tox
tox
```
There are extra tests that only run when certain environment variables are set.
To run these tests set the following variables;
* `PYPSEXEC_SERVER`: The hostname or IP to a Windows host
* `PYPSEXEC_USERNAME`: The username to use authenticate with
* `PYPSEXEC_PASSWORD`: The password for `PYPSEXEC_USERNAME`
From there, you can just run `tox` or `py.test` with these environment
variables to run the integration tests.
## Future
Some things I would be interested in looking at adding in the future would be
* Add a Python script that can be called to run adhoc commands like `PsExec.exe`

/odoo13_addon_l10n_es_facturae-13.0.2.5.0-py3-none-any.whl/odoo/addons/l10n_es_facturae/models/account_tax_template.py
from odoo import fields, models
class AccountTaxTemplate(models.Model):
_inherit = "account.tax.template"
facturae_code = fields.Selection(
selection=[
("01", "IVA: Impuesto sobre el valor añadido"),
(
"02",
"IPSI: Impuesto sobre la producción, los servicios y" " la importación",
),
("03", "IGIC: Impuesto general indirecto de Canarias"),
("04", "IRPF: Impuesto sobre la Renta de las personas físicas"),
("05", "Otro"),
(
"06",
"ITPAJD: Impuesto sobre transmisiones patrimoniales y"
" actos jurídicos documentados",
),
("07", "IE: Impuestos especiales"),
("08", "Ra: Renta aduanas"),
(
"09",
"IGTECM: Impuesto general sobre el tráfico de empresas que"
" se aplica en Ceuta y Melilla",
),
(
"10",
"IECDPCAC: Impuesto especial sobre los combustibles"
" derivados del petróleo en la Comunidad Autonoma Canaria",
),
(
"11",
"IIIMAB: Impuesto sobre las instalaciones que inciden sobre"
" el medio ambiente en la Baleares",
),
("12", "ICIO: Impuesto sobre las construcciones, instalaciones y" " obras"),
(
"13",
"IMVDN: Impuesto municipal sobre las viviendas desocupadas"
" en Navarra",
),
("14", "IMSN: Impuesto municipal sobre solares en Navarra"),
("15", "IMGSN: Impuesto municipal sobre gastos suntuarios en" " Navarra"),
("16", "IMPN: Impuesto municipal sobre publicidad en Navarra"),
("17", "REIVA: Régimen especial de IVA para agencias de viajes"),
("18", "REIGIC: Régimen especial de IGIC: para agencias de" "viajes"),
("19", "REIPSI: Régimen especial de IPSI para agencias de viajes"),
],
string="Facturae code",
)
def _get_tax_vals(self, company, tax_template_to_tax):
val = super(AccountTaxTemplate, self)._get_tax_vals(
company, tax_template_to_tax
)
val["facturae_code"] = self.facturae_code
return val

/otxseg-0.3.1.tar.gz/otxseg-0.3.1/mmseg/models/decode_heads/ema_head.py

import math
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from ..builder import HEADS
from .decode_head import BaseDecodeHead
def reduce_mean(tensor):
"""Reduce mean when distributed training."""
if not (dist.is_available() and dist.is_initialized()):
return tensor
tensor = tensor.clone()
dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)
return tensor
class EMAModule(nn.Module):
"""Expectation Maximization Attention Module used in EMANet.
Args:
channels (int): Channels of the whole module.
num_bases (int): Number of bases.
num_stages (int): Number of the EM iterations.
momentum (float): Momentum used to update the stored bases.
"""
def __init__(self, channels, num_bases, num_stages, momentum):
super(EMAModule, self).__init__()
assert num_stages >= 1, 'num_stages must be at least 1!'
self.num_bases = num_bases
self.num_stages = num_stages
self.momentum = momentum
bases = torch.zeros(1, channels, self.num_bases)
bases.normal_(0, math.sqrt(2. / self.num_bases))
# [1, channels, num_bases]
bases = F.normalize(bases, dim=1, p=2)
self.register_buffer('bases', bases)
def forward(self, feats):
"""Forward function."""
batch_size, channels, height, width = feats.size()
# [batch_size, channels, height*width]
feats = feats.view(batch_size, channels, height * width)
# [batch_size, channels, num_bases]
bases = self.bases.repeat(batch_size, 1, 1)
with torch.no_grad():
for i in range(self.num_stages):
# [batch_size, height*width, num_bases]
attention = torch.einsum('bcn,bck->bnk', feats, bases)
attention = F.softmax(attention, dim=2)
# l1 norm
attention_normed = F.normalize(attention, dim=1, p=1)
# [batch_size, channels, num_bases]
bases = torch.einsum('bcn,bnk->bck', feats, attention_normed)
# l2 norm
bases = F.normalize(bases, dim=1, p=2)
feats_recon = torch.einsum('bck,bnk->bcn', bases, attention)
feats_recon = feats_recon.view(batch_size, channels, height, width)
if self.training:
bases = bases.mean(dim=0, keepdim=True)
bases = reduce_mean(bases)
# l2 norm
bases = F.normalize(bases, dim=1, p=2)
self.bases = (1 -
self.momentum) * self.bases + self.momentum * bases
return feats_recon
@HEADS.register_module()
class EMAHead(BaseDecodeHead):
"""Expectation Maximization Attention Networks for Semantic Segmentation.
This head is the implementation of `EMANet
<https://arxiv.org/abs/1907.13426>`_.
Args:
ema_channels (int): EMA module channels
num_bases (int): Number of bases.
num_stages (int): Number of the EM iterations.
concat_input (bool): Whether concat the input and output of convs
before classification layer. Default: True
momentum (float): Momentum to update the base. Default: 0.1.
"""
def __init__(self,
ema_channels,
num_bases,
num_stages,
concat_input=True,
momentum=0.1,
**kwargs):
super(EMAHead, self).__init__(**kwargs)
self.ema_channels = ema_channels
self.num_bases = num_bases
self.num_stages = num_stages
self.concat_input = concat_input
self.momentum = momentum
self.ema_module = EMAModule(self.ema_channels, self.num_bases,
self.num_stages, self.momentum)
self.ema_in_conv = ConvModule(
self.in_channels,
self.ema_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
# project (0, inf) -> (-inf, inf)
self.ema_mid_conv = ConvModule(
self.ema_channels,
self.ema_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=None,
act_cfg=None)
for param in self.ema_mid_conv.parameters():
param.requires_grad = False
self.ema_out_conv = ConvModule(
self.ema_channels,
self.ema_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=None)
self.bottleneck = ConvModule(
self.ema_channels,
self.channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
if self.concat_input:
self.conv_cat = ConvModule(
self.in_channels + self.channels,
self.channels,
kernel_size=3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
act_cfg=self.act_cfg)
def forward(self, inputs):
"""Forward function."""
x = self._transform_inputs(inputs)
feats = self.ema_in_conv(x)
identity = feats
feats = self.ema_mid_conv(feats)
recon = self.ema_module(feats)
recon = F.relu(recon, inplace=True)
recon = self.ema_out_conv(recon)
output = F.relu(identity + recon, inplace=True)
output = self.bottleneck(output)
if self.concat_input:
output = self.conv_cat(torch.cat([x, output], dim=1))
output = self.cls_seg(output)
return output
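

# Minimal sanity-check sketch (not part of mmseg): push a random feature map through
# the EMA module defined above; the channel and base counts here are arbitrary choices.
if __name__ == "__main__":
    module = EMAModule(channels=64, num_bases=16, num_stages=3, momentum=0.1)
    module.eval()  # self.training is False, so the momentum base update is skipped
    feats = torch.randn(2, 64, 32, 32)
    with torch.no_grad():
        recon = module(feats)
    print(recon.shape)  # -> torch.Size([2, 64, 32, 32])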

/mle_monitor-0.0.2.tar.gz/mle_monitor-0.0.2/README.md

# Lightweight Experiment & Resource Monitoring 📺
[![Pyversions](https://img.shields.io/pypi/pyversions/mle-monitor.svg?style=flat-square)](https://pypi.python.org/pypi/mle-monitor)
[![PyPI version](https://badge.fury.io/py/mle-monitor.svg)](https://badge.fury.io/py/mle-monitor)
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
[![codecov](https://codecov.io/gh/mle-infrastructure/mle-monitor/branch/main/graph/badge.svg?token=75FIYZG8BD)](https://codecov.io/gh/mle-infrastructure/mle-monitor)
[![Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/mle-infrastructure/mle-monitor/blob/main/examples/getting_started.ipynb)
<a href="https://github.com/mle-infrastructure/mle-monitor/blob/main/docs/logo_transparent.png?raw=true"><img src="https://github.com/mle-infrastructure/mle-monitor/blob/main/docs/logo_transparent.png?raw=true" width="200" align="right" /></a>
"Did I already run this experiment before? How many resources are currently available on my cluster?" If these are common questions you encounter during your daily life as a researcher, then `mle-monitor` is made for you. It provides a lightweight API for tracking your experiments using a pickle protocol database (e.g. for hyperparameter searches and/or multi-configuration/multi-seed runs). Furthermore, it comes with built-in resource monitoring on Slurm/Grid Engine clusters and local machines/servers.
`mle-monitor` provides three core functionalities:
- **`MLEProtocol`**: A composable protocol database API for ML experiments.
- **`MLEResource`**: A tool for obtaining server/cluster usage statistics.
- **`MLEDashboard`**: A dashboard visualizing resource usage & experiment protocol.
To get started I recommend checking out the [colab notebook](https://colab.research.google.com/github/mle-infrastructure/mle-monitor/blob/main/examples/getting_started.ipynb) and an [example workflow](https://github.com/mle-infrastructure/mle-monitor/blob/main/examples/run_infrastructure.py).
<img src="https://github.com/mle-infrastructure/mle-monitor/blob/main/docs/monitor-promo-gif.gif?raw=true" alt="drawing" width="900"/>
## `MLEProtocol`: Keeping Track of Your Experiments 📝
```python
from mle_monitor import MLEProtocol
# Load protocol database or create new one -> print summary
protocol_db = MLEProtocol("mle_protocol.db", verbose=False)
protocol_db.summary(tail=10, verbose=True)
# Draft data to store in protocol & add it to the protocol
meta_data = {
"purpose": "Grid search", # Purpose of experiment
"project_name": "MNIST", # Project name of experiment
"experiment_type": "hyperparameter-search", # Type of experiment
"experiment_dir": "experiments/logs", # Experiment directory
"num_total_jobs": 10, # Number of total jobs to run
...
}
new_experiment_id = protocol_db.add(meta_data)
# ... train your 10 (pseudo) networks/complete respective jobs
for i in range(10):
protocol_db.update_progress_bar(new_experiment_id)
# Wrap up an experiment (store completion time, etc.)
protocol_db.complete(new_experiment_id)
```
The meta data can contain the following keys:
| Search Type | Description | Default |
|----------------------- | ----------- | --------------- |
| `purpose` | Purpose of experiment | `'None provided'` |
| `project_name` | Project name of experiment | `'default'` |
| `exec_resource` | Resource jobs are run on | `'local'` |
| `experiment_dir` | Experiment log storage directory | `'experiments'` |
| `experiment_type` | Type of experiment to run | `'single'` |
| `base_fname` | Main code script to execute | `'main.py'` |
| `config_fname` | Config file path of experiment | `'base_config.yaml'` |
| `num_seeds` | Number of evaluations seeds | 1 |
| `num_total_jobs` | Number of total jobs to run | 1 |
| `num_job_batches` | Number of jobs in single batch | 1 |
| `num_jobs_per_batch` | Number of sequential job batches | 1 |
| `time_per_job` | Expected duration: days-hours-minutes | `'00:01:00'` |
| `num_cpus` | Number of CPUs used in job | 1 |
| `num_gpus` | Number of GPUs used in job | 0 |
Additionally you can synchronize the protocol with a Google Cloud Storage (GCS) bucket by providing `cloud_settings`. In this case also the results stored in `experiment_dir` will be uploaded to the GCS bucket, when you call `protocol.complete()`.
```python
# Define GCS settings - requires 'GOOGLE_APPLICATION_CREDENTIALS' env var.
cloud_settings = {
"project_name": "mle-toolbox", # GCP project name
"bucket_name": "mle-protocol", # GCS bucket name
"use_protocol_sync": True, # Whether to sync the protocol to GCS
"use_results_storage": True, # Whether to sync experiment_dir to GCS
}
protocol_db = MLEProtocol("mle_protocol.db", cloud_settings, verbose=True)
```
## The `MLEResource`: Keeping Track of Your Resources 📉
#### On Your Local Machine
```python
from mle_monitor import MLEResource
# Instantiate local resource and get usage data
resource = MLEResource(resource_name="local")
resource_data = resource.monitor()
```
#### On a Slurm Cluster
```python
resource = MLEResource(
resource_name="slurm-cluster",
monitor_config={"partitions": ["<partition-1>", "<partition-2>"]},
)
```
#### On a Grid Engine Cluster
```python
resource = MLEResource(
resource_name="sge-cluster",
monitor_config={"queues": ["<queue-1>", "<queue-2>"]}
)
```
## The `MLEDashboard`: Dashboard Visualization 🎞️
```python
from mle_monitor import MLEDashboard
# Instantiate dashboard with protocol and resource
dashboard = MLEDashboard(protocol, resource)
# Get a static snapshot of the protocol & resource utilisation printed in console
dashboard.snapshot()
# Run monitoring in while loop - dashboard
dashboard.live()
```
## Installation ⏳
A PyPI installation is available via:
```
pip install mle-monitor
```
If you want to get the most recent commit, please install directly from the repository:
```
pip install git+https://github.com/mle-infrastructure/mle-monitor.git@main
```
### Citing the MLE-Infrastructure ✏️
If you use `mle-monitor` in your research, please cite it as follows:
```
@software{mle_infrastructure2021github,
author = {Robert Tjarko Lange},
title = {{MLE-Infrastructure}: A Set of Lightweight Tools for Distributed Machine Learning Experimentation},
url = {http://github.com/mle-infrastructure},
year = {2021},
}
```
## Development 👷
You can run the test suite via `python -m pytest -vv tests/`. If you find a bug or are missing your favourite feature, feel free to create an issue and/or start [contributing](CONTRIBUTING.md) 🤗.

/PyContracts-1.8.12.tar.gz/PyContracts-1.8.12/docs/source/api/contracts.useful_contracts.rst

useful_contracts Package
========================
:mod:`useful_contracts` Package
-------------------------------
.. automodule:: contracts.useful_contracts
:members:
:undoc-members:
:show-inheritance:
:mod:`numbers` Module
---------------------
.. automodule:: contracts.useful_contracts.numbers
:members:
:undoc-members:
:show-inheritance:
:mod:`numpy_specific` Module
----------------------------
.. automodule:: contracts.useful_contracts.numpy_specific
:members:
:undoc-members:
:show-inheritance:

/kiwi-blockchain-1.0.3.tar.gz/kiwi-blockchain-1.0.3/chia/cmds/show.py

from typing import Any, Optional
import click
async def show_async(
rpc_port: Optional[int],
state: bool,
show_connections: bool,
exit_node: bool,
add_connection: str,
remove_connection: str,
block_header_hash_by_height: str,
block_by_header_hash: str,
) -> None:
import aiohttp
import time
import traceback
from time import localtime, struct_time
from typing import List, Optional
from chia.consensus.block_record import BlockRecord
from chia.rpc.full_node_rpc_client import FullNodeRpcClient
from chia.server.outbound_message import NodeType
from chia.types.full_block import FullBlock
from chia.util.bech32m import encode_puzzle_hash
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint16
from chia.util.misc import format_bytes
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
if rpc_port is None:
rpc_port = config["full_node"]["rpc_port"]
client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
if state:
blockchain_state = await client.get_blockchain_state()
if blockchain_state is None:
print("There is no blockchain found yet. Try again shortly")
return None
peak: Optional[BlockRecord] = blockchain_state["peak"]
difficulty = blockchain_state["difficulty"]
sub_slot_iters = blockchain_state["sub_slot_iters"]
synced = blockchain_state["sync"]["synced"]
sync_mode = blockchain_state["sync"]["sync_mode"]
total_iters = peak.total_iters if peak is not None else 0
num_blocks: int = 10
if synced:
print("Current Blockchain Status: Full Node Synced")
print("\nPeak: Hash:", peak.header_hash if peak is not None else "")
elif peak is not None and sync_mode:
sync_max_block = blockchain_state["sync"]["sync_tip_height"]
sync_current_block = blockchain_state["sync"]["sync_progress_height"]
print(f"Current Blockchain Status: Syncing {sync_current_block}/{sync_max_block}.")
print("Peak: Hash:", peak.header_hash if peak is not None else "")
elif peak is not None:
print(f"Current Blockchain Status: Not Synced. Peak height: {peak.height}")
else:
print("\nSearching for an initial chain\n")
print("You may be able to expedite with 'kiwi show -a host:port' using a known node.\n")
if peak is not None:
if peak.is_transaction_block:
peak_time = peak.timestamp
else:
peak_hash = peak.header_hash
curr = await client.get_block_record(peak_hash)
while curr is not None and not curr.is_transaction_block:
curr = await client.get_block_record(curr.prev_hash)
peak_time = curr.timestamp
peak_time_struct = struct_time(localtime(peak_time))
print(
" Time:",
f"{time.strftime('%a %b %d %Y %T %Z', peak_time_struct)}",
f" Height: {peak.height:>10}\n",
)
print("Estimated network space: ", end="")
print(format_bytes(blockchain_state["space"]))
print(f"Current difficulty: {difficulty}")
print(f"Current VDF sub_slot_iters: {sub_slot_iters}")
print("Total iterations since the start of the blockchain:", total_iters)
print("")
print(" Height: | Hash:")
added_blocks: List[BlockRecord] = []
curr = await client.get_block_record(peak.header_hash)
while curr is not None and len(added_blocks) < num_blocks and curr.height > 0:
added_blocks.append(curr)
curr = await client.get_block_record(curr.prev_hash)
for b in added_blocks:
print(f"{b.height:>9} | {b.header_hash}")
else:
print("Blockchain has no blocks yet")
# if called together with show_connections, leave a blank line
if show_connections:
print("")
if show_connections:
connections = await client.get_connections()
print("Connections:")
print(
"Type IP Ports NodeID Last Connect"
+ " MiB Up|Dwn"
)
for con in connections:
last_connect_tuple = struct_time(localtime(con["last_message_time"]))
last_connect = time.strftime("%b %d %T", last_connect_tuple)
mb_down = con["bytes_read"] / (1024 * 1024)
mb_up = con["bytes_written"] / (1024 * 1024)
host = con["peer_host"]
# Strip IPv6 brackets
if host[0] == "[":
host = host[1:39]
# Nodetype length is 9 because INTRODUCER will be deprecated
if NodeType(con["type"]) is NodeType.FULL_NODE:
peak_height = con["peak_height"]
peak_hash = con["peak_hash"]
if peak_hash is None:
peak_hash = "No Info"
if peak_height is None:
peak_height = 0
con_str = (
f"{NodeType(con['type']).name:9} {host:38} "
f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
f" {con['node_id'].hex()[:8]}... "
f"{last_connect} "
f"{mb_up:7.1f}|{mb_down:<7.1f}"
f"\n "
f"-SB Height: {peak_height:8.0f} -Hash: {peak_hash[2:10]}..."
)
else:
con_str = (
f"{NodeType(con['type']).name:9} {host:38} "
f"{con['peer_port']:5}/{con['peer_server_port']:<5}"
f" {con['node_id'].hex()[:8]}... "
f"{last_connect} "
f"{mb_up:7.1f}|{mb_down:<7.1f}"
)
print(con_str)
# if called together with state, leave a blank line
if state:
print("")
if exit_node:
node_stop = await client.stop_node()
print(node_stop, "Node stopped")
if add_connection:
if ":" not in add_connection:
print("Enter a valid IP and port in the following format: 10.5.4.3:8000")
else:
ip, port = (
":".join(add_connection.split(":")[:-1]),
add_connection.split(":")[-1],
)
print(f"Connecting to {ip}, {port}")
try:
await client.open_connection(ip, int(port))
except Exception:
print(f"Failed to connect to {ip}:{port}")
if remove_connection:
result_txt = ""
if len(remove_connection) != 8:
result_txt = "Invalid NodeID. Do not include '.'"
else:
connections = await client.get_connections()
for con in connections:
if remove_connection == con["node_id"].hex()[:8]:
print("Attempting to disconnect", "NodeID", remove_connection)
try:
await client.close_connection(con["node_id"])
except Exception:
result_txt = f"Failed to disconnect NodeID {remove_connection}"
else:
result_txt = f"NodeID {remove_connection}... {NodeType(con['type']).name} "
f"{con['peer_host']} disconnected"
elif result_txt == "":
result_txt = f"NodeID {remove_connection}... not found"
print(result_txt)
if block_header_hash_by_height != "":
block_header = await client.get_block_record_by_height(block_header_hash_by_height)
if block_header is not None:
print(f"Header hash of block {block_header_hash_by_height}: " f"{block_header.header_hash.hex()}")
else:
print("Block height", block_header_hash_by_height, "not found")
if block_by_header_hash != "":
block: Optional[BlockRecord] = await client.get_block_record(hexstr_to_bytes(block_by_header_hash))
full_block: Optional[FullBlock] = await client.get_block(hexstr_to_bytes(block_by_header_hash))
# Would like to have a verbose flag for this
if block is not None:
assert full_block is not None
prev_b = await client.get_block_record(block.prev_hash)
if prev_b is not None:
difficulty = block.weight - prev_b.weight
else:
difficulty = block.weight
if block.is_transaction_block:
assert full_block.transactions_info is not None
block_time = struct_time(
localtime(
full_block.foliage_transaction_block.timestamp
if full_block.foliage_transaction_block
else None
)
)
block_time_string = time.strftime("%a %b %d %Y %T %Z", block_time)
cost = str(full_block.transactions_info.cost)
tx_filter_hash = "Not a transaction block"
if full_block.foliage_transaction_block:
tx_filter_hash = full_block.foliage_transaction_block.filter_hash
fees: Any = block.fees
else:
block_time_string = "Not a transaction block"
cost = "Not a transaction block"
tx_filter_hash = "Not a transaction block"
fees = "Not a transaction block"
address_prefix = config["network_overrides"]["config"][config["selected_network"]]["address_prefix"]
farmer_address = encode_puzzle_hash(block.farmer_puzzle_hash, address_prefix)
pool_address = encode_puzzle_hash(block.pool_puzzle_hash, address_prefix)
pool_pk = (
full_block.reward_chain_block.proof_of_space.pool_public_key
if full_block.reward_chain_block.proof_of_space.pool_public_key is not None
else "Pay to pool puzzle hash"
)
print(
f"Block Height {block.height}\n"
f"Header Hash 0x{block.header_hash.hex()}\n"
f"Timestamp {block_time_string}\n"
f"Weight {block.weight}\n"
f"Previous Block 0x{block.prev_hash.hex()}\n"
f"Difficulty {difficulty}\n"
f"Sub-slot iters {block.sub_slot_iters}\n"
f"Cost {cost}\n"
f"Total VDF Iterations {block.total_iters}\n"
f"Is a Transaction Block?{block.is_transaction_block}\n"
f"Deficit {block.deficit}\n"
f"PoSpace 'k' Size {full_block.reward_chain_block.proof_of_space.size}\n"
f"Plot Public Key 0x{full_block.reward_chain_block.proof_of_space.plot_public_key}\n"
f"Pool Public Key {pool_pk}\n"
f"Tx Filter Hash {tx_filter_hash}\n"
f"Farmer Address {farmer_address}\n"
f"Pool Address {pool_address}\n"
f"Fees Amount {fees}\n"
)
else:
print("Block with header hash", block_header_hash_by_height, "not found")
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(f"Connection error. Check if full node rpc is running at {rpc_port}")
print("This is normal if full node is still starting up")
else:
tb = traceback.format_exc()
print(f"Exception from 'show' {tb}")
client.close()
await client.await_closed()
@click.command("show", short_help="Show node information")
@click.option(
"-p",
"--rpc-port",
help=(
"Set the port where the Full Node is hosting the RPC interface. "
"See the rpc_port under full_node in config.yaml"
),
type=int,
default=None,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-s", "--state", help="Show the current state of the blockchain", is_flag=True, type=bool, default=False)
@click.option(
"-c", "--connections", help="List nodes connected to this Full Node", is_flag=True, type=bool, default=False
)
@click.option("-e", "--exit-node", help="Shut down the running Full Node", is_flag=True, default=False)
@click.option("-a", "--add-connection", help="Connect to another Full Node by ip:port", type=str, default="")
@click.option(
"-r", "--remove-connection", help="Remove a Node by the first 8 characters of NodeID", type=str, default=""
)
@click.option(
"-bh", "--block-header-hash-by-height", help="Look up a block header hash by block height", type=str, default=""
)
@click.option("-b", "--block-by-header-hash", help="Look up a block by block header hash", type=str, default="")
def show_cmd(
rpc_port: Optional[int],
wallet_rpc_port: Optional[int],
state: bool,
connections: bool,
exit_node: bool,
add_connection: str,
remove_connection: str,
block_header_hash_by_height: str,
block_by_header_hash: str,
) -> None:
import asyncio
asyncio.run(
show_async(
rpc_port,
state,
connections,
exit_node,
add_connection,
remove_connection,
block_header_hash_by_height,
block_by_header_hash,
)
) | PypiClean |
/katalytic_images-0.9.4.tar.gz/katalytic_images-0.9.4/README.md | # katalytic-images [![version](https://img.shields.io/pypi/v/katalytic-images)](https://pypi.org/project/katalytic-images/) [![tests](https://gitlab.com/katalytic/katalytic-images/badges/main/pipeline.svg?key_text=tests&key_width=38)](https://gitlab.com/katalytic/katalytic-images/-/commits/main) [![coverage](https://gitlab.com/katalytic/katalytic-images/badges/main/coverage.svg)](https://gitlab.com/katalytic/katalytic-images/-/commits/main) [![docs](https://img.shields.io/readthedocs/katalytic-images.svg)](https://katalytic-images.readthedocs.io/en/latest/) [![license: MIT](https://img.shields.io/badge/license-MIT-green.svg)](https://opensource.org/licenses/MIT)
**Don't use in production yet.**
I will probably introduce backwards incompatible changes.
Process images with less boilerplate and more flexibility than you've ever dreamed of.
- loading images in a specific colorspace (RGB, HSV, grayscale, and more) with a single command
- converting image colorspaces
- the functions accept numpy arrays and Pillow images as input and return the same type
- defining the shapes to be drawn on top of an image in a declarative way as a list of dicts and passing it to draw()
- Many more (TODO: Link to tocs)
## Example (TODO)
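There is no official example yet, so here is a minimal sketch based on the feature list above. The function names (`load_image`, `convert_image`, `draw`) and the shape dict keys are assumptions, not the confirmed API; check the documentation for the real names.
```python
# hypothetical usage sketch: names and signatures are assumptions
from katalytic.images import load_image, convert_image, draw
img = load_image('photo.jpg', 'RGB')     # load directly in a chosen colorspace
hsv = convert_image(img, 'RGB', 'HSV')   # convert between colorspaces
# declare the shapes as a list of dicts, then draw them all in one call
shapes = [
    {'type': 'rectangle', 'p1': (10, 10), 'p2': (100, 50), 'color': (0, 255, 0)},
    {'type': 'text', 'text': 'hello', 'origin': (10, 80), 'color': (255, 0, 0)},
]
annotated = draw(img, shapes)
```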
## Installation
First, install opencv
```bash
if [[ ! $(pip freeze | grep -Pi 'opencv') ]]; then
pip install opencv-python
fi
```
Then, install this package
```bash
pip install katalytic-images
```
## Roadmap
- make pillow an optional dependency.
- setup_load_image() should pick opencv if pillow is not available
- image thresholding and masking operations
- interactive data exploration widgets (maybe as part of another package)
- higher level API on top of opencv
- utilities for video processing
## Contributing
We appreciate any form of contribution, including but not limited to:
- **Code contributions**: Enhance our repository with your code and tests.
- **Feature suggestions**: Your ideas can shape the future development of our package.
- **Architectural improvements**: Help us optimize our system's design and API.
- **Bug fixes**: Improve user experience by reporting or resolving issues.
- **Documentation**: Help us maintain clear and up-to-date instructions for users.
| PypiClean |
/google-cloud-video-live-stream-1.5.1.tar.gz/google-cloud-video-live-stream-1.5.1/google/cloud/video/live_stream_v1/services/livestream_service/transports/grpc.py | from typing import Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, grpc_helpers, operations_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
import grpc # type: ignore
from google.cloud.video.live_stream_v1.types import resources, service
from .base import DEFAULT_CLIENT_INFO, LivestreamServiceTransport
class LivestreamServiceGrpcTransport(LivestreamServiceTransport):
"""gRPC backend transport for LivestreamService.
    Using the Live Stream API, you can generate live streams in
    various renditions and streaming formats. The streaming formats
    include HTTP Live Streaming (HLS) and Dynamic Adaptive Streaming
    over HTTP (DASH). You can send a source stream in various ways,
    including Real-Time Messaging Protocol (RTMP) and Secure
    Reliable Transport (SRT).
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "livestream.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: Optional[grpc.Channel] = None,
api_mtls_endpoint: Optional[str] = None,
client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional(Sequence[str])): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "livestream.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service."""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
@property
def create_channel_(
self,
) -> Callable[[service.CreateChannelRequest], operations_pb2.Operation]:
r"""Return a callable for the create channel method over gRPC.
Creates a channel with the provided unique ID in the
specified region.
Returns:
Callable[[~.CreateChannelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_channel_" not in self._stubs:
self._stubs["create_channel_"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/CreateChannel",
request_serializer=service.CreateChannelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_channel_"]
@property
def list_channels(
self,
) -> Callable[[service.ListChannelsRequest], service.ListChannelsResponse]:
r"""Return a callable for the list channels method over gRPC.
Returns a list of all channels in the specified
region.
Returns:
Callable[[~.ListChannelsRequest],
~.ListChannelsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_channels" not in self._stubs:
self._stubs["list_channels"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/ListChannels",
request_serializer=service.ListChannelsRequest.serialize,
response_deserializer=service.ListChannelsResponse.deserialize,
)
return self._stubs["list_channels"]
@property
def get_channel(self) -> Callable[[service.GetChannelRequest], resources.Channel]:
r"""Return a callable for the get channel method over gRPC.
Returns the specified channel.
Returns:
Callable[[~.GetChannelRequest],
~.Channel]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_channel" not in self._stubs:
self._stubs["get_channel"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/GetChannel",
request_serializer=service.GetChannelRequest.serialize,
response_deserializer=resources.Channel.deserialize,
)
return self._stubs["get_channel"]
@property
def delete_channel(
self,
) -> Callable[[service.DeleteChannelRequest], operations_pb2.Operation]:
r"""Return a callable for the delete channel method over gRPC.
Deletes the specified channel.
Returns:
Callable[[~.DeleteChannelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_channel" not in self._stubs:
self._stubs["delete_channel"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/DeleteChannel",
request_serializer=service.DeleteChannelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_channel"]
@property
def update_channel(
self,
) -> Callable[[service.UpdateChannelRequest], operations_pb2.Operation]:
r"""Return a callable for the update channel method over gRPC.
Updates the specified channel.
Returns:
Callable[[~.UpdateChannelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_channel" not in self._stubs:
self._stubs["update_channel"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/UpdateChannel",
request_serializer=service.UpdateChannelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_channel"]
@property
def start_channel(
self,
) -> Callable[[service.StartChannelRequest], operations_pb2.Operation]:
r"""Return a callable for the start channel method over gRPC.
Starts the specified channel. Part of the video
pipeline will be created only when the StartChannel
request is received by the server.
Returns:
Callable[[~.StartChannelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "start_channel" not in self._stubs:
self._stubs["start_channel"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/StartChannel",
request_serializer=service.StartChannelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["start_channel"]
@property
def stop_channel(
self,
) -> Callable[[service.StopChannelRequest], operations_pb2.Operation]:
r"""Return a callable for the stop channel method over gRPC.
Stops the specified channel. Part of the video
pipeline will be released when the StopChannel request
is received by the server.
Returns:
Callable[[~.StopChannelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "stop_channel" not in self._stubs:
self._stubs["stop_channel"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/StopChannel",
request_serializer=service.StopChannelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["stop_channel"]
@property
def create_input(
self,
) -> Callable[[service.CreateInputRequest], operations_pb2.Operation]:
r"""Return a callable for the create input method over gRPC.
Creates an input with the provided unique ID in the
specified region.
Returns:
Callable[[~.CreateInputRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_input" not in self._stubs:
self._stubs["create_input"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/CreateInput",
request_serializer=service.CreateInputRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_input"]
@property
def list_inputs(
self,
) -> Callable[[service.ListInputsRequest], service.ListInputsResponse]:
r"""Return a callable for the list inputs method over gRPC.
Returns a list of all inputs in the specified region.
Returns:
Callable[[~.ListInputsRequest],
~.ListInputsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_inputs" not in self._stubs:
self._stubs["list_inputs"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/ListInputs",
request_serializer=service.ListInputsRequest.serialize,
response_deserializer=service.ListInputsResponse.deserialize,
)
return self._stubs["list_inputs"]
@property
def get_input(self) -> Callable[[service.GetInputRequest], resources.Input]:
r"""Return a callable for the get input method over gRPC.
Returns the specified input.
Returns:
Callable[[~.GetInputRequest],
~.Input]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_input" not in self._stubs:
self._stubs["get_input"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/GetInput",
request_serializer=service.GetInputRequest.serialize,
response_deserializer=resources.Input.deserialize,
)
return self._stubs["get_input"]
@property
def delete_input(
self,
) -> Callable[[service.DeleteInputRequest], operations_pb2.Operation]:
r"""Return a callable for the delete input method over gRPC.
Deletes the specified input.
Returns:
Callable[[~.DeleteInputRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_input" not in self._stubs:
self._stubs["delete_input"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/DeleteInput",
request_serializer=service.DeleteInputRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_input"]
@property
def update_input(
self,
) -> Callable[[service.UpdateInputRequest], operations_pb2.Operation]:
r"""Return a callable for the update input method over gRPC.
Updates the specified input.
Returns:
Callable[[~.UpdateInputRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_input" not in self._stubs:
self._stubs["update_input"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/UpdateInput",
request_serializer=service.UpdateInputRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_input"]
@property
def create_event(self) -> Callable[[service.CreateEventRequest], resources.Event]:
r"""Return a callable for the create event method over gRPC.
Creates an event with the provided unique ID in the
specified channel.
Returns:
Callable[[~.CreateEventRequest],
~.Event]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_event" not in self._stubs:
self._stubs["create_event"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/CreateEvent",
request_serializer=service.CreateEventRequest.serialize,
response_deserializer=resources.Event.deserialize,
)
return self._stubs["create_event"]
@property
def list_events(
self,
) -> Callable[[service.ListEventsRequest], service.ListEventsResponse]:
r"""Return a callable for the list events method over gRPC.
Returns a list of all events in the specified
channel.
Returns:
Callable[[~.ListEventsRequest],
~.ListEventsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_events" not in self._stubs:
self._stubs["list_events"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/ListEvents",
request_serializer=service.ListEventsRequest.serialize,
response_deserializer=service.ListEventsResponse.deserialize,
)
return self._stubs["list_events"]
@property
def get_event(self) -> Callable[[service.GetEventRequest], resources.Event]:
r"""Return a callable for the get event method over gRPC.
Returns the specified event.
Returns:
Callable[[~.GetEventRequest],
~.Event]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_event" not in self._stubs:
self._stubs["get_event"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/GetEvent",
request_serializer=service.GetEventRequest.serialize,
response_deserializer=resources.Event.deserialize,
)
return self._stubs["get_event"]
@property
def delete_event(self) -> Callable[[service.DeleteEventRequest], empty_pb2.Empty]:
r"""Return a callable for the delete event method over gRPC.
Deletes the specified event.
Returns:
Callable[[~.DeleteEventRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_event" not in self._stubs:
self._stubs["delete_event"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/DeleteEvent",
request_serializer=service.DeleteEventRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_event"]
@property
def create_asset(
self,
) -> Callable[[service.CreateAssetRequest], operations_pb2.Operation]:
r"""Return a callable for the create asset method over gRPC.
        Creates an Asset with the provided unique ID in the
specified region.
Returns:
Callable[[~.CreateAssetRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_asset" not in self._stubs:
self._stubs["create_asset"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/CreateAsset",
request_serializer=service.CreateAssetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_asset"]
@property
def delete_asset(
self,
) -> Callable[[service.DeleteAssetRequest], operations_pb2.Operation]:
r"""Return a callable for the delete asset method over gRPC.
Deletes the specified asset if it is not used.
Returns:
Callable[[~.DeleteAssetRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_asset" not in self._stubs:
self._stubs["delete_asset"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/DeleteAsset",
request_serializer=service.DeleteAssetRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_asset"]
@property
def get_asset(self) -> Callable[[service.GetAssetRequest], resources.Asset]:
r"""Return a callable for the get asset method over gRPC.
Returns the specified asset.
Returns:
Callable[[~.GetAssetRequest],
~.Asset]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_asset" not in self._stubs:
self._stubs["get_asset"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/GetAsset",
request_serializer=service.GetAssetRequest.serialize,
response_deserializer=resources.Asset.deserialize,
)
return self._stubs["get_asset"]
@property
def list_assets(
self,
) -> Callable[[service.ListAssetsRequest], service.ListAssetsResponse]:
r"""Return a callable for the list assets method over gRPC.
Returns a list of all assets in the specified region.
Returns:
Callable[[~.ListAssetsRequest],
~.ListAssetsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_assets" not in self._stubs:
self._stubs["list_assets"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/ListAssets",
request_serializer=service.ListAssetsRequest.serialize,
response_deserializer=service.ListAssetsResponse.deserialize,
)
return self._stubs["list_assets"]
@property
def get_pool(self) -> Callable[[service.GetPoolRequest], resources.Pool]:
r"""Return a callable for the get pool method over gRPC.
Returns the specified pool.
Returns:
Callable[[~.GetPoolRequest],
~.Pool]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_pool" not in self._stubs:
self._stubs["get_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/GetPool",
request_serializer=service.GetPoolRequest.serialize,
response_deserializer=resources.Pool.deserialize,
)
return self._stubs["get_pool"]
@property
def update_pool(
self,
) -> Callable[[service.UpdatePoolRequest], operations_pb2.Operation]:
r"""Return a callable for the update pool method over gRPC.
Updates the specified pool.
Returns:
Callable[[~.UpdatePoolRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_pool" not in self._stubs:
self._stubs["update_pool"] = self.grpc_channel.unary_unary(
"/google.cloud.video.livestream.v1.LivestreamService/UpdatePool",
request_serializer=service.UpdatePoolRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["update_pool"]
def close(self):
self.grpc_channel.close()
@property
def delete_operation(
self,
) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
r"""Return a callable for the delete_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_operation" not in self._stubs:
self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/DeleteOperation",
request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["delete_operation"]
@property
def cancel_operation(
self,
) -> Callable[[operations_pb2.CancelOperationRequest], None]:
r"""Return a callable for the cancel_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "cancel_operation" not in self._stubs:
self._stubs["cancel_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/CancelOperation",
request_serializer=operations_pb2.CancelOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["cancel_operation"]
@property
def get_operation(
self,
) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
r"""Return a callable for the get_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_operation" not in self._stubs:
self._stubs["get_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/GetOperation",
request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["get_operation"]
@property
def list_operations(
self,
) -> Callable[
[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
]:
r"""Return a callable for the list_operations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_operations" not in self._stubs:
self._stubs["list_operations"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/ListOperations",
request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
response_deserializer=operations_pb2.ListOperationsResponse.FromString,
)
return self._stubs["list_operations"]
@property
def list_locations(
self,
) -> Callable[
[locations_pb2.ListLocationsRequest], locations_pb2.ListLocationsResponse
]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_locations" not in self._stubs:
self._stubs["list_locations"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/ListLocations",
request_serializer=locations_pb2.ListLocationsRequest.SerializeToString,
response_deserializer=locations_pb2.ListLocationsResponse.FromString,
)
return self._stubs["list_locations"]
@property
def get_location(
self,
) -> Callable[[locations_pb2.GetLocationRequest], locations_pb2.Location]:
r"""Return a callable for the list locations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_location" not in self._stubs:
self._stubs["get_location"] = self.grpc_channel.unary_unary(
"/google.cloud.location.Locations/GetLocation",
request_serializer=locations_pb2.GetLocationRequest.SerializeToString,
response_deserializer=locations_pb2.Location.FromString,
)
return self._stubs["get_location"]
@property
def kind(self) -> str:
return "grpc"
__all__ = ("LivestreamServiceGrpcTransport",) | PypiClean |
/zabbix-enums-1.60.1.tar.gz/zabbix-enums-1.60.1/src/zabbix_enums/z52/screenitem.py | from zabbix_enums import _ZabbixEnum
class ScreenItemResourceType(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Type of screen item.
"""
GRAPH = 0
SIMPLE_GRAPH = 1
MAP = 2
PLAIN_TEXT = 3
HOSTS_INFO = 4
TRIGGERS_INFO = 5
SYSTEM_INFORMATION = 6
CLOCK = 7
TRIGGERS_OVERVIEW = 9
DATA_OVERVIEW = 10
URL = 11
HISTORY_OF_ACTIONS = 12
HISTORY_OF_EVENTS = 13
LATEST_HOST_GROUP_ISSUES = 14
PROBLEMS_BY_SEVERITY = 15
LATEST_HOST_ISSUES = 16
SIMPLE_GRAPH_PROTOTYPE = 19
GRAPH_PROTOTYPE = 20
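# Illustrative sketch (not part of the library): these enums replace the magic
# numbers used in Zabbix screen item payloads. Assuming _ZabbixEnum members
# behave like integer enums, a call through a hypothetical Zabbix API client
# could look roughly like this:
#
#   api.screenitem.create(
#       screenid=16,
#       resourcetype=ScreenItemResourceType.CLOCK,
#       style=ScreenItemStyleClock.SERVER_TIME,
#       x=0, y=0,
#   )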
class ScreenItemDynamic(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Whether the screen item is dynamic.
"""
NO = 0
YES = 1
class ScreenItemHalign(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Specifies how the screen item must be aligned horizontally in the cell.
"""
CENTER = 0
LEFT = 1
RIGHT = 2
class ScreenItemSortTriggersActions(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Order in which actions or triggers must be sorted.
Possible values for history of actions screen elements.
"""
TIME_ASCENDING = 3
TIME_DESCENDING = 4
TYPE_ASCENDING = 5
TYPE_DESCENDING = 6
STATUS_ASCENDING = 7
STATUS_DESCENDING = 8
RETRIES_LEFT_ASCENDING = 9
RETRIES_LEFT_DESCENDING = 10
RECIPIENT_ASCENDING = 11
RECIPIENT_DESCENDING = 12
class ScreenItemSortTriggersIssues(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Possible values for latest host group issues and latest host issues screen items:
"""
LAST_CHANGE_DESCENDING = 0
SEVERITY_DESCENDING = 1
HOST_ASCENDING = 2
class ScreenItemStyleOverview(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Screen item display option.
Possible values for data overview and triggers overview screen items:
"""
HOSTS_ON_LEFT = 0
HOSTS_ON_TOP = 1
class ScreenItemStyleInfo(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Screen item display option.
Possible values for hosts info and triggers info screen elements.
"""
HORIZONTAL_LAYOUT = 0
VERTICAL_LAYOUT = 1
class ScreenItemStyleClock(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Screen item display option.
Possible values for clock screen items.
"""
LOCAL_TIME = 0
SERVER_TIME = 1
HOST_TIME = 2
class ScreenItemStylePlaintext(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Screen item display option.
Possible values for plain text screen items.
"""
PLAIN = 0
HTML = 1
class ScreenItemValign(_ZabbixEnum):
"""
https://www.zabbix.com/documentation/5.2/en/manual/api/reference/screenitem/object#screen-item
Specifies how the screen item must be aligned vertically in the cell.
"""
MIDDLE = 0
TOP = 1
BOTTOM = 2 | PypiClean |
/d4-0.1.10.tar.gz/d4-0.1.10/README.md | # D4: Dock to Develop Dynamic Dockerfile
## abstract
D4 is a tool for developing generic Dockerfiles. It provides an easy way to build a container image from a Dockerfile and to test that Dockerfile, so you can practice TDD (Test-Driven Development) of Dockerfiles.
## requirements
D4 requires the following software.
* [docker](https://www.docker.com/)
* [container-structure-test](https://github.com/GoogleContainerTools/container-structure-test)
* python 3.4+
* python packages
* docker
* pyyaml
## install
```
$ pip install d4
```
## quick start
1. Create project `httpd`
```
$ d4 init httpd
$ cd httpd
```
2. Describe image name and registry server in `common.yaml`
```
image_name: <your dockerhub username>/httpd:latest
registry_server: docker.io
```
3. Describe test code in `tests/config.yaml`
```
schemaVersion: '2.0.0'
commandTests:
- name: "check httpd pkg"
command: "rpm"
args:
- "-qa"
expectedOutput:
- "httpd-.*"
metadataTest:
exposedPorts:
- "80"
cmd:
- "httpd"
- "-DFOREGROUND"
```
4. Describe mock in `Dockerfile`
```
FROM docker.io/centos:latest
```
5. Run develop subcommand and confirm test fails
```
$ d4 develop
```
6. Describe implementation in `Dockerfile`
```
FROM docker.io/centos:latest
RUN yum install -y httpd
EXPOSE 80
CMD ["httpd", "-DFOREGROUND"]
```
7. Run develop subcommand and confirm test succeeds
```
$ d4 develop
```
8. Release the image to your Docker Hub repository
```
$ d4 release -u <your dockerhub username> -p <your dockerhub password>
```
## usage
### project directory architecture
A d4 project needs to have the following directory architecture:
```
(project name)
|-- Dockerfile
|-- build.yaml
|-- common.yaml
`-- tests
|-- config_1.yaml
|-- ...
`-- config_N.yaml
```
* Dockerfile
The Dockerfile that belongs to the project. The container image is built from this Dockerfile and the tests are executed against it.
* build.yaml
The yaml file that holds the arguments used when building the image. All arguments of the Dockerfile should be described in this file, for example:
```
args:
ARG_1: "VALUE_1"
ARG_2: "VALUE_2"
ARG_3: "VALUE_3"
```
* common.yaml
The yaml file that holds the parameters used by both the image build and the image tests, for example:
```
image_name: bbrfkr0129/httpd:latest
registry_server: docker.io
```
* tests/\<yaml config file\>
The yaml files that hold the tests run against the image. The tests in these yaml files need to be written so that container-structure-test can process them.
### TDD method with D4
1. Create a project with the subcommand `init`. This subcommand creates the basic directory architecture and sample files.
```
$ d4 init tdd_project
$ cd tdd_project
$ tree . --charset=C
.
|-- Dockerfile
|-- build.yaml
|-- common.yaml
`-- tests
`-- config.yaml
```
2. Specify the name of the image under development and the registry server it will be pushed to in `common.yaml`:
```
$ cat <<EOF > common.yaml
image_name: <your dockerhub username>/tdd_image:latest
registry_server: docker.io
EOF
```
3. Write a mock in `Dockerfile` and `build.yaml`. The implementation is not written yet.
```
$ cat <<EOF > Dockerfile
FROM <base image name>:<tag>
EOF
$ cat <<EOF > build.yaml
args:
EOF
```
4. Write test code in `tests/config.yaml`, following the container-structure-test syntax. In TDD, the test code is written before the `Dockerfile`.
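For example, a minimal `tests/config.yaml` could look like this (any check supported by container-structure-test works here):
```
schemaVersion: '2.0.0'
fileExistenceTests:
  - name: "check os-release"
    path: "/etc/os-release"
    shouldExist: true
```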
5. Execute the tests with the subcommand `develop`. This subcommand builds the mock container image and tests it. The tests should fail at this point.
```
$ d4 develop
```
6. Write the implementation in `Dockerfile` and `build.yaml`. The arguments of the `ARG` statements in the `Dockerfile` should be written in `build.yaml`:
```
args:
ARG_1: "VALUE_1"
ARG_2: "VALUE_2"
ARG_3: "VALUE_3"
```
7. Execute build and test with the subcommand `develop`. This subcommand applies the implementation of the `Dockerfile` and then tests the generated container image. All tests should pass.
```
$ d4 develop
```
8. Repeat steps 4.-7. until you get the required container image.
9. Release the resulting container image to the registry server with the subcommand `release`. This subcommand runs a final build and test, then pushes the image to the registry server specified in `common.yaml`.
```
$ d4 release
```
10. Run a container from the pushed container image!
```
$ docker run <your dockerhub username>/tdd_image:latest
```
### valid subcommands
* d4 init
Create and initialize project.
* d4 build
Only build process runs.
* d4 test
Only test process runs.
* d4 login
Only login into registry server specified in `common.yaml`.
* d4 push
Only push container image specified in `common.yaml`.
* d4 develop
build and test processes run.
* d4 release
build, test, login and push processes run.
| PypiClean |
/clickatell-0.1.2.tar.gz/clickatell-0.1.2/README.md | Clickatell Python Library
================================
Master: [![Build Status](https://secure.travis-ci.org/arcturial/clickatell-python.png?branch=master)](http://travis-ci.org/arcturial/clickatell-python)
This library allows easy access to [Clickatell's](http://www.clickatell.com) different messaging APIs.
The library supports both versions **2** and **3** of Python.
1. Installation
------------------
You can install this library via pip as part of your requirements file.
```
pip install clickatell
```
[Clickatell Python PyPI](https://pypi.python.org/pypi?name=clickatell&version=0.0.1&:action=display)
2. Usage
------------------
The library currently supports the `Http` and `Rest` protocols.
### HTTP API
``` python
from clickatell.http import Http
clickatell = Http(username, password, apiID)
response = clickatell.sendMessage(['1111111111'], "My Message")
for entry in response:
print(entry['id'])
# entry['id']
# entry['destination']
# entry['error']
# entry['errorCode']
```
### REST API
``` python
from clickatell.rest import Rest
clickatell = Rest(token)
response = clickatell.sendMessage(['1111111111'], "My Message")
for entry in response:
print(entry['id'])
# entry['id']
# entry['destination']
# entry['error']
# entry['errorCode']
```
### Sending to multiple numbers
The `to` parameter of the `sendMessage` call can take an array of numbers. If you specify only a single number, like `clickatell.sendMessage(1111111111, "Message")`, the library will automatically convert it to an array for your convenience.
3. Supported API calls
------------------
The available calls are defined in the `clickatell.Transport` interface.
``` python
def sendMessage(self, to, message, extra={})
def getBalance(self)
def stopMessage(self, apiMsgId)
def queryMessage(self, apiMsgId)
def routeCoverage(self, msisdn)
def getMessageCharge(self, apiMsgId)
```
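As a rough illustration, the other calls are used the same way as `sendMessage`. The exact contents of each response are not documented here, so treat the comments below as assumptions:
``` python
from clickatell.http import Http
clickatell = Http(username, password, apiID)
print(clickatell.getBalance())                 # remaining account balance
print(clickatell.routeCoverage('1111111111'))  # coverage info for a number
print(clickatell.queryMessage(apiMsgId))       # status of a previously sent message
print(clickatell.getMessageCharge(apiMsgId))   # charge of a previously sent message
```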
4. Dealing with extra parameters in sendMessage
--------------------------------------
For usability purposes the `sendMessage` call focuses on the recipients and the content. In order to specify any of the additional parameters defined
in the [Clickatell document](http://www.clickatell.com), you can use the `extra` parameter and pass them as a dictionary. | PypiClean |
/odoo14_addon_invader_payment_adyen-14.0.1.0.4-py3-none-any.whl/odoo/addons/invader_payment_adyen/services/payment_adyen.py |
import logging
from odoo.addons.base_rest import restapi
from odoo.addons.base_rest.components.service import (
skip_secure_response,
to_bool,
to_int,
)
from odoo.addons.component.core import AbstractComponent
_logger = logging.getLogger(__name__)
try:
from cerberus import Validator
except ImportError as err:
_logger.debug(err)
# map Adyen transaction statuses to Odoo payment.transaction statuses
ADYEN_TRANSACTION_STATUSES = {
"Authorised": "done",
"Refused": "error",
"Cancelled": "cancel",
"Received": "pending",
"RedirectShopper": "draft",
}
APP_NAME = "INVADER"
payment_completion_details = [
"MD",
"PaReq",
"PaRes",
"billingToken",
"cupsecureplus.smscode",
"facilitatorAccessToken",
"oneTimePasscode",
"orderID",
"payerID",
"payload",
"paymentID",
"paymentStatus",
"redirectResult",
"returnUrlQueryString",
"threeds2.challengeResult",
"threeds2.fingerprint",
]
def filter_completion_details(details):
"""
Filter authorized details in order to pass just those ones to the API
:param details: The details values as a dict
:type details: dict
"""
if not details:
return
unknown_params = []
new_details = {}
for key, value in details.items():
if key not in payment_completion_details:
unknown_params.append(key)
else:
new_details[key] = value
if unknown_params:
# Log unknown keys
message = (
"PaymentCompletionDetails contains unknown params: %s"
% ",".join([str(param) for param in unknown_params])
)
_logger.info(message)
return new_details
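# Illustrative example (not part of the module): keys that are not listed in
# ``payment_completion_details`` are dropped and reported in the log, e.g.
#   filter_completion_details({"redirectResult": "Ab02b...", "session_id": "x"})
#   # -> {"redirectResult": "Ab02b..."}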
class PaymentServiceAdyen(AbstractComponent):
_name = "payment.service.adyen"
_inherit = "base.rest.service"
_usage = "payment_adyen"
_description = "REST Services for Adyen payments"
@property
def payment_service(self):
return self.component(usage="invader.payment")
def _get_adyen_service(self, transaction):
"""
Return an intialized library
:param transaction:
:return:
"""
return transaction._get_adyen_service()
def _validator_paymentMethods(self):
res = self.payment_service._invader_get_target_validator()
res.update(
{
"payment_mode_id": {
"coerce": to_int,
"type": "integer",
"required": True,
}
}
)
return res
def _validator_return_paymentMethods(self):
return Validator(
{
"paymentMethods": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"name": {"type": "string"},
"type": {"type": "string"},
},
},
},
"transaction_id": {"type": "integer"},
},
allow_unknown=True,
)
def paymentMethods(self, target, **params):
"""
https://docs.adyen.com/checkout/drop-in-web#step-1-get-available-payment-methods
This is the service to provide Payment Methods depending on transaction
details and on partner country.
:return:
"""
payment_mode_id = params.get("payment_mode_id")
transaction_obj = self.env["payment.transaction"]
payable = self.payment_service._invader_find_payable_from_target(
target, **params
)
# Adyen part
acquirer = self.env["payment.acquirer"].browse(payment_mode_id)
self.payment_service._check_provider(acquirer, "adyen")
transaction = transaction_obj.create(
payable._invader_prepare_payment_transaction_data(acquirer)
)
request = self._prepare_adyen_payment_methods_request(transaction)
adyen = self._get_adyen_service(transaction)
response = adyen.checkout.payment_methods(request)
return self._generate_adyen_response(
response, payable, target, transaction, **params
)
def _prepare_adyen_payment_methods_request(self, transaction):
"""
https://docs.adyen.com/checkout/drop-in-web#step-1-get-available-payment-methods
Prepare retrieval of available payment methods
:param transaction:
:return:
"""
currency = transaction.currency_id
amount = transaction.amount
request = {
"merchantAccount": self._get_adyen_merchant_account(transaction),
"countryCode": transaction.partner_id.country_id.code,
"amount": {
"value": self._get_formatted_amount(transaction, amount),
"currency": currency.name,
},
"channel": "Web",
}
return request
def _validator_payments(self):
"""
Validator of payments service
target: see _invader_get_target_validator()
payment_mode_id: The payment mode used to pay
        transaction_id: As the request to Adyen does not create some kind of
        transaction 'token', we must pass the transaction_id to the flow
:return: dict
"""
res = self.payment_service._invader_get_target_validator()
res.update(
{
"payment_mode_id": {
"coerce": to_int,
"type": "integer",
"required": True,
},
"transaction_id": {
"coerce": to_int,
"type": "integer",
"required": True,
},
"payment_method": {"type": "dict", "required": True},
"return_url": {"type": "string", "required": True},
}
)
return res
def _validator_return_payments(self):
return Validator(
{
"redirect": {
"type": "dict",
"schema": {
"data": {"type": "dict"},
"url": {"type": "string"},
"method": {"type": "string"},
},
},
"resultCode": {"type": "string"},
"pspReference": {"type": "string"},
"details": {"type": "list"},
"action": {"type": "dict"},
},
allow_unknown=True,
)
def payments(
self,
target,
transaction_id,
payment_mode_id,
payment_method,
return_url,
**params
):
"""
https://docs.adyen.com/checkout/drop-in-web#step-3-make-a-payment
:param target: the payable (e.g.: "current_cart")
:param transaction_id: the previously created transaction
:param payment_mode_id: the payment mode
:param payment_method: the Adyen payment method (bcmc, scheme, ...)
:param return_url: the url to return to (in case of redirect)
:param params: other parameters
:return:
"""
transaction_obj = self.env["payment.transaction"]
payable = self.payment_service._invader_find_payable_from_target(
target, **params
)
acquirer = self.env["payment.acquirer"].browse(payment_mode_id)
self.payment_service._check_provider(acquirer, "adyen")
transaction = transaction_obj.browse(transaction_id)
transaction.return_url = return_url
request = self._prepare_adyen_payments_request(
transaction, payment_method
)
adyen = self._get_adyen_service(transaction)
response = adyen.checkout.payments(request)
self._update_transaction_with_response(transaction, response)
result_code = response.message.get("resultCode")
if result_code == "Authorised":
transaction._set_transaction_done()
else:
transaction.write(
{"state": ADYEN_TRANSACTION_STATUSES[result_code]}
)
return self._generate_adyen_response(
response, payable, target, transaction, **params
)
def _prepare_adyen_payments_request(self, transaction, payment_method):
"""
https://docs.adyen.com/checkout/drop-in-web#step-3-make-a-payment
Prepare payments request
:param transaction:
:param payment_method:
:return:
"""
return transaction._prepare_adyen_payments_request(payment_method)
def _prepare_payment_details(self, transaction, **params):
"""
        Remove specific entries from params and keep only the ones accepted by Adyen
Pass saved paymentData on transaction level to request
:param transaction:
:param params:
:return:
"""
params = filter_completion_details(params)
request = {
"paymentData": transaction.adyen_payment_data,
"details": params,
}
return request
def _validator_payment_details(self):
"""
Validator of payments service
target: see _allowed_payment_target()
payment_mode_id: The payment mode used to pay
:return: dict
"""
res = self.payment_service._invader_get_target_validator()
res.update(
{
"data": {"type": "dict", "required": True},
"transaction_id": {
"coerce": to_int,
"type": "integer",
"required": True,
},
}
)
return res
def _validator_return_payment_details(self):
return Validator(
{
"resultCode": {"type": "string"},
"pspReference": {"type": "string"},
"action": {"type": "dict"},
},
allow_unknown=True,
)
def payment_details(self, **params):
"""
https://docs.adyen.com/checkout/drop-in-web#step-5-additional-payment-details
        Intended to manage onAdditionalDetails event from drop-in component
:param params:
:return:
"""
transaction_id = params.get("transaction_id")
transaction = self.env["payment.transaction"].browse(transaction_id)
adyen = self._get_adyen_service(transaction)
request = self._prepare_payment_details(transaction, **params)
response = adyen.checkout.payments_details(request)
return response
def _validator_paymentResult(self):
schema = {
"transaction_id": {
"coerce": to_int,
"type": "integer",
"required": True,
},
"success_redirect": {"type": "string"},
"cancel_redirect": {"type": "string"},
}
return Validator(schema, allow_unknown=True)
def _validator_return_paymentResult(self):
schema = {"redirect_to": {"type": "string"}}
return Validator(schema, allow_unknown=True)
@restapi.method(
[(["/paymentResult"], ["GET", "POST"])],
input_param=restapi.CerberusValidator("_validator_paymentResult"),
output_param=restapi.CerberusValidator(
"_validator_return_paymentResult"
),
)
def paymentResult(self, **params):
"""
This is intended to manage callbacks after a merchant redirection
(3DS, challenge, ...)
:param params:
:return:
"""
transaction = self.env["payment.transaction"].browse(
params.get("transaction_id")
)
# Response will be an AdyenResult object
adyen = self._get_adyen_service(transaction)
request = self._prepare_payment_details(transaction, **params)
response = adyen.checkout.payments_details(request)
self._update_transaction_with_response(transaction, response)
result_code = response.message.get("resultCode")
return_url = params.get("success_redirect")
notify = False
if result_code == "Authorised":
if transaction.state == "draft":
transaction._set_transaction_done()
else:
notify = True
elif result_code in ("Cancelled", "Refused"):
return_url = params.get("cancel_redirect")
transaction.write(
{"state": ADYEN_TRANSACTION_STATUSES[result_code]}
)
else:
transaction.write(
{"state": ADYEN_TRANSACTION_STATUSES[result_code]}
)
if notify:
            # Payment state has been changed through another process
            # (e.g. webhook), so trigger the state-change notification for the
            # shopinvader session.
transaction._notify_state_changed_event()
res = {}
res["redirect_to"] = return_url
return res
def _get_formatted_amount(self, transaction, amount):
"""
The expected amount format by Adyen
:param amount: float
:return: int
"""
return self.env["invader.payable"]._get_formatted_amount(
transaction, amount
)
def _get_adyen_merchant_account(self, transaction):
"""
Return adyen merchant account depending on
payment.transaction recordset
:param transaction: payment.transaction
:return: string
"""
return transaction._get_adyen_merchant_account()
def _update_additional_details(self, response):
"""
Hook to be able to enrich transaction with response
additionalData
        Deprecated. Should be implemented on the transaction instead.
        :param response:
:return:
"""
_logger.warning(
"DEPRECATED: You should use _update_additional_details() on the transaction"
)
return {}
def _update_transaction_with_response(self, transaction, response):
"""
Update the transaction with Adyen response
:param transaction: payment.transaction
:param response: AdyenResult
:return:
"""
vals = {}
# Only for backward compatibility. Following line should be removed
vals.update(self._update_additional_details(response))
transaction.update(vals)
return transaction._update_with_adyen_response(response)
def _generate_adyen_response(
self, response, payable, target, transaction=False, **params
):
"""
This is the message returned to client
:param response: The response generated by Adyen call (AdyenResult)
:param payable: invader.payable record
:return: dict
"""
message = response.message
if transaction:
message.update({"transaction_id": transaction.id})
return message
def _validator_webhook(self):
schema = {
"live": {"coerce": to_bool, "required": True},
"notificationItems": {
"type": "list",
"schema": {
"type": "dict",
"schema": {
"NotificationRequestItem": {
"type": "dict",
"schema": {"additionalData": {"type": "dict"}},
}
},
},
},
}
return Validator(schema, allow_unknown=True)
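    # Illustrative sketch (not part of the original code): the minimal payload
    # shape accepted by the validator above (extra keys pass because of
    # ``allow_unknown``). All values below are hypothetical.
    #
    #   {
    #       "live": "false",
    #       "notificationItems": [
    #           {"NotificationRequestItem": {"additionalData": {"hmacSignature": "..."}}}
    #       ],
    #   }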
def _validator_return_webhook(self):
"""
Returns nothing
:return:
"""
schema = {}
return Validator(schema, allow_unknown=True)
@skip_secure_response
def webhook(self, **params):
"""
Implement the webhook notification.
See: https://docs.adyen.com/development-resources/notifications
:param params:
:return:
"""
payment_acquirer_obj = self.env["payment.acquirer"]
for element in params.get("notificationItems"):
notification_item = element.get("NotificationRequestItem")
with self.env.cr.savepoint():
# Continue to handle items even if error
payment_acquirer_obj._handle_adyen_notification_item(
notification_item
)
return "[accepted]" | PypiClean |
/edx_enterprise-4.1.6-py3-none-any.whl/enterprise/decorators.py | import inspect
import warnings
from functools import wraps
from urllib.parse import parse_qs, urlencode, urlparse, urlunparse
from requests.utils import quote
from django.http import Http404
from django.shortcuts import redirect
from enterprise.utils import get_enterprise_customer_or_404, get_identity_provider
FRESH_LOGIN_PARAMETER = 'new_enterprise_login'
def deprecated(extra):
"""
Flag a method as deprecated.
:param extra: Extra text you'd like to display after the default text.
"""
def decorator(func):
"""
Return a decorated function that emits a deprecation warning on use.
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""
Wrap the function.
"""
message = 'You called the deprecated function `{function}`. {extra}'.format(
function=func.__name__,
extra=extra
)
frame = inspect.currentframe().f_back
warnings.warn_explicit(
message,
category=DeprecationWarning,
filename=inspect.getfile(frame.f_code),
lineno=frame.f_lineno
)
return func(*args, **kwargs)
return wrapper
return decorator
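# Illustrative sketch (not part of the original module): applying the
# ``deprecated`` decorator. The function below is hypothetical and is not
# called anywhere.
def _example_deprecated_usage():
    @deprecated('Use the new implementation instead.')
    def legacy_helper():
        return 'legacy result'
    return legacy_helper() # emits a DeprecationWarning when called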
def ignore_warning(warning):
"""
Ignore any emitted warnings from a function.
:param warning: The category of warning to ignore.
"""
def decorator(func):
"""
Return a decorated function whose emitted warnings are ignored.
"""
@wraps(func)
def wrapper(*args, **kwargs):
"""
Wrap the function.
"""
warnings.simplefilter('ignore', warning)
return func(*args, **kwargs)
return wrapper
return decorator
def disable_for_loaddata(signal_handler):
"""
Use this decorator to turn off signal handlers when loading fixture data.
    The Django docs advise avoiding further changes to the DB when raw=True, as it might not be in a consistent state.
See https://docs.djangoproject.com/en/dev/ref/signals/#post-save
"""
# http://stackoverflow.com/a/15625121/882918
@wraps(signal_handler)
def wrapper(*args, **kwargs):
"""
Wrap the function.
"""
if kwargs.get('raw', False):
return
signal_handler(*args, **kwargs)
return wrapper
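# Illustrative sketch (not part of the original module): wrapping a post_save
# handler so it is skipped while fixtures load. The model and handler names
# below are hypothetical.
#
#   from django.db.models.signals import post_save
#   from django.dispatch import receiver
#
#   @receiver(post_save, sender=EnterpriseCustomer)
#   @disable_for_loaddata
#   def create_related_records(sender, instance, created, **kwargs):
#       ...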
def enterprise_login_required(view):
"""
View decorator for allowing authenticated user with valid enterprise UUID.
This decorator requires enterprise identifier as a parameter
`enterprise_uuid`.
    This decorator will raise a 404 if no `enterprise_uuid` kwarg is provided
    to the decorated view.
If there is no enterprise in database against the kwarg `enterprise_uuid`
or if the user is not authenticated then it will redirect the user to the
enterprise-linked SSO login page.
Usage::
@enterprise_login_required()
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
def get(self, request, enterprise_uuid):
# Some functionality ...
"""
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Wrap the decorator.
"""
if 'enterprise_uuid' not in kwargs:
raise Http404
enterprise_uuid = kwargs['enterprise_uuid']
enterprise_customer = get_enterprise_customer_or_404(enterprise_uuid)
query_params = request.GET
tpa_hint_param = query_params.get('tpa_hint')
# Now verify if the user is logged in. If user is not logged in then
# send the user to the login screen to sign in with an
# Enterprise-linked IdP and the pipeline will get them back here.
if not request.user.is_authenticated:
parsed_current_url = urlparse(request.get_full_path())
parsed_query_string = parse_qs(parsed_current_url.query)
tpa_hint = enterprise_customer.get_tpa_hint(tpa_hint_param)
if tpa_hint:
parsed_query_string.update({
'tpa_hint': tpa_hint,
})
parsed_query_string.update({
FRESH_LOGIN_PARAMETER: 'yes'
})
next_url = '{current_path}?{query_string}'.format(
current_path=quote(parsed_current_url.path),
query_string=urlencode(parsed_query_string, doseq=True)
)
return redirect(
'{login_url}?{params}'.format(
login_url='/login',
params=urlencode(
{'next': next_url}
)
)
)
# Otherwise, they can proceed to the original view.
return view(request, *args, **kwargs)
return wrapper
def force_fresh_session(view):
"""
View decorator which terminates stale TPA sessions.
This decorator forces the user to obtain a new session
the first time they access the decorated view. This prevents
TPA-authenticated users from hijacking the session of another
user who may have been previously logged in using the same
browser window.
This decorator should be used in conjunction with the
enterprise_login_required decorator.
Usage::
@enterprise_login_required
@force_fresh_session()
def my_view(request, enterprise_uuid):
# Some functionality ...
OR
class MyView(View):
...
@method_decorator(enterprise_login_required)
@method_decorator(force_fresh_session)
def get(self, request, enterprise_uuid):
# Some functionality ...
"""
@wraps(view)
def wrapper(request, *args, **kwargs):
"""
Wrap the function.
"""
if not request.GET.get(FRESH_LOGIN_PARAMETER):
# The enterprise_login_required decorator promises to set the fresh login URL
# parameter for this URL when it was the agent that initiated the login process;
# if that parameter isn't set, we can safely assume that the session is "stale";
# that isn't necessarily an issue, though. Redirect the user to
# log out and then come back here - the enterprise_login_required decorator will
# then take effect prior to us arriving back here again.
enterprise_customer = get_enterprise_customer_or_404(kwargs.get('enterprise_uuid'))
if not enterprise_customer.has_multiple_idps:
provider_id = enterprise_customer.identity_provider \
if enterprise_customer.identity_provider else ''
else:
provider_id = ''
sso_provider = get_identity_provider(provider_id)
if sso_provider:
# Parse the current request full path, quote just the path portion,
# then reconstruct the full path string.
# The path and query portions should be the only non-empty strings here.
scheme, netloc, path, params, query, fragment = urlparse(request.get_full_path())
redirect_url = urlunparse((scheme, netloc, quote(path), params, query, fragment))
return redirect(
'{logout_url}?{params}'.format(
logout_url='/logout',
params=urlencode(
{'redirect_url': redirect_url}
)
)
)
return view(request, *args, **kwargs)
return wrapper
def null_decorator(func):
"""
Use this decorator to stub out decorators for testing.
If we're unable to import social_core.pipeline.partial, which is the case in our CI platform,
we need to be able to wrap the function with something.
"""
return func | PypiClean |
/sat_mapping_cyborg_ai-0.0.37-py3-none-any.whl/sat_mapping/Lib/gsutil/gslib/vendored/boto/boto/cloudfront/distribution.py |
import uuid
import base64
import time
from boto.compat import six, json
from boto.cloudfront.identity import OriginAccessIdentity
from boto.cloudfront.object import Object, StreamingObject
from boto.cloudfront.signers import ActiveTrustedSigners, TrustedSigners
from boto.cloudfront.logging import LoggingInfo
from boto.cloudfront.origin import S3Origin, CustomOrigin
from boto.s3.acl import ACL
class DistributionConfig(object):
def __init__(self, connection=None, origin=None, enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, default_root_object=None,
logging=None):
"""
:param origin: Origin information to associate with the
distribution. If your distribution will use
an Amazon S3 origin, then this should be an
S3Origin object. If your distribution will use
a custom origin (non Amazon S3), then this
should be a CustomOrigin object.
:type origin: :class:`boto.cloudfront.origin.S3Origin` or
:class:`boto.cloudfront.origin.CustomOrigin`
:param enabled: Whether the distribution is enabled to accept
end user requests for content.
:type enabled: bool
:param caller_reference: A unique number that ensures the
request can't be replayed. If no
caller_reference is provided, boto
will generate a type 4 UUID for use
as the caller reference.
        :type caller_reference: str
:param cnames: A CNAME alias you want to associate with this
distribution. You can have up to 10 CNAME aliases
per distribution.
        :type cnames: list of str
:param comment: Any comments you want to include about the
distribution.
:type comment: str
:param trusted_signers: Specifies any AWS accounts you want to
permit to create signed URLs for private
content. If you want the distribution to
use signed URLs, this should contain a
TrustedSigners object; if you want the
distribution to use basic URLs, leave
this None.
:type trusted_signers: :class`boto.cloudfront.signers.TrustedSigners`
:param default_root_object: Designates a default root object.
Only include a DefaultRootObject value
if you are going to assign a default
root object for the distribution.
        :type default_root_object: str
:param logging: Controls whether access logs are written for the
distribution. If you want to turn on access logs,
this should contain a LoggingInfo object; otherwise
it should contain None.
:type logging: :class`boto.cloudfront.logging.LoggingInfo`
"""
self.connection = connection
self.origin = origin
self.enabled = enabled
if caller_reference:
self.caller_reference = caller_reference
else:
self.caller_reference = str(uuid.uuid4())
self.cnames = []
if cnames:
self.cnames = cnames
self.comment = comment
self.trusted_signers = trusted_signers
self.logging = logging
self.default_root_object = default_root_object
def __repr__(self):
return "DistributionConfig:%s" % self.origin
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<DistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self></Self>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
if self.default_root_object:
dro = self.default_root_object
s += '<DefaultRootObject>%s</DefaultRootObject>\n' % dro
s += '</DistributionConfig>\n'
return s
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'Logging':
self.logging = LoggingInfo()
return self.logging
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
else:
return None
def endElement(self, name, value, connection):
if name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'CallerReference':
self.caller_reference = value
elif name == 'DefaultRootObject':
self.default_root_object = value
else:
setattr(self, name, value)
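# Illustrative sketch (not part of the original module): building a minimal
# config for an S3-backed distribution. The bucket DNS name is hypothetical
# and the helper is not called anywhere.
def _example_distribution_config():
    origin = S3Origin('example-bucket.s3.amazonaws.com')
    config = DistributionConfig(origin=origin, enabled=True,
                                comment='example distribution')
    return config.to_xml()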
class StreamingDistributionConfig(DistributionConfig):
def __init__(self, connection=None, origin='', enabled=False,
caller_reference='', cnames=None, comment='',
trusted_signers=None, logging=None):
super(StreamingDistributionConfig, self).__init__(connection=connection,
origin=origin, enabled=enabled,
caller_reference=caller_reference,
cnames=cnames, comment=comment,
trusted_signers=trusted_signers,
logging=logging)
def to_xml(self):
s = '<?xml version="1.0" encoding="UTF-8"?>\n'
s += '<StreamingDistributionConfig xmlns="http://cloudfront.amazonaws.com/doc/2010-07-15/">\n'
if self.origin:
s += self.origin.to_xml()
s += ' <CallerReference>%s</CallerReference>\n' % self.caller_reference
for cname in self.cnames:
s += ' <CNAME>%s</CNAME>\n' % cname
if self.comment:
s += ' <Comment>%s</Comment>\n' % self.comment
s += ' <Enabled>'
if self.enabled:
s += 'true'
else:
s += 'false'
s += '</Enabled>\n'
if self.trusted_signers:
s += '<TrustedSigners>\n'
for signer in self.trusted_signers:
if signer == 'Self':
s += ' <Self/>\n'
else:
s += ' <AwsAccountNumber>%s</AwsAccountNumber>\n' % signer
s += '</TrustedSigners>\n'
if self.logging:
s += '<Logging>\n'
s += ' <Bucket>%s</Bucket>\n' % self.logging.bucket
s += ' <Prefix>%s</Prefix>\n' % self.logging.prefix
s += '</Logging>\n'
s += '</StreamingDistributionConfig>\n'
return s
class DistributionSummary(object):
def __init__(self, connection=None, domain_name='', id='',
last_modified_time=None, status='', origin=None,
cname='', comment='', enabled=False):
self.connection = connection
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.origin = origin
self.enabled = enabled
self.cnames = []
if cname:
self.cnames.append(cname)
self.comment = comment
self.trusted_signers = None
self.etag = None
self.streaming = False
def __repr__(self):
return "DistributionSummary:%s" % self.domain_name
def startElement(self, name, attrs, connection):
if name == 'TrustedSigners':
self.trusted_signers = TrustedSigners()
return self.trusted_signers
elif name == 'S3Origin':
self.origin = S3Origin()
return self.origin
elif name == 'CustomOrigin':
self.origin = CustomOrigin()
return self.origin
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'Status':
self.status = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'DomainName':
self.domain_name = value
elif name == 'Origin':
self.origin = value
elif name == 'CNAME':
self.cnames.append(value)
elif name == 'Comment':
self.comment = value
elif name == 'Enabled':
if value.lower() == 'true':
self.enabled = True
else:
self.enabled = False
elif name == 'StreamingDistributionSummary':
self.streaming = True
else:
setattr(self, name, value)
def get_distribution(self):
return self.connection.get_distribution_info(self.id)
class StreamingDistributionSummary(DistributionSummary):
def get_distribution(self):
return self.connection.get_streaming_distribution_info(self.id)
class Distribution(object):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
self.connection = connection
self.config = config
self.domain_name = domain_name
self.id = id
self.last_modified_time = last_modified_time
self.status = status
self.in_progress_invalidation_batches = 0
self.active_signers = None
self.etag = None
self._bucket = None
self._object_class = Object
def __repr__(self):
return "Distribution:%s" % self.domain_name
def startElement(self, name, attrs, connection):
if name == 'DistributionConfig':
self.config = DistributionConfig()
return self.config
elif name == 'ActiveTrustedSigners':
self.active_signers = ActiveTrustedSigners()
return self.active_signers
else:
return None
def endElement(self, name, value, connection):
if name == 'Id':
self.id = value
elif name == 'LastModifiedTime':
self.last_modified_time = value
elif name == 'Status':
self.status = value
elif name == 'InProgressInvalidationBatches':
self.in_progress_invalidation_batches = int(value)
elif name == 'DomainName':
self.domain_name = value
else:
setattr(self, name, value)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the Distribution. The only values
of the DistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set ``Distribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the Distribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = DistributionConfig(self.connection, self.config.origin,
self.config.enabled, self.config.caller_reference,
self.config.cnames, self.config.comment,
self.config.trusted_signers,
self.config.default_root_object)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_distribution_config(self.id, self.etag, new_config)
self.config = new_config
self._object_class = Object
def enable(self):
"""
Activate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=True)
def disable(self):
"""
Deactivate the Distribution. A convenience wrapper around
the update method.
"""
self.update(enabled=False)
def delete(self):
"""
Delete this CloudFront Distribution. The content
associated with the Distribution is not deleted from
the underlying Origin bucket in S3.
"""
self.connection.delete_distribution(self.id, self.etag)
def _get_bucket(self):
if isinstance(self.config.origin, S3Origin):
if not self._bucket:
bucket_dns_name = self.config.origin.dns_name
bucket_name = bucket_dns_name.replace('.s3.amazonaws.com', '')
from boto.s3.connection import S3Connection
s3 = S3Connection(self.connection.aws_access_key_id,
self.connection.aws_secret_access_key,
proxy=self.connection.proxy,
proxy_port=self.connection.proxy_port,
proxy_user=self.connection.proxy_user,
proxy_pass=self.connection.proxy_pass)
self._bucket = s3.get_bucket(bucket_name)
self._bucket.distribution = self
self._bucket.set_key_class(self._object_class)
return self._bucket
else:
raise NotImplementedError('Unable to get_objects on CustomOrigin')
def get_objects(self):
"""
Return a list of all content objects in this distribution.
:rtype: list of :class:`boto.cloudfront.object.Object`
:return: The content objects
"""
bucket = self._get_bucket()
objs = []
for key in bucket:
objs.append(key)
return objs
def set_permissions(self, object, replace=False):
"""
Sets the S3 ACL grants for the given object to the appropriate
value based on the type of Distribution. If the Distribution
is serving private content the ACL will be set to include the
Origin Access Identity associated with the Distribution. If
the Distribution is serving public content the content will
be set up with "public-read".
:type object: :class:`boto.cloudfront.object.Object`
        :param object: The Object whose ACL is being set
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
if isinstance(self.config.origin, S3Origin):
if self.config.origin.origin_access_identity:
id = self.config.origin.origin_access_identity.split('/')[-1]
oai = self.connection.get_origin_access_identity_info(id)
policy = object.get_acl()
if replace:
policy.acl = ACL()
policy.acl.add_user_grant('READ', oai.s3_user_id)
object.set_acl(policy)
else:
object.set_canned_acl('public-read')
def set_permissions_all(self, replace=False):
"""
Sets the S3 ACL grants for all objects in the Distribution
to the appropriate value based on the type of Distribution.
:type replace: bool
:param replace: If False, the Origin Access Identity will be
appended to the existing ACL for the object.
If True, the ACL for the object will be
completely replaced with one that grants
READ permission to the Origin Access Identity.
"""
bucket = self._get_bucket()
for key in bucket:
self.set_permissions(key, replace)
def add_object(self, name, content, headers=None, replace=True):
"""
Adds a new content object to the Distribution. The content
for the object will be copied to a new Key in the S3 Bucket
and the permissions will be set appropriately for the type
of Distribution.
:type name: str or unicode
:param name: The name or key of the new object.
:type content: file-like object
:param content: A file-like object that contains the content
for the new object.
:type headers: dict
:param headers: A dictionary containing additional headers
you would like associated with the new
object in S3.
:rtype: :class:`boto.cloudfront.object.Object`
:return: The newly created object.
"""
if self.config.origin.origin_access_identity:
policy = 'private'
else:
policy = 'public-read'
bucket = self._get_bucket()
object = bucket.new_key(name)
object.set_contents_from_file(content, headers=headers, policy=policy)
if self.config.origin.origin_access_identity:
self.set_permissions(object, replace)
return object
def create_signed_url(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates a signed CloudFront URL that is only valid within the specified
parameters.
:type url: str
:param url: The URL of the protected object.
:type keypair_id: str
:param keypair_id: The keypair ID of the Amazon KeyPair used to sign
theURL. This ID MUST correspond to the private key
specified with private_key_file or private_key_string.
:type expire_time: int
:param expire_time: The expiry time of the URL. If provided, the URL
will expire after the time has passed. If not provided the URL will
never expire. Format is a unix epoch.
Use int(time.time() + duration_in_sec).
:type valid_after_time: int
:param valid_after_time: If provided, the URL will not be valid until
after valid_after_time. Format is a unix epoch.
Use int(time.time() + secs_until_valid).
:type ip_address: str
:param ip_address: If provided, only allows access from the specified
IP address. Use '192.168.0.10' for a single IP or
use '192.168.0.0/24' CIDR notation for a subnet.
:type policy_url: str
:param policy_url: If provided, allows the signature to contain
wildcard globs in the URL. For example, you could
provide: 'http://example.com/media/\*' and the policy
and signature would allow access to all contents of
                           the media subdirectory. If not specified, only
                           access to the exact url provided in 'url' is allowed.
:type private_key_file: str or file object.
:param private_key_file: If provided, contains the filename of the
private key file used for signing or an open
file object containing the private key
contents. Only one of private_key_file or
private_key_string can be provided.
:type private_key_string: str
:param private_key_string: If provided, contains the private key string
used for signing. Only one of private_key_file or
private_key_string can be provided.
:rtype: str
:return: The signed URL.
"""
# Get the required parameters
params = self._create_signing_params(
url=url, keypair_id=keypair_id, expire_time=expire_time,
valid_after_time=valid_after_time, ip_address=ip_address,
policy_url=policy_url, private_key_file=private_key_file,
private_key_string=private_key_string)
#combine these into a full url
if "?" in url:
sep = "&"
else:
sep = "?"
signed_url_params = []
for key in ["Expires", "Policy", "Signature", "Key-Pair-Id"]:
if key in params:
param = "%s=%s" % (key, params[key])
signed_url_params.append(param)
signed_url = url + sep + "&".join(signed_url_params)
return signed_url
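    # Illustrative sketch (not part of the original code): signing a URL for
    # ten minutes with a CloudFront key pair. The distribution instance, key
    # pair ID and key file path below are hypothetical.
    #
    #   signed = dist.create_signed_url(
    #       url='http://d111111abcdef8.cloudfront.net/image.jpg',
    #       keypair_id='APKAIEXAMPLE',
    #       expire_time=int(time.time() + 600),
    #       private_key_file='/path/to/private-key.pem',
    #   )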
def _create_signing_params(self, url, keypair_id,
expire_time=None, valid_after_time=None,
ip_address=None, policy_url=None,
private_key_file=None, private_key_string=None):
"""
Creates the required URL parameters for a signed URL.
"""
params = {}
# Check if we can use a canned policy
if expire_time and not valid_after_time and not ip_address and not policy_url:
# we manually construct this policy string to ensure formatting
# matches signature
policy = self._canned_policy(url, expire_time)
params["Expires"] = str(expire_time)
else:
# If no policy_url is specified, default to the full url.
if policy_url is None:
policy_url = url
# Can't use canned policy
policy = self._custom_policy(policy_url, expires=expire_time,
valid_after=valid_after_time,
ip_address=ip_address)
encoded_policy = self._url_base64_encode(policy)
params["Policy"] = encoded_policy
#sign the policy
signature = self._sign_string(policy, private_key_file, private_key_string)
#now base64 encode the signature (URL safe as well)
encoded_signature = self._url_base64_encode(signature)
params["Signature"] = encoded_signature
params["Key-Pair-Id"] = keypair_id
return params
@staticmethod
def _canned_policy(resource, expires):
"""
Creates a canned policy string.
"""
policy = ('{"Statement":[{"Resource":"%(resource)s",'
'"Condition":{"DateLessThan":{"AWS:EpochTime":'
'%(expires)s}}}]}' % locals())
return policy
@staticmethod
def _custom_policy(resource, expires=None, valid_after=None, ip_address=None):
"""
Creates a custom policy string based on the supplied parameters.
"""
condition = {}
# SEE: http://docs.amazonwebservices.com/AmazonCloudFront/latest/DeveloperGuide/RestrictingAccessPrivateContent.html#CustomPolicy
# The 'DateLessThan' property is required.
if not expires:
# Defaults to ONE day
expires = int(time.time()) + 86400
condition["DateLessThan"] = {"AWS:EpochTime": expires}
if valid_after:
condition["DateGreaterThan"] = {"AWS:EpochTime": valid_after}
if ip_address:
if '/' not in ip_address:
ip_address += "/32"
condition["IpAddress"] = {"AWS:SourceIp": ip_address}
policy = {"Statement": [{
"Resource": resource,
"Condition": condition}]}
return json.dumps(policy, separators=(",", ":"))
@staticmethod
def _sign_string(message, private_key_file=None, private_key_string=None):
"""
Signs a string for use with Amazon CloudFront.
Requires the rsa library be installed.
"""
try:
import rsa
except ImportError:
raise NotImplementedError("Boto depends on the python rsa "
"library to generate signed URLs for "
"CloudFront")
# Make sure only one of private_key_file and private_key_string is set
if private_key_file and private_key_string:
raise ValueError("Only specify the private_key_file or the private_key_string not both")
if not private_key_file and not private_key_string:
raise ValueError("You must specify one of private_key_file or private_key_string")
# If private_key_file is a file name, open it and read it
if private_key_string is None:
if isinstance(private_key_file, six.string_types):
with open(private_key_file, 'r') as file_handle:
private_key_string = file_handle.read()
# Otherwise, treat it like a file
else:
private_key_string = private_key_file.read()
# Sign it!
private_key = rsa.PrivateKey.load_pkcs1(private_key_string)
signature = rsa.sign(str(message), private_key, 'SHA-1')
return signature
@staticmethod
def _url_base64_encode(msg):
"""
Base64 encodes a string using the URL-safe characters specified by
Amazon.
"""
msg_base64 = base64.b64encode(msg)
msg_base64 = msg_base64.replace('+', '-')
msg_base64 = msg_base64.replace('=', '_')
msg_base64 = msg_base64.replace('/', '~')
return msg_base64
class StreamingDistribution(Distribution):
def __init__(self, connection=None, config=None, domain_name='',
id='', last_modified_time=None, status=''):
super(StreamingDistribution, self).__init__(connection, config,
domain_name, id, last_modified_time, status)
self._object_class = StreamingObject
def startElement(self, name, attrs, connection):
if name == 'StreamingDistributionConfig':
self.config = StreamingDistributionConfig()
return self.config
else:
return super(StreamingDistribution, self).startElement(name, attrs,
connection)
def update(self, enabled=None, cnames=None, comment=None):
"""
Update the configuration of the StreamingDistribution. The only values
of the StreamingDistributionConfig that can be directly updated are:
* CNAMES
* Comment
* Whether the Distribution is enabled or not
Any changes to the ``trusted_signers`` or ``origin`` properties of
this distribution's current config object will also be included in
the update. Therefore, to set the origin access identity for this
distribution, set
``StreamingDistribution.config.origin.origin_access_identity``
before calling this update method.
:type enabled: bool
:param enabled: Whether the StreamingDistribution is active or not.
:type cnames: list of str
:param cnames: The DNS CNAME's associated with this
Distribution. Maximum of 10 values.
:type comment: str or unicode
:param comment: The comment associated with the Distribution.
"""
new_config = StreamingDistributionConfig(self.connection,
self.config.origin,
self.config.enabled,
self.config.caller_reference,
self.config.cnames,
self.config.comment,
self.config.trusted_signers)
if enabled is not None:
new_config.enabled = enabled
if cnames is not None:
new_config.cnames = cnames
if comment is not None:
new_config.comment = comment
self.etag = self.connection.set_streaming_distribution_config(self.id,
self.etag,
new_config)
self.config = new_config
self._object_class = StreamingObject
def delete(self):
self.connection.delete_streaming_distribution(self.id, self.etag) | PypiClean |
/django_watermark-0.2.0-py3-none-any.whl/watermarker/utils.py | import re
import random
from PIL import Image, ImageEnhance
from .conf import settings
def _percent(var):
"""
Just a simple interface to the _val function with a more meaningful name.
"""
return _val(var, True)
def _int(var):
"""
Just a simple interface to the _val function with a more meaningful name.
"""
return _val(var)
def _val(var, is_percent=False):
"""
Tries to determine the appropriate value of a particular variable that is
passed in. If the value is supposed to be a percentage, a whole integer
will be sought after and then turned into a floating point number between
0 and 1. If the value is supposed to be an integer, the variable is cast
into an integer.
"""
try:
if is_percent:
var = float(int(var.strip("%")) / 100.0)
else:
var = int(var)
except ValueError:
raise ValueError("invalid watermark parameter: " + var)
return var
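# Illustrative sketch (not part of the original module): how the two wrappers
# interpret their input. The helper is not called anywhere.
def _example_val_usage():
    assert _percent("75%") == 0.75
    assert _int("30") == 30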
def reduce_opacity(img, opacity):
"""
Returns an image with reduced opacity.
"""
assert opacity >= 0 and opacity <= 1
if img.mode != "RGBA":
img = img.convert("RGBA")
else:
img = img.copy()
alpha = img.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
img.putalpha(alpha)
return img
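# Illustrative sketch (not part of the original module): fading a watermark
# image to half opacity before compositing. The file name is hypothetical and
# the helper is not called anywhere.
def _example_reduce_opacity():
    mark = Image.open("watermark.png")
    return reduce_opacity(mark, 0.5)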
def determine_scale(scale, img, mark):
"""
    Determines the scaled size using a specified ratio, 'F' or 'R%%'. If
    `scale` is 'F', the watermark is scaled to be as big as possible while
    still fitting inside `img`. If `scale` is 'R%%', the watermark is scaled
    to that percentage of the largest size that fits inside `img`, preserving
    the aspect ratio. Returns the new (width, height) for `mark`.
"""
if scale:
try:
scale = float(scale)
except (ValueError, TypeError):
pass
if isinstance(scale, str) and scale.upper() == "F":
# scale watermark to full, but preserve the aspect ratio
scale = min(float(img.size[0]) / mark.size[0], float(img.size[1]) / mark.size[1])
elif isinstance(scale, str) and re.match(r"R\d{1,3}\%", scale.upper()):
# scale watermark to % of source image and preserve the aspect ratio
percentage = float(re.match(r"R(\d{1,3})\%", scale.upper()).group(1))
scale = (
min(float(img.size[0]) / mark.size[0], float(img.size[1]) / mark.size[1])
/ 100 * percentage
)
elif not isinstance(scale, (float, int)):
raise ValueError(
'Invalid scale value "%s"! Valid values are "F" '
                'for ratio-preserving scaling, "R%%" for a percentage of the '
                "source image size, and floating-point numbers and "
"integers greater than 0." % scale
)
# determine the new width and height
w = int(mark.size[0] * float(scale))
h = int(mark.size[1] * float(scale))
# apply the new width and height, and return the new `mark`
return (w, h)
else:
return mark.size
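# Illustrative sketch (not part of the original module): the three accepted
# scale forms, given two already-opened images. The helper is not called
# anywhere.
def _example_determine_scale(img, mark):
    full_fit = determine_scale("F", img, mark)     # largest size that fits img
    relative = determine_scale("R25%", img, mark)  # 25% of the best-fit size
    doubled = determine_scale(2.0, img, mark)      # plain ratio
    return full_fit, relative, doubled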
def determine_rotation(rotation, mark):
"""
Determines the number of degrees to rotate the watermark image.
"""
if isinstance(rotation, str) and rotation.lower() == "r":
rotation = random.randint(0, 359)
else:
rotation = _int(rotation)
return rotation
def determine_position(position, img, mark):
"""
Options:
TL: top-left
TR: top-right
BR: bottom-right
BL: bottom-left
C: centered
R: random
X%xY%: relative positioning on both the X and Y axes
X%xY: relative positioning on the X axis and absolute positioning on the
Y axis
XxY%: absolute positioning on the X axis and relative positioning on the
Y axis
XxY: absolute positioning on both the X and Y axes
"""
left = top = 0
max_left = max(img.size[0] - mark.size[0], 0)
max_top = max(img.size[1] - mark.size[1], 0)
if not position:
position = "r"
if isinstance(position, tuple):
left, top = position
elif isinstance(position, str):
position = position.lower()
# corner positioning
if position in ["tl", "tr", "br", "bl"]:
if "t" in position:
top = 0
elif "b" in position:
top = max_top
if "l" in position:
left = 0
elif "r" in position:
left = max_left
# center positioning
elif position == "c":
left = int(max_left / 2)
top = int(max_top / 2)
# random positioning
elif position == "r":
left = random.randint(0, max_left)
top = random.randint(0, max_top)
# relative or absolute positioning
elif "x" in position:
left, top = position.split("x")
if "%" in left:
left = max_left * _percent(left)
else:
left = _int(left)
if "%" in top:
top = max_top * _percent(top)
else:
top = _int(top)
return int(left), int(top)
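# Illustrative sketch (not part of the original module): a few of the accepted
# position forms, given two already-opened images. The helper is not called
# anywhere.
def _example_determine_position(img, mark):
    corner = determine_position("br", img, mark)          # bottom-right
    centered = determine_position("c", img, mark)         # centered
    relative = determine_position("80%x90%", img, mark)   # relative on both axes
    absolute = determine_position((10, 20), img, mark)    # absolute pixels
    return corner, centered, relative, absolute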
def watermark(
img,
mark,
position=(0, 0),
opacity=1,
scale=1.0,
tile=False,
greyscale=False,
rotation=0,
return_name=False,
**kwargs
):
"""Adds a watermark to an image"""
if opacity < 1:
mark = reduce_opacity(mark, opacity)
if not isinstance(scale, tuple):
scale = determine_scale(scale, img, mark)
if scale[0] != mark.size[0] and scale[1] != mark.size[1]:
mark = mark.resize(scale, resample=Image.ANTIALIAS)
if greyscale and mark.mode != "LA":
mark = mark.convert("LA")
rotation = determine_rotation(rotation, mark)
if rotation != 0:
# give some leeway for rotation overlapping
new_w = int(mark.size[0] * 1.5)
new_h = int(mark.size[1] * 1.5)
new_mark = Image.new("RGBA", (new_w, new_h), (0, 0, 0, 0))
# center the watermark in the newly resized image
new_l = int((new_w - mark.size[0]) / 2)
new_t = int((new_h - mark.size[1]) / 2)
new_mark.paste(mark, (new_l, new_t))
mark = new_mark.rotate(rotation)
position = determine_position(position, img, mark)
if img.mode != "RGBA":
img = img.convert("RGBA")
# make sure we have a tuple for a position now
assert isinstance(position, tuple), 'Invalid position "%s"!' % position
# create a transparent layer the size of the image and draw the
# watermark in that layer.
layer = Image.new("RGBA", img.size, (0, 0, 0, 0))
if tile:
first_y = int(position[1] % mark.size[1] - mark.size[1])
first_x = int(position[0] % mark.size[0] - mark.size[0])
for y in range(first_y, img.size[1], mark.size[1]):
for x in range(first_x, img.size[0], mark.size[0]):
layer.paste(mark, (x, y))
else:
layer.paste(mark, position)
# composite the watermark with the layer
return Image.composite(layer, img, layer) | PypiClean |
/timeeval_gutenTAG-1.4.0-py3-none-any.whl/gutenTAG/base_oscillations/cylinder_bell_funnel.py | from typing import Optional, Sequence, Callable
import numpy as np
from . import BaseOscillation
from .interface import BaseOscillationInterface
from ..utils.default_values import default_values
from ..utils.global_variables import BASE_OSCILLATION_NAMES, BASE_OSCILLATIONS, PARAMETERS
from ..utils.types import BOGenerationContext
class CylinderBellFunnel(BaseOscillationInterface):
KIND = BASE_OSCILLATION_NAMES.CYLINDER_BELL_FUNNEL
def get_base_oscillation_kind(self) -> str:
return self.KIND
def get_timeseries_periods(self) -> Optional[int]:
if self.avg_pattern_length > 0:
return self.length // self.avg_pattern_length
return None
def is_periodic(self) -> bool:
"""CylinderBellFunnel has reoccurring patterns but no fixed periodicity!"""
return False
def generate_only_base(self,
ctx: BOGenerationContext,
length: Optional[int] = None,
variance: Optional[float] = None,
amplitude: Optional[float] = None,
variance_pattern_length: Optional[int] = None,
*args, **kwargs) -> np.ndarray:
length = length or self.length
variance = variance or self.variance
amplitude = amplitude or self.amplitude
variance_pattern_length = variance_pattern_length or self.variance_pattern_length
return cylinder_bell_funnel(
ctx.rng,
length=length,
avg_pattern_length=self.avg_pattern_length,
avg_amplitude=amplitude,
default_variance=variance,
variance_pattern_length=variance_pattern_length,
variance_amplitude=self.variance_amplitude
)
def generate_timeseries_and_variations(self, ctx: BOGenerationContext, **kwargs) -> BaseOscillationInterface:
super().generate_timeseries_and_variations(ctx)
if self.timeseries is not None and self.noise is not None:
self.timeseries -= self.noise
else:
raise AssertionError("`timeseries` and `noise` are None. Please, generate `timeseries` and `noise` before calling this method!")
return self
BaseOscillation.register(CylinderBellFunnel.KIND, CylinderBellFunnel)
# Taken from https://github.com/KDD-OpenSource/data-generation/blob/master/generation/cbf.py
# cylinder bell funnel based on "Learning comprehensible descriptions of multivariate time series"
def cylinder_bell_funnel(rng: np.random.Generator = np.random.default_rng(),
length: int = default_values[BASE_OSCILLATIONS][PARAMETERS.LENGTH],
avg_pattern_length: int = default_values[BASE_OSCILLATIONS][PARAMETERS.AVG_PATTERN_LENGTH],
avg_amplitude: float = default_values[BASE_OSCILLATIONS][PARAMETERS.AMPLITUDE],
default_variance: float = default_values[BASE_OSCILLATIONS][PARAMETERS.VARIANCE],
variance_pattern_length: int = default_values[BASE_OSCILLATIONS][PARAMETERS.VARIANCE_PATTERN_LENGTH],
variance_amplitude: float = default_values[BASE_OSCILLATIONS][PARAMETERS.VARIANCE_AMPLITUDE],
include_negatives: bool = True) -> np.ndarray:
def generate_bell(n: int, a: float, v: float) -> np.ndarray:
bell = rng.normal(0, v, n) + a * np.arange(n) / n
return bell
def generate_funnel(n: int, a: float, v: float) -> np.ndarray:
funnel = rng.normal(0, v, n) + a * np.arange(n)[::-1] / n
return funnel
def generate_cylinder(n: int, a: float, v: float) -> np.ndarray:
cylinder = rng.normal(0, v, n) + a
return cylinder
generators: Sequence[Callable[[int, float, float], np.ndarray]] = (generate_bell, generate_funnel, generate_cylinder)
data = rng.normal(0, default_variance, length)
current_start = rng.integers(0, avg_pattern_length)
current_length = max(1, int(np.ceil(rng.normal(avg_pattern_length, variance_pattern_length))))
while current_start + current_length < length:
        generator: Callable[[int, float, float], np.ndarray] = rng.choice(generators)  # type: ignore # strange numpy type prevents choosing a callable
current_amplitude = rng.normal(avg_amplitude, variance_amplitude)
while current_length <= 0:
current_length = -(current_length - 1)
pattern = generator(current_length, current_amplitude, default_variance)
if include_negatives and rng.random() > 0.5:
pattern = -1 * pattern
data[current_start: current_start + current_length] = pattern
current_start = current_start + current_length + rng.integers(0, avg_pattern_length)
current_length = max(1, int(np.ceil(rng.normal(avg_pattern_length, variance_pattern_length))))
return data | PypiClean |
/Mathics_Django-6.0.0-py3-none-any.whl/mathics_django/web/media/js/mathjax/jax/output/SVG/fonts/TeX/Fraktur/Bold/PUA.js | MathJax.Hub.Insert(MathJax.OutputJax.SVG.FONTDATA.FONTS["MathJax_Fraktur-bold"],{58113:[630,27,587,64,512,"388 427Q320 485 242 524T128 563H116Q95 563 87 561L77 559Q72 563 69 566T65 570T65 572L75 576Q106 592 154 611T212 630Q230 630 262 622T358 581T492 498L508 486Q512 463 512 396Q512 246 469 112L465 102Q453 94 341 25Q252 -27 247 -27Q243 -27 174 24T97 84Q90 100 90 214Q90 285 98 345Q100 360 102 363T118 377Q175 422 262 465Q264 463 270 460L277 456Q277 455 267 447T244 428T228 414Q206 382 206 269Q206 187 214 164T259 110Q286 89 342 58Q391 131 391 313Q391 355 388 412V427"],58114:[693,212,394,37,408,"39 362L37 366L38 368L82 405H133V474Q135 563 143 589T198 658Q210 669 224 676T247 687L255 690H253Q241 690 253 692Q254 692 256 692T260 693Q263 693 262 691L261 690Q300 690 361 662L373 656L388 666Q404 675 405 675L406 674Q406 672 406 670T406 664L408 655L301 555Q300 555 287 564T254 584T221 597Q190 597 176 583T161 550Q161 525 184 495T232 440T261 405H387V399Q377 389 364 379L340 359H258V315Q258 52 228 -18L172 -120L121 -211H109Q102 -212 96 -212L109 -174Q131 -108 135 -80T139 53V76V157V362H39"],58115:[681,219,387,36,384,"41 352Q40 354 39 355T37 358L36 360H37Q48 370 61 380L84 400H108Q131 400 131 402Q121 424 104 501L100 519Q109 560 134 602T196 664Q230 681 271 681Q291 681 316 669T358 644L373 631Q373 630 304 553Q299 548 294 547Q292 547 290 546H287Q286 546 274 562T243 593T205 609Q180 609 165 596T150 562Q150 526 191 488L217 462Q248 431 253 405V400H381L384 394L349 352H251V332Q249 271 231 17L227 -37L120 -217L109 -218Q103 -219 97 -219Q97 -218 101 -206T110 -177T118 -151Q126 -129 128 -120T136 -46T141 127Q141 250 136 340V352H41"],58116:[474,212,593,67,531,"107 370Q127 384 172 409T255 454T294 473L306 468Q356 446 425 431L435 429L524 468Q528 465 531 461Q499 395 499 271V263Q499 146 509 71T519 -8Q519 -28 512 -45Q510 -50 435 -123T355 -197Q296 -212 257 -212Q209 -212 164 -196T98 -167T67 -143L133 -44H144Q167 -88 216 -111T320 -134Q371 -134 390 -118T410 -69Q410 -52 404 -12T392 60T385 92L193 -29L158 5Q124 39 110 51L96 63V71Q94 79 94 121Q94 130 94 148T93 174Q93 230 96 275T103 344T107 370ZM221 397Q200 334 200 254Q200 170 210 140Q216 126 234 109T268 81L283 71L383 119V127Q384 132 384 241L385 347L368 349Q325 357 290 369T240 389T221 397"],58117:[684,27,393,33,387,"103 453Q103 631 95 661Q95 663 102 667T110 672L114 664Q117 655 123 641T131 621L140 597L154 606Q208 641 275 673L297 684Q300 683 302 682T307 679T310 678L314 676Q283 658 256 625Q238 601 231 579T223 515L224 512L282 548Q339 583 341 583T365 548T386 509Q326 443 318 443L316 446Q314 448 311 452T304 460T294 470T283 480T272 488T260 494T248 497Q231 497 223 474Q220 468 218 440T215 407V401H345L309 360H218V314Q218 181 221 139V129L253 108Q306 73 310 73Q315 73 343 83L373 92L374 87Q375 82 375 79T375 74T360 65T308 36T229 -13L208 -27L192 -13Q149 24 90 61Q89 61 89 62L90 68Q91 73 93 87T97 125T100 191T103 291V360H33V366L34 371L85 405H94L103 404V453"],58120:[679,220,981,31,875,"602 575Q505 508 505 489Q505 488 505 482T506 463T507 432Q507 314 456 237L449 226L434 216Q420 208 325 143L316 137Q453 82 488 82Q527 82 585 127L596 136Q597 136 599 126L602 115Q578 85 511 27T428 -31Q400 -31 308 10T170 51Q143 51 123 43T92 24T54 -15L34 6L41 14Q65 41 170 129L188 144L204 145Q254 147 293 164T350 208Q378 249 378 344Q378 422 362 478T320 563T268 605T213 618Q177 618 156 600T134 561Q134 539 162 508T217 446T245 394Q245 368 213 337T85 250L62 262Q73 269 86 279T116 308T133 338T108 378T57 439T32 499Q32 556 117 617T291 
679Q350 679 393 658Q415 647 433 631T462 600T480 572T490 550T494 541T499 544T516 556T547 578T603 613T689 662L720 679L730 670Q742 659 756 649T785 629T810 615T836 601T855 590Q855 587 860 536T870 419T875 312Q875 114 800 -25Q794 -35 781 -47Q584 -220 398 -220Q322 -220 278 -190Q253 -173 239 -155L244 -150Q248 -145 255 -138T271 -120T290 -100T310 -80T328 -63T341 -51T349 -46Q350 -46 351 -46T354 -47Q357 -47 357 -52Q359 -68 364 -83T383 -118T424 -151T491 -166Q559 -166 613 -129Q629 -118 641 -108T674 -68T710 1T735 107T746 260Q746 433 727 507Q727 512 685 535T615 570L602 575"],58121:[717,137,727,17,633,"351 571Q317 571 247 563T171 555Q153 555 133 563T107 584Q94 605 98 609Q101 615 138 658T190 717H207Q204 710 204 699Q204 673 231 666Q235 665 264 665Q296 665 345 667T426 669Q474 669 501 660T545 626Q553 612 553 594Q553 531 498 474T379 384Q371 379 371 378Q371 376 390 376H411H434Q520 376 602 318Q621 303 627 288T633 234Q633 59 540 -34Q465 -109 348 -130Q308 -137 235 -137Q159 -136 143 -129Q132 -125 132 -118V-53Q118 -24 90 -24Q69 -24 37 -39L27 -44L25 -42Q23 -39 21 -35T17 -30Q17 -28 40 -14T103 19T177 44Q183 45 205 45Q219 45 227 44T245 37T259 20T264 -12Q264 -33 262 -48T259 -80Q259 -93 260 -95Q271 -110 305 -110Q343 -110 383 -86T443 -33Q491 34 491 154Q491 223 467 249Q428 288 334 288H322Q288 288 237 276L222 273L206 286L262 367Q279 369 303 377T358 403T410 452T431 524Q431 531 431 533T427 545T416 558T392 566T351 571"]});MathJax.Ajax.loadComplete(MathJax.OutputJax.SVG.fontDir+"/Fraktur/Bold/PUA.js"); | PypiClean |
/quri_parts_chem-0.14.0-py3-none-any.whl/quri_parts/chem/mol/models.py |
from abc import abstractmethod, abstractproperty
from dataclasses import dataclass
from enum import Enum, auto
from typing import NamedTuple, Optional, Protocol, Sequence
import numpy as np
import numpy.typing as npt # noqa: F401
from quri_parts.chem.mol import get_core_and_active_orbital_indices
class OrbitalType(Enum):
"""Type of orbital."""
#: core orbital.
CORE = auto()
#: active orbital.
ACTIVE = auto()
#: virtual orbital.
VIRTUAL = auto()
@dataclass(frozen=True)
class ActiveSpace:
"""An immutable (frozen) dataclass representing a active space."""
#: number of active electrons.
n_active_ele: int
#: number of active orbitals.
n_active_orb: int
#: sequence of spatial orbital indices of the active orbitals.
active_orbs_indices: Optional[Sequence[int]] = None
def cas(
n_active_ele: int,
n_active_orb: int,
active_orbs_indices: Optional[Sequence[int]] = None,
) -> ActiveSpace:
"""Convenient function for constructing the active space."""
return ActiveSpace(n_active_ele, n_active_orb, active_orbs_indices)
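# Illustrative sketch (not part of the original module): a CAS(2e, 2o) active
# space that pins which spatial orbitals are active. The orbital indices are
# hypothetical and the helper is not called anywhere.
def _example_cas() -> ActiveSpace:
    return cas(n_active_ele=2, n_active_orb=2, active_orbs_indices=[4, 5])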
class MolecularOrbitals(Protocol):
"""Interface protocol for a data of the molecule."""
@abstractproperty
def n_electron(self) -> int:
"""Returns the number of electrons."""
...
@abstractproperty
def spin(self) -> int:
"""Returns the total spin of the electrons.
Note:
We follow the quantum chemistry convention where:
.. math::
N_{\\alpha} - N_{\\beta} = 2S
and spin = 2S.
"""
...
@abstractproperty
def n_spatial_orb(self) -> int:
"""Returns the number of spatial orbitals."""
...
@abstractproperty
def mo_coeff(self) -> "npt.NDArray[np.complex128]":
"""Returns molecular orbital coefficients."""
...
class ActiveSpaceMolecularOrbitals(MolecularOrbitals):
"""Represents a data of the active space for the molecule.
Args:
mo: :class:`MolecularOrbitals` for the molecule.
active_space: :class:`ActiveSpace` for the molecule.
"""
def __init__(
self,
mo: MolecularOrbitals,
active_space: ActiveSpace,
):
self._mo = mo
self._active_space = active_space
self._check_active_space_consistency()
@property
def n_electron(self) -> int:
"""Returns a number of electrons."""
return self._mo.n_electron
@property
def spin(self) -> int:
"""Returns the total spin of the electrons."""
return self._mo.spin
@property
def n_active_ele(self) -> int:
"""Returns the number of active electrons."""
return self._active_space.n_active_ele
@property
def n_core_ele(self) -> int:
"""Returns the number of core electrons."""
n_core_electron = self.n_electron - self.n_active_ele
return n_core_electron
@property
def n_ele_alpha(self) -> int:
"""Return the number of spin up electrons."""
return self.n_active_ele - self.n_ele_beta
@property
def n_ele_beta(self) -> int:
"""Return the number of spin down electrons."""
return (self.n_active_ele - self.spin) // 2
@property
def n_spatial_orb(self) -> int:
"""Returns the number of spatial orbitals."""
return self._mo.n_spatial_orb
@property
def n_active_orb(self) -> int:
"""Returns the number of active orbitals."""
return self._active_space.n_active_orb
@property
def n_core_orb(self) -> int:
"""Returns the number of core orbitals."""
return self.n_core_ele // 2
@property
def n_vir_orb(self) -> int:
"""Returns the number of virtual orbitals."""
return self._mo.n_spatial_orb - self.n_active_orb - self.n_core_orb
@property
def mo_coeff(self) -> "npt.NDArray[np.complex128]":
"""Returns molecular orbital coefficients."""
return self._mo.mo_coeff
def get_core_and_active_orb(self) -> tuple[Sequence[int], Sequence[int]]:
"""Returns a set of core and active orbitals.
        The output orbitals are represented as spatial orbital indices.
"""
n_active_ele = self._active_space.n_active_ele
n_active_orb = self._active_space.n_active_orb
active_orbs_indices = self._active_space.active_orbs_indices
core_orb_list, active_orb_list = get_core_and_active_orbital_indices(
n_active_ele,
n_active_orb,
self._mo.n_electron,
active_orbs_indices,
)
return core_orb_list, active_orb_list
def orb_type(self, mo_index: int) -> OrbitalType:
"""Returns a type of the given orbital index.
Args:
mo_index: Orbital index.
"""
core_orb_list, active_orb_list = self.get_core_and_active_orb()
if mo_index in core_orb_list:
return OrbitalType.CORE
elif mo_index in active_orb_list:
return OrbitalType.ACTIVE
else:
return OrbitalType.VIRTUAL
def _check_active_space_consistency(self) -> None:
"""Consistency check of the active space configuration."""
assert self.n_core_ele % 2 == 0, ValueError(
"The number of electrons in core must be even."
" Please set the active electron to an {} number".format(
{0: "even", 1: "odd"}[self.n_electron % 2]
)
)
assert self.n_core_ele >= 0, ValueError(
f"Number of core electrons should be a positive integer or zero.\n"
f" n_core_ele = {self.n_core_ele}.\n"
f" Possible fix: n_active_ele should be less than"
f" total number of electrons: {self.n_electron}."
)
assert self.n_vir_orb >= 0, ValueError(
f"Number of virtual orbitals should be a positive integer or zero.\n"
f" n_vir = {self.n_vir_orb}.\n"
f" Possible fix: (n_active_orb - n_active_ele//2) should not"
f" exceed {self.n_spatial_orb - self.n_electron//2}."
)
assert self.n_ele_alpha >= 0, ValueError(
f"Number of spin up electrons should be a positive integer or zero.\n"
f" n_ele_alpha = {self.n_ele_alpha}"
f" Possible_fix: - n_active_ele should not"
f" be less then the value of spin: {self.spin}."
)
assert self.n_ele_beta >= 0, ValueError(
f"Number of spin down electrons should be a positive integer or zero.\n"
f" n_ele_beta = {self.n_ele_beta}.\n"
f" Possible_fix: n_active_ele should not"
f" exceed the value of spin: {self.spin}."
)
assert self.n_ele_alpha <= self.n_active_orb, ValueError(
f"Number of spin up electrons should not exceed the number of active orbitals.\n" # noqa: E501
f" n_ele_alpha = {self.n_ele_alpha},\n"
f" n_active_orb = {self.n_active_orb}.\n"
f" Possible fix: [(n_active_ele + spin)//2] should be"
f" less than n_active_orb: {self.n_active_orb}"
)
assert self.n_ele_beta <= self.n_active_orb, ValueError(
f"Number of spin down electrons should not exceed the number of active orbitals.\n" # noqa: E501
f" n_ele_beta = {self.n_ele_beta},\n"
f" n_active_orb = {self.n_active_orb}\n"
f" Possible fix: [(n_active_ele - spin)//2] should be"
f" less than n_active_orb: {self.n_active_orb}"
)
def __str__(self) -> str:
info_dict = {
"n_electron": self.n_electron,
"n_active_ele": self.n_active_ele,
"n_core_ele": self.n_core_ele,
"n_ele_alpha": self.n_ele_alpha,
"n_ele_beta": self.n_ele_beta,
"n_spatial_orb": self.n_spatial_orb,
"n_active_orb": self.n_active_orb,
"n_core_orb": self.n_core_orb,
"n_vir_orb": self.n_vir_orb,
}
info_str = "\n".join(f"{key}: {str(value)}" for key, value in info_dict.items())
return info_str
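# Illustrative sketch (not part of the original module): restricting a
# 4-electron / 4-orbital system to a CAS(2e, 2o) active space. The stub
# molecular-orbital data is hypothetical and the helper is not called anywhere.
def _example_active_space_mo() -> ActiveSpaceMolecularOrbitals:
    class _StubMO:
        n_electron = 4
        spin = 0
        n_spatial_orb = 4
        mo_coeff = np.eye(4, dtype=np.complex128)
    return ActiveSpaceMolecularOrbitals(_StubMO(), cas(2, 2))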
class AO1eInt(Protocol):
"""Interface protocol for an atomic orbital one-electron integral."""
@abstractproperty
def array(self) -> "npt.NDArray[np.complex128]":
"""Returns integral as numpy ndarray."""
...
@abstractmethod
def to_mo1int(self, mo_coeff: "npt.NDArray[np.complex128]") -> "SpinMO1eInt":
"""This method converts an atomic orbital one-electron integral into
the molecular orbital one-electron integral.
Args:
mo_coeff: molecular orbital coefficients.
"""
...
class AO2eInt(Protocol):
"""Interface protocol for an atomic orbital two-electron integral."""
@abstractproperty
def array(self) -> "npt.NDArray[np.complex128]":
"""Returns integral as numpy ndarray."""
...
@abstractmethod
def to_mo2int(self, mo_coeff: "npt.NDArray[np.complex128]") -> "SpinMO2eInt":
"""This method converts an atomic orbital two-electron integral into
the molecular orbital two-electron integral.
Args:
mo_coeff: molecular orbital coefficients.
"""
...
class AOeIntSet(Protocol):
"""AOeIntSet holds a constant and the atomic orbital electron integrals."""
#: constant.
constant: float
#: atomic orbital one-electron integral :class:`AO1eInt`.
ao_1e_int: AO1eInt
#: atomic orbital two-electron integral :class:`AO2eInt`.
ao_2e_int: AO2eInt
def to_full_space_mo_int(
self,
mo: MolecularOrbitals,
) -> "SpinMOeIntSet":
"""Compute the full space spin mo integral."""
...
def to_active_space_mo_int(
self,
active_space_mo: ActiveSpaceMolecularOrbitals,
) -> "SpinMOeIntSet":
"""Compute the active space spin mo integral."""
...
class SpatialMO1eInt(Protocol):
"""Interface protocol for a molecular orbital one-electron integral."""
@abstractproperty
def array(self) -> "npt.NDArray[np.complex128]":
"""Returns integral as numpy ndarray."""
...
class SpatialMO2eInt(Protocol):
"""Interface protocol for a molecular orbital two-electron integral."""
@abstractproperty
def array(self) -> "npt.NDArray[np.complex128]":
"""Returns integral as numpy ndarray."""
...
class SpatialMO1eIntArray(SpatialMO1eInt):
"""A class of a molecular orbital one-electron integral.
This interface has an integral as numpy ndarray.
"""
def __init__(self, array: "npt.NDArray[np.complex128]"):
self._array = array
@property
def array(self) -> "npt.NDArray[np.complex128]":
"""Returns integral as numpy ndarray."""
return self._array
class SpatialMO2eIntArray(SpatialMO2eInt):
"""A class of a molecular orbital two-electron integral.
This interface has an integral as numpy ndarray.
"""
def __init__(self, array: "npt.NDArray[np.complex128]"):
self._array = array
@property
def array(self) -> "npt.NDArray[np.complex128]":
"""Returns integral as numpy ndarray."""
return self._array
class SpatialMOeIntSet(NamedTuple):
"""SpatialMOeIntSet holds a constant and a set of molecular orbital
electron integral."""
#: constant.
const: float
#: molecular orbital one-electron integral.
mo_1e_int: SpatialMO1eInt
#: molecular orbital two-electron integral.
mo_2e_int: SpatialMO2eInt
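# A minimal construction sketch (illustrative only; the shapes and zero-valued arrays
# below are assumptions, not values used by this module): the wrapper classes defined
# above let a SpatialMOeIntSet be built directly from numpy arrays.
#
#     import numpy as np
#
#     h1 = SpatialMO1eIntArray(np.zeros((2, 2), dtype=np.complex128))
#     h2 = SpatialMO2eIntArray(np.zeros((2, 2, 2, 2), dtype=np.complex128))
#     mo_ints = SpatialMOeIntSet(const=0.0, mo_1e_int=h1, mo_2e_int=h2)
#     mo_ints.mo_1e_int.array.shape  # -> (2, 2)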
class SpinMO1eInt(Protocol):
"""Interface protocol for a molecular spin orbital one-electron
integral."""
@abstractproperty
def array(self) -> npt.NDArray[np.complex128]:
"""Returns integral as numpy ndarray."""
...
class SpinMO2eInt(Protocol):
"""Interface protocol for a molecular spin orbital two-electron
integral."""
@abstractproperty
def array(self) -> npt.NDArray[np.complex128]:
"""Returns integral as numpy ndarray."""
...
class SpinMO1eIntArray(SpinMO1eInt):
"""Stores the array of the 1-electron integrals."""
def __init__(self, array: npt.NDArray[np.complex128]) -> None:
self._array = array
@property
def array(self) -> npt.NDArray[np.complex128]:
"""Returns integral as numpy ndarray."""
return self._array
class SpinMO2eIntArray(SpinMO2eInt):
"""Stores the array of the 2-electron integrals."""
def __init__(self, array: npt.NDArray[np.complex128]) -> None:
self._array = array
@property
def array(self) -> npt.NDArray[np.complex128]:
"""Returns integral as numpy ndarray."""
return self._array
class SpinMOeIntSet(NamedTuple):
"""SpinMOeIntSet holds a constant and a set of molecular orbital electron
integral."""
#: constant.
const: float
#: spin molecular orbital one-electron integral.
mo_1e_int: SpinMO1eInt
#: spin molecular orbital two-electron integral.
mo_2e_int: SpinMO2eInt | PypiClean |
/ol_concourse-0.5.3-py3-none-any.whl/ol_concourse/lib/resources.py | from typing import Optional, Union
from ol_concourse.lib.models.pipeline import Identifier, RegistryImage, Resource
from ol_concourse.lib.models.resource import Git
def git_repo( # noqa: PLR0913
name: Identifier,
uri: str,
branch: str = "main",
check_every: str = "60s",
paths: Optional[list[str]] = None,
depth: Optional[int] = None,
**kwargs,
) -> Resource:
return Resource(
name=name,
type="git",
icon="git",
check_every=check_every,
source=Git(uri=uri, branch=branch, paths=paths).model_dump(exclude_none=True),
**kwargs,
)
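# Illustrative usage sketch (the name, URI, and paths below are placeholders, and the
# Identifier constructor is assumed to accept a plain string):
#
#     repo = git_repo(
#         name=Identifier("my-repo"),
#         uri="https://github.com/example/my-repo",
#         branch="main",
#         paths=["src/"],
#     )
#
# The returned Resource can then be referenced by name in later pipeline jobs.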
def ssh_git_repo(
name: Identifier,
uri: str,
private_key: str,
branch: str = "main",
paths: Optional[list[str]] = None,
) -> Resource:
return Resource(
name=name,
type="git",
icon="git",
source=Git(
uri=uri, branch=branch, paths=paths, private_key=private_key
).model_dump(exclude_none=True),
)
def github_release(name: Identifier, owner: str, repository: str) -> Resource:
"""Generate a github-release resource for the given owner/repository.
:param name: The name of the resource. This will get used across subsequent
pipeline steps that reference this resource.
:type name: Identifier
:param owner: The owner of the repository (e.g. the GitHub user or organization)
:type owner: str
:param repository: The name of the repository as it appears in GitHub
:type repository: str
:returns: A configured Concourse resource object that can be used in a pipeline.
:rtype: Resource
"""
return Resource(
name=name,
type="github-release",
icon="github",
check_every="24h",
source={"repository": repository, "owner": owner, "release": True},
)
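# Illustrative sketch (owner and repository are placeholders): track published GitHub
# releases of a project, polled daily per the check_every="24h" setting above.
#
#     release = github_release(
#         name=Identifier("upstream-release"),
#         owner="example-org",
#         repository="example-project",
#     )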
def hashicorp_release(name: Identifier, project: str) -> Resource:
"""Generate a hashicorp-release resource for the given application. # noqa: DAR201
:param name: The name of the resource. This will get used across subsequent
pipeline steps that reference this resource.
:type name: Identifier
:param project: The name of the hashicorp project to check for a release of.
:type project: str
"""
return Resource(
name=name,
type="hashicorp-release",
icon="lock-check",
check_every="24h",
source={"project": project},
)
def amazon_ami(
name: Identifier,
filters: dict[str, Union[str, bool]],
region: str = "us-east-1",
) -> Resource:
return Resource(
name=name,
type="amazon-ami",
icon="server",
check_every="30m",
source={
"region": region,
"filters": filters,
},
)
def pulumi_provisioner(
name: Identifier, project_name: str, project_path: str
) -> Resource:
return Resource(
name=name,
type="pulumi-provisioner",
icon="cloud-braces",
source={
"env_pulumi": {"AWS_SHARED_CREDENTIALS_FILE": "aws_creds/credentials"},
"action": "update",
"project_name": project_name,
"source_dir": project_path,
},
)
def schedule(name: Identifier, interval: str) -> Resource:
return Resource(
name=name,
type="time",
icon="clock",
source={"interval": interval},
)
def registry_image( # noqa: PLR0913
name: Identifier,
image_repository: str,
image_tag: Optional[str] = None,
variant: Optional[str] = None,
username=None,
password=None,
) -> Resource:
image_source = RegistryImage(
repository=image_repository, tag=image_tag or "latest"
).model_dump()
if username and password:
image_source["username"] = username
image_source["password"] = password
if variant:
image_source["variant"] = variant
return Resource(
name=name,
type="registry-image",
source=image_source,
)
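# Illustrative sketch (image name and tag are placeholders): credentials and variant
# are only attached to the source when they are provided.
#
#     image = registry_image(
#         name=Identifier("python-image"),
#         image_repository="python",
#         image_tag="3.11-slim",
#     )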
# https://github.com/arbourd/concourse-slack-alert-resource
# We use only a very basic implementation of this notification framework
def slack_notification(name: Identifier, url: str) -> Resource:
return Resource(
name=name, type="slack-notification", source={"url": url, "disabled": False}
) | PypiClean |
/jupyterlab_remote_contents-0.1.1.tar.gz/jupyterlab_remote_contents-0.1.1/node_modules/eslint/lib/linter/linter.js | "use strict";
//------------------------------------------------------------------------------
// Requirements
//------------------------------------------------------------------------------
const
path = require("path"),
eslintScope = require("eslint-scope"),
evk = require("eslint-visitor-keys"),
espree = require("espree"),
merge = require("lodash.merge"),
pkg = require("../../package.json"),
astUtils = require("../shared/ast-utils"),
{
Legacy: {
ConfigOps,
ConfigValidator,
environments: BuiltInEnvironments
}
} = require("@eslint/eslintrc/universal"),
Traverser = require("../shared/traverser"),
{ SourceCode } = require("../source-code"),
CodePathAnalyzer = require("./code-path-analysis/code-path-analyzer"),
applyDisableDirectives = require("./apply-disable-directives"),
ConfigCommentParser = require("./config-comment-parser"),
NodeEventGenerator = require("./node-event-generator"),
createReportTranslator = require("./report-translator"),
Rules = require("./rules"),
createEmitter = require("./safe-emitter"),
SourceCodeFixer = require("./source-code-fixer"),
timing = require("./timing"),
ruleReplacements = require("../../conf/replacements.json");
const { getRuleFromConfig } = require("../config/flat-config-helpers");
const { FlatConfigArray } = require("../config/flat-config-array");
const debug = require("debug")("eslint:linter");
const MAX_AUTOFIX_PASSES = 10;
const DEFAULT_PARSER_NAME = "espree";
const DEFAULT_ECMA_VERSION = 5;
const commentParser = new ConfigCommentParser();
const DEFAULT_ERROR_LOC = { start: { line: 1, column: 0 }, end: { line: 1, column: 1 } };
const parserSymbol = Symbol.for("eslint.RuleTester.parser");
const globals = require("../../conf/globals");
//------------------------------------------------------------------------------
// Typedefs
//------------------------------------------------------------------------------
/** @typedef {InstanceType<import("../cli-engine/config-array").ConfigArray>} ConfigArray */
/** @typedef {InstanceType<import("../cli-engine/config-array").ExtractedConfig>} ExtractedConfig */
/** @typedef {import("../shared/types").ConfigData} ConfigData */
/** @typedef {import("../shared/types").Environment} Environment */
/** @typedef {import("../shared/types").GlobalConf} GlobalConf */
/** @typedef {import("../shared/types").LintMessage} LintMessage */
/** @typedef {import("../shared/types").SuppressedLintMessage} SuppressedLintMessage */
/** @typedef {import("../shared/types").ParserOptions} ParserOptions */
/** @typedef {import("../shared/types").LanguageOptions} LanguageOptions */
/** @typedef {import("../shared/types").Processor} Processor */
/** @typedef {import("../shared/types").Rule} Rule */
/* eslint-disable jsdoc/valid-types -- https://github.com/jsdoc-type-pratt-parser/jsdoc-type-pratt-parser/issues/4#issuecomment-778805577 */
/**
* @template T
* @typedef {{ [P in keyof T]-?: T[P] }} Required
*/
/* eslint-enable jsdoc/valid-types -- https://github.com/jsdoc-type-pratt-parser/jsdoc-type-pratt-parser/issues/4#issuecomment-778805577 */
/**
* @typedef {Object} DisableDirective
* @property {("disable"|"enable"|"disable-line"|"disable-next-line")} type Type of directive
* @property {number} line The line number
* @property {number} column The column number
* @property {(string|null)} ruleId The rule ID
* @property {string} justification The justification of directive
*/
/**
* The private data for `Linter` instance.
* @typedef {Object} LinterInternalSlots
* @property {ConfigArray|null} lastConfigArray The `ConfigArray` instance that the last `verify()` call used.
* @property {SourceCode|null} lastSourceCode The `SourceCode` instance that the last `verify()` call used.
* @property {SuppressedLintMessage[]} lastSuppressedMessages The `SuppressedLintMessage[]` instance that the last `verify()` call produced.
* @property {Map<string, Parser>} parserMap The loaded parsers.
* @property {Rules} ruleMap The loaded rules.
*/
/**
* @typedef {Object} VerifyOptions
* @property {boolean} [allowInlineConfig] Allow/disallow inline comments' ability
* to change config once it is set. Defaults to true if not supplied.
* Useful if you want to validate JS without comments overriding rules.
* @property {boolean} [disableFixes] if `true` then the linter doesn't make `fix`
* properties into the lint result.
* @property {string} [filename] the filename of the source code.
* @property {boolean | "off" | "warn" | "error"} [reportUnusedDisableDirectives] Adds reported errors for
* unused `eslint-disable` directives.
*/
/**
* @typedef {Object} ProcessorOptions
* @property {(filename:string, text:string) => boolean} [filterCodeBlock] the
* predicate function that selects adopt code blocks.
* @property {Processor.postprocess} [postprocess] postprocessor for report
* messages. If provided, this should accept an array of the message lists
* for each code block returned from the preprocessor, apply a mapping to
* the messages as appropriate, and return a one-dimensional array of
* messages.
* @property {Processor.preprocess} [preprocess] preprocessor for source text.
* If provided, this should accept a string of source text, and return an
* array of code blocks to lint.
*/
/**
* @typedef {Object} FixOptions
* @property {boolean | ((message: LintMessage) => boolean)} [fix] Determines
* whether fixes should be applied.
*/
/**
* @typedef {Object} InternalOptions
* @property {string | null} warnInlineConfig The config name what `noInlineConfig` setting came from. If `noInlineConfig` setting didn't exist, this is null. If this is a config name, then the linter warns directive comments.
* @property {"off" | "warn" | "error"} reportUnusedDisableDirectives (boolean values were normalized)
*/
//------------------------------------------------------------------------------
// Helpers
//------------------------------------------------------------------------------
/**
* Determines if a given object is Espree.
* @param {Object} parser The parser to check.
* @returns {boolean} True if the parser is Espree or false if not.
*/
function isEspree(parser) {
return !!(parser === espree || parser[parserSymbol] === espree);
}
/**
* Retrieves globals for the given ecmaVersion.
* @param {number} ecmaVersion The version to retrieve globals for.
* @returns {Object} The globals for the given ecmaVersion.
*/
function getGlobalsForEcmaVersion(ecmaVersion) {
switch (ecmaVersion) {
case 3:
return globals.es3;
case 5:
return globals.es5;
default:
if (ecmaVersion < 2015) {
return globals[`es${ecmaVersion + 2009}`];
}
return globals[`es${ecmaVersion}`];
}
}
/**
* Ensures that variables representing built-in properties of the Global Object,
* and any globals declared by special block comments, are present in the global
* scope.
* @param {Scope} globalScope The global scope.
* @param {Object} configGlobals The globals declared in configuration
* @param {{exportedVariables: Object, enabledGlobals: Object}} commentDirectives Directives from comment configuration
* @returns {void}
*/
function addDeclaredGlobals(globalScope, configGlobals, { exportedVariables, enabledGlobals }) {
// Define configured global variables.
for (const id of new Set([...Object.keys(configGlobals), ...Object.keys(enabledGlobals)])) {
/*
* `ConfigOps.normalizeConfigGlobal` will throw an error if a configured global value is invalid. However, these errors would
* typically be caught when validating a config anyway (validity for inline global comments is checked separately).
*/
const configValue = configGlobals[id] === void 0 ? void 0 : ConfigOps.normalizeConfigGlobal(configGlobals[id]);
const commentValue = enabledGlobals[id] && enabledGlobals[id].value;
const value = commentValue || configValue;
const sourceComments = enabledGlobals[id] && enabledGlobals[id].comments;
if (value === "off") {
continue;
}
let variable = globalScope.set.get(id);
if (!variable) {
variable = new eslintScope.Variable(id, globalScope);
globalScope.variables.push(variable);
globalScope.set.set(id, variable);
}
variable.eslintImplicitGlobalSetting = configValue;
variable.eslintExplicitGlobal = sourceComments !== void 0;
variable.eslintExplicitGlobalComments = sourceComments;
variable.writeable = (value === "writable");
}
// mark all exported variables as such
Object.keys(exportedVariables).forEach(name => {
const variable = globalScope.set.get(name);
if (variable) {
variable.eslintUsed = true;
}
});
/*
* "through" contains all references which definitions cannot be found.
* Since we augment the global scope using configuration, we need to update
* references and remove the ones that were added by configuration.
*/
globalScope.through = globalScope.through.filter(reference => {
const name = reference.identifier.name;
const variable = globalScope.set.get(name);
if (variable) {
/*
* Links the variable and the reference.
* And this reference is removed from `Scope#through`.
*/
reference.resolved = variable;
variable.references.push(reference);
return false;
}
return true;
});
}
/**
* creates a missing-rule message.
* @param {string} ruleId the ruleId to create
* @returns {string} created error message
* @private
*/
function createMissingRuleMessage(ruleId) {
return Object.prototype.hasOwnProperty.call(ruleReplacements.rules, ruleId)
? `Rule '${ruleId}' was removed and replaced by: ${ruleReplacements.rules[ruleId].join(", ")}`
: `Definition for rule '${ruleId}' was not found.`;
}
/**
* creates a linting problem
* @param {Object} options to create linting error
* @param {string} [options.ruleId] the ruleId to report
* @param {Object} [options.loc] the loc to report
* @param {string} [options.message] the error message to report
* @param {string} [options.severity] the error message to report
* @returns {LintMessage} created problem, returns a missing-rule problem if only provided ruleId.
* @private
*/
function createLintingProblem(options) {
const {
ruleId = null,
loc = DEFAULT_ERROR_LOC,
message = createMissingRuleMessage(options.ruleId),
severity = 2
} = options;
return {
ruleId,
message,
line: loc.start.line,
column: loc.start.column + 1,
endLine: loc.end.line,
endColumn: loc.end.column + 1,
severity,
nodeType: null
};
}
/**
* Creates a collection of disable directives from a comment
* @param {Object} options to create disable directives
* @param {("disable"|"enable"|"disable-line"|"disable-next-line")} options.type The type of directive comment
* @param {token} options.commentToken The Comment token
* @param {string} options.value The value after the directive in the comment; an empty
* value means the comment specified no specific rules, so it applies to all rules (e.g. `eslint-disable`)
* @param {string} options.justification The justification of the directive
* @param {function(string): {create: Function}} options.ruleMapper A map from rule IDs to defined rules
* @returns {Object} Directives and problems from the comment
*/
function createDisableDirectives(options) {
const { commentToken, type, value, justification, ruleMapper } = options;
const ruleIds = Object.keys(commentParser.parseListConfig(value));
const directiveRules = ruleIds.length ? ruleIds : [null];
const result = {
directives: [], // valid disable directives
directiveProblems: [] // problems in directives
};
const parentComment = { commentToken, ruleIds };
for (const ruleId of directiveRules) {
// push to directives, if the rule is defined(including null, e.g. /*eslint enable*/)
if (ruleId === null || !!ruleMapper(ruleId)) {
if (type === "disable-next-line") {
result.directives.push({
parentComment,
type,
line: commentToken.loc.end.line,
column: commentToken.loc.end.column + 1,
ruleId,
justification
});
} else {
result.directives.push({
parentComment,
type,
line: commentToken.loc.start.line,
column: commentToken.loc.start.column + 1,
ruleId,
justification
});
}
} else {
result.directiveProblems.push(createLintingProblem({ ruleId, loc: commentToken.loc }));
}
}
return result;
}
/**
* Extract the directive and the justification from a given directive comment and trim them.
* @param {string} value The comment text to extract.
* @returns {{directivePart: string, justificationPart: string}} The extracted directive and justification.
*/
function extractDirectiveComment(value) {
const match = /\s-{2,}\s/u.exec(value);
if (!match) {
return { directivePart: value.trim(), justificationPart: "" };
}
const directive = value.slice(0, match.index).trim();
const justification = value.slice(match.index + match[0].length).trim();
return { directivePart: directive, justificationPart: justification };
}
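// Behaviour sketch (the comment bodies below are made-up examples, not taken from a
// real lint run): a " -- " separator splits the directive from its justification.
//
//     extractDirectiveComment("eslint-disable no-console -- legacy logging");
//     // -> { directivePart: "eslint-disable no-console", justificationPart: "legacy logging" }
//     extractDirectiveComment("eslint-disable no-console");
//     // -> { directivePart: "eslint-disable no-console", justificationPart: "" }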
/**
* Parses comments in file to extract file-specific config of rules, globals
* and environments and merges them with global config; also code blocks
* where reporting is disabled or enabled and merges them with reporting config.
* @param {ASTNode} ast The top node of the AST.
* @param {function(string): {create: Function}} ruleMapper A map from rule IDs to defined rules
* @param {string|null} warnInlineConfig If a string then it should warn directive comments as disabled. The string value is the config name what the setting came from.
* @returns {{configuredRules: Object, enabledGlobals: {value:string,comment:Token}[], exportedVariables: Object, problems: Problem[], disableDirectives: DisableDirective[]}}
* A collection of the directive comments that were found, along with any problems that occurred when parsing
*/
function getDirectiveComments(ast, ruleMapper, warnInlineConfig) {
const configuredRules = {};
const enabledGlobals = Object.create(null);
const exportedVariables = {};
const problems = [];
const disableDirectives = [];
const validator = new ConfigValidator({
builtInRules: Rules
});
ast.comments.filter(token => token.type !== "Shebang").forEach(comment => {
const { directivePart, justificationPart } = extractDirectiveComment(comment.value);
const match = /^(eslint(?:-env|-enable|-disable(?:(?:-next)?-line)?)?|exported|globals?)(?:\s|$)/u.exec(directivePart);
if (!match) {
return;
}
const directiveText = match[1];
const lineCommentSupported = /^eslint-disable-(next-)?line$/u.test(directiveText);
if (comment.type === "Line" && !lineCommentSupported) {
return;
}
if (warnInlineConfig) {
const kind = comment.type === "Block" ? `/*${directiveText}*/` : `//${directiveText}`;
problems.push(createLintingProblem({
ruleId: null,
message: `'${kind}' has no effect because you have 'noInlineConfig' setting in ${warnInlineConfig}.`,
loc: comment.loc,
severity: 1
}));
return;
}
if (directiveText === "eslint-disable-line" && comment.loc.start.line !== comment.loc.end.line) {
const message = `${directiveText} comment should not span multiple lines.`;
problems.push(createLintingProblem({
ruleId: null,
message,
loc: comment.loc
}));
return;
}
const directiveValue = directivePart.slice(match.index + directiveText.length);
switch (directiveText) {
case "eslint-disable":
case "eslint-enable":
case "eslint-disable-next-line":
case "eslint-disable-line": {
const directiveType = directiveText.slice("eslint-".length);
const options = { commentToken: comment, type: directiveType, value: directiveValue, justification: justificationPart, ruleMapper };
const { directives, directiveProblems } = createDisableDirectives(options);
disableDirectives.push(...directives);
problems.push(...directiveProblems);
break;
}
case "exported":
Object.assign(exportedVariables, commentParser.parseStringConfig(directiveValue, comment));
break;
case "globals":
case "global":
for (const [id, { value }] of Object.entries(commentParser.parseStringConfig(directiveValue, comment))) {
let normalizedValue;
try {
normalizedValue = ConfigOps.normalizeConfigGlobal(value);
} catch (err) {
problems.push(createLintingProblem({
ruleId: null,
loc: comment.loc,
message: err.message
}));
continue;
}
if (enabledGlobals[id]) {
enabledGlobals[id].comments.push(comment);
enabledGlobals[id].value = normalizedValue;
} else {
enabledGlobals[id] = {
comments: [comment],
value: normalizedValue
};
}
}
break;
case "eslint": {
const parseResult = commentParser.parseJsonConfig(directiveValue, comment.loc);
if (parseResult.success) {
Object.keys(parseResult.config).forEach(name => {
const rule = ruleMapper(name);
const ruleValue = parseResult.config[name];
if (!rule) {
problems.push(createLintingProblem({ ruleId: name, loc: comment.loc }));
return;
}
try {
validator.validateRuleOptions(rule, name, ruleValue);
} catch (err) {
problems.push(createLintingProblem({
ruleId: name,
message: err.message,
loc: comment.loc
}));
// do not apply the config, if found invalid options.
return;
}
configuredRules[name] = ruleValue;
});
} else {
problems.push(parseResult.error);
}
break;
}
// no default
}
});
return {
configuredRules,
enabledGlobals,
exportedVariables,
problems,
disableDirectives
};
}
/**
* Normalize ECMAScript version from the initial config
* @param {Parser} parser The parser which uses this options.
* @param {number} ecmaVersion ECMAScript version from the initial config
* @returns {number} normalized ECMAScript version
*/
function normalizeEcmaVersion(parser, ecmaVersion) {
if (isEspree(parser)) {
if (ecmaVersion === "latest") {
return espree.latestEcmaVersion;
}
}
/*
* Calculate ECMAScript edition number from official year version starting with
* ES2015, which corresponds with ES6 (or a difference of 2009).
*/
return ecmaVersion >= 2015 ? ecmaVersion - 2009 : ecmaVersion;
}
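// Quick illustration (assuming the default espree parser; values are examples):
//
//     normalizeEcmaVersion(espree, "latest"); // -> espree.latestEcmaVersion
//     normalizeEcmaVersion(espree, 2020);     // -> 11 (year form converted to edition)
//     normalizeEcmaVersion(espree, 6);        // -> 6  (already an edition number)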
/**
* Normalize ECMAScript version from the initial config into languageOptions (year)
* format.
* @param {any} [ecmaVersion] ECMAScript version from the initial config
* @returns {number} normalized ECMAScript version
*/
function normalizeEcmaVersionForLanguageOptions(ecmaVersion) {
switch (ecmaVersion) {
case 3:
return 3;
// void 0 = no ecmaVersion specified so use the default
case 5:
case void 0:
return 5;
default:
if (typeof ecmaVersion === "number") {
return ecmaVersion >= 2015 ? ecmaVersion : ecmaVersion + 2009;
}
}
/*
* We default to the latest supported ecmaVersion for everything else.
* Remember, this is for languageOptions.ecmaVersion, which sets the version
* that is used for a number of processes inside of ESLint. It's normally
* safe to assume people want the latest unless otherwise specified.
*/
return espree.latestEcmaVersion + 2009;
}
const eslintEnvPattern = /\/\*\s*eslint-env\s(.+?)(?:\*\/|$)/gsu;
/**
* Checks whether or not there is a comment which has "eslint-env *" in a given text.
* @param {string} text A source code text to check.
* @returns {Object|null} A result of parseListConfig() with "eslint-env *" comment.
*/
function findEslintEnv(text) {
let match, retv;
eslintEnvPattern.lastIndex = 0;
while ((match = eslintEnvPattern.exec(text)) !== null) {
if (match[0].endsWith("*/")) {
retv = Object.assign(
retv || {},
commentParser.parseListConfig(extractDirectiveComment(match[1]).directivePart)
);
}
}
return retv;
}
/**
* Convert "/path/to/<text>" to "<text>".
* `CLIEngine#executeOnText()` method gives "/path/to/<text>" if the filename
* was omitted because `configArray.extractConfig()` requires an absolute path.
* But the linter should pass `<text>` to `RuleContext#getFilename()` in that
* case.
* Also, code blocks can have their virtual filename. If the parent filename was
* `<text>`, the virtual filename is `<text>/0_foo.js` or something like (i.e.,
* it's not an absolute path).
* @param {string} filename The filename to normalize.
* @returns {string} The normalized filename.
*/
function normalizeFilename(filename) {
const parts = filename.split(path.sep);
const index = parts.lastIndexOf("<text>");
return index === -1 ? filename : parts.slice(index).join(path.sep);
}
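// Illustration (POSIX-style separators assumed in these example paths):
//
//     normalizeFilename("/path/to/<text>");          // -> "<text>"
//     normalizeFilename("/path/to/<text>/0_foo.js"); // -> "<text>/0_foo.js"
//     normalizeFilename("src/app.js");               // -> "src/app.js" (unchanged)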
/**
* Normalizes the possible options for `linter.verify` and `linter.verifyAndFix` to a
* consistent shape.
* @param {VerifyOptions} providedOptions Options
* @param {ConfigData} config Config.
* @returns {Required<VerifyOptions> & InternalOptions} Normalized options
*/
function normalizeVerifyOptions(providedOptions, config) {
const linterOptions = config.linterOptions || config;
// .noInlineConfig for eslintrc, .linterOptions.noInlineConfig for flat
const disableInlineConfig = linterOptions.noInlineConfig === true;
const ignoreInlineConfig = providedOptions.allowInlineConfig === false;
const configNameOfNoInlineConfig = config.configNameOfNoInlineConfig
? ` (${config.configNameOfNoInlineConfig})`
: "";
let reportUnusedDisableDirectives = providedOptions.reportUnusedDisableDirectives;
if (typeof reportUnusedDisableDirectives === "boolean") {
reportUnusedDisableDirectives = reportUnusedDisableDirectives ? "error" : "off";
}
if (typeof reportUnusedDisableDirectives !== "string") {
reportUnusedDisableDirectives =
linterOptions.reportUnusedDisableDirectives
? "warn" : "off";
}
return {
filename: normalizeFilename(providedOptions.filename || "<input>"),
allowInlineConfig: !ignoreInlineConfig,
warnInlineConfig: disableInlineConfig && !ignoreInlineConfig
? `your config${configNameOfNoInlineConfig}`
: null,
reportUnusedDisableDirectives,
disableFixes: Boolean(providedOptions.disableFixes)
};
}
/**
* Combines the provided parserOptions with the options from environments
* @param {Parser} parser The parser which uses this options.
* @param {ParserOptions} providedOptions The provided 'parserOptions' key in a config
* @param {Environment[]} enabledEnvironments The environments enabled in configuration and with inline comments
* @returns {ParserOptions} Resulting parser options after merge
*/
function resolveParserOptions(parser, providedOptions, enabledEnvironments) {
const parserOptionsFromEnv = enabledEnvironments
.filter(env => env.parserOptions)
.reduce((parserOptions, env) => merge(parserOptions, env.parserOptions), {});
const mergedParserOptions = merge(parserOptionsFromEnv, providedOptions || {});
const isModule = mergedParserOptions.sourceType === "module";
if (isModule) {
/*
* can't have global return inside of modules
* TODO: espree should validate parserOptions.globalReturn when sourceType is set to module. (@aladdin-add)
*/
mergedParserOptions.ecmaFeatures = Object.assign({}, mergedParserOptions.ecmaFeatures, { globalReturn: false });
}
mergedParserOptions.ecmaVersion = normalizeEcmaVersion(parser, mergedParserOptions.ecmaVersion);
return mergedParserOptions;
}
/**
* Converts parserOptions to languageOptions for backwards compatibility with eslintrc.
* @param {ConfigData} config Config object.
* @param {Object} config.globals Global variable definitions.
* @param {Parser} config.parser The parser to use.
* @param {ParserOptions} config.parserOptions The parserOptions to use.
* @returns {LanguageOptions} The languageOptions equivalent.
*/
function createLanguageOptions({ globals: configuredGlobals, parser, parserOptions }) {
const {
ecmaVersion,
sourceType
} = parserOptions;
return {
globals: configuredGlobals,
ecmaVersion: normalizeEcmaVersionForLanguageOptions(ecmaVersion),
sourceType,
parser,
parserOptions
};
}
/**
* Combines the provided globals object with the globals from environments
* @param {Record<string, GlobalConf>} providedGlobals The 'globals' key in a config
* @param {Environment[]} enabledEnvironments The environments enabled in configuration and with inline comments
* @returns {Record<string, GlobalConf>} The resolved globals object
*/
function resolveGlobals(providedGlobals, enabledEnvironments) {
return Object.assign(
{},
...enabledEnvironments.filter(env => env.globals).map(env => env.globals),
providedGlobals
);
}
/**
* Strips Unicode BOM from a given text.
* @param {string} text A text to strip.
* @returns {string} The stripped text.
*/
function stripUnicodeBOM(text) {
/*
* Check Unicode BOM.
* In JavaScript, string data is stored as UTF-16, so BOM is 0xFEFF.
* http://www.ecma-international.org/ecma-262/6.0/#sec-unicode-format-control-characters
*/
if (text.charCodeAt(0) === 0xFEFF) {
return text.slice(1);
}
return text;
}
/**
* Get the options for a rule (not including severity), if any
* @param {Array|number} ruleConfig rule configuration
* @returns {Array} of rule options, empty Array if none
*/
function getRuleOptions(ruleConfig) {
if (Array.isArray(ruleConfig)) {
return ruleConfig.slice(1);
}
return [];
}
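// Illustration (example rule configurations, not tied to any specific rule):
//
//     getRuleOptions(["error", { allow: ["warn"] }]); // -> [{ allow: ["warn"] }]
//     getRuleOptions("error");                        // -> []
//     getRuleOptions(2);                              // -> []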
/**
* Analyze scope of the given AST.
* @param {ASTNode} ast The `Program` node to analyze.
* @param {LanguageOptions} languageOptions The parser options.
* @param {Record<string, string[]>} visitorKeys The visitor keys.
* @returns {ScopeManager} The analysis result.
*/
function analyzeScope(ast, languageOptions, visitorKeys) {
const parserOptions = languageOptions.parserOptions;
const ecmaFeatures = parserOptions.ecmaFeatures || {};
const ecmaVersion = languageOptions.ecmaVersion || DEFAULT_ECMA_VERSION;
return eslintScope.analyze(ast, {
ignoreEval: true,
nodejsScope: ecmaFeatures.globalReturn,
impliedStrict: ecmaFeatures.impliedStrict,
ecmaVersion: typeof ecmaVersion === "number" ? ecmaVersion : 6,
sourceType: languageOptions.sourceType || "script",
childVisitorKeys: visitorKeys || evk.KEYS,
fallback: Traverser.getKeys
});
}
/**
* Parses text into an AST. Moved out here because the try-catch prevents
* optimization of functions, so it's best to keep the try-catch as isolated
* as possible
* @param {string} text The text to parse.
* @param {LanguageOptions} languageOptions Options to pass to the parser
* @param {string} filePath The path to the file being parsed.
* @returns {{success: false, error: Problem}|{success: true, sourceCode: SourceCode}}
* An object containing the AST and parser services if parsing was successful, or the error if parsing failed
* @private
*/
function parse(text, languageOptions, filePath) {
const textToParse = stripUnicodeBOM(text).replace(astUtils.shebangPattern, (match, captured) => `//${captured}`);
const { ecmaVersion, sourceType, parser } = languageOptions;
const parserOptions = Object.assign(
{ ecmaVersion, sourceType },
languageOptions.parserOptions,
{
loc: true,
range: true,
raw: true,
tokens: true,
comment: true,
eslintVisitorKeys: true,
eslintScopeManager: true,
filePath
}
);
/*
* Check for parsing errors first. If there's a parsing error, nothing
* else can happen. However, a parsing error does not throw an error
* from this method - it's just considered a fatal error message, a
* problem that ESLint identified just like any other.
*/
try {
debug("Parsing:", filePath);
const parseResult = (typeof parser.parseForESLint === "function")
? parser.parseForESLint(textToParse, parserOptions)
: { ast: parser.parse(textToParse, parserOptions) };
debug("Parsing successful:", filePath);
const ast = parseResult.ast;
const parserServices = parseResult.services || {};
const visitorKeys = parseResult.visitorKeys || evk.KEYS;
debug("Scope analysis:", filePath);
const scopeManager = parseResult.scopeManager || analyzeScope(ast, languageOptions, visitorKeys);
debug("Scope analysis successful:", filePath);
return {
success: true,
/*
* Save all values that `parseForESLint()` returned.
* If a `SourceCode` object is given as the first parameter instead of source code text,
* linter skips the parsing process and reuses the source code object.
* In that case, linter needs all the values that `parseForESLint()` returned.
*/
sourceCode: new SourceCode({
text,
ast,
parserServices,
scopeManager,
visitorKeys
})
};
} catch (ex) {
// If the message includes a leading line number, strip it:
const message = `Parsing error: ${ex.message.replace(/^line \d+:/iu, "").trim()}`;
debug("%s\n%s", message, ex.stack);
return {
success: false,
error: {
ruleId: null,
fatal: true,
severity: 2,
message,
line: ex.lineNumber,
column: ex.column
}
};
}
}
/**
* Gets the scope for the current node
* @param {ScopeManager} scopeManager The scope manager for this AST
* @param {ASTNode} currentNode The node to get the scope of
* @returns {eslint-scope.Scope} The scope information for this node
*/
function getScope(scopeManager, currentNode) {
// On the Program node, get the outermost scope to avoid returning the Node.js special function scope or the ES modules scope.
const inner = currentNode.type !== "Program";
for (let node = currentNode; node; node = node.parent) {
const scope = scopeManager.acquire(node, inner);
if (scope) {
if (scope.type === "function-expression-name") {
return scope.childScopes[0];
}
return scope;
}
}
return scopeManager.scopes[0];
}
/**
* Marks a variable as used in the current scope
* @param {ScopeManager} scopeManager The scope manager for this AST. The scope may be mutated by this function.
* @param {ASTNode} currentNode The node currently being traversed
* @param {LanguageOptions} languageOptions The options used to parse this text
* @param {string} name The name of the variable that should be marked as used.
* @returns {boolean} True if the variable was found and marked as used, false if not.
*/
function markVariableAsUsed(scopeManager, currentNode, languageOptions, name) {
const parserOptions = languageOptions.parserOptions;
const sourceType = languageOptions.sourceType;
const hasGlobalReturn =
(parserOptions.ecmaFeatures && parserOptions.ecmaFeatures.globalReturn) ||
sourceType === "commonjs";
const specialScope = hasGlobalReturn || sourceType === "module";
const currentScope = getScope(scopeManager, currentNode);
// Special Node.js scope means we need to start one level deeper
const initialScope = currentScope.type === "global" && specialScope ? currentScope.childScopes[0] : currentScope;
for (let scope = initialScope; scope; scope = scope.upper) {
const variable = scope.variables.find(scopeVar => scopeVar.name === name);
if (variable) {
variable.eslintUsed = true;
return true;
}
}
return false;
}
/**
* Runs a rule, and gets its listeners
* @param {Rule} rule A normalized rule with a `create` method
* @param {Context} ruleContext The context that should be passed to the rule
* @throws {any} Any error during the rule's `create`
* @returns {Object} A map of selector listeners provided by the rule
*/
function createRuleListeners(rule, ruleContext) {
try {
return rule.create(ruleContext);
} catch (ex) {
ex.message = `Error while loading rule '${ruleContext.id}': ${ex.message}`;
throw ex;
}
}
/**
* Gets all the ancestors of a given node
* @param {ASTNode} node The node
* @returns {ASTNode[]} All the ancestor nodes in the AST, not including the provided node, starting
* from the root node and going inwards to the parent node.
*/
function getAncestors(node) {
const ancestorsStartingAtParent = [];
for (let ancestor = node.parent; ancestor; ancestor = ancestor.parent) {
ancestorsStartingAtParent.push(ancestor);
}
return ancestorsStartingAtParent.reverse();
}
// methods that exist on SourceCode object
const DEPRECATED_SOURCECODE_PASSTHROUGHS = {
getSource: "getText",
getSourceLines: "getLines",
getAllComments: "getAllComments",
getNodeByRangeIndex: "getNodeByRangeIndex",
getComments: "getComments",
getCommentsBefore: "getCommentsBefore",
getCommentsAfter: "getCommentsAfter",
getCommentsInside: "getCommentsInside",
getJSDocComment: "getJSDocComment",
getFirstToken: "getFirstToken",
getFirstTokens: "getFirstTokens",
getLastToken: "getLastToken",
getLastTokens: "getLastTokens",
getTokenAfter: "getTokenAfter",
getTokenBefore: "getTokenBefore",
getTokenByRangeStart: "getTokenByRangeStart",
getTokens: "getTokens",
getTokensAfter: "getTokensAfter",
getTokensBefore: "getTokensBefore",
getTokensBetween: "getTokensBetween"
};
const BASE_TRAVERSAL_CONTEXT = Object.freeze(
Object.keys(DEPRECATED_SOURCECODE_PASSTHROUGHS).reduce(
(contextInfo, methodName) =>
Object.assign(contextInfo, {
[methodName](...args) {
return this.getSourceCode()[DEPRECATED_SOURCECODE_PASSTHROUGHS[methodName]](...args);
}
}),
{}
)
);
/**
* Runs the given rules on the given SourceCode object
* @param {SourceCode} sourceCode A SourceCode object for the given text
* @param {Object} configuredRules The rules configuration
* @param {function(string): Rule} ruleMapper A mapper function from rule names to rules
* @param {string | undefined} parserName The name of the parser in the config
* @param {LanguageOptions} languageOptions The options for parsing the code.
* @param {Object} settings The settings that were enabled in the config
* @param {string} filename The reported filename of the code
* @param {boolean} disableFixes If true, it doesn't make `fix` properties.
* @param {string | undefined} cwd cwd of the cli
* @param {string} physicalFilename The full path of the file on disk without any code block information
* @returns {Problem[]} An array of reported problems
*/
function runRules(sourceCode, configuredRules, ruleMapper, parserName, languageOptions, settings, filename, disableFixes, cwd, physicalFilename) {
const emitter = createEmitter();
const nodeQueue = [];
let currentNode = sourceCode.ast;
Traverser.traverse(sourceCode.ast, {
enter(node, parent) {
node.parent = parent;
nodeQueue.push({ isEntering: true, node });
},
leave(node) {
nodeQueue.push({ isEntering: false, node });
},
visitorKeys: sourceCode.visitorKeys
});
/*
* Create a frozen object with the ruleContext properties and methods that are shared by all rules.
* All rule contexts will inherit from this object. This avoids the performance penalty of copying all the
* properties once for each rule.
*/
const sharedTraversalContext = Object.freeze(
Object.assign(
Object.create(BASE_TRAVERSAL_CONTEXT),
{
getAncestors: () => getAncestors(currentNode),
getDeclaredVariables: sourceCode.scopeManager.getDeclaredVariables.bind(sourceCode.scopeManager),
getCwd: () => cwd,
getFilename: () => filename,
getPhysicalFilename: () => physicalFilename || filename,
getScope: () => getScope(sourceCode.scopeManager, currentNode),
getSourceCode: () => sourceCode,
markVariableAsUsed: name => markVariableAsUsed(sourceCode.scopeManager, currentNode, languageOptions, name),
parserOptions: {
...languageOptions.parserOptions
},
parserPath: parserName,
languageOptions,
parserServices: sourceCode.parserServices,
settings
}
)
);
const lintingProblems = [];
Object.keys(configuredRules).forEach(ruleId => {
const severity = ConfigOps.getRuleSeverity(configuredRules[ruleId]);
// don't load disabled rules
if (severity === 0) {
return;
}
const rule = ruleMapper(ruleId);
if (!rule) {
lintingProblems.push(createLintingProblem({ ruleId }));
return;
}
const messageIds = rule.meta && rule.meta.messages;
let reportTranslator = null;
const ruleContext = Object.freeze(
Object.assign(
Object.create(sharedTraversalContext),
{
id: ruleId,
options: getRuleOptions(configuredRules[ruleId]),
report(...args) {
/*
* Create a report translator lazily.
* In a vast majority of cases, any given rule reports zero errors on a given
* piece of code. Creating a translator lazily avoids the performance cost of
* creating a new translator function for each rule that usually doesn't get
* called.
*
* Using lazy report translators improves end-to-end performance by about 3%
* with Node 8.4.0.
*/
if (reportTranslator === null) {
reportTranslator = createReportTranslator({
ruleId,
severity,
sourceCode,
messageIds,
disableFixes
});
}
const problem = reportTranslator(...args);
if (problem.fix && !(rule.meta && rule.meta.fixable)) {
throw new Error("Fixable rules must set the `meta.fixable` property to \"code\" or \"whitespace\".");
}
if (problem.suggestions && !(rule.meta && rule.meta.hasSuggestions === true)) {
if (rule.meta && rule.meta.docs && typeof rule.meta.docs.suggestion !== "undefined") {
// Encourage migration from the former property name.
throw new Error("Rules with suggestions must set the `meta.hasSuggestions` property to `true`. `meta.docs.suggestion` is ignored by ESLint.");
}
throw new Error("Rules with suggestions must set the `meta.hasSuggestions` property to `true`.");
}
lintingProblems.push(problem);
}
}
)
);
const ruleListeners = createRuleListeners(rule, ruleContext);
/**
* Include `ruleId` in error logs
* @param {Function} ruleListener A rule method that listens for a node.
* @returns {Function} ruleListener wrapped in error handler
*/
function addRuleErrorHandler(ruleListener) {
return function ruleErrorHandler(...listenerArgs) {
try {
return ruleListener(...listenerArgs);
} catch (e) {
e.ruleId = ruleId;
throw e;
}
};
}
// add all the selectors from the rule as listeners
Object.keys(ruleListeners).forEach(selector => {
const ruleListener = timing.enabled
? timing.time(ruleId, ruleListeners[selector])
: ruleListeners[selector];
emitter.on(
selector,
addRuleErrorHandler(ruleListener)
);
});
});
// only run code path analyzer if the top level node is "Program", skip otherwise
const eventGenerator = nodeQueue[0].node.type === "Program"
? new CodePathAnalyzer(new NodeEventGenerator(emitter, { visitorKeys: sourceCode.visitorKeys, fallback: Traverser.getKeys }))
: new NodeEventGenerator(emitter, { visitorKeys: sourceCode.visitorKeys, fallback: Traverser.getKeys });
nodeQueue.forEach(traversalInfo => {
currentNode = traversalInfo.node;
try {
if (traversalInfo.isEntering) {
eventGenerator.enterNode(currentNode);
} else {
eventGenerator.leaveNode(currentNode);
}
} catch (err) {
err.currentNode = currentNode;
throw err;
}
});
return lintingProblems;
}
/**
* Ensure the source code to be a string.
* @param {string|SourceCode} textOrSourceCode The text or source code object.
* @returns {string} The source code text.
*/
function ensureText(textOrSourceCode) {
if (typeof textOrSourceCode === "object") {
const { hasBOM, text } = textOrSourceCode;
const bom = hasBOM ? "\uFEFF" : "";
return bom + text;
}
return String(textOrSourceCode);
}
/**
* Get an environment.
* @param {LinterInternalSlots} slots The internal slots of Linter.
* @param {string} envId The environment ID to get.
* @returns {Environment|null} The environment.
*/
function getEnv(slots, envId) {
return (
(slots.lastConfigArray && slots.lastConfigArray.pluginEnvironments.get(envId)) ||
BuiltInEnvironments.get(envId) ||
null
);
}
/**
* Get a rule.
* @param {LinterInternalSlots} slots The internal slots of Linter.
* @param {string} ruleId The rule ID to get.
* @returns {Rule|null} The rule.
*/
function getRule(slots, ruleId) {
return (
(slots.lastConfigArray && slots.lastConfigArray.pluginRules.get(ruleId)) ||
slots.ruleMap.get(ruleId)
);
}
/**
* Normalize the value of the cwd
* @param {string | undefined} cwd raw value of the cwd, path to a directory that should be considered as the current working directory, can be undefined.
* @returns {string | undefined} normalized cwd
*/
function normalizeCwd(cwd) {
if (cwd) {
return cwd;
}
if (typeof process === "object") {
return process.cwd();
}
// It's more explicit to assign the undefined
// eslint-disable-next-line no-undefined -- Consistently returning a value
return undefined;
}
/**
* The map to store private data.
* @type {WeakMap<Linter, LinterInternalSlots>}
*/
const internalSlotsMap = new WeakMap();
/**
* Throws an error when the given linter is in flat config mode.
* @param {Linter} linter The linter to check.
* @returns {void}
* @throws {Error} If the linter is in flat config mode.
*/
function assertEslintrcConfig(linter) {
const { configType } = internalSlotsMap.get(linter);
if (configType === "flat") {
throw new Error("This method cannot be used with flat config. Add your entries directly into the config array.");
}
}
//------------------------------------------------------------------------------
// Public Interface
//------------------------------------------------------------------------------
/**
* Object that is responsible for verifying JavaScript text
* @name Linter
*/
class Linter {
/**
* Initialize the Linter.
* @param {Object} [config] the config object
* @param {string} [config.cwd] path to a directory that should be considered as the current working directory, can be undefined.
* @param {"flat"|"eslintrc"} [config.configType="eslintrc"] the type of config used.
*/
constructor({ cwd, configType } = {}) {
internalSlotsMap.set(this, {
cwd: normalizeCwd(cwd),
lastConfigArray: null,
lastSourceCode: null,
lastSuppressedMessages: [],
configType, // TODO: Remove after flat config conversion
parserMap: new Map([["espree", espree]]),
ruleMap: new Rules()
});
this.version = pkg.version;
}
/**
* Getter for package version.
* @static
* @returns {string} The version from package.json.
*/
static get version() {
return pkg.version;
}
/**
* Same as linter.verify, except without support for processors.
* @param {string|SourceCode} textOrSourceCode The text to parse or a SourceCode object.
* @param {ConfigData} providedConfig An ESLintConfig instance to configure everything.
* @param {VerifyOptions} [providedOptions] Options for the verification, such as the filename of the file being checked.
* @throws {Error} If an error occurs during rule execution.
* @returns {(LintMessage|SuppressedLintMessage)[]} The results as an array of messages or an empty array if no messages.
*/
_verifyWithoutProcessors(textOrSourceCode, providedConfig, providedOptions) {
const slots = internalSlotsMap.get(this);
const config = providedConfig || {};
const options = normalizeVerifyOptions(providedOptions, config);
let text;
// evaluate arguments
if (typeof textOrSourceCode === "string") {
slots.lastSourceCode = null;
text = textOrSourceCode;
} else {
slots.lastSourceCode = textOrSourceCode;
text = textOrSourceCode.text;
}
// Resolve parser.
let parserName = DEFAULT_PARSER_NAME;
let parser = espree;
if (typeof config.parser === "object" && config.parser !== null) {
parserName = config.parser.filePath;
parser = config.parser.definition;
} else if (typeof config.parser === "string") {
if (!slots.parserMap.has(config.parser)) {
return [{
ruleId: null,
fatal: true,
severity: 2,
message: `Configured parser '${config.parser}' was not found.`,
line: 0,
column: 0
}];
}
parserName = config.parser;
parser = slots.parserMap.get(config.parser);
}
// search and apply "eslint-env *".
const envInFile = options.allowInlineConfig && !options.warnInlineConfig
? findEslintEnv(text)
: {};
const resolvedEnvConfig = Object.assign({ builtin: true }, config.env, envInFile);
const enabledEnvs = Object.keys(resolvedEnvConfig)
.filter(envName => resolvedEnvConfig[envName])
.map(envName => getEnv(slots, envName))
.filter(env => env);
const parserOptions = resolveParserOptions(parser, config.parserOptions || {}, enabledEnvs);
const configuredGlobals = resolveGlobals(config.globals || {}, enabledEnvs);
const settings = config.settings || {};
const languageOptions = createLanguageOptions({
globals: config.globals,
parser,
parserOptions
});
if (!slots.lastSourceCode) {
const parseResult = parse(
text,
languageOptions,
options.filename
);
if (!parseResult.success) {
return [parseResult.error];
}
slots.lastSourceCode = parseResult.sourceCode;
} else {
/*
* If the given source code object as the first argument does not have scopeManager, analyze the scope.
* This is for backward compatibility (SourceCode is frozen so it cannot rebind).
*/
if (!slots.lastSourceCode.scopeManager) {
slots.lastSourceCode = new SourceCode({
text: slots.lastSourceCode.text,
ast: slots.lastSourceCode.ast,
parserServices: slots.lastSourceCode.parserServices,
visitorKeys: slots.lastSourceCode.visitorKeys,
scopeManager: analyzeScope(slots.lastSourceCode.ast, languageOptions)
});
}
}
const sourceCode = slots.lastSourceCode;
const commentDirectives = options.allowInlineConfig
? getDirectiveComments(sourceCode.ast, ruleId => getRule(slots, ruleId), options.warnInlineConfig)
: { configuredRules: {}, enabledGlobals: {}, exportedVariables: {}, problems: [], disableDirectives: [] };
// augment global scope with declared global variables
addDeclaredGlobals(
sourceCode.scopeManager.scopes[0],
configuredGlobals,
{ exportedVariables: commentDirectives.exportedVariables, enabledGlobals: commentDirectives.enabledGlobals }
);
const configuredRules = Object.assign({}, config.rules, commentDirectives.configuredRules);
let lintingProblems;
try {
lintingProblems = runRules(
sourceCode,
configuredRules,
ruleId => getRule(slots, ruleId),
parserName,
languageOptions,
settings,
options.filename,
options.disableFixes,
slots.cwd,
providedOptions.physicalFilename
);
} catch (err) {
err.message += `\nOccurred while linting ${options.filename}`;
debug("An error occurred while traversing");
debug("Filename:", options.filename);
if (err.currentNode) {
const { line } = err.currentNode.loc.start;
debug("Line:", line);
err.message += `:${line}`;
}
debug("Parser Options:", parserOptions);
debug("Parser Path:", parserName);
debug("Settings:", settings);
if (err.ruleId) {
err.message += `\nRule: "${err.ruleId}"`;
}
throw err;
}
return applyDisableDirectives({
directives: commentDirectives.disableDirectives,
disableFixes: options.disableFixes,
problems: lintingProblems
.concat(commentDirectives.problems)
.sort((problemA, problemB) => problemA.line - problemB.line || problemA.column - problemB.column),
reportUnusedDisableDirectives: options.reportUnusedDisableDirectives
});
}
/**
* Verifies the text against the rules specified by the second argument.
* @param {string|SourceCode} textOrSourceCode The text to parse or a SourceCode object.
* @param {ConfigData|ConfigArray} config An ESLintConfig instance to configure everything.
* @param {(string|(VerifyOptions&ProcessorOptions))} [filenameOrOptions] The optional filename of the file being checked.
* If this is not set, the filename will default to '<input>' in the rule context. If
* an object, then it has "filename", "allowInlineConfig", and some properties.
* @returns {LintMessage[]} The results as an array of messages or an empty array if no messages.
*/
verify(textOrSourceCode, config, filenameOrOptions) {
debug("Verify");
const { configType } = internalSlotsMap.get(this);
const options = typeof filenameOrOptions === "string"
? { filename: filenameOrOptions }
: filenameOrOptions || {};
if (config) {
if (configType === "flat") {
/*
* Because of how Webpack packages up the files, we can't
* compare directly to `FlatConfigArray` using `instanceof`
* because it's not the same `FlatConfigArray` as in the tests.
* So, we work around it by assuming an array is, in fact, a
* `FlatConfigArray` if it has a `getConfig()` method.
*/
let configArray = config;
if (!Array.isArray(config) || typeof config.getConfig !== "function") {
configArray = new FlatConfigArray(config);
configArray.normalizeSync();
}
return this._distinguishSuppressedMessages(this._verifyWithFlatConfigArray(textOrSourceCode, configArray, options, true));
}
if (typeof config.extractConfig === "function") {
return this._distinguishSuppressedMessages(this._verifyWithConfigArray(textOrSourceCode, config, options));
}
}
/*
* If we get to here, it means `config` is just an object rather
* than a config array so we can go right into linting.
*/
/*
* `Linter` doesn't support `overrides` property in configuration.
* So we cannot apply multiple processors.
*/
if (options.preprocess || options.postprocess) {
return this._distinguishSuppressedMessages(this._verifyWithProcessor(textOrSourceCode, config, options));
}
return this._distinguishSuppressedMessages(this._verifyWithoutProcessors(textOrSourceCode, config, options));
}
/**
* Verify with a processor.
* @param {string|SourceCode} textOrSourceCode The source code.
* @param {FlatConfig} config The config array.
* @param {VerifyOptions&ProcessorOptions} options The options.
* @param {FlatConfigArray} [configForRecursive] The `ConfigArray` object to apply multiple processors recursively.
* @returns {(LintMessage|SuppressedLintMessage)[]} The found problems.
*/
_verifyWithFlatConfigArrayAndProcessor(textOrSourceCode, config, options, configForRecursive) {
const filename = options.filename || "<input>";
const filenameToExpose = normalizeFilename(filename);
const physicalFilename = options.physicalFilename || filenameToExpose;
const text = ensureText(textOrSourceCode);
const preprocess = options.preprocess || (rawText => [rawText]);
const postprocess = options.postprocess || (messagesList => messagesList.flat());
const filterCodeBlock =
options.filterCodeBlock ||
(blockFilename => blockFilename.endsWith(".js"));
const originalExtname = path.extname(filename);
const messageLists = preprocess(text, filenameToExpose).map((block, i) => {
debug("A code block was found: %o", block.filename || "(unnamed)");
// Keep the legacy behavior.
if (typeof block === "string") {
return this._verifyWithFlatConfigArrayAndWithoutProcessors(block, config, options);
}
const blockText = block.text;
const blockName = path.join(filename, `${i}_${block.filename}`);
// Skip this block if filtered.
if (!filterCodeBlock(blockName, blockText)) {
debug("This code block was skipped.");
return [];
}
// Resolve configuration again if the file content or extension was changed.
if (configForRecursive && (text !== blockText || path.extname(blockName) !== originalExtname)) {
debug("Resolving configuration again because the file content or extension was changed.");
return this._verifyWithFlatConfigArray(
blockText,
configForRecursive,
{ ...options, filename: blockName, physicalFilename }
);
}
// Does lint.
return this._verifyWithFlatConfigArrayAndWithoutProcessors(
blockText,
config,
{ ...options, filename: blockName, physicalFilename }
);
});
return postprocess(messageLists, filenameToExpose);
}
/**
* Same as linter.verify, except without support for processors.
* @param {string|SourceCode} textOrSourceCode The text to parse or a SourceCode object.
* @param {FlatConfig} providedConfig An ESLintConfig instance to configure everything.
* @param {VerifyOptions} [providedOptions] Options for the verification, such as the filename of the file being checked.
* @throws {Error} If an error occurs during rule execution.
* @returns {(LintMessage|SuppressedLintMessage)[]} The results as an array of messages or an empty array if no messages.
*/
_verifyWithFlatConfigArrayAndWithoutProcessors(textOrSourceCode, providedConfig, providedOptions) {
const slots = internalSlotsMap.get(this);
const config = providedConfig || {};
const options = normalizeVerifyOptions(providedOptions, config);
let text;
// evaluate arguments
if (typeof textOrSourceCode === "string") {
slots.lastSourceCode = null;
text = textOrSourceCode;
} else {
slots.lastSourceCode = textOrSourceCode;
text = textOrSourceCode.text;
}
const languageOptions = config.languageOptions;
languageOptions.ecmaVersion = normalizeEcmaVersionForLanguageOptions(
languageOptions.ecmaVersion
);
// add configured globals and language globals
const configuredGlobals = {
...(getGlobalsForEcmaVersion(languageOptions.ecmaVersion)),
...(languageOptions.sourceType === "commonjs" ? globals.commonjs : void 0),
...languageOptions.globals
};
// Espree expects this information to be passed in
if (isEspree(languageOptions.parser)) {
const parserOptions = languageOptions.parserOptions;
if (languageOptions.sourceType) {
parserOptions.sourceType = languageOptions.sourceType;
if (
parserOptions.sourceType === "module" &&
parserOptions.ecmaFeatures &&
parserOptions.ecmaFeatures.globalReturn
) {
parserOptions.ecmaFeatures.globalReturn = false;
}
}
}
const settings = config.settings || {};
if (!slots.lastSourceCode) {
const parseResult = parse(
text,
languageOptions,
options.filename
);
if (!parseResult.success) {
return [parseResult.error];
}
slots.lastSourceCode = parseResult.sourceCode;
} else {
/*
* If the given source code object as the first argument does not have scopeManager, analyze the scope.
* This is for backward compatibility (SourceCode is frozen so it cannot rebind).
*/
if (!slots.lastSourceCode.scopeManager) {
slots.lastSourceCode = new SourceCode({
text: slots.lastSourceCode.text,
ast: slots.lastSourceCode.ast,
parserServices: slots.lastSourceCode.parserServices,
visitorKeys: slots.lastSourceCode.visitorKeys,
scopeManager: analyzeScope(slots.lastSourceCode.ast, languageOptions)
});
}
}
const sourceCode = slots.lastSourceCode;
const commentDirectives = options.allowInlineConfig
? getDirectiveComments(
sourceCode.ast,
ruleId => getRuleFromConfig(ruleId, config),
options.warnInlineConfig
)
: { configuredRules: {}, enabledGlobals: {}, exportedVariables: {}, problems: [], disableDirectives: [] };
// augment global scope with declared global variables
addDeclaredGlobals(
sourceCode.scopeManager.scopes[0],
configuredGlobals,
{ exportedVariables: commentDirectives.exportedVariables, enabledGlobals: commentDirectives.enabledGlobals }
);
const configuredRules = Object.assign({}, config.rules, commentDirectives.configuredRules);
let lintingProblems;
try {
lintingProblems = runRules(
sourceCode,
configuredRules,
ruleId => getRuleFromConfig(ruleId, config),
void 0,
languageOptions,
settings,
options.filename,
options.disableFixes,
slots.cwd,
providedOptions.physicalFilename
);
} catch (err) {
err.message += `\nOccurred while linting ${options.filename}`;
debug("An error occurred while traversing");
debug("Filename:", options.filename);
if (err.currentNode) {
const { line } = err.currentNode.loc.start;
debug("Line:", line);
err.message += `:${line}`;
}
debug("Parser Options:", languageOptions.parserOptions);
// debug("Parser Path:", parserName);
debug("Settings:", settings);
if (err.ruleId) {
err.message += `\nRule: "${err.ruleId}"`;
}
throw err;
}
return applyDisableDirectives({
directives: commentDirectives.disableDirectives,
disableFixes: options.disableFixes,
problems: lintingProblems
.concat(commentDirectives.problems)
.sort((problemA, problemB) => problemA.line - problemB.line || problemA.column - problemB.column),
reportUnusedDisableDirectives: options.reportUnusedDisableDirectives
});
}
/**
* Verify a given code with `ConfigArray`.
* @param {string|SourceCode} textOrSourceCode The source code.
* @param {ConfigArray} configArray The config array.
* @param {VerifyOptions&ProcessorOptions} options The options.
* @returns {(LintMessage|SuppressedLintMessage)[]} The found problems.
*/
_verifyWithConfigArray(textOrSourceCode, configArray, options) {
debug("With ConfigArray: %s", options.filename);
// Store the config array in order to get plugin envs and rules later.
internalSlotsMap.get(this).lastConfigArray = configArray;
// Extract the final config for this file.
const config = configArray.extractConfig(options.filename);
const processor =
config.processor &&
configArray.pluginProcessors.get(config.processor);
// Verify.
if (processor) {
debug("Apply the processor: %o", config.processor);
const { preprocess, postprocess, supportsAutofix } = processor;
const disableFixes = options.disableFixes || !supportsAutofix;
return this._verifyWithProcessor(
textOrSourceCode,
config,
{ ...options, disableFixes, postprocess, preprocess },
configArray
);
}
return this._verifyWithoutProcessors(textOrSourceCode, config, options);
}
/**
* Verify a given code with a flat config.
* @param {string|SourceCode} textOrSourceCode The source code.
* @param {FlatConfigArray} configArray The config array.
* @param {VerifyOptions&ProcessorOptions} options The options.
* @param {boolean} [firstCall=false] Indicates if this is being called directly
* from verify(). (TODO: Remove once eslintrc is removed.)
* @returns {(LintMessage|SuppressedLintMessage)[]} The found problems.
*/
_verifyWithFlatConfigArray(textOrSourceCode, configArray, options, firstCall = false) {
debug("With flat config: %s", options.filename);
// we need a filename to match configs against
const filename = options.filename || "<input>";
// Store the config array in order to get plugin envs and rules later.
internalSlotsMap.get(this).lastConfigArray = configArray;
const config = configArray.getConfig(filename);
// Verify.
if (config.processor) {
debug("Apply the processor: %o", config.processor);
const { preprocess, postprocess, supportsAutofix } = config.processor;
const disableFixes = options.disableFixes || !supportsAutofix;
return this._verifyWithFlatConfigArrayAndProcessor(
textOrSourceCode,
config,
{ ...options, filename, disableFixes, postprocess, preprocess },
configArray
);
}
// check for options-based processing
if (firstCall && (options.preprocess || options.postprocess)) {
return this._verifyWithFlatConfigArrayAndProcessor(textOrSourceCode, config, options);
}
return this._verifyWithFlatConfigArrayAndWithoutProcessors(textOrSourceCode, config, options);
}
/**
* Verify with a processor.
* @param {string|SourceCode} textOrSourceCode The source code.
* @param {ConfigData|ExtractedConfig} config The config array.
* @param {VerifyOptions&ProcessorOptions} options The options.
* @param {ConfigArray} [configForRecursive] The `ConfigArray` object to apply multiple processors recursively.
* @returns {(LintMessage|SuppressedLintMessage)[]} The found problems.
*/
_verifyWithProcessor(textOrSourceCode, config, options, configForRecursive) {
const filename = options.filename || "<input>";
const filenameToExpose = normalizeFilename(filename);
const physicalFilename = options.physicalFilename || filenameToExpose;
const text = ensureText(textOrSourceCode);
const preprocess = options.preprocess || (rawText => [rawText]);
const postprocess = options.postprocess || (messagesList => messagesList.flat());
const filterCodeBlock =
options.filterCodeBlock ||
(blockFilename => blockFilename.endsWith(".js"));
const originalExtname = path.extname(filename);
const messageLists = preprocess(text, filenameToExpose).map((block, i) => {
debug("A code block was found: %o", block.filename || "(unnamed)");
// Keep the legacy behavior.
if (typeof block === "string") {
return this._verifyWithoutProcessors(block, config, options);
}
const blockText = block.text;
const blockName = path.join(filename, `${i}_${block.filename}`);
// Skip this block if filtered.
if (!filterCodeBlock(blockName, blockText)) {
debug("This code block was skipped.");
return [];
}
// Resolve configuration again if the file content or extension was changed.
if (configForRecursive && (text !== blockText || path.extname(blockName) !== originalExtname)) {
debug("Resolving configuration again because the file content or extension was changed.");
return this._verifyWithConfigArray(
blockText,
configForRecursive,
{ ...options, filename: blockName, physicalFilename }
);
}
// Does lint.
return this._verifyWithoutProcessors(
blockText,
config,
{ ...options, filename: blockName, physicalFilename }
);
});
return postprocess(messageLists, filenameToExpose);
}
/**
* Given a list of reported problems, distinguish problems between normal messages and suppressed messages.
* The normal messages will be returned and the suppressed messages will be stored as lastSuppressedMessages.
* @param {Problem[]} problems A list of reported problems.
* @returns {LintMessage[]} A list of LintMessage.
*/
_distinguishSuppressedMessages(problems) {
const messages = [];
const suppressedMessages = [];
const slots = internalSlotsMap.get(this);
for (const problem of problems) {
if (problem.suppressions) {
suppressedMessages.push(problem);
} else {
messages.push(problem);
}
}
slots.lastSuppressedMessages = suppressedMessages;
return messages;
}
/**
* Gets the SourceCode object representing the parsed source.
* @returns {SourceCode} The SourceCode object.
*/
getSourceCode() {
return internalSlotsMap.get(this).lastSourceCode;
}
/**
* Gets the list of SuppressedLintMessage produced in the last running.
* @returns {SuppressedLintMessage[]} The list of SuppressedLintMessage
*/
getSuppressedMessages() {
return internalSlotsMap.get(this).lastSuppressedMessages;
}
/**
* Defines a new linting rule.
* @param {string} ruleId A unique rule identifier
* @param {Function | Rule} ruleModule Function from context to object mapping AST node types to event handlers
* @returns {void}
*/
defineRule(ruleId, ruleModule) {
assertEslintrcConfig(this);
internalSlotsMap.get(this).ruleMap.define(ruleId, ruleModule);
}
/**
* Defines many new linting rules.
* @param {Record<string, Function | Rule>} rulesToDefine map from unique rule identifier to rule
* @returns {void}
*/
defineRules(rulesToDefine) {
assertEslintrcConfig(this);
Object.getOwnPropertyNames(rulesToDefine).forEach(ruleId => {
this.defineRule(ruleId, rulesToDefine[ruleId]);
});
}
/**
* Gets an object with all loaded rules.
* @returns {Map<string, Rule>} All loaded rules
*/
getRules() {
assertEslintrcConfig(this);
const { lastConfigArray, ruleMap } = internalSlotsMap.get(this);
return new Map(function *() {
yield* ruleMap;
if (lastConfigArray) {
yield* lastConfigArray.pluginRules;
}
}());
}
/**
* Define a new parser module
* @param {string} parserId Name of the parser
* @param {Parser} parserModule The parser object
* @returns {void}
*/
defineParser(parserId, parserModule) {
assertEslintrcConfig(this);
internalSlotsMap.get(this).parserMap.set(parserId, parserModule);
}
/**
* Performs multiple autofix passes over the text until as many fixes as possible
* have been applied.
* @param {string} text The source text to apply fixes to.
* @param {ConfigData|ConfigArray|FlatConfigArray} config The ESLint config object to use.
* @param {VerifyOptions&ProcessorOptions&FixOptions} options The ESLint options object to use.
* @returns {{fixed:boolean,messages:LintMessage[],output:string}} The result of the fix operation as returned from the
* SourceCodeFixer.
*/
verifyAndFix(text, config, options) {
let messages = [],
fixedResult,
fixed = false,
passNumber = 0,
currentText = text;
const debugTextDescription = options && options.filename || `${text.slice(0, 10)}...`;
const shouldFix = options && typeof options.fix !== "undefined" ? options.fix : true;
/**
* This loop continues until one of the following is true:
*
* 1. No more fixes have been applied.
* 2. Ten passes have been made.
*
* That means anytime a fix is successfully applied, there will be another pass.
* Essentially, guaranteeing a minimum of two passes.
*/
do {
passNumber++;
debug(`Linting code for ${debugTextDescription} (pass ${passNumber})`);
messages = this.verify(currentText, config, options);
debug(`Generating fixed text for ${debugTextDescription} (pass ${passNumber})`);
fixedResult = SourceCodeFixer.applyFixes(currentText, messages, shouldFix);
/*
* stop if there are any syntax errors.
             * 'fixedResult.output' is an empty string.
*/
if (messages.length === 1 && messages[0].fatal) {
break;
}
// keep track if any fixes were ever applied - important for return value
fixed = fixed || fixedResult.fixed;
// update to use the fixed output instead of the original text
currentText = fixedResult.output;
} while (
fixedResult.fixed &&
passNumber < MAX_AUTOFIX_PASSES
);
/*
* If the last result had fixes, we need to lint again to be sure we have
* the most up-to-date information.
*/
if (fixedResult.fixed) {
fixedResult.messages = this.verify(currentText, config, options);
}
// ensure the last result properly reflects if fixes were done
fixedResult.fixed = fixed;
fixedResult.output = currentText;
return fixedResult;
}
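    // Usage sketch (not part of the original source; assumes a Linter instance `linter`):
    //   const { fixed, output, messages } = linter.verifyAndFix(code, config, { filename: "file.js" });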
}
module.exports = {
Linter,
/**
* Get the internal slots of a given Linter instance for tests.
* @param {Linter} instance The Linter instance to get.
* @returns {LinterInternalSlots} The internal slots.
*/
getLinterInternalSlots(instance) {
return internalSlotsMap.get(instance);
}
}; | PypiClean |
/pybw_comic-23.8.10.4.tar.gz/pybw_comic-23.8.10.4/pybw_comic/engines/manhuaDB.py | '''
Selenium-based downloader for comics hosted on manhuadb.com.
'''
import os
import sys
import warnings
import time
import shutil
from glob import glob
from pathlib import Path
from tqdm import tqdm
import re
import random
import zhconv
from collections import OrderedDict
import requests
from bs4 import BeautifulSoup
from seleniumwire import webdriver
from selenium.webdriver.common.by import By
warnings.filterwarnings("ignore")
def init_driver(headless=True):
global driver
opts = webdriver.ChromeOptions()
opts.add_experimental_option('excludeSwitches', ['enable-logging'])
opts.add_argument('--ignore-certificate-errors')
opts.add_argument('--disable-notifications')
opts.headless = headless
driver = webdriver.Chrome(options=opts)
time.sleep(1)
return
def get_dic_xpath():
dic_xpath = {
'catelogue-booktopic': '/html/body/div[2]/div[2]/div[1]/div[1]/div/ul[2]/li[2]/a',
'catelogue-bookstate': '/html/body/div[2]/div[2]/div[1]/div[1]/div/ul[2]/li[1]/a',
'catelogue-bookname': '/html/body/div[2]/div[2]/div[1]/div[1]/div/h1',
'catelogue-chapters': [
['/html/body/div[2]/div[2]/div[1]/div[3]/ul/li[1]/a/span', '/html/body/div[2]/div[2]/div[1]/div[3]/div/div[1]/ol/li/a'],
['/html/body/div[2]/div[2]/div[1]/div[3]/ul/li[2]/a/span', '/html/body/div[2]/div[2]/div[1]/div[3]/div/div[2]/ol/li/a'],
['/html/body/div[2]/div[2]/div[1]/div[3]/ul/li[3]/a/span', '/html/body/div[2]/div[2]/div[1]/div[3]/div/div[3]/ol/li/a'],
],
'catelogue-chapter-Property_text': 'text',
'catelogue-chapter-Property_title': 'title',
'catelogue-chapter-Property_url_short': '',
'catelogue-chapter-Property_url_full': 'href',
'onechapter-pages': '//*[@id="page-selector"]/option',
'onechapter-pages-Property_text': 'text',
'onechapter-pages-Property_url_part': 'value',
'onechapter-pages-Property_url_full': 'href',
'onechapter-image': '//*[@id="all"]/div/div[2]/img',
'onechapter-image-Property_url': 'src'
}
return dic_xpath
def init_dic_xpath(dic=get_dic_xpath()):
global dic_xpath
dic_xpath = dic
return
class CleanText():
    '''
    Normalize scraped text: convert traditional Chinese to simplified and collapse repeated spaces.
    '''
def __init__(self, text):
self.text = text
self.clean_text = self.get_clean_text(self.text)
@classmethod
def get_clean_text(cls, text):
text = cls.to_CN(text)
text = cls.clean_space(text)
return text
@classmethod
def to_CN(cls, text):
text = zhconv.convert(text, 'zh-cn')
return text
@classmethod
def clean_space(cls, text):
text = re.sub(' +', ' ', text)
return text
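# Quick check of CleanText (zhconv maps traditional to simplified Chinese):
#   CleanText('漫畫  更新').clean_text == '漫画 更新'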
class DicXpath():
    '''
    Thin wrapper that exposes the XPath dictionary entries as instance attributes.
    '''
def __init__(self, dic_xpath):
self.dic_xpath = dic_xpath
for k, v in dic_xpath.items():
            setattr(self, k, v)
def time_parser(sec):
total_sec = sec
sec = int(total_sec % 60)
total_min = total_sec // 60
mins = int(total_min % 60)
hour = int(total_min // 60)
return hour, mins, sec
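# Worked example: time_parser(3725) -> (1, 2, 5), i.e. 1 hour, 2 minutes and 5 seconds.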
def find_empty_subdir(dire):
if not os.path.isdir(dire):
raise Exception('{} is not dir'.format(dire))
subs = os.listdir(dire)
subs = [Path(dire).joinpath(i) for i in subs]
subs = [i for i in subs if os.path.isdir(i)]
    empty_sub = [i for i in subs if not os.listdir(i)]
empty_sub = [i.as_posix() for i in empty_sub]
return empty_sub
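# Illustrative use: find_empty_subdir('SomeBook') lists chapter folders that never received any images;
# Book.download() below uses it to warn which chapters need to be downloaded again.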
class Catelogue():
    '''
    Parse a comic catalogue page: book topic, state, name and the full chapter list.
    '''
def __init__(self, url='https://www.manhuadb.com/manhua/2598'):
self.url = url
self.url_prefix = re.findall('.+com', self.url)[0]
driver.get(self.url)
self._click_webpage()
self.booktopic = self.get_booktopic()
self.bookstate = self.get_bookstate()
self.bookname = self.get_comic_bookname()
self.chapters = self.get_chapters()
# self.chapters.reverse()
self.chapter_amount = len(self.chapters)
def _click_webpage(self):
to_click = {
'rank_order': ['', -1],
'unfold_chapters': ['', 0]
}
for k, v in to_click.items():
try:
if v and v[0]:
self._click_by_xpath(v[0], v[1])
except:
pass
return
def _click_by_xpath(self, pat_xpath, order=0):
try:
time.sleep(3)
_finds = driver.find_elements(By.XPATH, pat_xpath)
_find = _finds[order]
_find.click()
time.sleep(3)
except:
pass
return
def get_booktopic(self):
        finds = driver.find_elements(By.XPATH, dic_xpath['catelogue-booktopic'])
if finds:
find = finds[0]
name = find.text
name = zhconv.convert(name, 'zh-cn')
name = name.strip().split(' ')[:3]
name = ' '.join(name)
return name
        else:
            # The book topic is optional, so fall back to an empty string instead of aborting.
            return ''
def get_bookstate(self):
        finds = driver.find_elements(By.XPATH, dic_xpath['catelogue-bookstate'])
if finds:
find = finds[0]
name = find.text
name = zhconv.convert(name, 'zh-cn')
if '完结' in name:
return '完结'
else:
return '连载中'
else:
print('\nCannot find bookstate in webpage, please check xpath')
print('Program exit ...\n')
sys.exit()
def get_comic_bookname(self):
finds = driver.find_elements(By.XPATH, dic_xpath['catelogue-bookname'])
if finds:
find = finds[0]
name = find.text
name = zhconv.convert(name, 'zh-cn')
name = name.split('\n')[0].strip()
return name
else:
print('\nCannot find bookname in webpage, please check xpath')
print('Program exit ...\n')
sys.exit()
def get_chapters(self):
finds = OrderedDict()
if type(dic_xpath['catelogue-chapters']) == str:
## Not used for manhuaDB
finds = driver.find_elements(By.XPATH, dic_xpath['catelogue-chapters'])
elif type(dic_xpath['catelogue-chapters']) == list:
for [i, j] in dic_xpath['catelogue-chapters']:
try:
i = driver.find_element(By.XPATH, i).text
finds[i] = driver.find_elements(By.XPATH, j)
except:
pass
finds_keys = [i for i in finds.keys()]
zip_chapters = []
for k in finds_keys:
finds_i = finds[k]
chapters_i = {}
chapters_i['text'] = [CleanText(i.get_property('text')).clean_text for i in finds_i]
chapters_i['title'] = [CleanText(i.get_property(dic_xpath['catelogue-chapter-Property_title'])).clean_text for i in finds_i]
chapters_i['url_short'] = [i.get_property(dic_xpath['catelogue-chapter-Property_url_short']) for i in finds_i]
if dic_xpath['catelogue-chapter-Property_url_full']:
chapters_i['url'] = [i.get_property(dic_xpath['catelogue-chapter-Property_url_full']) for i in finds_i]
else:
chapters_i['url'] = [self.url_prefix + i for i in chapters_i['url_short']]
chapters_i['text'] = ['{}_{} {}'.format(k, str(i+1).zfill(3), text_i) for i, text_i in enumerate(chapters_i['text'])]
for i in zip(chapters_i['text'], chapters_i['url'], chapters_i['title'], chapters_i['url_short']):
zip_chapters.append(list(i))
return zip_chapters
def driver_load_url(url):
success = {'state': False,
'stop': 30,
'count': 0,
'sleep': 1,
'if_break': True,
'break': 1
}
while not success['state']:
if success['break']:
if success['count'] > success['break']:
break
if success['count'] > success['stop']:
raise Exception('Cannot find pages')
try:
driver.get(url)
success['state'] = True
except:
success['count'] += 1
time.sleep(success['sleep'])
if success['sleep'] < 5:
success['sleep'] += 0.5
return
class OneChapter():
    '''
    Open a single chapter page and collect the image URL of every page in it.
    '''
def __init__(self, url='https://www.1kkk.com/vol1-62275/', headless=True):
self.url = url
self._headless = headless
init_driver(headless=self._headless)
self._driver_load_url(self.url)
self._click_webpage()
self.image_url = self.get_image_url()
driver.close()
def _click_webpage(self):
to_click = {
'id': ['', 0],
}
for k, v in to_click.items():
try:
if v and v[0]:
self._click_by_xpath(v[0], v[1])
except:
pass
return
def _click_by_xpath(self, pat_xpath, order=0):
try:
time.sleep(3)
_finds = driver.find_elements(By.XPATH, pat_xpath)
_find = _finds[order]
_find.click()
time.sleep(3)
except:
pass
return
def get_image_url(self):
success = {'state': False, 'stop': 30, 'count': 0, 'sleep': 0.5}
while not success['state']:
if success['count'] > success['stop']:
raise Exception('Cannot find element')
try:
finds = driver.find_elements(By.XPATH, dic_xpath['onechapter-image'])
find = finds[0]
success['state'] = True
except:
success['count'] += 1
time.sleep(success['sleep'])
find = finds[0]
img_url = find.get_property(dic_xpath['onechapter-image-Property_url'])
return img_url
def _get_index_end(self):
finds = driver.find_elements(By.XPATH, '//*[@id="chapterpager"]/a')
end = finds[-1].text
end = int(end)
return end
def _next_page(self):
## Not used for manhuaDB, because some errors occur
time.sleep(0.5)
dic = {
'next_page': '/html/body/div[2]/div[2]/nav/div/a'
}
success = {'state': False, 'stop': 10, 'count': 0, 'sleep': 0.5}
while not success['state']:
if success['count'] > success['stop']:
raise Exception('Reach try limit, cannot find element')
try:
finds = driver.find_elements(By.XPATH, dic['next_page'])
find = [i for i in finds if i.text.strip() == '下页'][0]
find.click()
success['state'] = True
except:
print('try: {}'.format(success['count']))
success['count'] += 1
time.sleep(success['sleep'])
return
def cal_images(self):
init_driver(headless=self._headless)
self._driver_load_url(self.url)
self._click_webpage()
images_url = []
## Not used for manhuaDB
# images_url.append(self.image_url)
pages = driver.find_elements(By.XPATH, dic_xpath['onechapter-pages'])
pages_text = [i.text.strip() for i in pages]
len_pages = int(len(pages) / 2)
for i in range(len_pages):
finds = driver.find_elements(By.XPATH, dic_xpath['onechapter-pages'])
find = finds[i]
if pages_text[i] != find.text.strip():
                raise Exception('The element found is not the expected one')
find.click()
'''
This sleep time is very important
After test, 3.5 is good for manhuaDB
3.5s reduce a little more than 50% errors than 3s and 2.5s
3s and 2.5s have almost the same amount of errors
'''
time.sleep(3.5)
count = 0
while count <= 30:
if i == 0:
time.sleep(3)
image_url_i = self.get_image_url()
## for code debug
# image_url_i = onechap.get_image_url()
break
image_url_i = self.get_image_url()
## for code debug
# image_url_i = onechap.get_image_url()
temp_urls = images_url + [image_url_i]
if image_url_i == images_url[-1] or len(temp_urls) != len(set(temp_urls)):
print('{} {} | count {}'.format(i+1, pages_text[i], count+1))
count += 1
time.sleep(1)
continue
else:
break
if not image_url_i:
break
images_url.append(image_url_i)
driver.close()
        if len(images_url) > 1 and images_url[0] == images_url[1]:
            images_url = images_url[1:]
        if len(images_url) > 1 and images_url[-1] == images_url[-2]:
            images_url = images_url[:-1]
if len(images_url) != len(set(images_url)):
            raise Exception('image URLs are repeated')
zip_images = []
for i, url_i in enumerate(images_url):
i = str(i+1).zfill(6)
zip_images.append(['page_{}'.format(i), url_i])
self.images = zip_images
return
@classmethod
def _driver_load_url(cls, url):
success = {'state': False,
'stop': 30,
'count': 0,
'sleep': 1,
'if_break': True,
'break': 1
}
while not success['state']:
if success['break']:
if success['count'] > success['break']:
break
if success['count'] > success['stop']:
raise Exception('Cannot find pages')
try:
driver.get(url)
success['state'] = True
except:
success['count'] += 1
time.sleep(success['sleep'])
if success['sleep'] < 5:
success['sleep'] += 0.5
return
class ImageDownloader():
    '''
    Download a single image, sending a Referer header so the image host accepts the request.
    '''
def __init__(self, url, url_from, filetype='auto'):
self.url = url
self.url_from = url_from
self.filetype = self._detect_filetype(self.url, filetype)
@classmethod
def _detect_filetype(cls, url, filetype='auto'):
if filetype not in ['auto', 'detect']:
return filetype
if 'png' in url:
return 'png'
elif 'jpg' in url:
return 'jpg'
elif 'jpeg' in url:
return 'jpeg'
elif 'webp' in url:
return 'webp'
else:
return 'jpg'
@classmethod
def _headers(cls, referer):
headers = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
'Referer': referer}
return headers
def download(self, filename):
if not Path(filename).suffix:
filename = filename + '.' + self._detect_filetype(self.url)
headers = self._headers(self.url_from)
req = requests.get(self.url, headers=headers)
content = req.content
with open(filename, 'wb') as f:
f.write(content)
return
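# Usage sketch (URLs are illustrative only):
#   ImageDownloader('https://i.example.com/p1.jpg', chapter_url).download('page_000001')
#   # -> writes page_000001.jpg, inferring the extension from the image URL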
class Book():
    '''
    Top-level downloader: walk the catalogue and save every chapter into its own subdirectory.
    '''
def __init__(self,
url='https://www.manhuadb.com/manhua/2598',
dire='',
headless=False):
self.url = url
self._headless = headless
self.website_label = 'manhuaDB'
init_driver(headless=self._headless)
init_dic_xpath()
self.catelogue = Catelogue(self.url)
self.booktopic = self.catelogue.booktopic
self.bookstate = self.catelogue.bookstate
self.bookname = self.catelogue.bookname
self.continue_state = {'continue': False, 'delete_last_dir': False}
self._parse_input_continue(dire)
self.dire = self._get_save_dire(dire)
self.chapters = self.catelogue.chapters
self.chapter_amount = self.catelogue.chapter_amount
driver.close()
def _parse_input_continue(self, text):
text = text.strip()
        reg = r'^(continue)[\s\-_]*(del$|delete$)?'
reg = re.compile(reg)
match = re.findall(reg, text)
if match:
self.continue_state['continue'] = True
if match[0][1]:
self.continue_state['delete_last_dir'] = True
return
def _get_save_dire(self, dire):
time_label = time.strftime('%y%m%d_%H%M%S')
dire = dire.strip()
if self.continue_state['continue']:
dirs = glob('*{}*'.format(self.bookname))
dirs = [i for i in dirs if os.path.isdir(i)]
if not dirs:
dire = ''
else:
dire = dirs[-1]
print('\nFind dir to continue: 【 {} 】\n'.format(dire))
if self.continue_state['delete_last_dir']:
subdirs = os.listdir(dire)
subdirs.sort()
subdirs = [Path(dire).joinpath(i).as_posix() for i in subdirs]
subdirs = [i for i in subdirs if os.path.isdir(i)]
last_subdir = subdirs[-1]
print('--- Delete last subdir: {}\n'.format(last_subdir))
shutil.rmtree(last_subdir)
if not dire:
            dire = '{} {} [{}] [{}] [{}]'.format(time_label, self.catelogue.bookname, self.booktopic, self.bookstate, self.website_label)
return dire
def download(self, start_label=''):
print('\n{0}\nBookname | 《 {1} 》\n{0}\n'.format('='*50, self.bookname))
time_book0 = time.time()
date_book0 = time.strftime('%Y-%m-%d %T')
if os.path.exists(self.dire):
print('\nExist dir: 【 {} 】\n'.format(self.dire))
else:
print('\nCreate dir: 【 {} 】\n'.format(self.dire))
os.makedirs(self.dire, exist_ok=True)
with open('{}/book_url.txt'.format(self.dire), 'w') as f:
f.write('{}\n'.format(self.url))
## ------ For start control [2022.09.21]
if start_label:
start_ok = False
start_label = zhconv.convert(start_label, 'zh-cn')
else:
start_ok = True
# for chap_index, chapter in enumerate(tqdm(self.chapters)):
for chap_index, chapter in enumerate(self.chapters):
chap_index += 1
time0 = time.time()
chap_title = chapter[0]
chap_title = zhconv.convert(chap_title, 'zh-cn')
chap_title0 = chap_title
# chap_title = '{} {}'.format(str(chap_index).zfill(3), chap_title)
path_chap = Path(self.dire).joinpath(chap_title).as_posix()
## ------ For start control [2022.09.21]
if not start_ok:
if start_label not in chap_title:
continue
else:
start_ok = True
chap_title_old = []
chap_title_old.append(chap_title0)
chap_title_old.append('chap_{} {}'.format(str(chap_index).zfill(3), chap_title0))
path_chap_old = [Path(self.dire).joinpath(i).as_posix() for i in chap_title_old]
if not os.path.exists(path_chap):
_old_dir = False
for i in path_chap_old:
if os.path.exists(i):
print('\n------ Skip info ------')
print(' For 【{}】'.format(path_chap))
print(' Find old version save dir 【{}】 '.format(i))
print(' Skip this chapter, please check if this is right\n')
_old_dir = True
break
if _old_dir:
continue
else:
os.makedirs(path_chap)
else:
continue
chap_url = chapter[1]
onechap = OneChapter(chap_url, headless=self._headless)
onechap.cal_images()
images = onechap.images
print('\nDownloading [{0}/{1}]: {2}'.format(chap_index, self.chapter_amount, chap_title))
for i, image in enumerate(tqdm(images)):
# for i, image in enumerate(images):
i += 1
filename = image[0]
filename = 'page_{}'.format(str(i).zfill(6))
file_path = Path(path_chap).joinpath(filename).as_posix()
if os.path.exists(file_path):
continue
url_d = image[1]
imgd = ImageDownloader(url_d, chap_url)
imgd.download(file_path)
time.sleep(0.5)
_, time_m, time_s = time_parser(time.time() - time0)
print(' [ {} | {} | {:0>2}:{:0>2} ]'.format(chap_title, time.strftime('%Y-%m-%d %T'), time_m, time_s))
time_book_h, time_book_m, time_book_s = time_parser(time.time() - time_book0)
date_book2 = time.strftime('%Y-%m-%d %T')
print('\n {0} Done | {1} {0}'.format('='*5, self.bookname))
print( ' start | {} '.format(date_book0))
print( ' done | {} '.format(date_book2))
print( ' time | {:0>2}:{:0>2}:{:0>2} \n'.format(time_book_h, time_book_m, time_book_s))
empty_sub = find_empty_subdir(self.dire)
if empty_sub:
print('\n ====== Caution !!! ======')
            print( '  These subdirs are empty:')
for i in empty_sub:
print(' {}'.format(i))
            print('\n  Please check and download them again!')
print( ' Book url: {}\n'.format(self.url))
if __name__ == '__main__':
## ------ user settings ------
read_inputs = True
read_dire = True
urls = [
'https://www.1kkk.com/manhua6123/',
]
## ------ Prepare ------
if read_inputs:
urls = []
while True:
inputs = input('url of catelogue: ')
if inputs:
urls.append(inputs.strip())
else:
break
    dire = ''
    if read_dire:
        dire = input('\ndir to save the comic: ')
        dire = dire.strip()
## ------ Program Begin ------
for url in urls:
book = Book(url, dire=dire, headless=False)
book.download() | PypiClean |
/dl_coursera-0.1.2-py3-none-any.whl/dl_coursera/define.py | import json
from .lib.MyDict import MyDict
URL_ROOT = 'https://www.coursera.org'
COURSE_0 = 'learning-how-to-learn'
def URL_SPEC(slug):
return URL_ROOT + '/api/onDemandSpecializations.v1?q=slug&slug={}&fields=courseIds,interchangeableCourseIds,launchedAt,logo,memberships,metadata,partnerIds,premiumExperienceVariant,onDemandSpecializationMemberships.v1(suggestedSessionSchedule),onDemandSpecializationSuggestedSchedule.v1(suggestedSessions),partners.v1(homeLink,name),courses.v1(courseProgress,description,membershipIds,startDate,v2Details,vcMembershipIds),v2Details.v1(onDemandSessions,plannedLaunchDate),memberships.v1(grade,vcMembershipId),vcMemberships.v1(certificateCodeWithGrade)&includes=courseIds,memberships,partnerIds,onDemandSpecializationMemberships.v1(suggestedSessionSchedule),courses.v1(courseProgress,membershipIds,v2Details,vcMembershipIds),v2Details.v1(onDemandSessions)'.format(slug)
def URL_COURSE_1(slug):
return URL_ROOT + '/api/onDemandCourses.v1?q=slug&slug={}&includes=instructorIds%2CpartnerIds%2C_links&fields=brandingImage%2CcertificatePurchaseEnabledAt%2Cpartners.v1(squareLogo%2CrectangularLogo)%2Cinstructors.v1(fullName)%2CoverridePartnerLogos%2CsessionsEnabledAt%2CdomainTypes%2CpremiumExperienceVariant%2CisRestrictedMembership'.format(slug)
def URL_COURSE_2(slug):
return URL_ROOT + '/api/onDemandCourseMaterials.v2/?q=slug&slug={}&includes=modules%2Clessons%2CpassableItemGroups%2CpassableItemGroupChoices%2CpassableLessonElements%2Citems%2Ctracks%2CgradePolicy&fields=moduleIds%2ConDemandCourseMaterialModules.v1(name%2Cslug%2Cdescription%2CtimeCommitment%2ClessonIds%2Coptional%2ClearningObjectives)%2ConDemandCourseMaterialLessons.v1(name%2Cslug%2CtimeCommitment%2CelementIds%2Coptional%2CtrackId)%2ConDemandCourseMaterialPassableItemGroups.v1(requiredPassedCount%2CpassableItemGroupChoiceIds%2CtrackId)%2ConDemandCourseMaterialPassableItemGroupChoices.v1(name%2Cdescription%2CitemIds)%2ConDemandCourseMaterialPassableLessonElements.v1(gradingWeight%2CisRequiredForPassing)%2ConDemandCourseMaterialItems.v2(name%2Cslug%2CtimeCommitment%2CcontentSummary%2CisLocked%2ClockableByItem%2CitemLockedReasonCode%2CtrackId%2ClockedStatus%2CitemLockSummary)%2ConDemandCourseMaterialTracks.v1(passablesCount)&showLockedItems=true'.format(slug)
def URL_COURSE_REFERENCES(id_course):
return URL_ROOT + '/api/onDemandReferences.v1/?courseId={}&q=courseListed&fields=name%2CshortId%2Cslug%2Ccontent&includes=assets'.format(id_course)
def URL_COURSE_REFERENCE(id_course, id_ref):
return URL_ROOT + '/api/onDemandReferences.v1/?courseId={}&q=shortId&shortId={}&fields=name%2CshortId%2Cslug%2Ccontent&includes=assets'.format(id_course, id_ref)
def URL_LECTURE_1(id_course, id_lecture):
return URL_ROOT + '/api/onDemandLectureVideos.v1/{}~{}?includes=video&fields=onDemandVideos.v1(sources%2Csubtitles%2CsubtitlesVtt%2CsubtitlesTxt)'.format(id_course, id_lecture)
def URL_LECTURE_2(id_course, id_lecture):
return URL_ROOT + '/api/onDemandLectureAssets.v1/{}~{}/?includes=openCourseAssets'.format(id_course, id_lecture)
def URL_SUPPLEMENT(id_course, id_supplement):
return URL_ROOT + '/api/onDemandSupplements.v1/{}~{}?includes=asset&fields=openCourseAssets.v1(typeName)%2CopenCourseAssets.v1(definition)'.format(id_course, id_supplement)
def URL_ASSET(ids):
return URL_ROOT + '/api/assets.v1?ids={}&fields=audioSourceUrls%2C+videoSourceUrls%2C+videoThumbnailUrls%2C+fileExtension%2C+tags'.format(','.join(ids))
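# e.g. URL_ASSET(['abc', 'def']) -> 'https://www.coursera.org/api/assets.v1?ids=abc,def&fields=...'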
class DlCourseraException(Exception):
pass
class SpecNotExistExcepton(DlCourseraException):
def __init__(self, slug):
super().__init__('The specialization %s does not exist' % slug)
self.slug = slug
class CourseNotExistExcepton(DlCourseraException):
def __init__(self, slug):
super().__init__('The course %s does not exist' % slug)
self.slug = slug
class CookiesExpiredException(DlCourseraException):
def __init__(self):
super().__init__('The cookies.txt expired')
class BadResponseException(DlCourseraException):
def __init__(self, d):
super().__init__('Bad response: %s' % d)
self.d = d
class Spec(MyDict):
def __init__(self, *, id_=None, name=None, slug=None):
super().__init__()
self['type'] = 'Spec'
self['id'] = id_
self['name'] = name
self['slug'] = slug
self['courses'] = []
class Course(MyDict):
def __init__(self, *, id_=None, name=None, slug=None):
super().__init__()
self['type'] = 'Course'
self['id'] = id_
self['name'] = name
self['slug'] = slug
self['modules'] = []
self['references'] = []
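# These MyDict subclasses form a tree that is filled in elsewhere by the crawler: a Spec collects
# Course dicts in spec['courses'], and a Course collects its modules and references in the lists above.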
class CourseReference(MyDict):
def __init__(self, *, id_=None, name=None, slug=None, item=None):
super().__init__()
self['type'] = 'CourseReference'
self['id'] = id_
self['name'] = name
self['slug'] = slug
self['item'] = item
class CourseMaterialModule(MyDict):
def __init__(self, *, id_=None, name=None, slug=None):
super().__init__()
self['type'] = 'Module'
self['id'] = id_
self['name'] = name
self['slug'] = slug
self['lessons'] = []
class CourseMaterialLesson(MyDict):
def __init__(self, *, id_=None, name=None, slug=None):
super().__init__()
self['type'] = 'Lesson'
self['id'] = id_
self['name'] = name
self['slug'] = slug
self['items'] = []
class CourseMaterialLecture(MyDict):
def __init__(self, *, id_=None, name=None, slug=None):
super().__init__()
self['type'] = 'Lecture'
self['id'] = id_
self['name'] = name
self['slug'] = slug
self['videos'] = []
self['assets'] = []
class CourseMaterialSupplement(MyDict):
def __init__(self, *, id_=None, name=None, slug=None):
super().__init__()
self['type'] = 'Supplement'
self['id'] = id_
self['name'] = name
self['slug'] = slug
self['items'] = []
class CourseMaterialSupplementItem(MyDict):
pass
class CourseMaterialSupplementItemCML(CourseMaterialSupplementItem):
def __init__(self, *, html, assets):
super().__init__()
self['type'] = 'CML'
self['html'] = html
self['assets'] = assets
class CourseMaterialNotebook(MyDict):
def __init__(self, *, id_=None, name=None, slug=None):
super().__init__()
self['type'] = 'Notebook'
self['id'] = id_
self['name'] = name
self['slug'] = slug
class Video(MyDict):
def __init__(self, url_video, url_subtitle=None):
super().__init__()
self['url_video'] = url_video
if url_subtitle is not None:
self['url_subtitle'] = url_subtitle
class Asset(MyDict):
def __init__(self, id_, url, name):
super().__init__()
self['id'] = id_
self['url'] = url
self['name'] = name | PypiClean |
/tnnt_templates-2.11.5.tar.gz/tnnt_templates-2.11.5/tnnt_templates/static/tnnt_templates/libs/highlight-js/11.01/languages/sml.js | export default function(hljs) {
return {
name: 'SML (Standard ML)',
aliases: [ 'ml' ],
keywords: {
$pattern: '[a-z_]\\w*!?',
keyword:
/* according to Definition of Standard ML 97 */
'abstype and andalso as case datatype do else end eqtype ' +
'exception fn fun functor handle if in include infix infixr ' +
'let local nonfix of op open orelse raise rec sharing sig ' +
'signature struct structure then type val with withtype where while',
built_in:
/* built-in types according to basis library */
'array bool char exn int list option order real ref string substring vector unit word',
literal:
'true false NONE SOME LESS EQUAL GREATER nil'
},
illegal: /\/\/|>>/,
contains: [
{
className: 'literal',
begin: /\[(\|\|)?\]|\(\)/,
relevance: 0
},
hljs.COMMENT(
'\\(\\*',
'\\*\\)',
{
contains: [ 'self' ]
}
),
{ /* type variable */
className: 'symbol',
begin: '\'[A-Za-z_](?!\')[\\w\']*'
/* the grammar is ambiguous on how 'a'b should be interpreted but not the compiler */
},
{ /* polymorphic variant */
className: 'type',
begin: '`[A-Z][\\w\']*'
},
{ /* module or constructor */
className: 'type',
begin: '\\b[A-Z][\\w\']*',
relevance: 0
},
{ /* don't color identifiers, but safely catch all identifiers with ' */
begin: '[a-z_]\\w*\'[\\w\']*'
},
hljs.inherit(hljs.APOS_STRING_MODE, {
className: 'string',
relevance: 0
}),
hljs.inherit(hljs.QUOTE_STRING_MODE, {
illegal: null
}),
{
className: 'number',
begin:
'\\b(0[xX][a-fA-F0-9_]+[Lln]?|' +
'0[oO][0-7_]+[Lln]?|' +
'0[bB][01_]+[Lln]?|' +
'[0-9][0-9_]*([Lln]|(\\.[0-9_]*)?([eE][-+]?[0-9_]+)?)?)',
relevance: 0
},
{
begin: /[-=]>/ // relevance booster
}
]
};
} | PypiClean |
/graia_ariadne-0.7.16-py3-none-any.whl/graia/ariadne/context.py |
from contextlib import contextmanager, suppress
from contextvars import ContextVar
from typing import TYPE_CHECKING, Dict, Optional
if TYPE_CHECKING:
from asyncio.events import AbstractEventLoop
from graia.broadcast import Broadcast
from graia.broadcast.entities.event import Dispatchable
from .app import Ariadne
from .connection.util import UploadMethod
ariadne_ctx: ContextVar[Ariadne] = ContextVar("ariadne")
event_ctx: ContextVar[Dispatchable] = ContextVar("event")
event_loop_ctx: ContextVar[AbstractEventLoop] = ContextVar("event_loop")
broadcast_ctx: ContextVar[Broadcast] = ContextVar("broadcast")
upload_method_ctx: ContextVar[UploadMethod] = ContextVar("upload_method")
else: # for not crashing pdoc
ariadne_ctx = ContextVar("ariadne")
event_ctx = ContextVar("event")
event_loop_ctx = ContextVar("event_loop")
broadcast_ctx = ContextVar("broadcast")
upload_method_ctx = ContextVar("upload_method")
context_map: Dict[str, ContextVar] = {
"Ariadne": ariadne_ctx,
"Dispatchable": event_ctx,
"AbstractEventLoop": event_loop_ctx,
"Broadcast": broadcast_ctx,
"UploadMethod": upload_method_ctx,
}
@contextmanager
def enter_message_send_context(method: "UploadMethod"):
    """Enter the message sending context.
    Args:
        method (UploadMethod): The upload method enum member describing this context.
    """
t = upload_method_ctx.set(method)
yield
upload_method_ctx.reset(t)
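# Usage sketch (``method`` being whichever UploadMethod member applies):
#   with enter_message_send_context(method):
#       ...  # code inside can read it back via upload_method_ctx.get()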
@contextmanager
def enter_context(app: Optional["Ariadne"] = None, event: Optional["Dispatchable"] = None):
    """Enter the event context.
    Args:
        app (Ariadne, optional): The Ariadne instance.
        event (Dispatchable, optional): The current event.
    """
token_loop = None
token_bcc = None
token_app = None
if app:
token_app = ariadne_ctx.set(app)
token_loop = event_loop_ctx.set(app.service.loop)
token_bcc = broadcast_ctx.set(app.service.broadcast)
token_event = event_ctx.set(event) if event else None
yield
with suppress(ValueError):
if token_app:
ariadne_ctx.reset(token_app)
if token_event:
event_ctx.reset(token_event)
if token_loop:
event_loop_ctx.reset(token_loop)
if token_bcc:
broadcast_ctx.reset(token_bcc) | PypiClean |
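# Usage sketch:
#   with enter_context(app=app, event=event):
#       ...  # ariadne_ctx.get() and event_ctx.get() resolve to `app` and `event` here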
/music_fsl-0.1.6.tar.gz/music_fsl-0.1.6/music_fsl/data.py | from typing import Optional, Callable, Dict, List, Any, Tuple
import random
from collections import defaultdict
import torch
import numpy as np
import mirdata
from torch.utils.data import Dataset
import music_fsl.util as util
class ClassConditionalDataset(torch.utils.data.Dataset):
def __getitem__(self, index: int) -> Dict[Any, Any]:
"""
Grab an item from the dataset. The item returned must be a dictionary.
"""
raise NotImplementedError
@property
def classlist(self) -> List[str]:
"""
The classlist property returns a list of class labels available in the dataset.
This property enables users of the dataset to easily access a list of all the classes in the dataset.
Returns:
List[str]: A list of class labels available in the dataset.
"""
raise NotImplementedError
@property
def class_to_indices(self) -> Dict[str, List[int]]:
"""
Returns a dictionary where the keys are class labels and the values are
lists of indices in the dataset that belong to that class.
This property enables users of the dataset to easily access
examples that belong to specific classes.
Implement me!
Returns:
Dict[str, List[int]]: A dictionary mapping class labels to lists of dataset indices.
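            Example shape (illustrative): {"Violin": [0, 3, 7], "Cello": [1, 2, 5]}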
"""
raise NotImplementedError
class TinySOL(ClassConditionalDataset):
"""
Initialize a `TinySOL` dataset instance.
Args:
instruments (List[str]): A list of instruments to include in the dataset.
duration (float): The duration of each audio clip in the dataset (in seconds).
sample_rate (int): The sample rate of the audio clips in the dataset (in Hz).
"""
INSTRUMENTS = [
'Bassoon', 'Viola', 'Trumpet in C', 'Bass Tuba',
'Alto Saxophone', 'French Horn', 'Violin',
'Flute', 'Contrabass', 'Trombone', 'Cello',
'Clarinet in Bb', 'Oboe', 'Accordion'
]
def __init__(self,
instruments: List[str] = None,
duration: float = 1.0,
sample_rate: int = 16000,
):
if instruments is None:
instruments = self.INSTRUMENTS
self.instruments = instruments
self.duration = duration
self.sample_rate = sample_rate
# initialize the tinysol dataset and download if necessary
self.dataset = mirdata.initialize('tinysol')
self.dataset.download()
# make sure the instruments passed in are valid
for instrument in instruments:
assert instrument in self.INSTRUMENTS, f"{instrument} is not a valid instrument"
# load all tracks for this instrument
self.tracks = []
for track in self.dataset.load_tracks().values():
if track.instrument_full in self.instruments:
self.tracks.append(track)
@property
def classlist(self) -> List[str]:
return self.instruments
@property
def class_to_indices(self) -> Dict[str, List[int]]:
# cache it in self._class_to_indices
# so we don't have to recompute it every time
if not hasattr(self, "_class_to_indices"):
self._class_to_indices = defaultdict(list)
for i, track in enumerate(self.tracks):
self._class_to_indices[track.instrument_full].append(i)
return self._class_to_indices
def __getitem__(self, index) -> Dict:
# load the track for this index
track = self.tracks[index]
# load the excerpt
data = util.load_excerpt(track.audio_path, self.duration, self.sample_rate)
data["label"] = track.instrument_full
return data
def __len__(self) -> int:
return len(self.tracks)
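# Quick usage sketch (mirdata downloads TinySOL on first use):
#   dataset = TinySOL(instruments=["Violin", "Flute"])
#   item = dataset[0]            # dict with a 1 s audio excerpt and item["label"] in {"Violin", "Flute"}
#   dataset.class_to_indices     # {"Violin": [...], "Flute": [...]}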
class EpisodeDataset(torch.utils.data.Dataset):
"""
A dataset for sampling few-shot learning tasks from a class-conditional dataset.
Args:
dataset (ClassConditionalDataset): The dataset to sample episodes from.
n_way (int): The number of classes to sample per episode.
Default: 5.
n_support (int): The number of samples per class to use as support.
Default: 5.
n_query (int): The number of samples per class to use as query.
Default: 20.
n_episodes (int): The number of episodes to generate.
Default: 100.
"""
def __init__(self,
dataset: ClassConditionalDataset,
n_way: int = 5,
n_support: int = 5,
n_query: int = 20,
n_episodes: int = 100,
):
self.dataset = dataset
self.n_way = n_way
self.n_support = n_support
self.n_query = n_query
self.n_episodes = n_episodes
def __getitem__(self, index: int) -> Tuple[Dict, Dict]:
"""Sample an episode from the class-conditional dataset.
Each episode is a tuple of two dictionaries: a support set and a query set.
The support set contains a set of samples from each of the classes in the
episode, and the query set contains another set of samples from each of the
classes. The class labels are added to each item in the support and query
sets, and the list of classes is also included in each dictionary.
        Returns:
Tuple[Dict[str, Any], Dict[str, Any]]: A tuple containing the support
set and the query set for an episode.
"""
# seed the random number generator so we can reproduce this episode
rng = random.Random(index)
# sample the list of classes for this episode
episode_classlist = rng.sample(self.dataset.classlist, self.n_way)
# sample the support and query sets for this episode
support, query = [], []
for c in episode_classlist:
# grab the dataset indices for this class
all_indices = self.dataset.class_to_indices[c]
# sample the support and query sets for this class
indices = rng.sample(all_indices, self.n_support + self.n_query)
items = [self.dataset[i] for i in indices]
# add the class label to each item
for item in items:
item["target"] = torch.tensor(episode_classlist.index(c))
# split the support and query sets
support.extend(items[:self.n_support])
query.extend(items[self.n_support:])
# collate the support and query sets
support = util.collate_list_of_dicts(support)
query = util.collate_list_of_dicts(query)
support["classlist"] = episode_classlist
query["classlist"] = episode_classlist
return support, query
def __len__(self):
return self.n_episodes
def print_episode(self, support, query):
"""Print a summary of the support and query sets for an episode.
Args:
support (Dict[str, Any]): The support set for an episode.
query (Dict[str, Any]): The query set for an episode.
"""
print("Support Set:")
print(f" Classlist: {support['classlist']}")
print(f" Audio Shape: {support['audio'].shape}")
print(f" Target Shape: {support['target'].shape}")
print()
print("Query Set:")
print(f" Classlist: {query['classlist']}")
print(f" Audio Shape: {query['audio'].shape}")
print(f" Target Shape: {query['target'].shape}")
if __name__ == "__main__":
from torch.utils.data import DataLoader
dataset = TinySOL()
# create an episodic dataset
episodes = EpisodeDataset(dataset)
# create a dataloader
dataloader = DataLoader(episodes, batch_size=None, shuffle=True)
batch = next(iter(dataloader))
breakpoint() | PypiClean |
/pylinkirc-3.1.0.tar.gz/pylinkirc-3.1.0/plugins/games.py | import random
from pylinkirc import utils
mydesc = "The \x02Games\x02 plugin provides simple games for IRC."
gameclient = utils.register_service("Games", default_nick="Games", manipulatable=True, desc=mydesc)
reply = gameclient.reply # TODO find a better syntax for ServiceBot.reply()
error = gameclient.error # TODO find a better syntax for ServiceBot.error()
# commands
def dice(irc, source, args):
"""<num>d<sides>
Rolls a die with <sides> sides <num> times.
"""
if not args:
reply(irc, "No string given.")
return
try:
# Split num and sides and convert them to int.
num, sides = map(int, args[0].split('d', 1))
except ValueError:
# Invalid syntax. Show the command help.
gameclient.help(irc, source, ['dice'])
return
assert 1 < sides <= 100, "Invalid side count (must be 2-100)."
assert 1 <= num <= 100, "Cannot roll more than 100 dice at once."
results = []
for _ in range(num):
results.append(random.randint(1, sides))
# Convert results to strings, join them, format, and reply.
s = 'You rolled %s: %s (total: %s)' % (args[0], ' '.join([str(x) for x in results]), sum(results))
reply(irc, s)
gameclient.add_cmd(dice, aliases=('d',), featured=True)
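# Example exchange (rolls vary): "dice 2d6" -> "You rolled 2d6: 3 5 (total: 8)"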
eightball_responses = ["It is certain.",
"It is decidedly so.",
"Without a doubt.",
"Yes, definitely.",
"You may rely on it.",
"As I see it, yes.",
"Most likely.",
"Outlook good.",
"Yes.",
"Signs point to yes.",
"Reply hazy, try again.",
"Ask again later.",
"Better not tell you now.",
"Cannot predict now.",
"Concentrate and ask again.",
"Don't count on it.",
"My reply is no.",
"My sources say no.",
"Outlook not so good.",
"Very doubtful."]
def eightball(irc, source, args):
"""[<question>]
Asks the Magic 8-ball a question.
"""
reply(irc, random.choice(eightball_responses))
gameclient.add_cmd(eightball, featured=True, aliases=('8ball', '8b'))
def die(irc=None):
utils.unregister_service('games') | PypiClean |
/wagalytics-tier-1.0rc1.tar.gz/wagalytics-tier-1.0rc1/wagalytics/static/wagalytics/vendors.bundle.js |
module.exports = __webpack_require__(1);
/***/ }),
/* 1 */
/***/ (function(module, exports, __webpack_require__) {
var require;/* WEBPACK VAR INJECTION */(function(module) {//! moment.js
;(function (global, factory) {
true ? module.exports = factory() :
typeof define === 'function' && define.amd ? define(factory) :
global.moment = factory()
}(this, (function () { 'use strict';
var hookCallback;
function hooks () {
return hookCallback.apply(null, arguments);
}
// This is done to register the method called with moment()
// without creating circular dependencies.
function setHookCallback (callback) {
hookCallback = callback;
}
function isArray(input) {
return input instanceof Array || Object.prototype.toString.call(input) === '[object Array]';
}
function isObject(input) {
// IE8 will treat undefined and null as object if it wasn't for
// input != null
return input != null && Object.prototype.toString.call(input) === '[object Object]';
}
function isObjectEmpty(obj) {
if (Object.getOwnPropertyNames) {
return (Object.getOwnPropertyNames(obj).length === 0);
} else {
var k;
for (k in obj) {
if (obj.hasOwnProperty(k)) {
return false;
}
}
return true;
}
}
function isUndefined(input) {
return input === void 0;
}
function isNumber(input) {
return typeof input === 'number' || Object.prototype.toString.call(input) === '[object Number]';
}
function isDate(input) {
return input instanceof Date || Object.prototype.toString.call(input) === '[object Date]';
}
function map(arr, fn) {
var res = [], i;
for (i = 0; i < arr.length; ++i) {
res.push(fn(arr[i], i));
}
return res;
}
function hasOwnProp(a, b) {
return Object.prototype.hasOwnProperty.call(a, b);
}
function extend(a, b) {
for (var i in b) {
if (hasOwnProp(b, i)) {
a[i] = b[i];
}
}
if (hasOwnProp(b, 'toString')) {
a.toString = b.toString;
}
if (hasOwnProp(b, 'valueOf')) {
a.valueOf = b.valueOf;
}
return a;
}
function createUTC (input, format, locale, strict) {
return createLocalOrUTC(input, format, locale, strict, true).utc();
}
function defaultParsingFlags() {
// We need to deep clone this object.
return {
empty : false,
unusedTokens : [],
unusedInput : [],
overflow : -2,
charsLeftOver : 0,
nullInput : false,
invalidMonth : null,
invalidFormat : false,
userInvalidated : false,
iso : false,
parsedDateParts : [],
meridiem : null,
rfc2822 : false,
weekdayMismatch : false
};
}
function getParsingFlags(m) {
if (m._pf == null) {
m._pf = defaultParsingFlags();
}
return m._pf;
}
var some;
if (Array.prototype.some) {
some = Array.prototype.some;
} else {
some = function (fun) {
var t = Object(this);
var len = t.length >>> 0;
for (var i = 0; i < len; i++) {
if (i in t && fun.call(this, t[i], i, t)) {
return true;
}
}
return false;
};
}
function isValid(m) {
if (m._isValid == null) {
var flags = getParsingFlags(m);
var parsedParts = some.call(flags.parsedDateParts, function (i) {
return i != null;
});
var isNowValid = !isNaN(m._d.getTime()) &&
flags.overflow < 0 &&
!flags.empty &&
!flags.invalidMonth &&
!flags.invalidWeekday &&
!flags.weekdayMismatch &&
!flags.nullInput &&
!flags.invalidFormat &&
!flags.userInvalidated &&
(!flags.meridiem || (flags.meridiem && parsedParts));
if (m._strict) {
isNowValid = isNowValid &&
flags.charsLeftOver === 0 &&
flags.unusedTokens.length === 0 &&
flags.bigHour === undefined;
}
if (Object.isFrozen == null || !Object.isFrozen(m)) {
m._isValid = isNowValid;
}
else {
return isNowValid;
}
}
return m._isValid;
}
function createInvalid (flags) {
var m = createUTC(NaN);
if (flags != null) {
extend(getParsingFlags(m), flags);
}
else {
getParsingFlags(m).userInvalidated = true;
}
return m;
}
// Plugins that add properties should also add the key here (null value),
// so we can properly clone ourselves.
var momentProperties = hooks.momentProperties = [];
function copyConfig(to, from) {
var i, prop, val;
if (!isUndefined(from._isAMomentObject)) {
to._isAMomentObject = from._isAMomentObject;
}
if (!isUndefined(from._i)) {
to._i = from._i;
}
if (!isUndefined(from._f)) {
to._f = from._f;
}
if (!isUndefined(from._l)) {
to._l = from._l;
}
if (!isUndefined(from._strict)) {
to._strict = from._strict;
}
if (!isUndefined(from._tzm)) {
to._tzm = from._tzm;
}
if (!isUndefined(from._isUTC)) {
to._isUTC = from._isUTC;
}
if (!isUndefined(from._offset)) {
to._offset = from._offset;
}
if (!isUndefined(from._pf)) {
to._pf = getParsingFlags(from);
}
if (!isUndefined(from._locale)) {
to._locale = from._locale;
}
if (momentProperties.length > 0) {
for (i = 0; i < momentProperties.length; i++) {
prop = momentProperties[i];
val = from[prop];
if (!isUndefined(val)) {
to[prop] = val;
}
}
}
return to;
}
var updateInProgress = false;
// Moment prototype object
function Moment(config) {
copyConfig(this, config);
this._d = new Date(config._d != null ? config._d.getTime() : NaN);
if (!this.isValid()) {
this._d = new Date(NaN);
}
// Prevent infinite loop in case updateOffset creates new moment
// objects.
if (updateInProgress === false) {
updateInProgress = true;
hooks.updateOffset(this);
updateInProgress = false;
}
}
function isMoment (obj) {
return obj instanceof Moment || (obj != null && obj._isAMomentObject != null);
}
function absFloor (number) {
if (number < 0) {
// -0 -> 0
return Math.ceil(number) || 0;
} else {
return Math.floor(number);
}
}
function toInt(argumentForCoercion) {
var coercedNumber = +argumentForCoercion,
value = 0;
if (coercedNumber !== 0 && isFinite(coercedNumber)) {
value = absFloor(coercedNumber);
}
return value;
}
// compare two arrays, return the number of differences
function compareArrays(array1, array2, dontConvert) {
var len = Math.min(array1.length, array2.length),
lengthDiff = Math.abs(array1.length - array2.length),
diffs = 0,
i;
for (i = 0; i < len; i++) {
if ((dontConvert && array1[i] !== array2[i]) ||
(!dontConvert && toInt(array1[i]) !== toInt(array2[i]))) {
diffs++;
}
}
return diffs + lengthDiff;
}
function warn(msg) {
if (hooks.suppressDeprecationWarnings === false &&
(typeof console !== 'undefined') && console.warn) {
console.warn('Deprecation warning: ' + msg);
}
}
function deprecate(msg, fn) {
var firstTime = true;
return extend(function () {
if (hooks.deprecationHandler != null) {
hooks.deprecationHandler(null, msg);
}
if (firstTime) {
var args = [];
var arg;
for (var i = 0; i < arguments.length; i++) {
arg = '';
if (typeof arguments[i] === 'object') {
arg += '\n[' + i + '] ';
for (var key in arguments[0]) {
arg += key + ': ' + arguments[0][key] + ', ';
}
arg = arg.slice(0, -2); // Remove trailing comma and space
} else {
arg = arguments[i];
}
args.push(arg);
}
warn(msg + '\nArguments: ' + Array.prototype.slice.call(args).join('') + '\n' + (new Error()).stack);
firstTime = false;
}
return fn.apply(this, arguments);
}, fn);
}
var deprecations = {};
function deprecateSimple(name, msg) {
if (hooks.deprecationHandler != null) {
hooks.deprecationHandler(name, msg);
}
if (!deprecations[name]) {
warn(msg);
deprecations[name] = true;
}
}
hooks.suppressDeprecationWarnings = false;
hooks.deprecationHandler = null;
function isFunction(input) {
return input instanceof Function || Object.prototype.toString.call(input) === '[object Function]';
}
function set (config) {
var prop, i;
for (i in config) {
prop = config[i];
if (isFunction(prop)) {
this[i] = prop;
} else {
this['_' + i] = prop;
}
}
this._config = config;
// Lenient ordinal parsing accepts just a number in addition to
// number + (possibly) stuff coming from _dayOfMonthOrdinalParse.
// TODO: Remove "ordinalParse" fallback in next major release.
this._dayOfMonthOrdinalParseLenient = new RegExp(
(this._dayOfMonthOrdinalParse.source || this._ordinalParse.source) +
'|' + (/\d{1,2}/).source);
}
function mergeConfigs(parentConfig, childConfig) {
var res = extend({}, parentConfig), prop;
for (prop in childConfig) {
if (hasOwnProp(childConfig, prop)) {
if (isObject(parentConfig[prop]) && isObject(childConfig[prop])) {
res[prop] = {};
extend(res[prop], parentConfig[prop]);
extend(res[prop], childConfig[prop]);
} else if (childConfig[prop] != null) {
res[prop] = childConfig[prop];
} else {
delete res[prop];
}
}
}
for (prop in parentConfig) {
if (hasOwnProp(parentConfig, prop) &&
!hasOwnProp(childConfig, prop) &&
isObject(parentConfig[prop])) {
// make sure changes to properties don't modify parent config
res[prop] = extend({}, res[prop]);
}
}
return res;
}
function Locale(config) {
if (config != null) {
this.set(config);
}
}
var keys;
if (Object.keys) {
keys = Object.keys;
} else {
keys = function (obj) {
var i, res = [];
for (i in obj) {
if (hasOwnProp(obj, i)) {
res.push(i);
}
}
return res;
};
}
var defaultCalendar = {
sameDay : '[Today at] LT',
nextDay : '[Tomorrow at] LT',
nextWeek : 'dddd [at] LT',
lastDay : '[Yesterday at] LT',
lastWeek : '[Last] dddd [at] LT',
sameElse : 'L'
};
function calendar (key, mom, now) {
var output = this._calendar[key] || this._calendar['sameElse'];
return isFunction(output) ? output.call(mom, now) : output;
}
var defaultLongDateFormat = {
LTS : 'h:mm:ss A',
LT : 'h:mm A',
L : 'MM/DD/YYYY',
LL : 'MMMM D, YYYY',
LLL : 'MMMM D, YYYY h:mm A',
LLLL : 'dddd, MMMM D, YYYY h:mm A'
};
function longDateFormat (key) {
var format = this._longDateFormat[key],
formatUpper = this._longDateFormat[key.toUpperCase()];
if (format || !formatUpper) {
return format;
}
this._longDateFormat[key] = formatUpper.replace(/MMMM|MM|DD|dddd/g, function (val) {
return val.slice(1);
});
return this._longDateFormat[key];
}
var defaultInvalidDate = 'Invalid date';
function invalidDate () {
return this._invalidDate;
}
var defaultOrdinal = '%d';
var defaultDayOfMonthOrdinalParse = /\d{1,2}/;
function ordinal (number) {
return this._ordinal.replace('%d', number);
}
var defaultRelativeTime = {
future : 'in %s',
past : '%s ago',
s : 'a few seconds',
ss : '%d seconds',
m : 'a minute',
mm : '%d minutes',
h : 'an hour',
hh : '%d hours',
d : 'a day',
dd : '%d days',
M : 'a month',
MM : '%d months',
y : 'a year',
yy : '%d years'
};
function relativeTime (number, withoutSuffix, string, isFuture) {
var output = this._relativeTime[string];
return (isFunction(output)) ?
output(number, withoutSuffix, string, isFuture) :
output.replace(/%d/i, number);
}
function pastFuture (diff, output) {
var format = this._relativeTime[diff > 0 ? 'future' : 'past'];
return isFunction(format) ? format(output) : format.replace(/%s/i, output);
}
var aliases = {};
function addUnitAlias (unit, shorthand) {
var lowerCase = unit.toLowerCase();
aliases[lowerCase] = aliases[lowerCase + 's'] = aliases[shorthand] = unit;
}
function normalizeUnits(units) {
return typeof units === 'string' ? aliases[units] || aliases[units.toLowerCase()] : undefined;
}
function normalizeObjectUnits(inputObject) {
var normalizedInput = {},
normalizedProp,
prop;
for (prop in inputObject) {
if (hasOwnProp(inputObject, prop)) {
normalizedProp = normalizeUnits(prop);
if (normalizedProp) {
normalizedInput[normalizedProp] = inputObject[prop];
}
}
}
return normalizedInput;
}
var priorities = {};
function addUnitPriority(unit, priority) {
priorities[unit] = priority;
}
function getPrioritizedUnits(unitsObj) {
var units = [];
for (var u in unitsObj) {
units.push({unit: u, priority: priorities[u]});
}
units.sort(function (a, b) {
return a.priority - b.priority;
});
return units;
}
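    // zeroFill pads the absolute value of a number to targetLength with leading
    // zeros and restores the sign, e.g. zeroFill(5, 3) -> '005',
    // zeroFill(-5, 3) -> '-005', zeroFill(5, 3, true) -> '+005'.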
function zeroFill(number, targetLength, forceSign) {
var absNumber = '' + Math.abs(number),
zerosToFill = targetLength - absNumber.length,
sign = number >= 0;
return (sign ? (forceSign ? '+' : '') : '-') +
Math.pow(10, Math.max(0, zerosToFill)).toString().substr(1) + absNumber;
}
var formattingTokens = /(\[[^\[]*\])|(\\)?([Hh]mm(ss)?|Mo|MM?M?M?|Do|DDDo|DD?D?D?|ddd?d?|do?|w[o|w]?|W[o|W]?|Qo?|YYYYYY|YYYYY|YYYY|YY|gg(ggg?)?|GG(GGG?)?|e|E|a|A|hh?|HH?|kk?|mm?|ss?|S{1,9}|x|X|zz?|ZZ?|.)/g;
var localFormattingTokens = /(\[[^\[]*\])|(\\)?(LTS|LT|LL?L?L?|l{1,4})/g;
var formatFunctions = {};
var formatTokenFunctions = {};
// token: 'M'
// padded: ['MM', 2]
// ordinal: 'Mo'
// callback: function () { this.month() + 1 }
function addFormatToken (token, padded, ordinal, callback) {
var func = callback;
if (typeof callback === 'string') {
func = function () {
return this[callback]();
};
}
if (token) {
formatTokenFunctions[token] = func;
}
if (padded) {
formatTokenFunctions[padded[0]] = function () {
return zeroFill(func.apply(this, arguments), padded[1], padded[2]);
};
}
if (ordinal) {
formatTokenFunctions[ordinal] = function () {
return this.localeData().ordinal(func.apply(this, arguments), token);
};
}
}
function removeFormattingTokens(input) {
if (input.match(/\[[\s\S]/)) {
return input.replace(/^\[|\]$/g, '');
}
return input.replace(/\\/g, '');
}
function makeFormatFunction(format) {
var array = format.match(formattingTokens), i, length;
for (i = 0, length = array.length; i < length; i++) {
if (formatTokenFunctions[array[i]]) {
array[i] = formatTokenFunctions[array[i]];
} else {
array[i] = removeFormattingTokens(array[i]);
}
}
return function (mom) {
var output = '', i;
for (i = 0; i < length; i++) {
output += isFunction(array[i]) ? array[i].call(mom, format) : array[i];
}
return output;
};
}
// format date using native date object
function formatMoment(m, format) {
if (!m.isValid()) {
return m.localeData().invalidDate();
}
format = expandFormat(format, m.localeData());
formatFunctions[format] = formatFunctions[format] || makeFormatFunction(format);
return formatFunctions[format](m);
}
function expandFormat(format, locale) {
var i = 5;
function replaceLongDateFormatTokens(input) {
return locale.longDateFormat(input) || input;
}
localFormattingTokens.lastIndex = 0;
while (i >= 0 && localFormattingTokens.test(format)) {
format = format.replace(localFormattingTokens, replaceLongDateFormatTokens);
localFormattingTokens.lastIndex = 0;
i -= 1;
}
return format;
}
var match1 = /\d/; // 0 - 9
var match2 = /\d\d/; // 00 - 99
var match3 = /\d{3}/; // 000 - 999
var match4 = /\d{4}/; // 0000 - 9999
var match6 = /[+-]?\d{6}/; // -999999 - 999999
var match1to2 = /\d\d?/; // 0 - 99
var match3to4 = /\d\d\d\d?/; // 999 - 9999
var match5to6 = /\d\d\d\d\d\d?/; // 99999 - 999999
var match1to3 = /\d{1,3}/; // 0 - 999
var match1to4 = /\d{1,4}/; // 0 - 9999
var match1to6 = /[+-]?\d{1,6}/; // -999999 - 999999
var matchUnsigned = /\d+/; // 0 - inf
var matchSigned = /[+-]?\d+/; // -inf - inf
var matchOffset = /Z|[+-]\d\d:?\d\d/gi; // +00:00 -00:00 +0000 -0000 or Z
var matchShortOffset = /Z|[+-]\d\d(?::?\d\d)?/gi; // +00 -00 +00:00 -00:00 +0000 -0000 or Z
var matchTimestamp = /[+-]?\d+(\.\d{1,3})?/; // 123456789 123456789.123
    // any word (or two) of characters or numbers, including two/three-word months in Arabic.
    // Also covers Scottish Gaelic two-word and hyphenated months.
var matchWord = /[0-9]{0,256}['a-z\u00A0-\u05FF\u0700-\uD7FF\uF900-\uFDCF\uFDF0-\uFF07\uFF10-\uFFEF]{1,256}|[\u0600-\u06FF\/]{1,256}(\s*?[\u0600-\u06FF]{1,256}){1,2}/i;
var regexes = {};
function addRegexToken (token, regex, strictRegex) {
regexes[token] = isFunction(regex) ? regex : function (isStrict, localeData) {
return (isStrict && strictRegex) ? strictRegex : regex;
};
}
function getParseRegexForToken (token, config) {
if (!hasOwnProp(regexes, token)) {
return new RegExp(unescapeFormat(token));
}
return regexes[token](config._strict, config._locale);
}
// Code from http://stackoverflow.com/questions/3561493/is-there-a-regexp-escape-function-in-javascript
function unescapeFormat(s) {
return regexEscape(s.replace('\\', '').replace(/\\(\[)|\\(\])|\[([^\]\[]*)\]|\\(.)/g, function (matched, p1, p2, p3, p4) {
return p1 || p2 || p3 || p4;
}));
}
function regexEscape(s) {
return s.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&');
}
var tokens = {};
function addParseToken (token, callback) {
var i, func = callback;
if (typeof token === 'string') {
token = [token];
}
if (isNumber(callback)) {
func = function (input, array) {
array[callback] = toInt(input);
};
}
for (i = 0; i < token.length; i++) {
tokens[token[i]] = func;
}
}
function addWeekParseToken (token, callback) {
addParseToken(token, function (input, array, config, token) {
config._w = config._w || {};
callback(input, config._w, config, token);
});
}
function addTimeToArrayFromToken(token, input, config) {
if (input != null && hasOwnProp(tokens, token)) {
tokens[token](input, config._a, config, token);
}
}
var YEAR = 0;
var MONTH = 1;
var DATE = 2;
var HOUR = 3;
var MINUTE = 4;
var SECOND = 5;
var MILLISECOND = 6;
var WEEK = 7;
var WEEKDAY = 8;
// FORMATTING
addFormatToken('Y', 0, 0, function () {
var y = this.year();
return y <= 9999 ? '' + y : '+' + y;
});
addFormatToken(0, ['YY', 2], 0, function () {
return this.year() % 100;
});
addFormatToken(0, ['YYYY', 4], 0, 'year');
addFormatToken(0, ['YYYYY', 5], 0, 'year');
addFormatToken(0, ['YYYYYY', 6, true], 0, 'year');
// ALIASES
addUnitAlias('year', 'y');
// PRIORITIES
addUnitPriority('year', 1);
// PARSING
addRegexToken('Y', matchSigned);
addRegexToken('YY', match1to2, match2);
addRegexToken('YYYY', match1to4, match4);
addRegexToken('YYYYY', match1to6, match6);
addRegexToken('YYYYYY', match1to6, match6);
addParseToken(['YYYYY', 'YYYYYY'], YEAR);
addParseToken('YYYY', function (input, array) {
array[YEAR] = input.length === 2 ? hooks.parseTwoDigitYear(input) : toInt(input);
});
addParseToken('YY', function (input, array) {
array[YEAR] = hooks.parseTwoDigitYear(input);
});
addParseToken('Y', function (input, array) {
array[YEAR] = parseInt(input, 10);
});
// HELPERS
function daysInYear(year) {
return isLeapYear(year) ? 366 : 365;
}
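    // Gregorian leap year rule: divisible by 4 but not by 100, unless also
    // divisible by 400 (2000 and 2024 are leap years, 1900 is not).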
function isLeapYear(year) {
return (year % 4 === 0 && year % 100 !== 0) || year % 400 === 0;
}
// HOOKS
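    // Two-digit years up to 68 are mapped into the 2000s, years above 68 into
    // the 1900s, e.g. parseTwoDigitYear('68') -> 2068, parseTwoDigitYear('69') -> 1969.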
hooks.parseTwoDigitYear = function (input) {
return toInt(input) + (toInt(input) > 68 ? 1900 : 2000);
};
// MOMENTS
var getSetYear = makeGetSet('FullYear', true);
function getIsLeapYear () {
return isLeapYear(this.year());
}
function makeGetSet (unit, keepTime) {
return function (value) {
if (value != null) {
set$1(this, unit, value);
hooks.updateOffset(this, keepTime);
return this;
} else {
return get(this, unit);
}
};
}
function get (mom, unit) {
return mom.isValid() ?
mom._d['get' + (mom._isUTC ? 'UTC' : '') + unit]() : NaN;
}
function set$1 (mom, unit, value) {
if (mom.isValid() && !isNaN(value)) {
if (unit === 'FullYear' && isLeapYear(mom.year()) && mom.month() === 1 && mom.date() === 29) {
mom._d['set' + (mom._isUTC ? 'UTC' : '') + unit](value, mom.month(), daysInMonth(value, mom.month()));
}
else {
mom._d['set' + (mom._isUTC ? 'UTC' : '') + unit](value);
}
}
}
// MOMENTS
function stringGet (units) {
units = normalizeUnits(units);
if (isFunction(this[units])) {
return this[units]();
}
return this;
}
function stringSet (units, value) {
if (typeof units === 'object') {
units = normalizeObjectUnits(units);
var prioritized = getPrioritizedUnits(units);
for (var i = 0; i < prioritized.length; i++) {
this[prioritized[i].unit](units[prioritized[i].unit]);
}
} else {
units = normalizeUnits(units);
if (isFunction(this[units])) {
return this[units](value);
}
}
return this;
}
function mod(n, x) {
return ((n % x) + x) % x;
}
var indexOf;
if (Array.prototype.indexOf) {
indexOf = Array.prototype.indexOf;
} else {
indexOf = function (o) {
            // simple linear-scan fallback for environments without Array.prototype.indexOf
var i;
for (i = 0; i < this.length; ++i) {
if (this[i] === o) {
return i;
}
}
return -1;
};
}
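    // daysInMonth normalizes the month index first (month 13 of 2015 becomes
    // February 2016) and uses the identity 31 - (m % 7 % 2) for every month
    // except February, e.g. daysInMonth(2016, 1) -> 29, daysInMonth(2015, 3) -> 30.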
function daysInMonth(year, month) {
if (isNaN(year) || isNaN(month)) {
return NaN;
}
var modMonth = mod(month, 12);
year += (month - modMonth) / 12;
return modMonth === 1 ? (isLeapYear(year) ? 29 : 28) : (31 - modMonth % 7 % 2);
}
// FORMATTING
addFormatToken('M', ['MM', 2], 'Mo', function () {
return this.month() + 1;
});
addFormatToken('MMM', 0, 0, function (format) {
return this.localeData().monthsShort(this, format);
});
addFormatToken('MMMM', 0, 0, function (format) {
return this.localeData().months(this, format);
});
// ALIASES
addUnitAlias('month', 'M');
// PRIORITY
addUnitPriority('month', 8);
// PARSING
addRegexToken('M', match1to2);
addRegexToken('MM', match1to2, match2);
addRegexToken('MMM', function (isStrict, locale) {
return locale.monthsShortRegex(isStrict);
});
addRegexToken('MMMM', function (isStrict, locale) {
return locale.monthsRegex(isStrict);
});
addParseToken(['M', 'MM'], function (input, array) {
array[MONTH] = toInt(input) - 1;
});
addParseToken(['MMM', 'MMMM'], function (input, array, config, token) {
var month = config._locale.monthsParse(input, token, config._strict);
// if we didn't find a month name, mark the date as invalid.
if (month != null) {
array[MONTH] = month;
} else {
getParsingFlags(config).invalidMonth = input;
}
});
// LOCALES
var MONTHS_IN_FORMAT = /D[oD]?(\[[^\[\]]*\]|\s)+MMMM?/;
var defaultLocaleMonths = 'January_February_March_April_May_June_July_August_September_October_November_December'.split('_');
function localeMonths (m, format) {
if (!m) {
return isArray(this._months) ? this._months :
this._months['standalone'];
}
return isArray(this._months) ? this._months[m.month()] :
this._months[(this._months.isFormat || MONTHS_IN_FORMAT).test(format) ? 'format' : 'standalone'][m.month()];
}
var defaultLocaleMonthsShort = 'Jan_Feb_Mar_Apr_May_Jun_Jul_Aug_Sep_Oct_Nov_Dec'.split('_');
function localeMonthsShort (m, format) {
if (!m) {
return isArray(this._monthsShort) ? this._monthsShort :
this._monthsShort['standalone'];
}
return isArray(this._monthsShort) ? this._monthsShort[m.month()] :
this._monthsShort[MONTHS_IN_FORMAT.test(format) ? 'format' : 'standalone'][m.month()];
}
function handleStrictParse(monthName, format, strict) {
var i, ii, mom, llc = monthName.toLocaleLowerCase();
if (!this._monthsParse) {
            // _monthsParse itself is unused here; only the short/long arrays below are consulted
this._monthsParse = [];
this._longMonthsParse = [];
this._shortMonthsParse = [];
for (i = 0; i < 12; ++i) {
mom = createUTC([2000, i]);
this._shortMonthsParse[i] = this.monthsShort(mom, '').toLocaleLowerCase();
this._longMonthsParse[i] = this.months(mom, '').toLocaleLowerCase();
}
}
if (strict) {
if (format === 'MMM') {
ii = indexOf.call(this._shortMonthsParse, llc);
return ii !== -1 ? ii : null;
} else {
ii = indexOf.call(this._longMonthsParse, llc);
return ii !== -1 ? ii : null;
}
} else {
if (format === 'MMM') {
ii = indexOf.call(this._shortMonthsParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._longMonthsParse, llc);
return ii !== -1 ? ii : null;
} else {
ii = indexOf.call(this._longMonthsParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._shortMonthsParse, llc);
return ii !== -1 ? ii : null;
}
}
}
function localeMonthsParse (monthName, format, strict) {
var i, mom, regex;
if (this._monthsParseExact) {
return handleStrictParse.call(this, monthName, format, strict);
}
if (!this._monthsParse) {
this._monthsParse = [];
this._longMonthsParse = [];
this._shortMonthsParse = [];
}
// TODO: add sorting
        // Sorting makes sure that if one month (or abbr) is a prefix of another, the longer one matches first;
// see sorting in computeMonthsParse
for (i = 0; i < 12; i++) {
// make the regex if we don't have it already
mom = createUTC([2000, i]);
if (strict && !this._longMonthsParse[i]) {
this._longMonthsParse[i] = new RegExp('^' + this.months(mom, '').replace('.', '') + '$', 'i');
this._shortMonthsParse[i] = new RegExp('^' + this.monthsShort(mom, '').replace('.', '') + '$', 'i');
}
if (!strict && !this._monthsParse[i]) {
regex = '^' + this.months(mom, '') + '|^' + this.monthsShort(mom, '');
this._monthsParse[i] = new RegExp(regex.replace('.', ''), 'i');
}
// test the regex
if (strict && format === 'MMMM' && this._longMonthsParse[i].test(monthName)) {
return i;
} else if (strict && format === 'MMM' && this._shortMonthsParse[i].test(monthName)) {
return i;
} else if (!strict && this._monthsParse[i].test(monthName)) {
return i;
}
}
}
// MOMENTS
function setMonth (mom, value) {
var dayOfMonth;
if (!mom.isValid()) {
// No op
return mom;
}
if (typeof value === 'string') {
if (/^\d+$/.test(value)) {
value = toInt(value);
} else {
value = mom.localeData().monthsParse(value);
// TODO: Another silent failure?
if (!isNumber(value)) {
return mom;
}
}
}
dayOfMonth = Math.min(mom.date(), daysInMonth(mom.year(), value));
mom._d['set' + (mom._isUTC ? 'UTC' : '') + 'Month'](value, dayOfMonth);
return mom;
}
function getSetMonth (value) {
if (value != null) {
setMonth(this, value);
hooks.updateOffset(this, true);
return this;
} else {
return get(this, 'Month');
}
}
function getDaysInMonth () {
return daysInMonth(this.year(), this.month());
}
var defaultMonthsShortRegex = matchWord;
function monthsShortRegex (isStrict) {
if (this._monthsParseExact) {
if (!hasOwnProp(this, '_monthsRegex')) {
computeMonthsParse.call(this);
}
if (isStrict) {
return this._monthsShortStrictRegex;
} else {
return this._monthsShortRegex;
}
} else {
if (!hasOwnProp(this, '_monthsShortRegex')) {
this._monthsShortRegex = defaultMonthsShortRegex;
}
return this._monthsShortStrictRegex && isStrict ?
this._monthsShortStrictRegex : this._monthsShortRegex;
}
}
var defaultMonthsRegex = matchWord;
function monthsRegex (isStrict) {
if (this._monthsParseExact) {
if (!hasOwnProp(this, '_monthsRegex')) {
computeMonthsParse.call(this);
}
if (isStrict) {
return this._monthsStrictRegex;
} else {
return this._monthsRegex;
}
} else {
if (!hasOwnProp(this, '_monthsRegex')) {
this._monthsRegex = defaultMonthsRegex;
}
return this._monthsStrictRegex && isStrict ?
this._monthsStrictRegex : this._monthsRegex;
}
}
function computeMonthsParse () {
function cmpLenRev(a, b) {
return b.length - a.length;
}
var shortPieces = [], longPieces = [], mixedPieces = [],
i, mom;
for (i = 0; i < 12; i++) {
// make the regex if we don't have it already
mom = createUTC([2000, i]);
shortPieces.push(this.monthsShort(mom, ''));
longPieces.push(this.months(mom, ''));
mixedPieces.push(this.months(mom, ''));
mixedPieces.push(this.monthsShort(mom, ''));
}
// Sorting makes sure if one month (or abbr) is a prefix of another it
// will match the longer piece.
shortPieces.sort(cmpLenRev);
longPieces.sort(cmpLenRev);
mixedPieces.sort(cmpLenRev);
for (i = 0; i < 12; i++) {
shortPieces[i] = regexEscape(shortPieces[i]);
longPieces[i] = regexEscape(longPieces[i]);
}
for (i = 0; i < 24; i++) {
mixedPieces[i] = regexEscape(mixedPieces[i]);
}
this._monthsRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');
this._monthsShortRegex = this._monthsRegex;
this._monthsStrictRegex = new RegExp('^(' + longPieces.join('|') + ')', 'i');
this._monthsShortStrictRegex = new RegExp('^(' + shortPieces.join('|') + ')', 'i');
}
function createDate (y, m, d, h, M, s, ms) {
// can't just apply() to create a date:
// https://stackoverflow.com/q/181348
var date = new Date(y, m, d, h, M, s, ms);
// the date constructor remaps years 0-99 to 1900-1999
if (y < 100 && y >= 0 && isFinite(date.getFullYear())) {
date.setFullYear(y);
}
return date;
}
function createUTCDate (y) {
var date = new Date(Date.UTC.apply(null, arguments));
// the Date.UTC function remaps years 0-99 to 1900-1999
if (y < 100 && y >= 0 && isFinite(date.getUTCFullYear())) {
date.setUTCFullYear(y);
}
return date;
}
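    // Week-of-year calculations are parameterized by dow (first day of the week,
    // 0 = Sunday, 1 = Monday) and doy; together they fix which day of January is
    // always in week 1 via fwd = 7 + dow - doy. ISO weeks use dow = 1, doy = 4
    // (Jan 4th); the default locale uses dow = 0, doy = 6 (Jan 1st).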
// start-of-first-week - start-of-year
function firstWeekOffset(year, dow, doy) {
var // first-week day -- which january is always in the first week (4 for iso, 1 for other)
fwd = 7 + dow - doy,
// first-week day local weekday -- which local weekday is fwd
fwdlw = (7 + createUTCDate(year, 0, fwd).getUTCDay() - dow) % 7;
return -fwdlw + fwd - 1;
}
// https://en.wikipedia.org/wiki/ISO_week_date#Calculating_a_date_given_the_year.2C_week_number_and_weekday
function dayOfYearFromWeeks(year, week, weekday, dow, doy) {
var localWeekday = (7 + weekday - dow) % 7,
weekOffset = firstWeekOffset(year, dow, doy),
dayOfYear = 1 + 7 * (week - 1) + localWeekday + weekOffset,
resYear, resDayOfYear;
if (dayOfYear <= 0) {
resYear = year - 1;
resDayOfYear = daysInYear(resYear) + dayOfYear;
} else if (dayOfYear > daysInYear(year)) {
resYear = year + 1;
resDayOfYear = dayOfYear - daysInYear(year);
} else {
resYear = year;
resDayOfYear = dayOfYear;
}
return {
year: resYear,
dayOfYear: resDayOfYear
};
}
function weekOfYear(mom, dow, doy) {
var weekOffset = firstWeekOffset(mom.year(), dow, doy),
week = Math.floor((mom.dayOfYear() - weekOffset - 1) / 7) + 1,
resWeek, resYear;
if (week < 1) {
resYear = mom.year() - 1;
resWeek = week + weeksInYear(resYear, dow, doy);
} else if (week > weeksInYear(mom.year(), dow, doy)) {
resWeek = week - weeksInYear(mom.year(), dow, doy);
resYear = mom.year() + 1;
} else {
resYear = mom.year();
resWeek = week;
}
return {
week: resWeek,
year: resYear
};
}
function weeksInYear(year, dow, doy) {
var weekOffset = firstWeekOffset(year, dow, doy),
weekOffsetNext = firstWeekOffset(year + 1, dow, doy);
return (daysInYear(year) - weekOffset + weekOffsetNext) / 7;
}
// FORMATTING
addFormatToken('w', ['ww', 2], 'wo', 'week');
addFormatToken('W', ['WW', 2], 'Wo', 'isoWeek');
// ALIASES
addUnitAlias('week', 'w');
addUnitAlias('isoWeek', 'W');
// PRIORITIES
addUnitPriority('week', 5);
addUnitPriority('isoWeek', 5);
// PARSING
addRegexToken('w', match1to2);
addRegexToken('ww', match1to2, match2);
addRegexToken('W', match1to2);
addRegexToken('WW', match1to2, match2);
addWeekParseToken(['w', 'ww', 'W', 'WW'], function (input, week, config, token) {
week[token.substr(0, 1)] = toInt(input);
});
// HELPERS
// LOCALES
function localeWeek (mom) {
return weekOfYear(mom, this._week.dow, this._week.doy).week;
}
var defaultLocaleWeek = {
dow : 0, // Sunday is the first day of the week.
doy : 6 // The week that contains Jan 1st is the first week of the year.
};
function localeFirstDayOfWeek () {
return this._week.dow;
}
function localeFirstDayOfYear () {
return this._week.doy;
}
// MOMENTS
function getSetWeek (input) {
var week = this.localeData().week(this);
return input == null ? week : this.add((input - week) * 7, 'd');
}
function getSetISOWeek (input) {
var week = weekOfYear(this, 1, 4).week;
return input == null ? week : this.add((input - week) * 7, 'd');
}
// FORMATTING
addFormatToken('d', 0, 'do', 'day');
addFormatToken('dd', 0, 0, function (format) {
return this.localeData().weekdaysMin(this, format);
});
addFormatToken('ddd', 0, 0, function (format) {
return this.localeData().weekdaysShort(this, format);
});
addFormatToken('dddd', 0, 0, function (format) {
return this.localeData().weekdays(this, format);
});
addFormatToken('e', 0, 0, 'weekday');
addFormatToken('E', 0, 0, 'isoWeekday');
// ALIASES
addUnitAlias('day', 'd');
addUnitAlias('weekday', 'e');
addUnitAlias('isoWeekday', 'E');
// PRIORITY
addUnitPriority('day', 11);
addUnitPriority('weekday', 11);
addUnitPriority('isoWeekday', 11);
// PARSING
addRegexToken('d', match1to2);
addRegexToken('e', match1to2);
addRegexToken('E', match1to2);
addRegexToken('dd', function (isStrict, locale) {
return locale.weekdaysMinRegex(isStrict);
});
addRegexToken('ddd', function (isStrict, locale) {
return locale.weekdaysShortRegex(isStrict);
});
addRegexToken('dddd', function (isStrict, locale) {
return locale.weekdaysRegex(isStrict);
});
addWeekParseToken(['dd', 'ddd', 'dddd'], function (input, week, config, token) {
var weekday = config._locale.weekdaysParse(input, token, config._strict);
// if we didn't get a weekday name, mark the date as invalid
if (weekday != null) {
week.d = weekday;
} else {
getParsingFlags(config).invalidWeekday = input;
}
});
addWeekParseToken(['d', 'e', 'E'], function (input, week, config, token) {
week[token] = toInt(input);
});
// HELPERS
function parseWeekday(input, locale) {
if (typeof input !== 'string') {
return input;
}
if (!isNaN(input)) {
return parseInt(input, 10);
}
input = locale.weekdaysParse(input);
if (typeof input === 'number') {
return input;
}
return null;
}
function parseIsoWeekday(input, locale) {
if (typeof input === 'string') {
return locale.weekdaysParse(input) % 7 || 7;
}
return isNaN(input) ? null : input;
}
// LOCALES
var defaultLocaleWeekdays = 'Sunday_Monday_Tuesday_Wednesday_Thursday_Friday_Saturday'.split('_');
function localeWeekdays (m, format) {
if (!m) {
return isArray(this._weekdays) ? this._weekdays :
this._weekdays['standalone'];
}
return isArray(this._weekdays) ? this._weekdays[m.day()] :
this._weekdays[this._weekdays.isFormat.test(format) ? 'format' : 'standalone'][m.day()];
}
var defaultLocaleWeekdaysShort = 'Sun_Mon_Tue_Wed_Thu_Fri_Sat'.split('_');
function localeWeekdaysShort (m) {
return (m) ? this._weekdaysShort[m.day()] : this._weekdaysShort;
}
var defaultLocaleWeekdaysMin = 'Su_Mo_Tu_We_Th_Fr_Sa'.split('_');
function localeWeekdaysMin (m) {
return (m) ? this._weekdaysMin[m.day()] : this._weekdaysMin;
}
function handleStrictParse$1(weekdayName, format, strict) {
var i, ii, mom, llc = weekdayName.toLocaleLowerCase();
if (!this._weekdaysParse) {
this._weekdaysParse = [];
this._shortWeekdaysParse = [];
this._minWeekdaysParse = [];
for (i = 0; i < 7; ++i) {
mom = createUTC([2000, 1]).day(i);
this._minWeekdaysParse[i] = this.weekdaysMin(mom, '').toLocaleLowerCase();
this._shortWeekdaysParse[i] = this.weekdaysShort(mom, '').toLocaleLowerCase();
this._weekdaysParse[i] = this.weekdays(mom, '').toLocaleLowerCase();
}
}
if (strict) {
if (format === 'dddd') {
ii = indexOf.call(this._weekdaysParse, llc);
return ii !== -1 ? ii : null;
} else if (format === 'ddd') {
ii = indexOf.call(this._shortWeekdaysParse, llc);
return ii !== -1 ? ii : null;
} else {
ii = indexOf.call(this._minWeekdaysParse, llc);
return ii !== -1 ? ii : null;
}
} else {
if (format === 'dddd') {
ii = indexOf.call(this._weekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._shortWeekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._minWeekdaysParse, llc);
return ii !== -1 ? ii : null;
} else if (format === 'ddd') {
ii = indexOf.call(this._shortWeekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._weekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._minWeekdaysParse, llc);
return ii !== -1 ? ii : null;
} else {
ii = indexOf.call(this._minWeekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._weekdaysParse, llc);
if (ii !== -1) {
return ii;
}
ii = indexOf.call(this._shortWeekdaysParse, llc);
return ii !== -1 ? ii : null;
}
}
}
function localeWeekdaysParse (weekdayName, format, strict) {
var i, mom, regex;
if (this._weekdaysParseExact) {
return handleStrictParse$1.call(this, weekdayName, format, strict);
}
if (!this._weekdaysParse) {
this._weekdaysParse = [];
this._minWeekdaysParse = [];
this._shortWeekdaysParse = [];
this._fullWeekdaysParse = [];
}
for (i = 0; i < 7; i++) {
// make the regex if we don't have it already
mom = createUTC([2000, 1]).day(i);
if (strict && !this._fullWeekdaysParse[i]) {
this._fullWeekdaysParse[i] = new RegExp('^' + this.weekdays(mom, '').replace('.', '\\.?') + '$', 'i');
this._shortWeekdaysParse[i] = new RegExp('^' + this.weekdaysShort(mom, '').replace('.', '\\.?') + '$', 'i');
this._minWeekdaysParse[i] = new RegExp('^' + this.weekdaysMin(mom, '').replace('.', '\\.?') + '$', 'i');
}
if (!this._weekdaysParse[i]) {
regex = '^' + this.weekdays(mom, '') + '|^' + this.weekdaysShort(mom, '') + '|^' + this.weekdaysMin(mom, '');
this._weekdaysParse[i] = new RegExp(regex.replace('.', ''), 'i');
}
// test the regex
if (strict && format === 'dddd' && this._fullWeekdaysParse[i].test(weekdayName)) {
return i;
} else if (strict && format === 'ddd' && this._shortWeekdaysParse[i].test(weekdayName)) {
return i;
} else if (strict && format === 'dd' && this._minWeekdaysParse[i].test(weekdayName)) {
return i;
} else if (!strict && this._weekdaysParse[i].test(weekdayName)) {
return i;
}
}
}
// MOMENTS
function getSetDayOfWeek (input) {
if (!this.isValid()) {
return input != null ? this : NaN;
}
var day = this._isUTC ? this._d.getUTCDay() : this._d.getDay();
if (input != null) {
input = parseWeekday(input, this.localeData());
return this.add(input - day, 'd');
} else {
return day;
}
}
function getSetLocaleDayOfWeek (input) {
if (!this.isValid()) {
return input != null ? this : NaN;
}
var weekday = (this.day() + 7 - this.localeData()._week.dow) % 7;
return input == null ? weekday : this.add(input - weekday, 'd');
}
function getSetISODayOfWeek (input) {
if (!this.isValid()) {
return input != null ? this : NaN;
}
// behaves the same as moment#day except
// as a getter, returns 7 instead of 0 (1-7 range instead of 0-6)
// as a setter, sunday should belong to the previous week.
if (input != null) {
var weekday = parseIsoWeekday(input, this.localeData());
return this.day(this.day() % 7 ? weekday : weekday - 7);
} else {
return this.day() || 7;
}
}
var defaultWeekdaysRegex = matchWord;
function weekdaysRegex (isStrict) {
if (this._weekdaysParseExact) {
if (!hasOwnProp(this, '_weekdaysRegex')) {
computeWeekdaysParse.call(this);
}
if (isStrict) {
return this._weekdaysStrictRegex;
} else {
return this._weekdaysRegex;
}
} else {
if (!hasOwnProp(this, '_weekdaysRegex')) {
this._weekdaysRegex = defaultWeekdaysRegex;
}
return this._weekdaysStrictRegex && isStrict ?
this._weekdaysStrictRegex : this._weekdaysRegex;
}
}
var defaultWeekdaysShortRegex = matchWord;
function weekdaysShortRegex (isStrict) {
if (this._weekdaysParseExact) {
if (!hasOwnProp(this, '_weekdaysRegex')) {
computeWeekdaysParse.call(this);
}
if (isStrict) {
return this._weekdaysShortStrictRegex;
} else {
return this._weekdaysShortRegex;
}
} else {
if (!hasOwnProp(this, '_weekdaysShortRegex')) {
this._weekdaysShortRegex = defaultWeekdaysShortRegex;
}
return this._weekdaysShortStrictRegex && isStrict ?
this._weekdaysShortStrictRegex : this._weekdaysShortRegex;
}
}
var defaultWeekdaysMinRegex = matchWord;
function weekdaysMinRegex (isStrict) {
if (this._weekdaysParseExact) {
if (!hasOwnProp(this, '_weekdaysRegex')) {
computeWeekdaysParse.call(this);
}
if (isStrict) {
return this._weekdaysMinStrictRegex;
} else {
return this._weekdaysMinRegex;
}
} else {
if (!hasOwnProp(this, '_weekdaysMinRegex')) {
this._weekdaysMinRegex = defaultWeekdaysMinRegex;
}
return this._weekdaysMinStrictRegex && isStrict ?
this._weekdaysMinStrictRegex : this._weekdaysMinRegex;
}
}
function computeWeekdaysParse () {
function cmpLenRev(a, b) {
return b.length - a.length;
}
var minPieces = [], shortPieces = [], longPieces = [], mixedPieces = [],
i, mom, minp, shortp, longp;
for (i = 0; i < 7; i++) {
// make the regex if we don't have it already
mom = createUTC([2000, 1]).day(i);
minp = this.weekdaysMin(mom, '');
shortp = this.weekdaysShort(mom, '');
longp = this.weekdays(mom, '');
minPieces.push(minp);
shortPieces.push(shortp);
longPieces.push(longp);
mixedPieces.push(minp);
mixedPieces.push(shortp);
mixedPieces.push(longp);
}
// Sorting makes sure if one weekday (or abbr) is a prefix of another it
// will match the longer piece.
minPieces.sort(cmpLenRev);
shortPieces.sort(cmpLenRev);
longPieces.sort(cmpLenRev);
mixedPieces.sort(cmpLenRev);
for (i = 0; i < 7; i++) {
shortPieces[i] = regexEscape(shortPieces[i]);
longPieces[i] = regexEscape(longPieces[i]);
mixedPieces[i] = regexEscape(mixedPieces[i]);
}
this._weekdaysRegex = new RegExp('^(' + mixedPieces.join('|') + ')', 'i');
this._weekdaysShortRegex = this._weekdaysRegex;
this._weekdaysMinRegex = this._weekdaysRegex;
this._weekdaysStrictRegex = new RegExp('^(' + longPieces.join('|') + ')', 'i');
this._weekdaysShortStrictRegex = new RegExp('^(' + shortPieces.join('|') + ')', 'i');
this._weekdaysMinStrictRegex = new RegExp('^(' + minPieces.join('|') + ')', 'i');
}
// FORMATTING
function hFormat() {
return this.hours() % 12 || 12;
}
function kFormat() {
return this.hours() || 24;
}
addFormatToken('H', ['HH', 2], 0, 'hour');
addFormatToken('h', ['hh', 2], 0, hFormat);
addFormatToken('k', ['kk', 2], 0, kFormat);
addFormatToken('hmm', 0, 0, function () {
return '' + hFormat.apply(this) + zeroFill(this.minutes(), 2);
});
addFormatToken('hmmss', 0, 0, function () {
return '' + hFormat.apply(this) + zeroFill(this.minutes(), 2) +
zeroFill(this.seconds(), 2);
});
addFormatToken('Hmm', 0, 0, function () {
return '' + this.hours() + zeroFill(this.minutes(), 2);
});
addFormatToken('Hmmss', 0, 0, function () {
return '' + this.hours() + zeroFill(this.minutes(), 2) +
zeroFill(this.seconds(), 2);
});
function meridiem (token, lowercase) {
addFormatToken(token, 0, 0, function () {
return this.localeData().meridiem(this.hours(), this.minutes(), lowercase);
});
}
meridiem('a', true);
meridiem('A', false);
// ALIASES
addUnitAlias('hour', 'h');
// PRIORITY
addUnitPriority('hour', 13);
// PARSING
function matchMeridiem (isStrict, locale) {
return locale._meridiemParse;
}
addRegexToken('a', matchMeridiem);
addRegexToken('A', matchMeridiem);
addRegexToken('H', match1to2);
addRegexToken('h', match1to2);
addRegexToken('k', match1to2);
addRegexToken('HH', match1to2, match2);
addRegexToken('hh', match1to2, match2);
addRegexToken('kk', match1to2, match2);
addRegexToken('hmm', match3to4);
addRegexToken('hmmss', match5to6);
addRegexToken('Hmm', match3to4);
addRegexToken('Hmmss', match5to6);
addParseToken(['H', 'HH'], HOUR);
addParseToken(['k', 'kk'], function (input, array, config) {
var kInput = toInt(input);
array[HOUR] = kInput === 24 ? 0 : kInput;
});
addParseToken(['a', 'A'], function (input, array, config) {
config._isPm = config._locale.isPM(input);
config._meridiem = input;
});
addParseToken(['h', 'hh'], function (input, array, config) {
array[HOUR] = toInt(input);
getParsingFlags(config).bigHour = true;
});
addParseToken('hmm', function (input, array, config) {
var pos = input.length - 2;
array[HOUR] = toInt(input.substr(0, pos));
array[MINUTE] = toInt(input.substr(pos));
getParsingFlags(config).bigHour = true;
});
addParseToken('hmmss', function (input, array, config) {
var pos1 = input.length - 4;
var pos2 = input.length - 2;
array[HOUR] = toInt(input.substr(0, pos1));
array[MINUTE] = toInt(input.substr(pos1, 2));
array[SECOND] = toInt(input.substr(pos2));
getParsingFlags(config).bigHour = true;
});
addParseToken('Hmm', function (input, array, config) {
var pos = input.length - 2;
array[HOUR] = toInt(input.substr(0, pos));
array[MINUTE] = toInt(input.substr(pos));
});
addParseToken('Hmmss', function (input, array, config) {
var pos1 = input.length - 4;
var pos2 = input.length - 2;
array[HOUR] = toInt(input.substr(0, pos1));
array[MINUTE] = toInt(input.substr(pos1, 2));
array[SECOND] = toInt(input.substr(pos2));
});
// LOCALES
function localeIsPM (input) {
// IE8 Quirks Mode & IE7 Standards Mode do not allow accessing strings like arrays
// Using charAt should be more compatible.
return ((input + '').toLowerCase().charAt(0) === 'p');
}
var defaultLocaleMeridiemParse = /[ap]\.?m?\.?/i;
function localeMeridiem (hours, minutes, isLower) {
if (hours > 11) {
return isLower ? 'pm' : 'PM';
} else {
return isLower ? 'am' : 'AM';
}
}
// MOMENTS
// Setting the hour should keep the time, because the user explicitly
// specified which hour they want. So trying to maintain the same hour (in
// a new timezone) makes sense. Adding/subtracting hours does not follow
// this rule.
var getSetHour = makeGetSet('Hours', true);
var baseConfig = {
calendar: defaultCalendar,
longDateFormat: defaultLongDateFormat,
invalidDate: defaultInvalidDate,
ordinal: defaultOrdinal,
dayOfMonthOrdinalParse: defaultDayOfMonthOrdinalParse,
relativeTime: defaultRelativeTime,
months: defaultLocaleMonths,
monthsShort: defaultLocaleMonthsShort,
week: defaultLocaleWeek,
weekdays: defaultLocaleWeekdays,
weekdaysMin: defaultLocaleWeekdaysMin,
weekdaysShort: defaultLocaleWeekdaysShort,
meridiemParse: defaultLocaleMeridiemParse
};
// internal storage for locale config files
var locales = {};
var localeFamilies = {};
var globalLocale;
function normalizeLocale(key) {
return key ? key.toLowerCase().replace('_', '-') : key;
}
    // pick the locale from the array
    // try ['en-au', 'en-gb'] as 'en-au', 'en-gb', 'en', i.e. move through the list trying each
    // substring from most specific to least specific, but move on to the next array item if it is a more specific variant than the current root
function chooseLocale(names) {
var i = 0, j, next, locale, split;
while (i < names.length) {
split = normalizeLocale(names[i]).split('-');
j = split.length;
next = normalizeLocale(names[i + 1]);
next = next ? next.split('-') : null;
while (j > 0) {
locale = loadLocale(split.slice(0, j).join('-'));
if (locale) {
return locale;
}
if (next && next.length >= j && compareArrays(split, next, true) >= j - 1) {
//the next array item is better than a shallower substring of this one
break;
}
j--;
}
i++;
}
return globalLocale;
}
function loadLocale(name) {
var oldLocale = null;
// TODO: Find a better way to register and load all the locales in Node
if (!locales[name] && (typeof module !== 'undefined') &&
module && module.exports) {
try {
oldLocale = globalLocale._abbr;
var aliasedRequire = require;
                aliasedRequire('./locale/' + name);
getSetGlobalLocale(oldLocale);
} catch (e) {}
}
return locales[name];
}
// This function will load locale and then set the global locale. If
// no arguments are passed in, it will simply return the current global
// locale key.
function getSetGlobalLocale (key, values) {
var data;
if (key) {
if (isUndefined(values)) {
data = getLocale(key);
}
else {
data = defineLocale(key, values);
}
if (data) {
// moment.duration._locale = moment._locale = data;
globalLocale = data;
}
else {
if ((typeof console !== 'undefined') && console.warn) {
//warn user if arguments are passed but the locale could not be set
console.warn('Locale ' + key + ' not found. Did you forget to load it?');
}
}
}
return globalLocale._abbr;
}
function defineLocale (name, config) {
if (config !== null) {
var locale, parentConfig = baseConfig;
config.abbr = name;
if (locales[name] != null) {
deprecateSimple('defineLocaleOverride',
'use moment.updateLocale(localeName, config) to change ' +
'an existing locale. moment.defineLocale(localeName, ' +
'config) should only be used for creating a new locale ' +
'See http://momentjs.com/guides/#/warnings/define-locale/ for more info.');
parentConfig = locales[name]._config;
} else if (config.parentLocale != null) {
if (locales[config.parentLocale] != null) {
parentConfig = locales[config.parentLocale]._config;
} else {
locale = loadLocale(config.parentLocale);
if (locale != null) {
parentConfig = locale._config;
} else {
if (!localeFamilies[config.parentLocale]) {
localeFamilies[config.parentLocale] = [];
}
localeFamilies[config.parentLocale].push({
name: name,
config: config
});
return null;
}
}
}
locales[name] = new Locale(mergeConfigs(parentConfig, config));
if (localeFamilies[name]) {
localeFamilies[name].forEach(function (x) {
defineLocale(x.name, x.config);
});
}
// backwards compat for now: also set the locale
// make sure we set the locale AFTER all child locales have been
// created, so we won't end up with the child locale set.
getSetGlobalLocale(name);
return locales[name];
} else {
// useful for testing
delete locales[name];
return null;
}
}
function updateLocale(name, config) {
if (config != null) {
var locale, tmpLocale, parentConfig = baseConfig;
// MERGE
tmpLocale = loadLocale(name);
if (tmpLocale != null) {
parentConfig = tmpLocale._config;
}
config = mergeConfigs(parentConfig, config);
locale = new Locale(config);
locale.parentLocale = locales[name];
locales[name] = locale;
// backwards compat for now: also set the locale
getSetGlobalLocale(name);
} else {
// pass null for config to unupdate, useful for tests
if (locales[name] != null) {
if (locales[name].parentLocale != null) {
locales[name] = locales[name].parentLocale;
} else if (locales[name] != null) {
delete locales[name];
}
}
}
return locales[name];
}
// returns locale data
function getLocale (key) {
var locale;
if (key && key._locale && key._locale._abbr) {
key = key._locale._abbr;
}
if (!key) {
return globalLocale;
}
if (!isArray(key)) {
//short-circuit everything else
locale = loadLocale(key);
if (locale) {
return locale;
}
key = [key];
}
return chooseLocale(key);
}
function listLocales() {
return keys(locales);
}
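    // checkOverflow records in the parsing flags which parsed unit, if any, fell
    // outside its valid range (e.g. month index 12 or minute 61); -1 means no
    // overflow. Hour 24 is only accepted when minutes, seconds and ms are all 0.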
function checkOverflow (m) {
var overflow;
var a = m._a;
if (a && getParsingFlags(m).overflow === -2) {
overflow =
a[MONTH] < 0 || a[MONTH] > 11 ? MONTH :
a[DATE] < 1 || a[DATE] > daysInMonth(a[YEAR], a[MONTH]) ? DATE :
a[HOUR] < 0 || a[HOUR] > 24 || (a[HOUR] === 24 && (a[MINUTE] !== 0 || a[SECOND] !== 0 || a[MILLISECOND] !== 0)) ? HOUR :
a[MINUTE] < 0 || a[MINUTE] > 59 ? MINUTE :
a[SECOND] < 0 || a[SECOND] > 59 ? SECOND :
a[MILLISECOND] < 0 || a[MILLISECOND] > 999 ? MILLISECOND :
-1;
if (getParsingFlags(m)._overflowDayOfYear && (overflow < YEAR || overflow > DATE)) {
overflow = DATE;
}
if (getParsingFlags(m)._overflowWeeks && overflow === -1) {
overflow = WEEK;
}
if (getParsingFlags(m)._overflowWeekday && overflow === -1) {
overflow = WEEKDAY;
}
getParsingFlags(m).overflow = overflow;
}
return m;
}
// Pick the first defined of two or three arguments.
function defaults(a, b, c) {
if (a != null) {
return a;
}
if (b != null) {
return b;
}
return c;
}
function currentDateArray(config) {
// hooks is actually the exported moment object
var nowValue = new Date(hooks.now());
if (config._useUTC) {
return [nowValue.getUTCFullYear(), nowValue.getUTCMonth(), nowValue.getUTCDate()];
}
return [nowValue.getFullYear(), nowValue.getMonth(), nowValue.getDate()];
}
// convert an array to a date.
// the array should mirror the parameters below
// note: all values past the year are optional and will default to the lowest possible value.
// [year, month, day , hour, minute, second, millisecond]
function configFromArray (config) {
var i, date, input = [], currentDate, expectedWeekday, yearToUse;
if (config._d) {
return;
}
currentDate = currentDateArray(config);
//compute day of the year from weeks and weekdays
if (config._w && config._a[DATE] == null && config._a[MONTH] == null) {
dayOfYearFromWeekInfo(config);
}
//if the day of the year is set, figure out what it is
if (config._dayOfYear != null) {
yearToUse = defaults(config._a[YEAR], currentDate[YEAR]);
if (config._dayOfYear > daysInYear(yearToUse) || config._dayOfYear === 0) {
getParsingFlags(config)._overflowDayOfYear = true;
}
date = createUTCDate(yearToUse, 0, config._dayOfYear);
config._a[MONTH] = date.getUTCMonth();
config._a[DATE] = date.getUTCDate();
}
// Default to current date.
// * if no year, month, day of month are given, default to today
// * if day of month is given, default month and year
// * if month is given, default only year
// * if year is given, don't default anything
for (i = 0; i < 3 && config._a[i] == null; ++i) {
config._a[i] = input[i] = currentDate[i];
}
// Zero out whatever was not defaulted, including time
for (; i < 7; i++) {
config._a[i] = input[i] = (config._a[i] == null) ? (i === 2 ? 1 : 0) : config._a[i];
}
// Check for 24:00:00.000
if (config._a[HOUR] === 24 &&
config._a[MINUTE] === 0 &&
config._a[SECOND] === 0 &&
config._a[MILLISECOND] === 0) {
config._nextDay = true;
config._a[HOUR] = 0;
}
config._d = (config._useUTC ? createUTCDate : createDate).apply(null, input);
expectedWeekday = config._useUTC ? config._d.getUTCDay() : config._d.getDay();
// Apply timezone offset from input. The actual utcOffset can be changed
// with parseZone.
if (config._tzm != null) {
config._d.setUTCMinutes(config._d.getUTCMinutes() - config._tzm);
}
if (config._nextDay) {
config._a[HOUR] = 24;
}
// check for mismatching day of week
if (config._w && typeof config._w.d !== 'undefined' && config._w.d !== expectedWeekday) {
getParsingFlags(config).weekdayMismatch = true;
}
}
function dayOfYearFromWeekInfo(config) {
var w, weekYear, week, weekday, dow, doy, temp, weekdayOverflow;
w = config._w;
if (w.GG != null || w.W != null || w.E != null) {
dow = 1;
doy = 4;
// TODO: We need to take the current isoWeekYear, but that depends on
// how we interpret now (local, utc, fixed offset). So create
// a now version of current config (take local/utc/offset flags, and
// create now).
weekYear = defaults(w.GG, config._a[YEAR], weekOfYear(createLocal(), 1, 4).year);
week = defaults(w.W, 1);
weekday = defaults(w.E, 1);
if (weekday < 1 || weekday > 7) {
weekdayOverflow = true;
}
} else {
dow = config._locale._week.dow;
doy = config._locale._week.doy;
var curWeek = weekOfYear(createLocal(), dow, doy);
weekYear = defaults(w.gg, config._a[YEAR], curWeek.year);
// Default to current week.
week = defaults(w.w, curWeek.week);
if (w.d != null) {
// weekday -- low day numbers are considered next week
weekday = w.d;
if (weekday < 0 || weekday > 6) {
weekdayOverflow = true;
}
} else if (w.e != null) {
                // local weekday -- counting starts from beginning of week
weekday = w.e + dow;
if (w.e < 0 || w.e > 6) {
weekdayOverflow = true;
}
} else {
                // default to beginning of week
weekday = dow;
}
}
if (week < 1 || week > weeksInYear(weekYear, dow, doy)) {
getParsingFlags(config)._overflowWeeks = true;
} else if (weekdayOverflow != null) {
getParsingFlags(config)._overflowWeekday = true;
} else {
temp = dayOfYearFromWeeks(weekYear, week, weekday, dow, doy);
config._a[YEAR] = temp.year;
config._dayOfYear = temp.dayOfYear;
}
}
// iso 8601 regex
    // 0000-00-00 0000-W00 or 0000-W00-0 + T + 00 or 00:00 or 00:00:00 or 00:00:00.000 + +00:00 or +0000 or +00
var extendedIsoRegex = /^\s*((?:[+-]\d{6}|\d{4})-(?:\d\d-\d\d|W\d\d-\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?::\d\d(?::\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/;
var basicIsoRegex = /^\s*((?:[+-]\d{6}|\d{4})(?:\d\d\d\d|W\d\d\d|W\d\d|\d\d\d|\d\d))(?:(T| )(\d\d(?:\d\d(?:\d\d(?:[.,]\d+)?)?)?)([\+\-]\d\d(?::?\d\d)?|\s*Z)?)?$/;
var tzRegex = /Z|[+-]\d\d(?::?\d\d)?/;
var isoDates = [
['YYYYYY-MM-DD', /[+-]\d{6}-\d\d-\d\d/],
['YYYY-MM-DD', /\d{4}-\d\d-\d\d/],
['GGGG-[W]WW-E', /\d{4}-W\d\d-\d/],
['GGGG-[W]WW', /\d{4}-W\d\d/, false],
['YYYY-DDD', /\d{4}-\d{3}/],
['YYYY-MM', /\d{4}-\d\d/, false],
['YYYYYYMMDD', /[+-]\d{10}/],
['YYYYMMDD', /\d{8}/],
// YYYYMM is NOT allowed by the standard
['GGGG[W]WWE', /\d{4}W\d{3}/],
['GGGG[W]WW', /\d{4}W\d{2}/, false],
['YYYYDDD', /\d{7}/]
];
// iso time formats and regexes
var isoTimes = [
['HH:mm:ss.SSSS', /\d\d:\d\d:\d\d\.\d+/],
['HH:mm:ss,SSSS', /\d\d:\d\d:\d\d,\d+/],
['HH:mm:ss', /\d\d:\d\d:\d\d/],
['HH:mm', /\d\d:\d\d/],
['HHmmss.SSSS', /\d\d\d\d\d\d\.\d+/],
['HHmmss,SSSS', /\d\d\d\d\d\d,\d+/],
['HHmmss', /\d\d\d\d\d\d/],
['HHmm', /\d\d\d\d/],
['HH', /\d\d/]
];
var aspNetJsonRegex = /^\/?Date\((\-?\d+)/i;
// date from iso format
function configFromISO(config) {
var i, l,
string = config._i,
match = extendedIsoRegex.exec(string) || basicIsoRegex.exec(string),
allowTime, dateFormat, timeFormat, tzFormat;
if (match) {
getParsingFlags(config).iso = true;
for (i = 0, l = isoDates.length; i < l; i++) {
if (isoDates[i][1].exec(match[1])) {
dateFormat = isoDates[i][0];
allowTime = isoDates[i][2] !== false;
break;
}
}
if (dateFormat == null) {
config._isValid = false;
return;
}
if (match[3]) {
for (i = 0, l = isoTimes.length; i < l; i++) {
if (isoTimes[i][1].exec(match[3])) {
// match[2] should be 'T' or space
timeFormat = (match[2] || ' ') + isoTimes[i][0];
break;
}
}
if (timeFormat == null) {
config._isValid = false;
return;
}
}
if (!allowTime && timeFormat != null) {
config._isValid = false;
return;
}
if (match[4]) {
if (tzRegex.exec(match[4])) {
tzFormat = 'Z';
} else {
config._isValid = false;
return;
}
}
config._f = dateFormat + (timeFormat || '') + (tzFormat || '');
configFromStringAndFormat(config);
} else {
config._isValid = false;
}
}
// RFC 2822 regex: For details see https://tools.ietf.org/html/rfc2822#section-3.3
var rfc2822 = /^(?:(Mon|Tue|Wed|Thu|Fri|Sat|Sun),?\s)?(\d{1,2})\s(Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)\s(\d{2,4})\s(\d\d):(\d\d)(?::(\d\d))?\s(?:(UT|GMT|[ECMP][SD]T)|([Zz])|([+-]\d{4}))$/;
function extractFromRFC2822Strings(yearStr, monthStr, dayStr, hourStr, minuteStr, secondStr) {
var result = [
untruncateYear(yearStr),
defaultLocaleMonthsShort.indexOf(monthStr),
parseInt(dayStr, 10),
parseInt(hourStr, 10),
parseInt(minuteStr, 10)
];
if (secondStr) {
result.push(parseInt(secondStr, 10));
}
return result;
}
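    // RFC 2822 obsolete year forms: two-digit years 00-49 map to 2000-2049,
    // 50-99 map to 1950-1999, and three-digit years get 1900 added,
    // e.g. untruncateYear('98') -> 1998, untruncateYear('05') -> 2005.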
function untruncateYear(yearStr) {
var year = parseInt(yearStr, 10);
if (year <= 49) {
return 2000 + year;
} else if (year <= 999) {
return 1900 + year;
}
return year;
}
function preprocessRFC2822(s) {
// Remove comments and folding whitespace and replace multiple-spaces with a single space
return s.replace(/\([^)]*\)|[\n\t]/g, ' ').replace(/(\s\s+)/g, ' ').replace(/^\s\s*/, '').replace(/\s\s*$/, '');
}
function checkWeekday(weekdayStr, parsedInput, config) {
if (weekdayStr) {
            // TODO: Replace the vanilla JS Date object with an independent day-of-week check.
var weekdayProvided = defaultLocaleWeekdaysShort.indexOf(weekdayStr),
weekdayActual = new Date(parsedInput[0], parsedInput[1], parsedInput[2]).getDay();
if (weekdayProvided !== weekdayActual) {
getParsingFlags(config).weekdayMismatch = true;
config._isValid = false;
return false;
}
}
return true;
}
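    // Obsolete RFC 2822 zone names mapped to their UTC offsets in minutes,
    // e.g. EST is UTC-5, i.e. -300 minutes.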
var obsOffsets = {
UT: 0,
GMT: 0,
EDT: -4 * 60,
EST: -5 * 60,
CDT: -5 * 60,
CST: -6 * 60,
MDT: -6 * 60,
MST: -7 * 60,
PDT: -7 * 60,
PST: -8 * 60
};
function calculateOffset(obsOffset, militaryOffset, numOffset) {
if (obsOffset) {
return obsOffsets[obsOffset];
} else if (militaryOffset) {
// the only allowed military tz is Z
return 0;
} else {
var hm = parseInt(numOffset, 10);
var m = hm % 100, h = (hm - m) / 100;
return h * 60 + m;
}
}
    // date and time from RFC 2822 format
function configFromRFC2822(config) {
var match = rfc2822.exec(preprocessRFC2822(config._i));
if (match) {
var parsedArray = extractFromRFC2822Strings(match[4], match[3], match[2], match[5], match[6], match[7]);
if (!checkWeekday(match[1], parsedArray, config)) {
return;
}
config._a = parsedArray;
config._tzm = calculateOffset(match[8], match[9], match[10]);
config._d = createUTCDate.apply(null, config._a);
config._d.setUTCMinutes(config._d.getUTCMinutes() - config._tzm);
getParsingFlags(config).rfc2822 = true;
} else {
config._isValid = false;
}
}
// date from iso format or fallback
function configFromString(config) {
var matched = aspNetJsonRegex.exec(config._i);
if (matched !== null) {
config._d = new Date(+matched[1]);
return;
}
configFromISO(config);
if (config._isValid === false) {
delete config._isValid;
} else {
return;
}
configFromRFC2822(config);
if (config._isValid === false) {
delete config._isValid;
} else {
return;
}
// Final attempt, use Input Fallback
hooks.createFromInputFallback(config);
}
hooks.createFromInputFallback = deprecate(
'value provided is not in a recognized RFC2822 or ISO format. moment construction falls back to js Date(), ' +
'which is not reliable across all browsers and versions. Non RFC2822/ISO date formats are ' +
'discouraged and will be removed in an upcoming major release. Please refer to ' +
'http://momentjs.com/guides/#/warnings/js-date/ for more info.',
function (config) {
config._d = new Date(config._i + (config._useUTC ? ' UTC' : ''));
}
);
// constant that refers to the ISO standard
hooks.ISO_8601 = function () {};
// constant that refers to the RFC 2822 form
hooks.RFC_2822 = function () {};
// date from string and format string
function configFromStringAndFormat(config) {
// TODO: Move this to another part of the creation flow to prevent circular deps
if (config._f === hooks.ISO_8601) {
configFromISO(config);
return;
}
if (config._f === hooks.RFC_2822) {
configFromRFC2822(config);
return;
}
config._a = [];
getParsingFlags(config).empty = true;
// This array is used to make a Date, either with `new Date` or `Date.UTC`
var string = '' + config._i,
i, parsedInput, tokens, token, skipped,
stringLength = string.length,
totalParsedInputLength = 0;
tokens = expandFormat(config._f, config._locale).match(formattingTokens) || [];
for (i = 0; i < tokens.length; i++) {
token = tokens[i];
parsedInput = (string.match(getParseRegexForToken(token, config)) || [])[0];
// console.log('token', token, 'parsedInput', parsedInput,
// 'regex', getParseRegexForToken(token, config));
if (parsedInput) {
skipped = string.substr(0, string.indexOf(parsedInput));
if (skipped.length > 0) {
getParsingFlags(config).unusedInput.push(skipped);
}
string = string.slice(string.indexOf(parsedInput) + parsedInput.length);
totalParsedInputLength += parsedInput.length;
}
// don't parse if it's not a known token
if (formatTokenFunctions[token]) {
if (parsedInput) {
getParsingFlags(config).empty = false;
}
else {
getParsingFlags(config).unusedTokens.push(token);
}
addTimeToArrayFromToken(token, parsedInput, config);
}
else if (config._strict && !parsedInput) {
getParsingFlags(config).unusedTokens.push(token);
}
}
// add remaining unparsed input length to the string
getParsingFlags(config).charsLeftOver = stringLength - totalParsedInputLength;
if (string.length > 0) {
getParsingFlags(config).unusedInput.push(string);
}
        // clear the bigHour (12-hour) flag if the parsed hour is within 1..12
if (config._a[HOUR] <= 12 &&
getParsingFlags(config).bigHour === true &&
config._a[HOUR] > 0) {
getParsingFlags(config).bigHour = undefined;
}
getParsingFlags(config).parsedDateParts = config._a.slice(0);
getParsingFlags(config).meridiem = config._meridiem;
// handle meridiem
config._a[HOUR] = meridiemFixWrap(config._locale, config._a[HOUR], config._meridiem);
configFromArray(config);
checkOverflow(config);
}
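    // meridiemFixWrap maps a parsed 12-hour value to 24-hour time using the
    // locale's meridiem, e.g. hour 12 with 'AM' becomes 0 and hour 7 with 'PM'
    // becomes 19; locales providing meridiemHour handle the conversion themselves.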
function meridiemFixWrap (locale, hour, meridiem) {
var isPm;
if (meridiem == null) {
// nothing to do
return hour;
}
if (locale.meridiemHour != null) {
return locale.meridiemHour(hour, meridiem);
} else if (locale.isPM != null) {
// Fallback
isPm = locale.isPM(meridiem);
if (isPm && hour < 12) {
hour += 12;
}
if (!isPm && hour === 12) {
hour = 0;
}
return hour;
} else {
// this is not supposed to happen
return hour;
}
}
// date from string and array of format strings
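    // Each candidate format is scored: every leftover input character adds 1 and
    // every unused format token adds 10; the format with the lowest score wins,
    // with ties keeping the earlier format in the array.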
function configFromStringAndArray(config) {
var tempConfig,
bestMoment,
scoreToBeat,
i,
currentScore;
if (config._f.length === 0) {
getParsingFlags(config).invalidFormat = true;
config._d = new Date(NaN);
return;
}
for (i = 0; i < config._f.length; i++) {
currentScore = 0;
tempConfig = copyConfig({}, config);
if (config._useUTC != null) {
tempConfig._useUTC = config._useUTC;
}
tempConfig._f = config._f[i];
configFromStringAndFormat(tempConfig);
if (!isValid(tempConfig)) {
continue;
}
// if there is any input that was not parsed add a penalty for that format
currentScore += getParsingFlags(tempConfig).charsLeftOver;
//or tokens
currentScore += getParsingFlags(tempConfig).unusedTokens.length * 10;
getParsingFlags(tempConfig).score = currentScore;
if (scoreToBeat == null || currentScore < scoreToBeat) {
scoreToBeat = currentScore;
bestMoment = tempConfig;
}
}
extend(config, bestMoment || tempConfig);
}
function configFromObject(config) {
if (config._d) {
return;
}
var i = normalizeObjectUnits(config._i);
config._a = map([i.year, i.month, i.day || i.date, i.hour, i.minute, i.second, i.millisecond], function (obj) {
return obj && parseInt(obj, 10);
});
configFromArray(config);
}
function createFromConfig (config) {
var res = new Moment(checkOverflow(prepareConfig(config)));
if (res._nextDay) {
// Adding is smart enough around DST
res.add(1, 'd');
res._nextDay = undefined;
}
return res;
}
function prepareConfig (config) {
var input = config._i,
format = config._f;
config._locale = config._locale || getLocale(config._l);
if (input === null || (format === undefined && input === '')) {
return createInvalid({nullInput: true});
}
if (typeof input === 'string') {
config._i = input = config._locale.preparse(input);
}
if (isMoment(input)) {
return new Moment(checkOverflow(input));
} else if (isDate(input)) {
config._d = input;
} else if (isArray(format)) {
configFromStringAndArray(config);
} else if (format) {
configFromStringAndFormat(config);
} else {
configFromInput(config);
}
if (!isValid(config)) {
config._d = null;
}
return config;
}
function configFromInput(config) {
var input = config._i;
if (isUndefined(input)) {
config._d = new Date(hooks.now());
} else if (isDate(input)) {
config._d = new Date(input.valueOf());
} else if (typeof input === 'string') {
configFromString(config);
} else if (isArray(input)) {
config._a = map(input.slice(0), function (obj) {
return parseInt(obj, 10);
});
configFromArray(config);
} else if (isObject(input)) {
configFromObject(config);
} else if (isNumber(input)) {
// from milliseconds
config._d = new Date(input);
} else {
hooks.createFromInputFallback(config);
}
}
function createLocalOrUTC (input, format, locale, strict, isUTC) {
var c = {};
if (locale === true || locale === false) {
strict = locale;
locale = undefined;
}
if ((isObject(input) && isObjectEmpty(input)) ||
(isArray(input) && input.length === 0)) {
input = undefined;
}
// object construction must be done this way.
// https://github.com/moment/moment/issues/1423
c._isAMomentObject = true;
c._useUTC = c._isUTC = isUTC;
c._l = locale;
c._i = input;
c._f = format;
c._strict = strict;
return createFromConfig(c);
}
function createLocal (input, format, locale, strict) {
return createLocalOrUTC(input, format, locale, strict, false);
}
var prototypeMin = deprecate(
'moment().min is deprecated, use moment.max instead. http://momentjs.com/guides/#/warnings/min-max/',
function () {
var other = createLocal.apply(null, arguments);
if (this.isValid() && other.isValid()) {
return other < this ? this : other;
} else {
return createInvalid();
}
}
);
var prototypeMax = deprecate(
'moment().max is deprecated, use moment.min instead. http://momentjs.com/guides/#/warnings/min-max/',
function () {
var other = createLocal.apply(null, arguments);
if (this.isValid() && other.isValid()) {
return other > this ? this : other;
} else {
return createInvalid();
}
}
);
// Pick a moment m from moments so that m[fn](other) is true for all
// other. This relies on the function fn to be transitive.
//
// moments should either be an array of moment objects or an array, whose
// first element is an array of moment objects.
function pickBy(fn, moments) {
var res, i;
if (moments.length === 1 && isArray(moments[0])) {
moments = moments[0];
}
if (!moments.length) {
return createLocal();
}
res = moments[0];
for (i = 1; i < moments.length; ++i) {
if (!moments[i].isValid() || moments[i][fn](res)) {
res = moments[i];
}
}
return res;
}
// TODO: Use [].sort instead?
function min () {
var args = [].slice.call(arguments, 0);
return pickBy('isBefore', args);
}
function max () {
var args = [].slice.call(arguments, 0);
return pickBy('isAfter', args);
}
var now = function () {
return Date.now ? Date.now() : +(new Date());
};
var ordering = ['year', 'quarter', 'month', 'week', 'day', 'hour', 'minute', 'second', 'millisecond'];
function isDurationValid(m) {
for (var key in m) {
if (!(indexOf.call(ordering, key) !== -1 && (m[key] == null || !isNaN(m[key])))) {
return false;
}
}
var unitHasDecimal = false;
for (var i = 0; i < ordering.length; ++i) {
if (m[ordering[i]]) {
if (unitHasDecimal) {
return false; // only allow non-integers for smallest unit
}
if (parseFloat(m[ordering[i]]) !== toInt(m[ordering[i]])) {
unitHasDecimal = true;
}
}
}
return true;
}
function isValid$1() {
return this._isValid;
}
function createInvalid$1() {
return createDuration(NaN);
}
function Duration (duration) {
var normalizedInput = normalizeObjectUnits(duration),
years = normalizedInput.year || 0,
quarters = normalizedInput.quarter || 0,
months = normalizedInput.month || 0,
weeks = normalizedInput.week || 0,
days = normalizedInput.day || 0,
hours = normalizedInput.hour || 0,
minutes = normalizedInput.minute || 0,
seconds = normalizedInput.second || 0,
milliseconds = normalizedInput.millisecond || 0;
this._isValid = isDurationValid(normalizedInput);
// representation for dateAddRemove
this._milliseconds = +milliseconds +
seconds * 1e3 + // 1000
minutes * 6e4 + // 1000 * 60
hours * 1000 * 60 * 60; //using 1000 * 60 * 60 instead of 36e5 to avoid floating point rounding errors https://github.com/moment/moment/issues/2978
        // Because dateAddRemove treats 24 hours as different from a
        // day when working around DST, we need to store them separately
this._days = +days +
weeks * 7;
// It is impossible to translate months into days without knowing
        // which months you are talking about, so we have to store
// it separately.
this._months = +months +
quarters * 3 +
years * 12;
this._data = {};
this._locale = getLocale();
this._bubble();
}
function isDuration (obj) {
return obj instanceof Duration;
}
function absRound (number) {
if (number < 0) {
return Math.round(-1 * number) * -1;
} else {
return Math.round(number);
}
}
// FORMATTING
function offset (token, separator) {
addFormatToken(token, 0, 0, function () {
var offset = this.utcOffset();
var sign = '+';
if (offset < 0) {
offset = -offset;
sign = '-';
}
return sign + zeroFill(~~(offset / 60), 2) + separator + zeroFill(~~(offset) % 60, 2);
});
}
offset('Z', ':');
offset('ZZ', '');
// PARSING
addRegexToken('Z', matchShortOffset);
addRegexToken('ZZ', matchShortOffset);
addParseToken(['Z', 'ZZ'], function (input, array, config) {
config._useUTC = true;
config._tzm = offsetFromString(matchShortOffset, input);
});
// HELPERS
// timezone chunker
// '+10:00' > ['10', '00']
// '-1530' > ['-15', '30']
var chunkOffset = /([\+\-]|\d\d)/gi;
function offsetFromString(matcher, string) {
var matches = (string || '').match(matcher);
if (matches === null) {
return null;
}
var chunk = matches[matches.length - 1] || [];
var parts = (chunk + '').match(chunkOffset) || ['-', 0, 0];
var minutes = +(parts[1] * 60) + toInt(parts[2]);
return minutes === 0 ?
0 :
parts[0] === '+' ? minutes : -minutes;
}
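// For example, with the matchShortOffset regex defined earlier in this bundle:
//   offsetFromString(matchShortOffset, '+05:30') // -> 330 (minutes)
//   offsetFromString(matchShortOffset, '-0130')  // -> -90
//   offsetFromString(matchShortOffset, 'bogus')  // -> null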
// Return a moment from input, that is local/utc/zone equivalent to model.
function cloneWithOffset(input, model) {
var res, diff;
if (model._isUTC) {
res = model.clone();
diff = (isMoment(input) || isDate(input) ? input.valueOf() : createLocal(input).valueOf()) - res.valueOf();
// Use low-level api, because this fn is low-level api.
res._d.setTime(res._d.valueOf() + diff);
hooks.updateOffset(res, false);
return res;
} else {
return createLocal(input).local();
}
}
function getDateOffset (m) {
    // On Firefox 24, Date#getTimezoneOffset returns a floating point.
// https://github.com/moment/moment/pull/1871
return -Math.round(m._d.getTimezoneOffset() / 15) * 15;
}
// HOOKS
// This function will be called whenever a moment is mutated.
// It is intended to keep the offset in sync with the timezone.
hooks.updateOffset = function () {};
// MOMENTS
// keepLocalTime = true means only change the timezone, without
// affecting the local hour. So 5:31:26 +0300 --[utcOffset(2, true)]-->
// 5:31:26 +0200. It is possible that 5:31:26 doesn't exist with offset
// +0200, so we adjust the time as needed, to be valid.
//
// Keeping the local time actually adds/subtracts (one hour)
// from the instant actually represented. That is why we call updateOffset
// a second time. In case it wants us to change the offset again
// (the _changeInProgress == true case), we have to adjust, because
// there is no such time in the given timezone.
function getSetOffset (input, keepLocalTime, keepMinutes) {
var offset = this._offset || 0,
localAdjust;
if (!this.isValid()) {
return input != null ? this : NaN;
}
if (input != null) {
if (typeof input === 'string') {
input = offsetFromString(matchShortOffset, input);
if (input === null) {
return this;
}
} else if (Math.abs(input) < 16 && !keepMinutes) {
input = input * 60;
}
if (!this._isUTC && keepLocalTime) {
localAdjust = getDateOffset(this);
}
this._offset = input;
this._isUTC = true;
if (localAdjust != null) {
this.add(localAdjust, 'm');
}
if (offset !== input) {
if (!keepLocalTime || this._changeInProgress) {
addSubtract(this, createDuration(input - offset, 'm'), 1, false);
} else if (!this._changeInProgress) {
this._changeInProgress = true;
hooks.updateOffset(this, true);
this._changeInProgress = null;
}
}
return this;
} else {
return this._isUTC ? offset : getDateOffset(this);
}
}
function getSetZone (input, keepLocalTime) {
if (input != null) {
if (typeof input !== 'string') {
input = -input;
}
this.utcOffset(input, keepLocalTime);
return this;
} else {
return -this.utcOffset();
}
}
function setOffsetToUTC (keepLocalTime) {
return this.utcOffset(0, keepLocalTime);
}
function setOffsetToLocal (keepLocalTime) {
if (this._isUTC) {
this.utcOffset(0, keepLocalTime);
this._isUTC = false;
if (keepLocalTime) {
this.subtract(getDateOffset(this), 'm');
}
}
return this;
}
function setOffsetToParsedOffset () {
if (this._tzm != null) {
this.utcOffset(this._tzm, false, true);
} else if (typeof this._i === 'string') {
var tZone = offsetFromString(matchOffset, this._i);
if (tZone != null) {
this.utcOffset(tZone);
}
else {
this.utcOffset(0, true);
}
}
return this;
}
function hasAlignedHourOffset (input) {
if (!this.isValid()) {
return false;
}
input = input ? createLocal(input).utcOffset() : 0;
return (this.utcOffset() - input) % 60 === 0;
}
function isDaylightSavingTime () {
return (
this.utcOffset() > this.clone().month(0).utcOffset() ||
this.utcOffset() > this.clone().month(5).utcOffset()
);
}
function isDaylightSavingTimeShifted () {
if (!isUndefined(this._isDSTShifted)) {
return this._isDSTShifted;
}
var c = {};
copyConfig(c, this);
c = prepareConfig(c);
if (c._a) {
var other = c._isUTC ? createUTC(c._a) : createLocal(c._a);
this._isDSTShifted = this.isValid() &&
compareArrays(c._a, other.toArray()) > 0;
} else {
this._isDSTShifted = false;
}
return this._isDSTShifted;
}
function isLocal () {
return this.isValid() ? !this._isUTC : false;
}
function isUtcOffset () {
return this.isValid() ? this._isUTC : false;
}
function isUtc () {
return this.isValid() ? this._isUTC && this._offset === 0 : false;
}
// ASP.NET json date format regex
var aspNetRegex = /^(\-|\+)?(?:(\d*)[. ])?(\d+)\:(\d+)(?:\:(\d+)(\.\d*)?)?$/;
// from http://docs.closure-library.googlecode.com/git/closure_goog_date_date.js.source.html
// somewhat more in line with 4.4.3.2 2004 spec, but allows decimal anywhere
// and further modified to allow for strings containing both week and day
var isoRegex = /^(-|\+)?P(?:([-+]?[0-9,.]*)Y)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)W)?(?:([-+]?[0-9,.]*)D)?(?:T(?:([-+]?[0-9,.]*)H)?(?:([-+]?[0-9,.]*)M)?(?:([-+]?[0-9,.]*)S)?)?$/;
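// Illustrative inputs for the two formats above:
//   aspNetRegex matches strings like '7.23:59:59.999' (days.hours:minutes:seconds.fraction) and '-1:30'
//   isoRegex matches ISO 8601 durations such as 'P1Y2M3DT4H5M6S', 'PT30M' or 'P2W'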
function createDuration (input, key) {
var duration = input,
// matching against regexp is expensive, do it on demand
match = null,
sign,
ret,
diffRes;
if (isDuration(input)) {
duration = {
ms : input._milliseconds,
d : input._days,
M : input._months
};
} else if (isNumber(input)) {
duration = {};
if (key) {
duration[key] = input;
} else {
duration.milliseconds = input;
}
} else if (!!(match = aspNetRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : 1;
duration = {
y : 0,
d : toInt(match[DATE]) * sign,
h : toInt(match[HOUR]) * sign,
m : toInt(match[MINUTE]) * sign,
s : toInt(match[SECOND]) * sign,
ms : toInt(absRound(match[MILLISECOND] * 1000)) * sign // the millisecond decimal point is included in the match
};
} else if (!!(match = isoRegex.exec(input))) {
sign = (match[1] === '-') ? -1 : (match[1] === '+') ? 1 : 1;
duration = {
y : parseIso(match[2], sign),
M : parseIso(match[3], sign),
w : parseIso(match[4], sign),
d : parseIso(match[5], sign),
h : parseIso(match[6], sign),
m : parseIso(match[7], sign),
s : parseIso(match[8], sign)
};
} else if (duration == null) {// checks for null or undefined
duration = {};
} else if (typeof duration === 'object' && ('from' in duration || 'to' in duration)) {
diffRes = momentsDifference(createLocal(duration.from), createLocal(duration.to));
duration = {};
duration.ms = diffRes.milliseconds;
duration.M = diffRes.months;
}
ret = new Duration(duration);
if (isDuration(input) && hasOwnProp(input, '_locale')) {
ret._locale = input._locale;
}
return ret;
}
createDuration.fn = Duration.prototype;
createDuration.invalid = createInvalid$1;
function parseIso (inp, sign) {
// We'd normally use ~~inp for this, but unfortunately it also
// converts floats to ints.
// inp may be undefined, so careful calling replace on it.
var res = inp && parseFloat(inp.replace(',', '.'));
// apply sign while we're at it
return (isNaN(res) ? 0 : res) * sign;
}
function positiveMomentsDifference(base, other) {
var res = {milliseconds: 0, months: 0};
res.months = other.month() - base.month() +
(other.year() - base.year()) * 12;
if (base.clone().add(res.months, 'M').isAfter(other)) {
--res.months;
}
res.milliseconds = +other - +(base.clone().add(res.months, 'M'));
return res;
}
function momentsDifference(base, other) {
var res;
if (!(base.isValid() && other.isValid())) {
return {milliseconds: 0, months: 0};
}
other = cloneWithOffset(other, base);
if (base.isBefore(other)) {
res = positiveMomentsDifference(base, other);
} else {
res = positiveMomentsDifference(other, base);
res.milliseconds = -res.milliseconds;
res.months = -res.months;
}
return res;
}
// TODO: remove 'name' arg after deprecation is removed
function createAdder(direction, name) {
return function (val, period) {
var dur, tmp;
//invert the arguments, but complain about it
if (period !== null && !isNaN(+period)) {
deprecateSimple(name, 'moment().' + name + '(period, number) is deprecated. Please use moment().' + name + '(number, period). ' +
'See http://momentjs.com/guides/#/warnings/add-inverted-param/ for more info.');
tmp = val; val = period; period = tmp;
}
val = typeof val === 'string' ? +val : val;
dur = createDuration(val, period);
addSubtract(this, dur, direction);
return this;
};
}
function addSubtract (mom, duration, isAdding, updateOffset) {
var milliseconds = duration._milliseconds,
days = absRound(duration._days),
months = absRound(duration._months);
if (!mom.isValid()) {
// No op
return;
}
updateOffset = updateOffset == null ? true : updateOffset;
if (months) {
setMonth(mom, get(mom, 'Month') + months * isAdding);
}
if (days) {
set$1(mom, 'Date', get(mom, 'Date') + days * isAdding);
}
if (milliseconds) {
mom._d.setTime(mom._d.valueOf() + milliseconds * isAdding);
}
if (updateOffset) {
hooks.updateOffset(mom, days || months);
}
}
var add = createAdder(1, 'add');
var subtract = createAdder(-1, 'subtract');
function getCalendarFormat(myMoment, now) {
var diff = myMoment.diff(now, 'days', true);
return diff < -6 ? 'sameElse' :
diff < -1 ? 'lastWeek' :
diff < 0 ? 'lastDay' :
diff < 1 ? 'sameDay' :
diff < 2 ? 'nextDay' :
diff < 7 ? 'nextWeek' : 'sameElse';
}
function calendar$1 (time, formats) {
// We want to compare the start of today, vs this.
// Getting start-of-today depends on whether we're local/utc/offset or not.
var now = time || createLocal(),
sod = cloneWithOffset(now, this).startOf('day'),
format = hooks.calendarFormat(this, sod) || 'sameElse';
var output = formats && (isFunction(formats[format]) ? formats[format].call(this, now) : formats[format]);
return this.format(output || this.localeData().calendar(format, this, createLocal(now)));
}
function clone () {
return new Moment(this);
}
function isAfter (input, units) {
var localInput = isMoment(input) ? input : createLocal(input);
if (!(this.isValid() && localInput.isValid())) {
return false;
}
units = normalizeUnits(!isUndefined(units) ? units : 'millisecond');
if (units === 'millisecond') {
return this.valueOf() > localInput.valueOf();
} else {
return localInput.valueOf() < this.clone().startOf(units).valueOf();
}
}
function isBefore (input, units) {
var localInput = isMoment(input) ? input : createLocal(input);
if (!(this.isValid() && localInput.isValid())) {
return false;
}
units = normalizeUnits(!isUndefined(units) ? units : 'millisecond');
if (units === 'millisecond') {
return this.valueOf() < localInput.valueOf();
} else {
return this.clone().endOf(units).valueOf() < localInput.valueOf();
}
}
function isBetween (from, to, units, inclusivity) {
inclusivity = inclusivity || '()';
return (inclusivity[0] === '(' ? this.isAfter(from, units) : !this.isBefore(from, units)) &&
(inclusivity[1] === ')' ? this.isBefore(to, units) : !this.isAfter(to, units));
}
function isSame (input, units) {
var localInput = isMoment(input) ? input : createLocal(input),
inputMs;
if (!(this.isValid() && localInput.isValid())) {
return false;
}
units = normalizeUnits(units || 'millisecond');
if (units === 'millisecond') {
return this.valueOf() === localInput.valueOf();
} else {
inputMs = localInput.valueOf();
return this.clone().startOf(units).valueOf() <= inputMs && inputMs <= this.clone().endOf(units).valueOf();
}
}
function isSameOrAfter (input, units) {
return this.isSame(input, units) || this.isAfter(input,units);
}
function isSameOrBefore (input, units) {
return this.isSame(input, units) || this.isBefore(input,units);
}
function diff (input, units, asFloat) {
var that,
zoneDelta,
output;
if (!this.isValid()) {
return NaN;
}
that = cloneWithOffset(input, this);
if (!that.isValid()) {
return NaN;
}
zoneDelta = (that.utcOffset() - this.utcOffset()) * 6e4;
units = normalizeUnits(units);
switch (units) {
case 'year': output = monthDiff(this, that) / 12; break;
case 'month': output = monthDiff(this, that); break;
case 'quarter': output = monthDiff(this, that) / 3; break;
case 'second': output = (this - that) / 1e3; break; // 1000
case 'minute': output = (this - that) / 6e4; break; // 1000 * 60
case 'hour': output = (this - that) / 36e5; break; // 1000 * 60 * 60
case 'day': output = (this - that - zoneDelta) / 864e5; break; // 1000 * 60 * 60 * 24, negate dst
case 'week': output = (this - that - zoneDelta) / 6048e5; break; // 1000 * 60 * 60 * 24 * 7, negate dst
default: output = this - that;
}
return asFloat ? output : absFloor(output);
}
function monthDiff (a, b) {
// difference in months
var wholeMonthDiff = ((b.year() - a.year()) * 12) + (b.month() - a.month()),
// b is in (anchor - 1 month, anchor + 1 month)
anchor = a.clone().add(wholeMonthDiff, 'months'),
anchor2, adjust;
if (b - anchor < 0) {
anchor2 = a.clone().add(wholeMonthDiff - 1, 'months');
// linear across the month
adjust = (b - anchor) / (anchor - anchor2);
} else {
anchor2 = a.clone().add(wholeMonthDiff + 1, 'months');
// linear across the month
adjust = (b - anchor) / (anchor2 - anchor);
}
    // check for negative zero; return zero if negative zero
return -(wholeMonthDiff + adjust) || 0;
}
hooks.defaultFormat = 'YYYY-MM-DDTHH:mm:ssZ';
hooks.defaultFormatUtc = 'YYYY-MM-DDTHH:mm:ss[Z]';
function toString () {
return this.clone().locale('en').format('ddd MMM DD YYYY HH:mm:ss [GMT]ZZ');
}
function toISOString(keepOffset) {
if (!this.isValid()) {
return null;
}
var utc = keepOffset !== true;
var m = utc ? this.clone().utc() : this;
if (m.year() < 0 || m.year() > 9999) {
return formatMoment(m, utc ? 'YYYYYY-MM-DD[T]HH:mm:ss.SSS[Z]' : 'YYYYYY-MM-DD[T]HH:mm:ss.SSSZ');
}
if (isFunction(Date.prototype.toISOString)) {
// native implementation is ~50x faster, use it when we can
if (utc) {
return this.toDate().toISOString();
} else {
return new Date(this.valueOf() + this.utcOffset() * 60 * 1000).toISOString().replace('Z', formatMoment(m, 'Z'));
}
}
return formatMoment(m, utc ? 'YYYY-MM-DD[T]HH:mm:ss.SSS[Z]' : 'YYYY-MM-DD[T]HH:mm:ss.SSSZ');
}
/**
* Return a human readable representation of a moment that can
* also be evaluated to get a new moment which is the same
*
* @link https://nodejs.org/dist/latest/docs/api/util.html#util_custom_inspect_function_on_objects
*/
function inspect () {
if (!this.isValid()) {
return 'moment.invalid(/* ' + this._i + ' */)';
}
var func = 'moment';
var zone = '';
if (!this.isLocal()) {
func = this.utcOffset() === 0 ? 'moment.utc' : 'moment.parseZone';
zone = 'Z';
}
var prefix = '[' + func + '("]';
var year = (0 <= this.year() && this.year() <= 9999) ? 'YYYY' : 'YYYYYY';
var datetime = '-MM-DD[T]HH:mm:ss.SSS';
var suffix = zone + '[")]';
return this.format(prefix + year + datetime + suffix);
}
function format (inputString) {
if (!inputString) {
inputString = this.isUtc() ? hooks.defaultFormatUtc : hooks.defaultFormat;
}
var output = formatMoment(this, inputString);
return this.localeData().postformat(output);
}
function from (time, withoutSuffix) {
if (this.isValid() &&
((isMoment(time) && time.isValid()) ||
createLocal(time).isValid())) {
return createDuration({to: this, from: time}).locale(this.locale()).humanize(!withoutSuffix);
} else {
return this.localeData().invalidDate();
}
}
function fromNow (withoutSuffix) {
return this.from(createLocal(), withoutSuffix);
}
function to (time, withoutSuffix) {
if (this.isValid() &&
((isMoment(time) && time.isValid()) ||
createLocal(time).isValid())) {
return createDuration({from: this, to: time}).locale(this.locale()).humanize(!withoutSuffix);
} else {
return this.localeData().invalidDate();
}
}
function toNow (withoutSuffix) {
return this.to(createLocal(), withoutSuffix);
}
// If passed a locale key, it will set the locale for this
// instance. Otherwise, it will return the locale configuration
// variables for this instance.
function locale (key) {
var newLocaleData;
if (key === undefined) {
return this._locale._abbr;
} else {
newLocaleData = getLocale(key);
if (newLocaleData != null) {
this._locale = newLocaleData;
}
return this;
}
}
var lang = deprecate(
'moment().lang() is deprecated. Instead, use moment().localeData() to get the language configuration. Use moment().locale() to change languages.',
function (key) {
if (key === undefined) {
return this.localeData();
} else {
return this.locale(key);
}
}
);
function localeData () {
return this._locale;
}
function startOf (units) {
units = normalizeUnits(units);
// the following switch intentionally omits break keywords
// to utilize falling through the cases.
switch (units) {
case 'year':
this.month(0);
/* falls through */
case 'quarter':
case 'month':
this.date(1);
/* falls through */
case 'week':
case 'isoWeek':
case 'day':
case 'date':
this.hours(0);
/* falls through */
case 'hour':
this.minutes(0);
/* falls through */
case 'minute':
this.seconds(0);
/* falls through */
case 'second':
this.milliseconds(0);
}
// weeks are a special case
if (units === 'week') {
this.weekday(0);
}
if (units === 'isoWeek') {
this.isoWeekday(1);
}
// quarters are also special
if (units === 'quarter') {
this.month(Math.floor(this.month() / 3) * 3);
}
return this;
}
function endOf (units) {
units = normalizeUnits(units);
if (units === undefined || units === 'millisecond') {
return this;
}
// 'date' is an alias for 'day', so it should be considered as such.
if (units === 'date') {
units = 'day';
}
return this.startOf(units).add(1, (units === 'isoWeek' ? 'week' : units)).subtract(1, 'ms');
}
function valueOf () {
return this._d.valueOf() - ((this._offset || 0) * 60000);
}
function unix () {
return Math.floor(this.valueOf() / 1000);
}
function toDate () {
return new Date(this.valueOf());
}
function toArray () {
var m = this;
return [m.year(), m.month(), m.date(), m.hour(), m.minute(), m.second(), m.millisecond()];
}
function toObject () {
var m = this;
return {
years: m.year(),
months: m.month(),
date: m.date(),
hours: m.hours(),
minutes: m.minutes(),
seconds: m.seconds(),
milliseconds: m.milliseconds()
};
}
function toJSON () {
// new Date(NaN).toJSON() === null
return this.isValid() ? this.toISOString() : null;
}
function isValid$2 () {
return isValid(this);
}
function parsingFlags () {
return extend({}, getParsingFlags(this));
}
function invalidAt () {
return getParsingFlags(this).overflow;
}
function creationData() {
return {
input: this._i,
format: this._f,
locale: this._locale,
isUTC: this._isUTC,
strict: this._strict
};
}
// FORMATTING
addFormatToken(0, ['gg', 2], 0, function () {
return this.weekYear() % 100;
});
addFormatToken(0, ['GG', 2], 0, function () {
return this.isoWeekYear() % 100;
});
function addWeekYearFormatToken (token, getter) {
addFormatToken(0, [token, token.length], 0, getter);
}
addWeekYearFormatToken('gggg', 'weekYear');
addWeekYearFormatToken('ggggg', 'weekYear');
addWeekYearFormatToken('GGGG', 'isoWeekYear');
addWeekYearFormatToken('GGGGG', 'isoWeekYear');
// ALIASES
addUnitAlias('weekYear', 'gg');
addUnitAlias('isoWeekYear', 'GG');
// PRIORITY
addUnitPriority('weekYear', 1);
addUnitPriority('isoWeekYear', 1);
// PARSING
addRegexToken('G', matchSigned);
addRegexToken('g', matchSigned);
addRegexToken('GG', match1to2, match2);
addRegexToken('gg', match1to2, match2);
addRegexToken('GGGG', match1to4, match4);
addRegexToken('gggg', match1to4, match4);
addRegexToken('GGGGG', match1to6, match6);
addRegexToken('ggggg', match1to6, match6);
addWeekParseToken(['gggg', 'ggggg', 'GGGG', 'GGGGG'], function (input, week, config, token) {
week[token.substr(0, 2)] = toInt(input);
});
addWeekParseToken(['gg', 'GG'], function (input, week, config, token) {
week[token] = hooks.parseTwoDigitYear(input);
});
// MOMENTS
function getSetWeekYear (input) {
return getSetWeekYearHelper.call(this,
input,
this.week(),
this.weekday(),
this.localeData()._week.dow,
this.localeData()._week.doy);
}
function getSetISOWeekYear (input) {
return getSetWeekYearHelper.call(this,
input, this.isoWeek(), this.isoWeekday(), 1, 4);
}
function getISOWeeksInYear () {
return weeksInYear(this.year(), 1, 4);
}
function getWeeksInYear () {
var weekInfo = this.localeData()._week;
return weeksInYear(this.year(), weekInfo.dow, weekInfo.doy);
}
function getSetWeekYearHelper(input, week, weekday, dow, doy) {
var weeksTarget;
if (input == null) {
return weekOfYear(this, dow, doy).year;
} else {
weeksTarget = weeksInYear(input, dow, doy);
if (week > weeksTarget) {
week = weeksTarget;
}
return setWeekAll.call(this, input, week, weekday, dow, doy);
}
}
function setWeekAll(weekYear, week, weekday, dow, doy) {
var dayOfYearData = dayOfYearFromWeeks(weekYear, week, weekday, dow, doy),
date = createUTCDate(dayOfYearData.year, 0, dayOfYearData.dayOfYear);
this.year(date.getUTCFullYear());
this.month(date.getUTCMonth());
this.date(date.getUTCDate());
return this;
}
// FORMATTING
addFormatToken('Q', 0, 'Qo', 'quarter');
// ALIASES
addUnitAlias('quarter', 'Q');
// PRIORITY
addUnitPriority('quarter', 7);
// PARSING
addRegexToken('Q', match1);
addParseToken('Q', function (input, array) {
array[MONTH] = (toInt(input) - 1) * 3;
});
// MOMENTS
function getSetQuarter (input) {
return input == null ? Math.ceil((this.month() + 1) / 3) : this.month((input - 1) * 3 + this.month() % 3);
}
// FORMATTING
addFormatToken('D', ['DD', 2], 'Do', 'date');
// ALIASES
addUnitAlias('date', 'D');
// PRIORITY
addUnitPriority('date', 9);
// PARSING
addRegexToken('D', match1to2);
addRegexToken('DD', match1to2, match2);
addRegexToken('Do', function (isStrict, locale) {
// TODO: Remove "ordinalParse" fallback in next major release.
return isStrict ?
(locale._dayOfMonthOrdinalParse || locale._ordinalParse) :
locale._dayOfMonthOrdinalParseLenient;
});
addParseToken(['D', 'DD'], DATE);
addParseToken('Do', function (input, array) {
array[DATE] = toInt(input.match(match1to2)[0]);
});
// MOMENTS
var getSetDayOfMonth = makeGetSet('Date', true);
// FORMATTING
addFormatToken('DDD', ['DDDD', 3], 'DDDo', 'dayOfYear');
// ALIASES
addUnitAlias('dayOfYear', 'DDD');
// PRIORITY
addUnitPriority('dayOfYear', 4);
// PARSING
addRegexToken('DDD', match1to3);
addRegexToken('DDDD', match3);
addParseToken(['DDD', 'DDDD'], function (input, array, config) {
config._dayOfYear = toInt(input);
});
// HELPERS
// MOMENTS
function getSetDayOfYear (input) {
var dayOfYear = Math.round((this.clone().startOf('day') - this.clone().startOf('year')) / 864e5) + 1;
return input == null ? dayOfYear : this.add((input - dayOfYear), 'd');
}
// FORMATTING
addFormatToken('m', ['mm', 2], 0, 'minute');
// ALIASES
addUnitAlias('minute', 'm');
// PRIORITY
addUnitPriority('minute', 14);
// PARSING
addRegexToken('m', match1to2);
addRegexToken('mm', match1to2, match2);
addParseToken(['m', 'mm'], MINUTE);
// MOMENTS
var getSetMinute = makeGetSet('Minutes', false);
// FORMATTING
addFormatToken('s', ['ss', 2], 0, 'second');
// ALIASES
addUnitAlias('second', 's');
// PRIORITY
addUnitPriority('second', 15);
// PARSING
addRegexToken('s', match1to2);
addRegexToken('ss', match1to2, match2);
addParseToken(['s', 'ss'], SECOND);
// MOMENTS
var getSetSecond = makeGetSet('Seconds', false);
// FORMATTING
addFormatToken('S', 0, 0, function () {
return ~~(this.millisecond() / 100);
});
addFormatToken(0, ['SS', 2], 0, function () {
return ~~(this.millisecond() / 10);
});
addFormatToken(0, ['SSS', 3], 0, 'millisecond');
addFormatToken(0, ['SSSS', 4], 0, function () {
return this.millisecond() * 10;
});
addFormatToken(0, ['SSSSS', 5], 0, function () {
return this.millisecond() * 100;
});
addFormatToken(0, ['SSSSSS', 6], 0, function () {
return this.millisecond() * 1000;
});
addFormatToken(0, ['SSSSSSS', 7], 0, function () {
return this.millisecond() * 10000;
});
addFormatToken(0, ['SSSSSSSS', 8], 0, function () {
return this.millisecond() * 100000;
});
addFormatToken(0, ['SSSSSSSSS', 9], 0, function () {
return this.millisecond() * 1000000;
});
// ALIASES
addUnitAlias('millisecond', 'ms');
// PRIORITY
addUnitPriority('millisecond', 16);
// PARSING
addRegexToken('S', match1to3, match1);
addRegexToken('SS', match1to3, match2);
addRegexToken('SSS', match1to3, match3);
var token;
for (token = 'SSSS'; token.length <= 9; token += 'S') {
addRegexToken(token, matchUnsigned);
}
function parseMs(input, array) {
array[MILLISECOND] = toInt(('0.' + input) * 1000);
}
for (token = 'S'; token.length <= 9; token += 'S') {
addParseToken(token, parseMs);
}
// MOMENTS
var getSetMillisecond = makeGetSet('Milliseconds', false);
// FORMATTING
addFormatToken('z', 0, 0, 'zoneAbbr');
addFormatToken('zz', 0, 0, 'zoneName');
// MOMENTS
function getZoneAbbr () {
return this._isUTC ? 'UTC' : '';
}
function getZoneName () {
return this._isUTC ? 'Coordinated Universal Time' : '';
}
var proto = Moment.prototype;
proto.add = add;
proto.calendar = calendar$1;
proto.clone = clone;
proto.diff = diff;
proto.endOf = endOf;
proto.format = format;
proto.from = from;
proto.fromNow = fromNow;
proto.to = to;
proto.toNow = toNow;
proto.get = stringGet;
proto.invalidAt = invalidAt;
proto.isAfter = isAfter;
proto.isBefore = isBefore;
proto.isBetween = isBetween;
proto.isSame = isSame;
proto.isSameOrAfter = isSameOrAfter;
proto.isSameOrBefore = isSameOrBefore;
proto.isValid = isValid$2;
proto.lang = lang;
proto.locale = locale;
proto.localeData = localeData;
proto.max = prototypeMax;
proto.min = prototypeMin;
proto.parsingFlags = parsingFlags;
proto.set = stringSet;
proto.startOf = startOf;
proto.subtract = subtract;
proto.toArray = toArray;
proto.toObject = toObject;
proto.toDate = toDate;
proto.toISOString = toISOString;
proto.inspect = inspect;
proto.toJSON = toJSON;
proto.toString = toString;
proto.unix = unix;
proto.valueOf = valueOf;
proto.creationData = creationData;
proto.year = getSetYear;
proto.isLeapYear = getIsLeapYear;
proto.weekYear = getSetWeekYear;
proto.isoWeekYear = getSetISOWeekYear;
proto.quarter = proto.quarters = getSetQuarter;
proto.month = getSetMonth;
proto.daysInMonth = getDaysInMonth;
proto.week = proto.weeks = getSetWeek;
proto.isoWeek = proto.isoWeeks = getSetISOWeek;
proto.weeksInYear = getWeeksInYear;
proto.isoWeeksInYear = getISOWeeksInYear;
proto.date = getSetDayOfMonth;
proto.day = proto.days = getSetDayOfWeek;
proto.weekday = getSetLocaleDayOfWeek;
proto.isoWeekday = getSetISODayOfWeek;
proto.dayOfYear = getSetDayOfYear;
proto.hour = proto.hours = getSetHour;
proto.minute = proto.minutes = getSetMinute;
proto.second = proto.seconds = getSetSecond;
proto.millisecond = proto.milliseconds = getSetMillisecond;
proto.utcOffset = getSetOffset;
proto.utc = setOffsetToUTC;
proto.local = setOffsetToLocal;
proto.parseZone = setOffsetToParsedOffset;
proto.hasAlignedHourOffset = hasAlignedHourOffset;
proto.isDST = isDaylightSavingTime;
proto.isLocal = isLocal;
proto.isUtcOffset = isUtcOffset;
proto.isUtc = isUtc;
proto.isUTC = isUtc;
proto.zoneAbbr = getZoneAbbr;
proto.zoneName = getZoneName;
proto.dates = deprecate('dates accessor is deprecated. Use date instead.', getSetDayOfMonth);
proto.months = deprecate('months accessor is deprecated. Use month instead', getSetMonth);
proto.years = deprecate('years accessor is deprecated. Use year instead', getSetYear);
proto.zone = deprecate('moment().zone is deprecated, use moment().utcOffset instead. http://momentjs.com/guides/#/warnings/zone/', getSetZone);
proto.isDSTShifted = deprecate('isDSTShifted is deprecated. See http://momentjs.com/guides/#/warnings/dst-shifted/ for more information', isDaylightSavingTimeShifted);
function createUnix (input) {
return createLocal(input * 1000);
}
function createInZone () {
return createLocal.apply(null, arguments).parseZone();
}
function preParsePostFormat (string) {
return string;
}
var proto$1 = Locale.prototype;
proto$1.calendar = calendar;
proto$1.longDateFormat = longDateFormat;
proto$1.invalidDate = invalidDate;
proto$1.ordinal = ordinal;
proto$1.preparse = preParsePostFormat;
proto$1.postformat = preParsePostFormat;
proto$1.relativeTime = relativeTime;
proto$1.pastFuture = pastFuture;
proto$1.set = set;
proto$1.months = localeMonths;
proto$1.monthsShort = localeMonthsShort;
proto$1.monthsParse = localeMonthsParse;
proto$1.monthsRegex = monthsRegex;
proto$1.monthsShortRegex = monthsShortRegex;
proto$1.week = localeWeek;
proto$1.firstDayOfYear = localeFirstDayOfYear;
proto$1.firstDayOfWeek = localeFirstDayOfWeek;
proto$1.weekdays = localeWeekdays;
proto$1.weekdaysMin = localeWeekdaysMin;
proto$1.weekdaysShort = localeWeekdaysShort;
proto$1.weekdaysParse = localeWeekdaysParse;
proto$1.weekdaysRegex = weekdaysRegex;
proto$1.weekdaysShortRegex = weekdaysShortRegex;
proto$1.weekdaysMinRegex = weekdaysMinRegex;
proto$1.isPM = localeIsPM;
proto$1.meridiem = localeMeridiem;
function get$1 (format, index, field, setter) {
var locale = getLocale();
var utc = createUTC().set(setter, index);
return locale[field](utc, format);
}
function listMonthsImpl (format, index, field) {
if (isNumber(format)) {
index = format;
format = undefined;
}
format = format || '';
if (index != null) {
return get$1(format, index, field, 'month');
}
var i;
var out = [];
for (i = 0; i < 12; i++) {
out[i] = get$1(format, i, field, 'month');
}
return out;
}
// ()
// (5)
// (fmt, 5)
// (fmt)
// (true)
// (true, 5)
// (true, fmt, 5)
// (true, fmt)
function listWeekdaysImpl (localeSorted, format, index, field) {
if (typeof localeSorted === 'boolean') {
if (isNumber(format)) {
index = format;
format = undefined;
}
format = format || '';
} else {
format = localeSorted;
index = format;
localeSorted = false;
if (isNumber(format)) {
index = format;
format = undefined;
}
format = format || '';
}
var locale = getLocale(),
shift = localeSorted ? locale._week.dow : 0;
if (index != null) {
return get$1(format, (index + shift) % 7, field, 'day');
}
var i;
var out = [];
for (i = 0; i < 7; i++) {
out[i] = get$1(format, (i + shift) % 7, field, 'day');
}
return out;
}
function listMonths (format, index) {
return listMonthsImpl(format, index, 'months');
}
function listMonthsShort (format, index) {
return listMonthsImpl(format, index, 'monthsShort');
}
function listWeekdays (localeSorted, format, index) {
return listWeekdaysImpl(localeSorted, format, index, 'weekdays');
}
function listWeekdaysShort (localeSorted, format, index) {
return listWeekdaysImpl(localeSorted, format, index, 'weekdaysShort');
}
function listWeekdaysMin (localeSorted, format, index) {
return listWeekdaysImpl(localeSorted, format, index, 'weekdaysMin');
}
getSetGlobalLocale('en', {
dayOfMonthOrdinalParse: /\d{1,2}(th|st|nd|rd)/,
ordinal : function (number) {
var b = number % 10,
output = (toInt(number % 100 / 10) === 1) ? 'th' :
(b === 1) ? 'st' :
(b === 2) ? 'nd' :
(b === 3) ? 'rd' : 'th';
return number + output;
}
});
// Side effect imports
hooks.lang = deprecate('moment.lang is deprecated. Use moment.locale instead.', getSetGlobalLocale);
hooks.langData = deprecate('moment.langData is deprecated. Use moment.localeData instead.', getLocale);
var mathAbs = Math.abs;
function abs () {
var data = this._data;
this._milliseconds = mathAbs(this._milliseconds);
this._days = mathAbs(this._days);
this._months = mathAbs(this._months);
data.milliseconds = mathAbs(data.milliseconds);
data.seconds = mathAbs(data.seconds);
data.minutes = mathAbs(data.minutes);
data.hours = mathAbs(data.hours);
data.months = mathAbs(data.months);
data.years = mathAbs(data.years);
return this;
}
function addSubtract$1 (duration, input, value, direction) {
var other = createDuration(input, value);
duration._milliseconds += direction * other._milliseconds;
duration._days += direction * other._days;
duration._months += direction * other._months;
return duration._bubble();
}
// supports only 2.0-style add(1, 's') or add(duration)
function add$1 (input, value) {
return addSubtract$1(this, input, value, 1);
}
// supports only 2.0-style subtract(1, 's') or subtract(duration)
function subtract$1 (input, value) {
return addSubtract$1(this, input, value, -1);
}
function absCeil (number) {
if (number < 0) {
return Math.floor(number);
} else {
return Math.ceil(number);
}
}
function bubble () {
var milliseconds = this._milliseconds;
var days = this._days;
var months = this._months;
var data = this._data;
var seconds, minutes, hours, years, monthsFromDays;
// if we have a mix of positive and negative values, bubble down first
// check: https://github.com/moment/moment/issues/2166
if (!((milliseconds >= 0 && days >= 0 && months >= 0) ||
(milliseconds <= 0 && days <= 0 && months <= 0))) {
milliseconds += absCeil(monthsToDays(months) + days) * 864e5;
days = 0;
months = 0;
}
// The following code bubbles up values, see the tests for
// examples of what that means.
data.milliseconds = milliseconds % 1000;
seconds = absFloor(milliseconds / 1000);
data.seconds = seconds % 60;
minutes = absFloor(seconds / 60);
data.minutes = minutes % 60;
hours = absFloor(minutes / 60);
data.hours = hours % 24;
days += absFloor(hours / 24);
// convert days to months
monthsFromDays = absFloor(daysToMonths(days));
months += monthsFromDays;
days -= absCeil(monthsToDays(monthsFromDays));
// 12 months -> 1 year
years = absFloor(months / 12);
months %= 12;
data.days = days;
data.months = months;
data.years = years;
return this;
}
function daysToMonths (days) {
// 400 years have 146097 days (taking into account leap year rules)
// 400 years have 12 months === 4800
return days * 4800 / 146097;
}
function monthsToDays (months) {
// the reverse of daysToMonths
return months * 146097 / 4800;
}
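// For example, daysToMonths(146097) === 4800 exactly (400 Gregorian years),
// and a 30-day span converts to 30 * 4800 / 146097, roughly 0.986 months.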
function as (units) {
if (!this.isValid()) {
return NaN;
}
var days;
var months;
var milliseconds = this._milliseconds;
units = normalizeUnits(units);
if (units === 'month' || units === 'year') {
days = this._days + milliseconds / 864e5;
months = this._months + daysToMonths(days);
return units === 'month' ? months : months / 12;
} else {
// handle milliseconds separately because of floating point math errors (issue #1867)
days = this._days + Math.round(monthsToDays(this._months));
switch (units) {
case 'week' : return days / 7 + milliseconds / 6048e5;
case 'day' : return days + milliseconds / 864e5;
case 'hour' : return days * 24 + milliseconds / 36e5;
case 'minute' : return days * 1440 + milliseconds / 6e4;
case 'second' : return days * 86400 + milliseconds / 1000;
// Math.floor prevents floating point math errors here
case 'millisecond': return Math.floor(days * 864e5) + milliseconds;
default: throw new Error('Unknown unit ' + units);
}
}
}
// TODO: Use this.as('ms')?
function valueOf$1 () {
if (!this.isValid()) {
return NaN;
}
return (
this._milliseconds +
this._days * 864e5 +
(this._months % 12) * 2592e6 +
toInt(this._months / 12) * 31536e6
);
}
function makeAs (alias) {
return function () {
return this.as(alias);
};
}
var asMilliseconds = makeAs('ms');
var asSeconds = makeAs('s');
var asMinutes = makeAs('m');
var asHours = makeAs('h');
var asDays = makeAs('d');
var asWeeks = makeAs('w');
var asMonths = makeAs('M');
var asYears = makeAs('y');
function clone$1 () {
return createDuration(this);
}
function get$2 (units) {
units = normalizeUnits(units);
return this.isValid() ? this[units + 's']() : NaN;
}
function makeGetter(name) {
return function () {
return this.isValid() ? this._data[name] : NaN;
};
}
var milliseconds = makeGetter('milliseconds');
var seconds = makeGetter('seconds');
var minutes = makeGetter('minutes');
var hours = makeGetter('hours');
var days = makeGetter('days');
var months = makeGetter('months');
var years = makeGetter('years');
function weeks () {
return absFloor(this.days() / 7);
}
var round = Math.round;
var thresholds = {
ss: 44, // a few seconds to seconds
s : 45, // seconds to minute
m : 45, // minutes to hour
h : 22, // hours to day
d : 26, // days to month
M : 11 // months to year
};
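// Rough effect of these defaults with the stock English locale (sketch):
//   44 seconds -> 'a few seconds'   45 seconds -> 'a minute'
//   21 hours   -> '21 hours'        22 hours   -> 'a day'
//   10 months  -> '10 months'       11 months  -> 'a year'
// The cut-offs are adjustable via relativeTimeThreshold() defined below.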
// helper function for moment.fn.from, moment.fn.fromNow, and moment.duration.fn.humanize
function substituteTimeAgo(string, number, withoutSuffix, isFuture, locale) {
return locale.relativeTime(number || 1, !!withoutSuffix, string, isFuture);
}
function relativeTime$1 (posNegDuration, withoutSuffix, locale) {
var duration = createDuration(posNegDuration).abs();
var seconds = round(duration.as('s'));
var minutes = round(duration.as('m'));
var hours = round(duration.as('h'));
var days = round(duration.as('d'));
var months = round(duration.as('M'));
var years = round(duration.as('y'));
var a = seconds <= thresholds.ss && ['s', seconds] ||
seconds < thresholds.s && ['ss', seconds] ||
minutes <= 1 && ['m'] ||
minutes < thresholds.m && ['mm', minutes] ||
hours <= 1 && ['h'] ||
hours < thresholds.h && ['hh', hours] ||
days <= 1 && ['d'] ||
days < thresholds.d && ['dd', days] ||
months <= 1 && ['M'] ||
months < thresholds.M && ['MM', months] ||
years <= 1 && ['y'] || ['yy', years];
a[2] = withoutSuffix;
a[3] = +posNegDuration > 0;
a[4] = locale;
return substituteTimeAgo.apply(null, a);
}
// This function allows you to set the rounding function for relative time strings
function getSetRelativeTimeRounding (roundingFunction) {
if (roundingFunction === undefined) {
return round;
}
if (typeof(roundingFunction) === 'function') {
round = roundingFunction;
return true;
}
return false;
}
// This function allows you to set a threshold for relative time strings
function getSetRelativeTimeThreshold (threshold, limit) {
if (thresholds[threshold] === undefined) {
return false;
}
if (limit === undefined) {
return thresholds[threshold];
}
thresholds[threshold] = limit;
if (threshold === 's') {
thresholds.ss = limit - 1;
}
return true;
}
function humanize (withSuffix) {
if (!this.isValid()) {
return this.localeData().invalidDate();
}
var locale = this.localeData();
var output = relativeTime$1(this, !withSuffix, locale);
if (withSuffix) {
output = locale.pastFuture(+this, output);
}
return locale.postformat(output);
}
var abs$1 = Math.abs;
function sign(x) {
return ((x > 0) - (x < 0)) || +x;
}
function toISOString$1() {
// for ISO strings we do not use the normal bubbling rules:
// * milliseconds bubble up until they become hours
// * days do not bubble at all
// * months bubble up until they become years
// This is because there is no context-free conversion between hours and days
// (think of clock changes)
// and also not between days and months (28-31 days per month)
if (!this.isValid()) {
return this.localeData().invalidDate();
}
var seconds = abs$1(this._milliseconds) / 1000;
var days = abs$1(this._days);
var months = abs$1(this._months);
var minutes, hours, years;
// 3600 seconds -> 60 minutes -> 1 hour
minutes = absFloor(seconds / 60);
hours = absFloor(minutes / 60);
seconds %= 60;
minutes %= 60;
// 12 months -> 1 year
years = absFloor(months / 12);
months %= 12;
// inspired by https://github.com/dordille/moment-isoduration/blob/master/moment.isoduration.js
var Y = years;
var M = months;
var D = days;
var h = hours;
var m = minutes;
var s = seconds ? seconds.toFixed(3).replace(/\.?0+$/, '') : '';
var total = this.asSeconds();
if (!total) {
// this is the same as C#'s (Noda) and python (isodate)...
// but not other JS (goog.date)
return 'P0D';
}
var totalSign = total < 0 ? '-' : '';
var ymSign = sign(this._months) !== sign(total) ? '-' : '';
var daysSign = sign(this._days) !== sign(total) ? '-' : '';
var hmsSign = sign(this._milliseconds) !== sign(total) ? '-' : '';
return totalSign + 'P' +
(Y ? ymSign + Y + 'Y' : '') +
(M ? ymSign + M + 'M' : '') +
(D ? daysSign + D + 'D' : '') +
((h || m || s) ? 'T' : '') +
(h ? hmsSign + h + 'H' : '') +
(m ? hmsSign + m + 'M' : '') +
(s ? hmsSign + s + 'S' : '');
}
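// For example (illustrative), moment.duration({months: 14, days: 3, hours: 25}).toISOString()
// yields 'P1Y2M3DT25H': months bubble into years, but hours are not folded into days,
// matching the rules described at the top of this function.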
var proto$2 = Duration.prototype;
proto$2.isValid = isValid$1;
proto$2.abs = abs;
proto$2.add = add$1;
proto$2.subtract = subtract$1;
proto$2.as = as;
proto$2.asMilliseconds = asMilliseconds;
proto$2.asSeconds = asSeconds;
proto$2.asMinutes = asMinutes;
proto$2.asHours = asHours;
proto$2.asDays = asDays;
proto$2.asWeeks = asWeeks;
proto$2.asMonths = asMonths;
proto$2.asYears = asYears;
proto$2.valueOf = valueOf$1;
proto$2._bubble = bubble;
proto$2.clone = clone$1;
proto$2.get = get$2;
proto$2.milliseconds = milliseconds;
proto$2.seconds = seconds;
proto$2.minutes = minutes;
proto$2.hours = hours;
proto$2.days = days;
proto$2.weeks = weeks;
proto$2.months = months;
proto$2.years = years;
proto$2.humanize = humanize;
proto$2.toISOString = toISOString$1;
proto$2.toString = toISOString$1;
proto$2.toJSON = toISOString$1;
proto$2.locale = locale;
proto$2.localeData = localeData;
proto$2.toIsoString = deprecate('toIsoString() is deprecated. Please use toISOString() instead (notice the capitals)', toISOString$1);
proto$2.lang = lang;
// Side effect imports
// FORMATTING
addFormatToken('X', 0, 0, 'unix');
addFormatToken('x', 0, 0, 'valueOf');
// PARSING
addRegexToken('x', matchSigned);
addRegexToken('X', matchTimestamp);
addParseToken('X', function (input, array, config) {
config._d = new Date(parseFloat(input, 10) * 1000);
});
addParseToken('x', function (input, array, config) {
config._d = new Date(toInt(input));
});
// Side effect imports
hooks.version = '2.22.2';
setHookCallback(createLocal);
hooks.fn = proto;
hooks.min = min;
hooks.max = max;
hooks.now = now;
hooks.utc = createUTC;
hooks.unix = createUnix;
hooks.months = listMonths;
hooks.isDate = isDate;
hooks.locale = getSetGlobalLocale;
hooks.invalid = createInvalid;
hooks.duration = createDuration;
hooks.isMoment = isMoment;
hooks.weekdays = listWeekdays;
hooks.parseZone = createInZone;
hooks.localeData = getLocale;
hooks.isDuration = isDuration;
hooks.monthsShort = listMonthsShort;
hooks.weekdaysMin = listWeekdaysMin;
hooks.defineLocale = defineLocale;
hooks.updateLocale = updateLocale;
hooks.locales = listLocales;
hooks.weekdaysShort = listWeekdaysShort;
hooks.normalizeUnits = normalizeUnits;
hooks.relativeTimeRounding = getSetRelativeTimeRounding;
hooks.relativeTimeThreshold = getSetRelativeTimeThreshold;
hooks.calendarFormat = getCalendarFormat;
hooks.prototype = proto;
// currently HTML5 input type only supports 24-hour formats
hooks.HTML5_FMT = {
DATETIME_LOCAL: 'YYYY-MM-DDTHH:mm', // <input type="datetime-local" />
DATETIME_LOCAL_SECONDS: 'YYYY-MM-DDTHH:mm:ss', // <input type="datetime-local" step="1" />
DATETIME_LOCAL_MS: 'YYYY-MM-DDTHH:mm:ss.SSS', // <input type="datetime-local" step="0.001" />
DATE: 'YYYY-MM-DD', // <input type="date" />
TIME: 'HH:mm', // <input type="time" />
TIME_SECONDS: 'HH:mm:ss', // <input type="time" step="1" />
TIME_MS: 'HH:mm:ss.SSS', // <input type="time" step="0.001" />
WEEK: 'YYYY-[W]WW', // <input type="week" />
MONTH: 'YYYY-MM' // <input type="month" />
};
return hooks;
})));
/* WEBPACK VAR INJECTION */}.call(exports, __webpack_require__(2)(module)))
/***/ }),
/* 2 */
/***/ (function(module, exports) {
module.exports = function(module) {
if(!module.webpackPolyfill) {
module.deprecate = function() {};
module.paths = [];
// module.parent = undefined by default
module.children = [];
module.webpackPolyfill = 1;
}
return module;
}
/***/ })
/******/ ]);
//# sourceMappingURL=http://127.0.0.1:3001/dist/js/bundle.js.map
# /argon_88_sk-0.1.2.tar.gz/argon_88_sk-0.1.2/argon_88_sk/serialize.py
import re
from types import NoneType, EllipsisType
from typing import Iterator
from .base import Serializer
from argon_88_sk.utils.templates import JSON, XML, XML_PRIMITIVE
from argon_88_sk.utils.constants import PRIMITIVE_TYPES, TYPE_MAPPING
class JSONSerializer(Serializer):
"""JSON serializer class."""
_TYPE_PATTERN: str = r"<class '(\w\S+)'>_"
_KEYWORDS: dict[None | bool, str] = {
None: 'null',
True: 'true',
False: 'false'
}
def __init__(self):
super().__init__()
@classmethod
def _to_number(cls, s: str) -> int | float | complex | None:
for num_type in (int, float, complex):
try:
return num_type(s)
except (ValueError, TypeError):
pass
    def _load_from_json(self, template: str) -> dict:
        """Takes a string of a specific format (see ``utils.templates``
        for details) as input, loads the object data,
        and returns it in the form of a dict.
:param template: string template to retrieve object from.
:return: dictionary with object data.
"""
obj: dict = {}
lines: list[str] = template.split("\n")
        it: Iterator[tuple[int, str]] = enumerate(lines)
for i, line in it:
if not re.search(r'\s*(.+):\s*([^,]*)', line):
continue
key, value = re.search(r'\s*(.+):\s*([^,]*)', line).groups()
if value != "{":
obj[self.loads(key)] = self.loads(value)
elif value == "{" and "<class" not in key:
brackets = 1
start = i + 1
while brackets and i < len(lines) - 1:
i, line = next(it, None)
brackets += ("{" in lines[i]) - ("}" in lines[i])
obj[self.loads(key)] = self.loads('\n'.join(lines[start:i]))
return obj
def dumps(self, obj) -> str:
"""Dumps an object to a string and returns the string.
        Dumping is done via a general JSON object template. This can
        overcomplicate the serialization of simple structures, but it
        applies to a much wider range of Python objects.
:param obj: object to dump.
:return: string containing serialized (dumped) object.
"""
if type(obj) == str:
return f'"{obj}"'
if type(obj) == type(Ellipsis):
return ' '
if type(obj) in (int, float, complex):
return str(obj)
if type(obj) in [bool, NoneType]:
return self._KEYWORDS[obj]
return JSON.format(
type=type(obj) if type(obj) in TYPE_MAPPING.values() else object,
id=id(obj),
items=self.formatter.to_json(self.get_items(obj), self.dumps)
)
def loads(self, s: str):
"""Loads an object from a string and returns it.
Operates using JSON template from ``utils.templates``.
However, primitive types are serialized without this
or any other template.
:param s: string to extract object from.
:return: deserialized Python object.
"""
if not len(s):
return
if s == ' ':
return ...
if s.startswith('"'):
return s.strip('"')
if s in self._KEYWORDS.values():
return self._get_key(s, self._KEYWORDS)
if self._to_number(s) is not None:
return self._to_number(s)
return self.create_object(
self._type_from_str(s, self._TYPE_PATTERN),
self._load_from_json(s)
)
class XMLSerializer(Serializer):
_TYPE_PATTERN: str = r'type="(\w+)"'
def _get_tag(self, tagname: str, lines) -> str:
counter = 1
it = enumerate(lines)
for i, line in it:
if not counter:
return lines[:i]
counter += bool(re.search(rf"<{tagname}.*>", line.strip("\t\n ")))
counter -= bool(re.search(rf"</{tagname}>", line.strip("\t\n ")))
def _load_from_xml(self, template: str) -> dict:
obj: dict = {}
lines: list[str] = template.split("\n")
        it: Iterator[tuple[int, str]] = enumerate(lines)
for i, line in it:
if "<item>" == line.strip("\t\n "):
item = self._get_tag("item", lines[i+1:])
key = self._get_tag("key", item[1:])
value = self._get_tag("value", item[len(key)+2:])
obj[self.loads("\n".join(key[:-1]))] = self.loads("\n".join(value[:-1]))
[next(it, None) for _ in range(len(item))]
return obj
def dumps(self, obj) -> str:
"""Dumps an object to a string and returns the string.
Dumping is done via string templates with XML prefix in
``utils.templates`` module.
:param obj: object to dump.
:return: string containing serialized (dumped) object.
"""
if type(obj) in PRIMITIVE_TYPES:
obj_type = self._get_key(type(obj), TYPE_MAPPING)
return f'<primitive type="{obj_type}">{obj}</primitive>'
return XML.format(
type=self._get_key(type(obj), TYPE_MAPPING) if type(obj) in TYPE_MAPPING.values() else "object",
id=id(obj),
items=self.formatter.to_xml(self.get_items(obj), self.dumps)
)
def loads(self, s):
"""Loads an object from a string and returns it.
Operates using templates with XML prefix from ``utils.templates``.
:param s: string to extract object from.
:return: deserialized Python object.
"""
if not len(s):
return
if "primitive" in s.split("\n")[0]:
obj_data = re.search(
XML_PRIMITIVE.format(
type="\w+",
obj="(.*)"
), s).group(1)
obj_type = self._type_from_str(
s=s.split("\n")[0],
pattern=self._TYPE_PATTERN
)
if obj_type == NoneType:
return None
if obj_type == bool:
return obj_data == "True"
if obj_type == EllipsisType:
return ...
return obj_type(obj_data)
return self.create_object(
self._type_from_str(s, self._TYPE_PATTERN),
self._load_from_xml(s)
) | PypiClean |
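# Minimal usage sketch (illustrative; assumes the ``Serializer`` base class from
# ``.base`` provides the ``formatter``, ``get_items`` and ``create_object`` helpers
# used above):
#
#   serializer = JSONSerializer()
#   dumped = serializer.dumps({"answer": 42})
#   restored = serializer.loads(dumped)
#
# ``XMLSerializer`` exposes the same ``dumps``/``loads`` pair but wraps primitives
# in the ``XML_PRIMITIVE`` template instead of emitting bare JSON literals.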
# /getpaid.formgen-0.6.zip/getpaid.formgen-0.6/src/getpaid/formgen/__init__.py
from Products.Archetypes import atapi
from Products.CMFCore import utils
from zope.i18nmessageid import MessageFactory

from getpaid.formgen import config
# Define a message factory for when this product is internationalised.
# This will be imported with the special name "_" in most modules. Strings
# like _(u"message") will then be extracted by i18n tools for translation.
GPFGMessageFactory = MessageFactory('getpaid.formgen')
def initialize(context):
    """Initializer called when used as a Zope 2 product.
    This is referenced from configure.zcml. Registration as a "Zope 2 product"
    is necessary for GenericSetup profiles to work, for example.
Here, we call the Archetypes machinery to register our content types
with Zope and the CMF.
"""
# Retrieve the content types that have been registered with Archetypes
# This happens when the content type is imported and the registerType()
# call in the content type's module is invoked. Actually, this happens
# during ZCML processing, but we do it here again to be explicit. Of
# course, even if we import the module several times, it is only run
# once!
from content import GetpaidPFGAdapter
content_types, constructors, ftis = atapi.process_types(
atapi.listTypes(config.PROJECTNAME),
config.PROJECTNAME)
# Now initialize all these content types. The initialization process takes
# care of registering low-level Zope 2 factories, including the relevant
# add-permission. These are listed in config.py. We use different
# permisisons for each content type to allow maximum flexibility of who
# can add which content types, where. The roles are set up in rolemap.xml
# in the GenericSetup profile.
for atype, constructor in zip(content_types, constructors):
utils.ContentInit("%s: %s" % (config.PROJECTNAME, atype.portal_type),
content_types = (atype,),
permission = config.ADD_PERMISSIONS[atype.portal_type],
extra_constructors = (constructor,),
).initialize(context)
try:
from Products.CMFPlone.migrations import v3_0
except ImportError:
HAS_PLONE30 = False
else:
HAS_PLONE30 = True
try:
from plone.app.upgrade import v40
except ImportError:
HAS_PLONE40 = False
else:
HAS_PLONE40 = True
HAS_PLONE30 = True | PypiClean |
# /monocdk.experiment-1.66.0-py3-none-any.whl/monocdk_experiment/aws_kinesisanalytics/__init__.py
import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from .._jsii import *
from .. import (
CfnResource as _CfnResource_7760e8e4,
CfnTag as _CfnTag_b4661f1a,
Construct as _Construct_f50a3f53,
IInspectable as _IInspectable_051e6ed8,
IResolvable as _IResolvable_9ceae33e,
TagManager as _TagManager_2508893f,
TreeInspector as _TreeInspector_154f5999,
)
@jsii.implements(_IInspectable_051e6ed8)
class CfnApplication(
_CfnResource_7760e8e4,
metaclass=jsii.JSIIMeta,
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication",
):
"""A CloudFormation ``AWS::KinesisAnalytics::Application``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html
:cloudformationResource: AWS::KinesisAnalytics::Application
"""
def __init__(
self,
scope: _Construct_f50a3f53,
id: builtins.str,
*,
inputs: typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplication.InputProperty", _IResolvable_9ceae33e]]],
application_code: typing.Optional[builtins.str] = None,
application_description: typing.Optional[builtins.str] = None,
application_name: typing.Optional[builtins.str] = None,
) -> None:
"""Create a new ``AWS::KinesisAnalytics::Application``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param inputs: ``AWS::KinesisAnalytics::Application.Inputs``.
:param application_code: ``AWS::KinesisAnalytics::Application.ApplicationCode``.
:param application_description: ``AWS::KinesisAnalytics::Application.ApplicationDescription``.
:param application_name: ``AWS::KinesisAnalytics::Application.ApplicationName``.
"""
props = CfnApplicationProps(
inputs=inputs,
application_code=application_code,
application_description=application_description,
application_name=application_name,
)
jsii.create(CfnApplication, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_154f5999) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="inputs")
def inputs(
self,
) -> typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplication.InputProperty", _IResolvable_9ceae33e]]]:
"""``AWS::KinesisAnalytics::Application.Inputs``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html#cfn-kinesisanalytics-application-inputs
"""
return jsii.get(self, "inputs")
@inputs.setter # type: ignore
def inputs(
self,
value: typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplication.InputProperty", _IResolvable_9ceae33e]]],
) -> None:
jsii.set(self, "inputs", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationCode")
def application_code(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalytics::Application.ApplicationCode``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html#cfn-kinesisanalytics-application-applicationcode
"""
return jsii.get(self, "applicationCode")
@application_code.setter # type: ignore
def application_code(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "applicationCode", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationDescription")
def application_description(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalytics::Application.ApplicationDescription``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html#cfn-kinesisanalytics-application-applicationdescription
"""
return jsii.get(self, "applicationDescription")
@application_description.setter # type: ignore
def application_description(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "applicationDescription", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationName")
def application_name(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalytics::Application.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html#cfn-kinesisanalytics-application-applicationname
"""
return jsii.get(self, "applicationName")
@application_name.setter # type: ignore
def application_name(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "applicationName", value)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.CSVMappingParametersProperty",
jsii_struct_bases=[],
name_mapping={
"record_column_delimiter": "recordColumnDelimiter",
"record_row_delimiter": "recordRowDelimiter",
},
)
class CSVMappingParametersProperty:
def __init__(
self,
*,
record_column_delimiter: builtins.str,
record_row_delimiter: builtins.str,
) -> None:
"""
:param record_column_delimiter: ``CfnApplication.CSVMappingParametersProperty.RecordColumnDelimiter``.
:param record_row_delimiter: ``CfnApplication.CSVMappingParametersProperty.RecordRowDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-csvmappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_column_delimiter": record_column_delimiter,
"record_row_delimiter": record_row_delimiter,
}
@builtins.property
def record_column_delimiter(self) -> builtins.str:
"""``CfnApplication.CSVMappingParametersProperty.RecordColumnDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-csvmappingparameters.html#cfn-kinesisanalytics-application-csvmappingparameters-recordcolumndelimiter
"""
result = self._values.get("record_column_delimiter")
assert result is not None, "Required property 'record_column_delimiter' is missing"
return result
@builtins.property
def record_row_delimiter(self) -> builtins.str:
"""``CfnApplication.CSVMappingParametersProperty.RecordRowDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-csvmappingparameters.html#cfn-kinesisanalytics-application-csvmappingparameters-recordrowdelimiter
"""
result = self._values.get("record_row_delimiter")
assert result is not None, "Required property 'record_row_delimiter' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CSVMappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
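    # Illustrative sketch (not part of the generated bindings): constructing
    # CSVMappingParametersProperty for a comma-delimited record format where
    # each record ends with a newline. Both delimiters are required keyword
    # arguments.
    #
    #     csv_mapping = CfnApplication.CSVMappingParametersProperty(
    #         record_column_delimiter=",",
    #         record_row_delimiter="\n",
    #     )
    #
    # The resulting value is typically nested inside MappingParametersProperty
    # and RecordFormatProperty (defined further below) when describing an
    # input schema.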
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.InputLambdaProcessorProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn", "role_arn": "roleArn"},
)
class InputLambdaProcessorProperty:
def __init__(
self,
*,
resource_arn: builtins.str,
role_arn: builtins.str,
) -> None:
"""
:param resource_arn: ``CfnApplication.InputLambdaProcessorProperty.ResourceARN``.
:param role_arn: ``CfnApplication.InputLambdaProcessorProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputlambdaprocessor.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
"role_arn": role_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplication.InputLambdaProcessorProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputlambdaprocessor.html#cfn-kinesisanalytics-application-inputlambdaprocessor-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
@builtins.property
def role_arn(self) -> builtins.str:
"""``CfnApplication.InputLambdaProcessorProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputlambdaprocessor.html#cfn-kinesisanalytics-application-inputlambdaprocessor-rolearn
"""
result = self._values.get("role_arn")
assert result is not None, "Required property 'role_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputLambdaProcessorProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.InputParallelismProperty",
jsii_struct_bases=[],
name_mapping={"count": "count"},
)
class InputParallelismProperty:
def __init__(self, *, count: typing.Optional[jsii.Number] = None) -> None:
"""
:param count: ``CfnApplication.InputParallelismProperty.Count``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputparallelism.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if count is not None:
self._values["count"] = count
@builtins.property
def count(self) -> typing.Optional[jsii.Number]:
"""``CfnApplication.InputParallelismProperty.Count``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputparallelism.html#cfn-kinesisanalytics-application-inputparallelism-count
"""
result = self._values.get("count")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputParallelismProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.InputProcessingConfigurationProperty",
jsii_struct_bases=[],
name_mapping={"input_lambda_processor": "inputLambdaProcessor"},
)
class InputProcessingConfigurationProperty:
def __init__(
self,
*,
input_lambda_processor: typing.Optional[typing.Union["CfnApplication.InputLambdaProcessorProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param input_lambda_processor: ``CfnApplication.InputProcessingConfigurationProperty.InputLambdaProcessor``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputprocessingconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if input_lambda_processor is not None:
self._values["input_lambda_processor"] = input_lambda_processor
@builtins.property
def input_lambda_processor(
self,
) -> typing.Optional[typing.Union["CfnApplication.InputLambdaProcessorProperty", _IResolvable_9ceae33e]]:
"""``CfnApplication.InputProcessingConfigurationProperty.InputLambdaProcessor``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputprocessingconfiguration.html#cfn-kinesisanalytics-application-inputprocessingconfiguration-inputlambdaprocessor
"""
result = self._values.get("input_lambda_processor")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputProcessingConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
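    # Illustrative sketch (not part of the generated bindings): wiring an
    # optional pre-processing Lambda function into the input configuration.
    # The ARNs below are hypothetical placeholders.
    #
    #     processing_config = CfnApplication.InputProcessingConfigurationProperty(
    #         input_lambda_processor=CfnApplication.InputLambdaProcessorProperty(
    #             resource_arn="arn:aws:lambda:us-east-1:111122223333:function:preprocess",
    #             role_arn="arn:aws:iam::111122223333:role/kinesis-analytics-role",
    #         ),
    #     )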
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.InputProperty",
jsii_struct_bases=[],
name_mapping={
"input_schema": "inputSchema",
"name_prefix": "namePrefix",
"input_parallelism": "inputParallelism",
"input_processing_configuration": "inputProcessingConfiguration",
"kinesis_firehose_input": "kinesisFirehoseInput",
"kinesis_streams_input": "kinesisStreamsInput",
},
)
class InputProperty:
def __init__(
self,
*,
input_schema: typing.Union["CfnApplication.InputSchemaProperty", _IResolvable_9ceae33e],
name_prefix: builtins.str,
input_parallelism: typing.Optional[typing.Union["CfnApplication.InputParallelismProperty", _IResolvable_9ceae33e]] = None,
input_processing_configuration: typing.Optional[typing.Union["CfnApplication.InputProcessingConfigurationProperty", _IResolvable_9ceae33e]] = None,
kinesis_firehose_input: typing.Optional[typing.Union["CfnApplication.KinesisFirehoseInputProperty", _IResolvable_9ceae33e]] = None,
kinesis_streams_input: typing.Optional[typing.Union["CfnApplication.KinesisStreamsInputProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param input_schema: ``CfnApplication.InputProperty.InputSchema``.
:param name_prefix: ``CfnApplication.InputProperty.NamePrefix``.
:param input_parallelism: ``CfnApplication.InputProperty.InputParallelism``.
:param input_processing_configuration: ``CfnApplication.InputProperty.InputProcessingConfiguration``.
:param kinesis_firehose_input: ``CfnApplication.InputProperty.KinesisFirehoseInput``.
:param kinesis_streams_input: ``CfnApplication.InputProperty.KinesisStreamsInput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-input.html
"""
self._values: typing.Dict[str, typing.Any] = {
"input_schema": input_schema,
"name_prefix": name_prefix,
}
if input_parallelism is not None:
self._values["input_parallelism"] = input_parallelism
if input_processing_configuration is not None:
self._values["input_processing_configuration"] = input_processing_configuration
if kinesis_firehose_input is not None:
self._values["kinesis_firehose_input"] = kinesis_firehose_input
if kinesis_streams_input is not None:
self._values["kinesis_streams_input"] = kinesis_streams_input
@builtins.property
def input_schema(
self,
) -> typing.Union["CfnApplication.InputSchemaProperty", _IResolvable_9ceae33e]:
"""``CfnApplication.InputProperty.InputSchema``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-input.html#cfn-kinesisanalytics-application-input-inputschema
"""
result = self._values.get("input_schema")
assert result is not None, "Required property 'input_schema' is missing"
return result
@builtins.property
def name_prefix(self) -> builtins.str:
"""``CfnApplication.InputProperty.NamePrefix``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-input.html#cfn-kinesisanalytics-application-input-nameprefix
"""
result = self._values.get("name_prefix")
assert result is not None, "Required property 'name_prefix' is missing"
return result
@builtins.property
def input_parallelism(
self,
) -> typing.Optional[typing.Union["CfnApplication.InputParallelismProperty", _IResolvable_9ceae33e]]:
"""``CfnApplication.InputProperty.InputParallelism``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-input.html#cfn-kinesisanalytics-application-input-inputparallelism
"""
result = self._values.get("input_parallelism")
return result
@builtins.property
def input_processing_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplication.InputProcessingConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplication.InputProperty.InputProcessingConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-input.html#cfn-kinesisanalytics-application-input-inputprocessingconfiguration
"""
result = self._values.get("input_processing_configuration")
return result
@builtins.property
def kinesis_firehose_input(
self,
) -> typing.Optional[typing.Union["CfnApplication.KinesisFirehoseInputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplication.InputProperty.KinesisFirehoseInput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-input.html#cfn-kinesisanalytics-application-input-kinesisfirehoseinput
"""
result = self._values.get("kinesis_firehose_input")
return result
@builtins.property
def kinesis_streams_input(
self,
) -> typing.Optional[typing.Union["CfnApplication.KinesisStreamsInputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplication.InputProperty.KinesisStreamsInput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-input.html#cfn-kinesisanalytics-application-input-kinesisstreamsinput
"""
result = self._values.get("kinesis_streams_input")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.InputSchemaProperty",
jsii_struct_bases=[],
name_mapping={
"record_columns": "recordColumns",
"record_format": "recordFormat",
"record_encoding": "recordEncoding",
},
)
class InputSchemaProperty:
def __init__(
self,
*,
record_columns: typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplication.RecordColumnProperty", _IResolvable_9ceae33e]]],
record_format: typing.Union["CfnApplication.RecordFormatProperty", _IResolvable_9ceae33e],
record_encoding: typing.Optional[builtins.str] = None,
) -> None:
"""
:param record_columns: ``CfnApplication.InputSchemaProperty.RecordColumns``.
:param record_format: ``CfnApplication.InputSchemaProperty.RecordFormat``.
:param record_encoding: ``CfnApplication.InputSchemaProperty.RecordEncoding``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputschema.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_columns": record_columns,
"record_format": record_format,
}
if record_encoding is not None:
self._values["record_encoding"] = record_encoding
@builtins.property
def record_columns(
self,
) -> typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplication.RecordColumnProperty", _IResolvable_9ceae33e]]]:
"""``CfnApplication.InputSchemaProperty.RecordColumns``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputschema.html#cfn-kinesisanalytics-application-inputschema-recordcolumns
"""
result = self._values.get("record_columns")
assert result is not None, "Required property 'record_columns' is missing"
return result
@builtins.property
def record_format(
self,
) -> typing.Union["CfnApplication.RecordFormatProperty", _IResolvable_9ceae33e]:
"""``CfnApplication.InputSchemaProperty.RecordFormat``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputschema.html#cfn-kinesisanalytics-application-inputschema-recordformat
"""
result = self._values.get("record_format")
assert result is not None, "Required property 'record_format' is missing"
return result
@builtins.property
def record_encoding(self) -> typing.Optional[builtins.str]:
"""``CfnApplication.InputSchemaProperty.RecordEncoding``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-inputschema.html#cfn-kinesisanalytics-application-inputschema-recordencoding
"""
result = self._values.get("record_encoding")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputSchemaProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
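    # Illustrative sketch (not part of the generated bindings): describing the
    # schema of incoming JSON records. RecordColumnProperty, RecordFormatProperty,
    # MappingParametersProperty and JSONMappingParametersProperty are defined
    # further below in this class; the column names, SQL types and JSONPath
    # mappings are hypothetical placeholders.
    #
    #     input_schema = CfnApplication.InputSchemaProperty(
    #         record_columns=[
    #             CfnApplication.RecordColumnProperty(
    #                 name="ticker_symbol", sql_type="VARCHAR(4)", mapping="$.ticker_symbol"
    #             ),
    #             CfnApplication.RecordColumnProperty(
    #                 name="price", sql_type="REAL", mapping="$.price"
    #             ),
    #         ],
    #         record_format=CfnApplication.RecordFormatProperty(
    #             record_format_type="JSON",
    #             mapping_parameters=CfnApplication.MappingParametersProperty(
    #                 json_mapping_parameters=CfnApplication.JSONMappingParametersProperty(
    #                     record_row_path="$"
    #                 )
    #             ),
    #         ),
    #         record_encoding="UTF-8",
    #     )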
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.JSONMappingParametersProperty",
jsii_struct_bases=[],
name_mapping={"record_row_path": "recordRowPath"},
)
class JSONMappingParametersProperty:
def __init__(self, *, record_row_path: builtins.str) -> None:
"""
:param record_row_path: ``CfnApplication.JSONMappingParametersProperty.RecordRowPath``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-jsonmappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_row_path": record_row_path,
}
@builtins.property
def record_row_path(self) -> builtins.str:
"""``CfnApplication.JSONMappingParametersProperty.RecordRowPath``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-jsonmappingparameters.html#cfn-kinesisanalytics-application-jsonmappingparameters-recordrowpath
"""
result = self._values.get("record_row_path")
assert result is not None, "Required property 'record_row_path' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "JSONMappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.KinesisFirehoseInputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn", "role_arn": "roleArn"},
)
class KinesisFirehoseInputProperty:
def __init__(
self,
*,
resource_arn: builtins.str,
role_arn: builtins.str,
) -> None:
"""
:param resource_arn: ``CfnApplication.KinesisFirehoseInputProperty.ResourceARN``.
:param role_arn: ``CfnApplication.KinesisFirehoseInputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-kinesisfirehoseinput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
"role_arn": role_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplication.KinesisFirehoseInputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-kinesisfirehoseinput.html#cfn-kinesisanalytics-application-kinesisfirehoseinput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
@builtins.property
def role_arn(self) -> builtins.str:
"""``CfnApplication.KinesisFirehoseInputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-kinesisfirehoseinput.html#cfn-kinesisanalytics-application-kinesisfirehoseinput-rolearn
"""
result = self._values.get("role_arn")
assert result is not None, "Required property 'role_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "KinesisFirehoseInputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.KinesisStreamsInputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn", "role_arn": "roleArn"},
)
class KinesisStreamsInputProperty:
def __init__(
self,
*,
resource_arn: builtins.str,
role_arn: builtins.str,
) -> None:
"""
:param resource_arn: ``CfnApplication.KinesisStreamsInputProperty.ResourceARN``.
:param role_arn: ``CfnApplication.KinesisStreamsInputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-kinesisstreamsinput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
"role_arn": role_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplication.KinesisStreamsInputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-kinesisstreamsinput.html#cfn-kinesisanalytics-application-kinesisstreamsinput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
@builtins.property
def role_arn(self) -> builtins.str:
"""``CfnApplication.KinesisStreamsInputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-kinesisstreamsinput.html#cfn-kinesisanalytics-application-kinesisstreamsinput-rolearn
"""
result = self._values.get("role_arn")
assert result is not None, "Required property 'role_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "KinesisStreamsInputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.MappingParametersProperty",
jsii_struct_bases=[],
name_mapping={
"csv_mapping_parameters": "csvMappingParameters",
"json_mapping_parameters": "jsonMappingParameters",
},
)
class MappingParametersProperty:
def __init__(
self,
*,
csv_mapping_parameters: typing.Optional[typing.Union["CfnApplication.CSVMappingParametersProperty", _IResolvable_9ceae33e]] = None,
json_mapping_parameters: typing.Optional[typing.Union["CfnApplication.JSONMappingParametersProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param csv_mapping_parameters: ``CfnApplication.MappingParametersProperty.CSVMappingParameters``.
:param json_mapping_parameters: ``CfnApplication.MappingParametersProperty.JSONMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-mappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if csv_mapping_parameters is not None:
self._values["csv_mapping_parameters"] = csv_mapping_parameters
if json_mapping_parameters is not None:
self._values["json_mapping_parameters"] = json_mapping_parameters
@builtins.property
def csv_mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplication.CSVMappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplication.MappingParametersProperty.CSVMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-mappingparameters.html#cfn-kinesisanalytics-application-mappingparameters-csvmappingparameters
"""
result = self._values.get("csv_mapping_parameters")
return result
@builtins.property
def json_mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplication.JSONMappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplication.MappingParametersProperty.JSONMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-mappingparameters.html#cfn-kinesisanalytics-application-mappingparameters-jsonmappingparameters
"""
result = self._values.get("json_mapping_parameters")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "MappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.RecordColumnProperty",
jsii_struct_bases=[],
name_mapping={"name": "name", "sql_type": "sqlType", "mapping": "mapping"},
)
class RecordColumnProperty:
def __init__(
self,
*,
name: builtins.str,
sql_type: builtins.str,
mapping: typing.Optional[builtins.str] = None,
) -> None:
"""
:param name: ``CfnApplication.RecordColumnProperty.Name``.
:param sql_type: ``CfnApplication.RecordColumnProperty.SqlType``.
:param mapping: ``CfnApplication.RecordColumnProperty.Mapping``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-recordcolumn.html
"""
self._values: typing.Dict[str, typing.Any] = {
"name": name,
"sql_type": sql_type,
}
if mapping is not None:
self._values["mapping"] = mapping
@builtins.property
def name(self) -> builtins.str:
"""``CfnApplication.RecordColumnProperty.Name``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-recordcolumn.html#cfn-kinesisanalytics-application-recordcolumn-name
"""
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return result
@builtins.property
def sql_type(self) -> builtins.str:
"""``CfnApplication.RecordColumnProperty.SqlType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-recordcolumn.html#cfn-kinesisanalytics-application-recordcolumn-sqltype
"""
result = self._values.get("sql_type")
assert result is not None, "Required property 'sql_type' is missing"
return result
@builtins.property
def mapping(self) -> typing.Optional[builtins.str]:
"""``CfnApplication.RecordColumnProperty.Mapping``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-recordcolumn.html#cfn-kinesisanalytics-application-recordcolumn-mapping
"""
result = self._values.get("mapping")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RecordColumnProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplication.RecordFormatProperty",
jsii_struct_bases=[],
name_mapping={
"record_format_type": "recordFormatType",
"mapping_parameters": "mappingParameters",
},
)
class RecordFormatProperty:
def __init__(
self,
*,
record_format_type: builtins.str,
mapping_parameters: typing.Optional[typing.Union["CfnApplication.MappingParametersProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param record_format_type: ``CfnApplication.RecordFormatProperty.RecordFormatType``.
:param mapping_parameters: ``CfnApplication.RecordFormatProperty.MappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-recordformat.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_format_type": record_format_type,
}
if mapping_parameters is not None:
self._values["mapping_parameters"] = mapping_parameters
@builtins.property
def record_format_type(self) -> builtins.str:
"""``CfnApplication.RecordFormatProperty.RecordFormatType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-recordformat.html#cfn-kinesisanalytics-application-recordformat-recordformattype
"""
result = self._values.get("record_format_type")
assert result is not None, "Required property 'record_format_type' is missing"
return result
@builtins.property
def mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplication.MappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplication.RecordFormatProperty.MappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-application-recordformat.html#cfn-kinesisanalytics-application-recordformat-mappingparameters
"""
result = self._values.get("mapping_parameters")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RecordFormatProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
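# Illustrative usage sketch (not part of the generated bindings). It assumes
# `stack` is an existing Stack/Construct from the same library and that
# `input_schema` was built as in the InputSchemaProperty sketch above; the
# resource names and ARNs are hypothetical placeholders.
#
#     application = CfnApplication(
#         stack, "StreamingApplication",
#         application_name="ticker-analytics",
#         application_description="Aggregates ticker prices from a Kinesis stream.",
#         inputs=[
#             CfnApplication.InputProperty(
#                 name_prefix="SOURCE_SQL_STREAM",
#                 input_schema=input_schema,
#                 kinesis_streams_input=CfnApplication.KinesisStreamsInputProperty(
#                     resource_arn="arn:aws:kinesis:us-east-1:111122223333:stream/ticker-stream",
#                     role_arn="arn:aws:iam::111122223333:role/kinesis-analytics-role",
#                 ),
#             )
#         ],
#     )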
@jsii.implements(_IInspectable_051e6ed8)
class CfnApplicationCloudWatchLoggingOptionV2(
_CfnResource_7760e8e4,
metaclass=jsii.JSIIMeta,
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationCloudWatchLoggingOptionV2",
):
"""A CloudFormation ``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html
:cloudformationResource: AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption
"""
def __init__(
self,
scope: _Construct_f50a3f53,
id: builtins.str,
*,
application_name: builtins.str,
cloud_watch_logging_option: typing.Union["CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty", _IResolvable_9ceae33e],
) -> None:
"""Create a new ``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param application_name: ``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.ApplicationName``.
:param cloud_watch_logging_option: ``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.CloudWatchLoggingOption``.
"""
props = CfnApplicationCloudWatchLoggingOptionV2Props(
application_name=application_name,
cloud_watch_logging_option=cloud_watch_logging_option,
)
jsii.create(CfnApplicationCloudWatchLoggingOptionV2, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_154f5999) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationName")
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-applicationname
"""
return jsii.get(self, "applicationName")
@application_name.setter # type: ignore
def application_name(self, value: builtins.str) -> None:
jsii.set(self, "applicationName", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="cloudWatchLoggingOption")
def cloud_watch_logging_option(
self,
) -> typing.Union["CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty", _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.CloudWatchLoggingOption``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption
"""
return jsii.get(self, "cloudWatchLoggingOption")
@cloud_watch_logging_option.setter # type: ignore
def cloud_watch_logging_option(
self,
value: typing.Union["CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty", _IResolvable_9ceae33e],
) -> None:
jsii.set(self, "cloudWatchLoggingOption", value)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty",
jsii_struct_bases=[],
name_mapping={"log_stream_arn": "logStreamArn"},
)
class CloudWatchLoggingOptionProperty:
def __init__(self, *, log_stream_arn: builtins.str) -> None:
"""
:param log_stream_arn: ``CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty.LogStreamARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption.html
"""
self._values: typing.Dict[str, typing.Any] = {
"log_stream_arn": log_stream_arn,
}
@builtins.property
def log_stream_arn(self) -> builtins.str:
"""``CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty.LogStreamARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption-logstreamarn
"""
result = self._values.get("log_stream_arn")
assert result is not None, "Required property 'log_stream_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CloudWatchLoggingOptionProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationCloudWatchLoggingOptionV2Props",
jsii_struct_bases=[],
name_mapping={
"application_name": "applicationName",
"cloud_watch_logging_option": "cloudWatchLoggingOption",
},
)
class CfnApplicationCloudWatchLoggingOptionV2Props:
def __init__(
self,
*,
application_name: builtins.str,
cloud_watch_logging_option: typing.Union[CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty, _IResolvable_9ceae33e],
) -> None:
"""Properties for defining a ``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption``.
:param application_name: ``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.ApplicationName``.
:param cloud_watch_logging_option: ``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.CloudWatchLoggingOption``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html
"""
self._values: typing.Dict[str, typing.Any] = {
"application_name": application_name,
"cloud_watch_logging_option": cloud_watch_logging_option,
}
@builtins.property
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-applicationname
"""
result = self._values.get("application_name")
assert result is not None, "Required property 'application_name' is missing"
return result
@builtins.property
def cloud_watch_logging_option(
self,
) -> typing.Union[CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty, _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalyticsV2::ApplicationCloudWatchLoggingOption.CloudWatchLoggingOption``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationcloudwatchloggingoption.html#cfn-kinesisanalyticsv2-applicationcloudwatchloggingoption-cloudwatchloggingoption
"""
result = self._values.get("cloud_watch_logging_option")
assert result is not None, "Required property 'cloud_watch_logging_option' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnApplicationCloudWatchLoggingOptionV2Props(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
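# Illustrative usage sketch (not part of the generated bindings): attaching a
# CloudWatch Logs log stream to an existing Kinesis Analytics V2 application.
# `stack`, the application name and the log stream ARN are hypothetical
# placeholders.
#
#     CfnApplicationCloudWatchLoggingOptionV2(
#         stack, "AppLogging",
#         application_name="ticker-analytics-v2",
#         cloud_watch_logging_option=(
#             CfnApplicationCloudWatchLoggingOptionV2.CloudWatchLoggingOptionProperty(
#                 log_stream_arn="arn:aws:logs:us-east-1:111122223333:log-group:/aws/kinesis-analytics/ticker:log-stream:run-1"
#             )
#         ),
#     )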
@jsii.implements(_IInspectable_051e6ed8)
class CfnApplicationOutput(
_CfnResource_7760e8e4,
metaclass=jsii.JSIIMeta,
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutput",
):
"""A CloudFormation ``AWS::KinesisAnalytics::ApplicationOutput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationoutput.html
:cloudformationResource: AWS::KinesisAnalytics::ApplicationOutput
"""
def __init__(
self,
scope: _Construct_f50a3f53,
id: builtins.str,
*,
application_name: builtins.str,
output: typing.Union["CfnApplicationOutput.OutputProperty", _IResolvable_9ceae33e],
) -> None:
"""Create a new ``AWS::KinesisAnalytics::ApplicationOutput``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param application_name: ``AWS::KinesisAnalytics::ApplicationOutput.ApplicationName``.
:param output: ``AWS::KinesisAnalytics::ApplicationOutput.Output``.
"""
props = CfnApplicationOutputProps(
application_name=application_name, output=output
)
jsii.create(CfnApplicationOutput, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_154f5999) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationName")
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalytics::ApplicationOutput.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationoutput.html#cfn-kinesisanalytics-applicationoutput-applicationname
"""
return jsii.get(self, "applicationName")
@application_name.setter # type: ignore
def application_name(self, value: builtins.str) -> None:
jsii.set(self, "applicationName", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="output")
def output(
self,
) -> typing.Union["CfnApplicationOutput.OutputProperty", _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalytics::ApplicationOutput.Output``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationoutput.html#cfn-kinesisanalytics-applicationoutput-output
"""
return jsii.get(self, "output")
@output.setter # type: ignore
def output(
self,
value: typing.Union["CfnApplicationOutput.OutputProperty", _IResolvable_9ceae33e],
) -> None:
jsii.set(self, "output", value)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutput.DestinationSchemaProperty",
jsii_struct_bases=[],
name_mapping={"record_format_type": "recordFormatType"},
)
class DestinationSchemaProperty:
def __init__(
self,
*,
record_format_type: typing.Optional[builtins.str] = None,
) -> None:
"""
:param record_format_type: ``CfnApplicationOutput.DestinationSchemaProperty.RecordFormatType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-destinationschema.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if record_format_type is not None:
self._values["record_format_type"] = record_format_type
@builtins.property
def record_format_type(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationOutput.DestinationSchemaProperty.RecordFormatType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-destinationschema.html#cfn-kinesisanalytics-applicationoutput-destinationschema-recordformattype
"""
result = self._values.get("record_format_type")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DestinationSchemaProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutput.KinesisFirehoseOutputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn", "role_arn": "roleArn"},
)
class KinesisFirehoseOutputProperty:
def __init__(
self,
*,
resource_arn: builtins.str,
role_arn: builtins.str,
) -> None:
"""
:param resource_arn: ``CfnApplicationOutput.KinesisFirehoseOutputProperty.ResourceARN``.
:param role_arn: ``CfnApplicationOutput.KinesisFirehoseOutputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-kinesisfirehoseoutput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
"role_arn": role_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationOutput.KinesisFirehoseOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-kinesisfirehoseoutput.html#cfn-kinesisanalytics-applicationoutput-kinesisfirehoseoutput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
@builtins.property
def role_arn(self) -> builtins.str:
"""``CfnApplicationOutput.KinesisFirehoseOutputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-kinesisfirehoseoutput.html#cfn-kinesisanalytics-applicationoutput-kinesisfirehoseoutput-rolearn
"""
result = self._values.get("role_arn")
assert result is not None, "Required property 'role_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "KinesisFirehoseOutputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutput.KinesisStreamsOutputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn", "role_arn": "roleArn"},
)
class KinesisStreamsOutputProperty:
def __init__(
self,
*,
resource_arn: builtins.str,
role_arn: builtins.str,
) -> None:
"""
:param resource_arn: ``CfnApplicationOutput.KinesisStreamsOutputProperty.ResourceARN``.
:param role_arn: ``CfnApplicationOutput.KinesisStreamsOutputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-kinesisstreamsoutput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
"role_arn": role_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationOutput.KinesisStreamsOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-kinesisstreamsoutput.html#cfn-kinesisanalytics-applicationoutput-kinesisstreamsoutput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
@builtins.property
def role_arn(self) -> builtins.str:
"""``CfnApplicationOutput.KinesisStreamsOutputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-kinesisstreamsoutput.html#cfn-kinesisanalytics-applicationoutput-kinesisstreamsoutput-rolearn
"""
result = self._values.get("role_arn")
assert result is not None, "Required property 'role_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "KinesisStreamsOutputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutput.LambdaOutputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn", "role_arn": "roleArn"},
)
class LambdaOutputProperty:
def __init__(
self,
*,
resource_arn: builtins.str,
role_arn: builtins.str,
) -> None:
"""
:param resource_arn: ``CfnApplicationOutput.LambdaOutputProperty.ResourceARN``.
:param role_arn: ``CfnApplicationOutput.LambdaOutputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-lambdaoutput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
"role_arn": role_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationOutput.LambdaOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-lambdaoutput.html#cfn-kinesisanalytics-applicationoutput-lambdaoutput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
@builtins.property
def role_arn(self) -> builtins.str:
"""``CfnApplicationOutput.LambdaOutputProperty.RoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-lambdaoutput.html#cfn-kinesisanalytics-applicationoutput-lambdaoutput-rolearn
"""
result = self._values.get("role_arn")
assert result is not None, "Required property 'role_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "LambdaOutputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutput.OutputProperty",
jsii_struct_bases=[],
name_mapping={
"destination_schema": "destinationSchema",
"kinesis_firehose_output": "kinesisFirehoseOutput",
"kinesis_streams_output": "kinesisStreamsOutput",
"lambda_output": "lambdaOutput",
"name": "name",
},
)
class OutputProperty:
def __init__(
self,
*,
destination_schema: typing.Union["CfnApplicationOutput.DestinationSchemaProperty", _IResolvable_9ceae33e],
kinesis_firehose_output: typing.Optional[typing.Union["CfnApplicationOutput.KinesisFirehoseOutputProperty", _IResolvable_9ceae33e]] = None,
kinesis_streams_output: typing.Optional[typing.Union["CfnApplicationOutput.KinesisStreamsOutputProperty", _IResolvable_9ceae33e]] = None,
lambda_output: typing.Optional[typing.Union["CfnApplicationOutput.LambdaOutputProperty", _IResolvable_9ceae33e]] = None,
name: typing.Optional[builtins.str] = None,
) -> None:
"""
:param destination_schema: ``CfnApplicationOutput.OutputProperty.DestinationSchema``.
:param kinesis_firehose_output: ``CfnApplicationOutput.OutputProperty.KinesisFirehoseOutput``.
:param kinesis_streams_output: ``CfnApplicationOutput.OutputProperty.KinesisStreamsOutput``.
:param lambda_output: ``CfnApplicationOutput.OutputProperty.LambdaOutput``.
:param name: ``CfnApplicationOutput.OutputProperty.Name``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-output.html
"""
self._values: typing.Dict[str, typing.Any] = {
"destination_schema": destination_schema,
}
if kinesis_firehose_output is not None:
self._values["kinesis_firehose_output"] = kinesis_firehose_output
if kinesis_streams_output is not None:
self._values["kinesis_streams_output"] = kinesis_streams_output
if lambda_output is not None:
self._values["lambda_output"] = lambda_output
if name is not None:
self._values["name"] = name
@builtins.property
def destination_schema(
self,
) -> typing.Union["CfnApplicationOutput.DestinationSchemaProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationOutput.OutputProperty.DestinationSchema``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-output.html#cfn-kinesisanalytics-applicationoutput-output-destinationschema
"""
result = self._values.get("destination_schema")
assert result is not None, "Required property 'destination_schema' is missing"
return result
@builtins.property
def kinesis_firehose_output(
self,
) -> typing.Optional[typing.Union["CfnApplicationOutput.KinesisFirehoseOutputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationOutput.OutputProperty.KinesisFirehoseOutput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-output.html#cfn-kinesisanalytics-applicationoutput-output-kinesisfirehoseoutput
"""
result = self._values.get("kinesis_firehose_output")
return result
@builtins.property
def kinesis_streams_output(
self,
) -> typing.Optional[typing.Union["CfnApplicationOutput.KinesisStreamsOutputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationOutput.OutputProperty.KinesisStreamsOutput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-output.html#cfn-kinesisanalytics-applicationoutput-output-kinesisstreamsoutput
"""
result = self._values.get("kinesis_streams_output")
return result
@builtins.property
def lambda_output(
self,
) -> typing.Optional[typing.Union["CfnApplicationOutput.LambdaOutputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationOutput.OutputProperty.LambdaOutput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-output.html#cfn-kinesisanalytics-applicationoutput-output-lambdaoutput
"""
result = self._values.get("lambda_output")
return result
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationOutput.OutputProperty.Name``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationoutput-output.html#cfn-kinesisanalytics-applicationoutput-output-name
"""
result = self._values.get("name")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "OutputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutputProps",
jsii_struct_bases=[],
name_mapping={"application_name": "applicationName", "output": "output"},
)
class CfnApplicationOutputProps:
def __init__(
self,
*,
application_name: builtins.str,
output: typing.Union[CfnApplicationOutput.OutputProperty, _IResolvable_9ceae33e],
) -> None:
"""Properties for defining a ``AWS::KinesisAnalytics::ApplicationOutput``.
:param application_name: ``AWS::KinesisAnalytics::ApplicationOutput.ApplicationName``.
:param output: ``AWS::KinesisAnalytics::ApplicationOutput.Output``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationoutput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"application_name": application_name,
"output": output,
}
@builtins.property
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalytics::ApplicationOutput.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationoutput.html#cfn-kinesisanalytics-applicationoutput-applicationname
"""
result = self._values.get("application_name")
assert result is not None, "Required property 'application_name' is missing"
return result
@builtins.property
def output(
self,
) -> typing.Union[CfnApplicationOutput.OutputProperty, _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalytics::ApplicationOutput.Output``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationoutput.html#cfn-kinesisanalytics-applicationoutput-output
"""
result = self._values.get("output")
assert result is not None, "Required property 'output' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnApplicationOutputProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
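# Illustrative usage sketch (not part of the generated bindings): routing an
# in-application stream to a Kinesis data stream. `stack`, the stream names and
# the ARNs are hypothetical placeholders.
#
#     CfnApplicationOutput(
#         stack, "ApplicationOutput",
#         application_name="ticker-analytics",
#         output=CfnApplicationOutput.OutputProperty(
#             name="DESTINATION_SQL_STREAM",
#             destination_schema=CfnApplicationOutput.DestinationSchemaProperty(
#                 record_format_type="JSON"
#             ),
#             kinesis_streams_output=CfnApplicationOutput.KinesisStreamsOutputProperty(
#                 resource_arn="arn:aws:kinesis:us-east-1:111122223333:stream/ticker-results",
#                 role_arn="arn:aws:iam::111122223333:role/kinesis-analytics-role",
#             ),
#         ),
#     )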
@jsii.implements(_IInspectable_051e6ed8)
class CfnApplicationOutputV2(
_CfnResource_7760e8e4,
metaclass=jsii.JSIIMeta,
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutputV2",
):
"""A CloudFormation ``AWS::KinesisAnalyticsV2::ApplicationOutput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html
:cloudformationResource: AWS::KinesisAnalyticsV2::ApplicationOutput
"""
def __init__(
self,
scope: _Construct_f50a3f53,
id: builtins.str,
*,
application_name: builtins.str,
output: typing.Union["CfnApplicationOutputV2.OutputProperty", _IResolvable_9ceae33e],
) -> None:
"""Create a new ``AWS::KinesisAnalyticsV2::ApplicationOutput``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param application_name: ``AWS::KinesisAnalyticsV2::ApplicationOutput.ApplicationName``.
:param output: ``AWS::KinesisAnalyticsV2::ApplicationOutput.Output``.
"""
props = CfnApplicationOutputV2Props(
application_name=application_name, output=output
)
jsii.create(CfnApplicationOutputV2, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_154f5999) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationName")
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::ApplicationOutput.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html#cfn-kinesisanalyticsv2-applicationoutput-applicationname
"""
return jsii.get(self, "applicationName")
@application_name.setter # type: ignore
def application_name(self, value: builtins.str) -> None:
jsii.set(self, "applicationName", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="output")
def output(
self,
) -> typing.Union["CfnApplicationOutputV2.OutputProperty", _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalyticsV2::ApplicationOutput.Output``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html#cfn-kinesisanalyticsv2-applicationoutput-output
"""
return jsii.get(self, "output")
@output.setter # type: ignore
def output(
self,
value: typing.Union["CfnApplicationOutputV2.OutputProperty", _IResolvable_9ceae33e],
) -> None:
jsii.set(self, "output", value)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutputV2.DestinationSchemaProperty",
jsii_struct_bases=[],
name_mapping={"record_format_type": "recordFormatType"},
)
class DestinationSchemaProperty:
def __init__(
self,
*,
record_format_type: typing.Optional[builtins.str] = None,
) -> None:
"""
:param record_format_type: ``CfnApplicationOutputV2.DestinationSchemaProperty.RecordFormatType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-destinationschema.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if record_format_type is not None:
self._values["record_format_type"] = record_format_type
@builtins.property
def record_format_type(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationOutputV2.DestinationSchemaProperty.RecordFormatType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-destinationschema.html#cfn-kinesisanalyticsv2-applicationoutput-destinationschema-recordformattype
"""
result = self._values.get("record_format_type")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "DestinationSchemaProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutputV2.KinesisFirehoseOutputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn"},
)
class KinesisFirehoseOutputProperty:
def __init__(self, *, resource_arn: builtins.str) -> None:
"""
:param resource_arn: ``CfnApplicationOutputV2.KinesisFirehoseOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-kinesisfirehoseoutput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationOutputV2.KinesisFirehoseOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-kinesisfirehoseoutput.html#cfn-kinesisanalyticsv2-applicationoutput-kinesisfirehoseoutput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "KinesisFirehoseOutputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutputV2.KinesisStreamsOutputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn"},
)
class KinesisStreamsOutputProperty:
def __init__(self, *, resource_arn: builtins.str) -> None:
"""
:param resource_arn: ``CfnApplicationOutputV2.KinesisStreamsOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-kinesisstreamsoutput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationOutputV2.KinesisStreamsOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-kinesisstreamsoutput.html#cfn-kinesisanalyticsv2-applicationoutput-kinesisstreamsoutput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "KinesisStreamsOutputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutputV2.LambdaOutputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn"},
)
class LambdaOutputProperty:
def __init__(self, *, resource_arn: builtins.str) -> None:
"""
:param resource_arn: ``CfnApplicationOutputV2.LambdaOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-lambdaoutput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationOutputV2.LambdaOutputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-lambdaoutput.html#cfn-kinesisanalyticsv2-applicationoutput-lambdaoutput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "LambdaOutputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutputV2.OutputProperty",
jsii_struct_bases=[],
name_mapping={
"destination_schema": "destinationSchema",
"kinesis_firehose_output": "kinesisFirehoseOutput",
"kinesis_streams_output": "kinesisStreamsOutput",
"lambda_output": "lambdaOutput",
"name": "name",
},
)
class OutputProperty:
def __init__(
self,
*,
destination_schema: typing.Union["CfnApplicationOutputV2.DestinationSchemaProperty", _IResolvable_9ceae33e],
kinesis_firehose_output: typing.Optional[typing.Union["CfnApplicationOutputV2.KinesisFirehoseOutputProperty", _IResolvable_9ceae33e]] = None,
kinesis_streams_output: typing.Optional[typing.Union["CfnApplicationOutputV2.KinesisStreamsOutputProperty", _IResolvable_9ceae33e]] = None,
lambda_output: typing.Optional[typing.Union["CfnApplicationOutputV2.LambdaOutputProperty", _IResolvable_9ceae33e]] = None,
name: typing.Optional[builtins.str] = None,
) -> None:
"""
:param destination_schema: ``CfnApplicationOutputV2.OutputProperty.DestinationSchema``.
:param kinesis_firehose_output: ``CfnApplicationOutputV2.OutputProperty.KinesisFirehoseOutput``.
:param kinesis_streams_output: ``CfnApplicationOutputV2.OutputProperty.KinesisStreamsOutput``.
:param lambda_output: ``CfnApplicationOutputV2.OutputProperty.LambdaOutput``.
:param name: ``CfnApplicationOutputV2.OutputProperty.Name``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html
"""
self._values: typing.Dict[str, typing.Any] = {
"destination_schema": destination_schema,
}
if kinesis_firehose_output is not None:
self._values["kinesis_firehose_output"] = kinesis_firehose_output
if kinesis_streams_output is not None:
self._values["kinesis_streams_output"] = kinesis_streams_output
if lambda_output is not None:
self._values["lambda_output"] = lambda_output
if name is not None:
self._values["name"] = name
@builtins.property
def destination_schema(
self,
) -> typing.Union["CfnApplicationOutputV2.DestinationSchemaProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationOutputV2.OutputProperty.DestinationSchema``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-destinationschema
"""
result = self._values.get("destination_schema")
assert result is not None, "Required property 'destination_schema' is missing"
return result
@builtins.property
def kinesis_firehose_output(
self,
) -> typing.Optional[typing.Union["CfnApplicationOutputV2.KinesisFirehoseOutputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationOutputV2.OutputProperty.KinesisFirehoseOutput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-kinesisfirehoseoutput
"""
result = self._values.get("kinesis_firehose_output")
return result
@builtins.property
def kinesis_streams_output(
self,
) -> typing.Optional[typing.Union["CfnApplicationOutputV2.KinesisStreamsOutputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationOutputV2.OutputProperty.KinesisStreamsOutput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-kinesisstreamsoutput
"""
result = self._values.get("kinesis_streams_output")
return result
@builtins.property
def lambda_output(
self,
) -> typing.Optional[typing.Union["CfnApplicationOutputV2.LambdaOutputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationOutputV2.OutputProperty.LambdaOutput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-lambdaoutput
"""
result = self._values.get("lambda_output")
return result
@builtins.property
def name(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationOutputV2.OutputProperty.Name``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationoutput-output.html#cfn-kinesisanalyticsv2-applicationoutput-output-name
"""
result = self._values.get("name")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "OutputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationOutputV2Props",
jsii_struct_bases=[],
name_mapping={"application_name": "applicationName", "output": "output"},
)
class CfnApplicationOutputV2Props:
def __init__(
self,
*,
application_name: builtins.str,
output: typing.Union[CfnApplicationOutputV2.OutputProperty, _IResolvable_9ceae33e],
) -> None:
"""Properties for defining a ``AWS::KinesisAnalyticsV2::ApplicationOutput``.
:param application_name: ``AWS::KinesisAnalyticsV2::ApplicationOutput.ApplicationName``.
:param output: ``AWS::KinesisAnalyticsV2::ApplicationOutput.Output``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"application_name": application_name,
"output": output,
}
@builtins.property
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::ApplicationOutput.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html#cfn-kinesisanalyticsv2-applicationoutput-applicationname
"""
result = self._values.get("application_name")
assert result is not None, "Required property 'application_name' is missing"
return result
@builtins.property
def output(
self,
) -> typing.Union[CfnApplicationOutputV2.OutputProperty, _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalyticsV2::ApplicationOutput.Output``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationoutput.html#cfn-kinesisanalyticsv2-applicationoutput-output
"""
result = self._values.get("output")
assert result is not None, "Required property 'output' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnApplicationOutputV2Props(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
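# Illustrative sketch (not part of the generated bindings): a minimal, hedged
# example of creating an AWS::KinesisAnalyticsV2::ApplicationOutput from the
# nested property classes above. ``stack``, the construct id, the application
# name and the stream ARN are placeholders.
#
# stack = ...  # any monocdk-experiment Construct, typically a Stack
# output_v2 = CfnApplicationOutputV2.OutputProperty(
#     destination_schema=CfnApplicationOutputV2.DestinationSchemaProperty(
#         record_format_type="JSON",
#     ),
#     kinesis_streams_output=CfnApplicationOutputV2.KinesisStreamsOutputProperty(
#         resource_arn="arn:aws:kinesis:...",  # placeholder stream ARN
#     ),
#     name="exampleOutput",
# )
# CfnApplicationOutputV2(
#     stack,
#     "ApplicationOutputV2",
#     application_name="my-analytics-application",  # placeholder name
#     output=output_v2,
# )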
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationProps",
jsii_struct_bases=[],
name_mapping={
"inputs": "inputs",
"application_code": "applicationCode",
"application_description": "applicationDescription",
"application_name": "applicationName",
},
)
class CfnApplicationProps:
def __init__(
self,
*,
inputs: typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union[CfnApplication.InputProperty, _IResolvable_9ceae33e]]],
application_code: typing.Optional[builtins.str] = None,
application_description: typing.Optional[builtins.str] = None,
application_name: typing.Optional[builtins.str] = None,
) -> None:
"""Properties for defining a ``AWS::KinesisAnalytics::Application``.
:param inputs: ``AWS::KinesisAnalytics::Application.Inputs``.
:param application_code: ``AWS::KinesisAnalytics::Application.ApplicationCode``.
:param application_description: ``AWS::KinesisAnalytics::Application.ApplicationDescription``.
:param application_name: ``AWS::KinesisAnalytics::Application.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html
"""
self._values: typing.Dict[str, typing.Any] = {
"inputs": inputs,
}
if application_code is not None:
self._values["application_code"] = application_code
if application_description is not None:
self._values["application_description"] = application_description
if application_name is not None:
self._values["application_name"] = application_name
@builtins.property
def inputs(
self,
) -> typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union[CfnApplication.InputProperty, _IResolvable_9ceae33e]]]:
"""``AWS::KinesisAnalytics::Application.Inputs``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html#cfn-kinesisanalytics-application-inputs
"""
result = self._values.get("inputs")
assert result is not None, "Required property 'inputs' is missing"
return result
@builtins.property
def application_code(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalytics::Application.ApplicationCode``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html#cfn-kinesisanalytics-application-applicationcode
"""
result = self._values.get("application_code")
return result
@builtins.property
def application_description(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalytics::Application.ApplicationDescription``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html#cfn-kinesisanalytics-application-applicationdescription
"""
result = self._values.get("application_description")
return result
@builtins.property
def application_name(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalytics::Application.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-application.html#cfn-kinesisanalytics-application-applicationname
"""
result = self._values.get("application_name")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnApplicationProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
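# Illustrative sketch (not part of the generated bindings): a hedged example of
# assembling CfnApplicationProps. ``example_input`` stands in for a
# CfnApplication.InputProperty (defined earlier in this module); its fields are
# not shown here, and all string values are placeholders.
#
# example_input = ...  # a CfnApplication.InputProperty instance
# application_props = CfnApplicationProps(
#     inputs=[example_input],
#     application_code="CREATE OR REPLACE STREAM ...",  # placeholder SQL code
#     application_description="example analytics application",
#     application_name="my-kinesis-analytics-app",
# )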
@jsii.implements(_IInspectable_051e6ed8)
class CfnApplicationReferenceDataSource(
_CfnResource_7760e8e4,
metaclass=jsii.JSIIMeta,
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource",
):
"""A CloudFormation ``AWS::KinesisAnalytics::ApplicationReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationreferencedatasource.html
:cloudformationResource: AWS::KinesisAnalytics::ApplicationReferenceDataSource
"""
def __init__(
self,
scope: _Construct_f50a3f53,
id: builtins.str,
*,
application_name: builtins.str,
reference_data_source: typing.Union["CfnApplicationReferenceDataSource.ReferenceDataSourceProperty", _IResolvable_9ceae33e],
) -> None:
"""Create a new ``AWS::KinesisAnalytics::ApplicationReferenceDataSource``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param application_name: ``AWS::KinesisAnalytics::ApplicationReferenceDataSource.ApplicationName``.
:param reference_data_source: ``AWS::KinesisAnalytics::ApplicationReferenceDataSource.ReferenceDataSource``.
"""
props = CfnApplicationReferenceDataSourceProps(
application_name=application_name,
reference_data_source=reference_data_source,
)
jsii.create(CfnApplicationReferenceDataSource, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_154f5999) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationName")
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalytics::ApplicationReferenceDataSource.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationreferencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-applicationname
"""
return jsii.get(self, "applicationName")
@application_name.setter # type: ignore
def application_name(self, value: builtins.str) -> None:
jsii.set(self, "applicationName", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="referenceDataSource")
def reference_data_source(
self,
) -> typing.Union["CfnApplicationReferenceDataSource.ReferenceDataSourceProperty", _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalytics::ApplicationReferenceDataSource.ReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationreferencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-referencedatasource
"""
return jsii.get(self, "referenceDataSource")
@reference_data_source.setter # type: ignore
def reference_data_source(
self,
value: typing.Union["CfnApplicationReferenceDataSource.ReferenceDataSourceProperty", _IResolvable_9ceae33e],
) -> None:
jsii.set(self, "referenceDataSource", value)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource.CSVMappingParametersProperty",
jsii_struct_bases=[],
name_mapping={
"record_column_delimiter": "recordColumnDelimiter",
"record_row_delimiter": "recordRowDelimiter",
},
)
class CSVMappingParametersProperty:
def __init__(
self,
*,
record_column_delimiter: builtins.str,
record_row_delimiter: builtins.str,
) -> None:
"""
:param record_column_delimiter: ``CfnApplicationReferenceDataSource.CSVMappingParametersProperty.RecordColumnDelimiter``.
:param record_row_delimiter: ``CfnApplicationReferenceDataSource.CSVMappingParametersProperty.RecordRowDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-csvmappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_column_delimiter": record_column_delimiter,
"record_row_delimiter": record_row_delimiter,
}
@builtins.property
def record_column_delimiter(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.CSVMappingParametersProperty.RecordColumnDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-csvmappingparameters.html#cfn-kinesisanalytics-applicationreferencedatasource-csvmappingparameters-recordcolumndelimiter
"""
result = self._values.get("record_column_delimiter")
assert result is not None, "Required property 'record_column_delimiter' is missing"
return result
@builtins.property
def record_row_delimiter(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.CSVMappingParametersProperty.RecordRowDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-csvmappingparameters.html#cfn-kinesisanalytics-applicationreferencedatasource-csvmappingparameters-recordrowdelimiter
"""
result = self._values.get("record_row_delimiter")
assert result is not None, "Required property 'record_row_delimiter' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CSVMappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource.JSONMappingParametersProperty",
jsii_struct_bases=[],
name_mapping={"record_row_path": "recordRowPath"},
)
class JSONMappingParametersProperty:
def __init__(self, *, record_row_path: builtins.str) -> None:
"""
:param record_row_path: ``CfnApplicationReferenceDataSource.JSONMappingParametersProperty.RecordRowPath``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-jsonmappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_row_path": record_row_path,
}
@builtins.property
def record_row_path(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.JSONMappingParametersProperty.RecordRowPath``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-jsonmappingparameters.html#cfn-kinesisanalytics-applicationreferencedatasource-jsonmappingparameters-recordrowpath
"""
result = self._values.get("record_row_path")
assert result is not None, "Required property 'record_row_path' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "JSONMappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource.MappingParametersProperty",
jsii_struct_bases=[],
name_mapping={
"csv_mapping_parameters": "csvMappingParameters",
"json_mapping_parameters": "jsonMappingParameters",
},
)
class MappingParametersProperty:
def __init__(
self,
*,
csv_mapping_parameters: typing.Optional[typing.Union["CfnApplicationReferenceDataSource.CSVMappingParametersProperty", _IResolvable_9ceae33e]] = None,
json_mapping_parameters: typing.Optional[typing.Union["CfnApplicationReferenceDataSource.JSONMappingParametersProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param csv_mapping_parameters: ``CfnApplicationReferenceDataSource.MappingParametersProperty.CSVMappingParameters``.
:param json_mapping_parameters: ``CfnApplicationReferenceDataSource.MappingParametersProperty.JSONMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-mappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if csv_mapping_parameters is not None:
self._values["csv_mapping_parameters"] = csv_mapping_parameters
if json_mapping_parameters is not None:
self._values["json_mapping_parameters"] = json_mapping_parameters
@builtins.property
def csv_mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationReferenceDataSource.CSVMappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationReferenceDataSource.MappingParametersProperty.CSVMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-mappingparameters.html#cfn-kinesisanalytics-applicationreferencedatasource-mappingparameters-csvmappingparameters
"""
result = self._values.get("csv_mapping_parameters")
return result
@builtins.property
def json_mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationReferenceDataSource.JSONMappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationReferenceDataSource.MappingParametersProperty.JSONMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-mappingparameters.html#cfn-kinesisanalytics-applicationreferencedatasource-mappingparameters-jsonmappingparameters
"""
result = self._values.get("json_mapping_parameters")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "MappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource.RecordColumnProperty",
jsii_struct_bases=[],
name_mapping={"name": "name", "sql_type": "sqlType", "mapping": "mapping"},
)
class RecordColumnProperty:
def __init__(
self,
*,
name: builtins.str,
sql_type: builtins.str,
mapping: typing.Optional[builtins.str] = None,
) -> None:
"""
:param name: ``CfnApplicationReferenceDataSource.RecordColumnProperty.Name``.
:param sql_type: ``CfnApplicationReferenceDataSource.RecordColumnProperty.SqlType``.
:param mapping: ``CfnApplicationReferenceDataSource.RecordColumnProperty.Mapping``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-recordcolumn.html
"""
self._values: typing.Dict[str, typing.Any] = {
"name": name,
"sql_type": sql_type,
}
if mapping is not None:
self._values["mapping"] = mapping
@builtins.property
def name(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.RecordColumnProperty.Name``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalytics-applicationreferencedatasource-recordcolumn-name
"""
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return result
@builtins.property
def sql_type(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.RecordColumnProperty.SqlType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalytics-applicationreferencedatasource-recordcolumn-sqltype
"""
result = self._values.get("sql_type")
assert result is not None, "Required property 'sql_type' is missing"
return result
@builtins.property
def mapping(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationReferenceDataSource.RecordColumnProperty.Mapping``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalytics-applicationreferencedatasource-recordcolumn-mapping
"""
result = self._values.get("mapping")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RecordColumnProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource.RecordFormatProperty",
jsii_struct_bases=[],
name_mapping={
"record_format_type": "recordFormatType",
"mapping_parameters": "mappingParameters",
},
)
class RecordFormatProperty:
def __init__(
self,
*,
record_format_type: builtins.str,
mapping_parameters: typing.Optional[typing.Union["CfnApplicationReferenceDataSource.MappingParametersProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param record_format_type: ``CfnApplicationReferenceDataSource.RecordFormatProperty.RecordFormatType``.
:param mapping_parameters: ``CfnApplicationReferenceDataSource.RecordFormatProperty.MappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-recordformat.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_format_type": record_format_type,
}
if mapping_parameters is not None:
self._values["mapping_parameters"] = mapping_parameters
@builtins.property
def record_format_type(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.RecordFormatProperty.RecordFormatType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-recordformat.html#cfn-kinesisanalytics-applicationreferencedatasource-recordformat-recordformattype
"""
result = self._values.get("record_format_type")
assert result is not None, "Required property 'record_format_type' is missing"
return result
@builtins.property
def mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationReferenceDataSource.MappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationReferenceDataSource.RecordFormatProperty.MappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-recordformat.html#cfn-kinesisanalytics-applicationreferencedatasource-recordformat-mappingparameters
"""
result = self._values.get("mapping_parameters")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RecordFormatProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource.ReferenceDataSourceProperty",
jsii_struct_bases=[],
name_mapping={
"reference_schema": "referenceSchema",
"s3_reference_data_source": "s3ReferenceDataSource",
"table_name": "tableName",
},
)
class ReferenceDataSourceProperty:
def __init__(
self,
*,
reference_schema: typing.Union["CfnApplicationReferenceDataSource.ReferenceSchemaProperty", _IResolvable_9ceae33e],
s3_reference_data_source: typing.Optional[typing.Union["CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty", _IResolvable_9ceae33e]] = None,
table_name: typing.Optional[builtins.str] = None,
) -> None:
"""
:param reference_schema: ``CfnApplicationReferenceDataSource.ReferenceDataSourceProperty.ReferenceSchema``.
:param s3_reference_data_source: ``CfnApplicationReferenceDataSource.ReferenceDataSourceProperty.S3ReferenceDataSource``.
:param table_name: ``CfnApplicationReferenceDataSource.ReferenceDataSourceProperty.TableName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-referencedatasource.html
"""
self._values: typing.Dict[str, typing.Any] = {
"reference_schema": reference_schema,
}
if s3_reference_data_source is not None:
self._values["s3_reference_data_source"] = s3_reference_data_source
if table_name is not None:
self._values["table_name"] = table_name
@builtins.property
def reference_schema(
self,
) -> typing.Union["CfnApplicationReferenceDataSource.ReferenceSchemaProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationReferenceDataSource.ReferenceDataSourceProperty.ReferenceSchema``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-referencedatasource-referenceschema
"""
result = self._values.get("reference_schema")
assert result is not None, "Required property 'reference_schema' is missing"
return result
@builtins.property
def s3_reference_data_source(
self,
) -> typing.Optional[typing.Union["CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationReferenceDataSource.ReferenceDataSourceProperty.S3ReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-referencedatasource-s3referencedatasource
"""
result = self._values.get("s3_reference_data_source")
return result
@builtins.property
def table_name(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationReferenceDataSource.ReferenceDataSourceProperty.TableName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-referencedatasource-tablename
"""
result = self._values.get("table_name")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ReferenceDataSourceProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource.ReferenceSchemaProperty",
jsii_struct_bases=[],
name_mapping={
"record_columns": "recordColumns",
"record_format": "recordFormat",
"record_encoding": "recordEncoding",
},
)
class ReferenceSchemaProperty:
def __init__(
self,
*,
record_columns: typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationReferenceDataSource.RecordColumnProperty", _IResolvable_9ceae33e]]],
record_format: typing.Union["CfnApplicationReferenceDataSource.RecordFormatProperty", _IResolvable_9ceae33e],
record_encoding: typing.Optional[builtins.str] = None,
) -> None:
"""
:param record_columns: ``CfnApplicationReferenceDataSource.ReferenceSchemaProperty.RecordColumns``.
:param record_format: ``CfnApplicationReferenceDataSource.ReferenceSchemaProperty.RecordFormat``.
:param record_encoding: ``CfnApplicationReferenceDataSource.ReferenceSchemaProperty.RecordEncoding``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-referenceschema.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_columns": record_columns,
"record_format": record_format,
}
if record_encoding is not None:
self._values["record_encoding"] = record_encoding
@builtins.property
def record_columns(
self,
) -> typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationReferenceDataSource.RecordColumnProperty", _IResolvable_9ceae33e]]]:
"""``CfnApplicationReferenceDataSource.ReferenceSchemaProperty.RecordColumns``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalytics-applicationreferencedatasource-referenceschema-recordcolumns
"""
result = self._values.get("record_columns")
assert result is not None, "Required property 'record_columns' is missing"
return result
@builtins.property
def record_format(
self,
) -> typing.Union["CfnApplicationReferenceDataSource.RecordFormatProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationReferenceDataSource.ReferenceSchemaProperty.RecordFormat``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalytics-applicationreferencedatasource-referenceschema-recordformat
"""
result = self._values.get("record_format")
assert result is not None, "Required property 'record_format' is missing"
return result
@builtins.property
def record_encoding(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationReferenceDataSource.ReferenceSchemaProperty.RecordEncoding``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalytics-applicationreferencedatasource-referenceschema-recordencoding
"""
result = self._values.get("record_encoding")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ReferenceSchemaProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty",
jsii_struct_bases=[],
name_mapping={
"bucket_arn": "bucketArn",
"file_key": "fileKey",
"reference_role_arn": "referenceRoleArn",
},
)
class S3ReferenceDataSourceProperty:
def __init__(
self,
*,
bucket_arn: builtins.str,
file_key: builtins.str,
reference_role_arn: builtins.str,
) -> None:
"""
:param bucket_arn: ``CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty.BucketARN``.
:param file_key: ``CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty.FileKey``.
:param reference_role_arn: ``CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty.ReferenceRoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-s3referencedatasource.html
"""
self._values: typing.Dict[str, typing.Any] = {
"bucket_arn": bucket_arn,
"file_key": file_key,
"reference_role_arn": reference_role_arn,
}
@builtins.property
def bucket_arn(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty.BucketARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-s3referencedatasource-bucketarn
"""
result = self._values.get("bucket_arn")
assert result is not None, "Required property 'bucket_arn' is missing"
return result
@builtins.property
def file_key(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty.FileKey``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-s3referencedatasource-filekey
"""
result = self._values.get("file_key")
assert result is not None, "Required property 'file_key' is missing"
return result
@builtins.property
def reference_role_arn(self) -> builtins.str:
"""``CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty.ReferenceRoleARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalytics-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-s3referencedatasource-referencerolearn
"""
result = self._values.get("reference_role_arn")
assert result is not None, "Required property 'reference_role_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "S3ReferenceDataSourceProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceProps",
jsii_struct_bases=[],
name_mapping={
"application_name": "applicationName",
"reference_data_source": "referenceDataSource",
},
)
class CfnApplicationReferenceDataSourceProps:
def __init__(
self,
*,
application_name: builtins.str,
reference_data_source: typing.Union[CfnApplicationReferenceDataSource.ReferenceDataSourceProperty, _IResolvable_9ceae33e],
) -> None:
"""Properties for defining a ``AWS::KinesisAnalytics::ApplicationReferenceDataSource``.
:param application_name: ``AWS::KinesisAnalytics::ApplicationReferenceDataSource.ApplicationName``.
:param reference_data_source: ``AWS::KinesisAnalytics::ApplicationReferenceDataSource.ReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationreferencedatasource.html
"""
self._values: typing.Dict[str, typing.Any] = {
"application_name": application_name,
"reference_data_source": reference_data_source,
}
@builtins.property
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalytics::ApplicationReferenceDataSource.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationreferencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-applicationname
"""
result = self._values.get("application_name")
assert result is not None, "Required property 'application_name' is missing"
return result
@builtins.property
def reference_data_source(
self,
) -> typing.Union[CfnApplicationReferenceDataSource.ReferenceDataSourceProperty, _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalytics::ApplicationReferenceDataSource.ReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalytics-applicationreferencedatasource.html#cfn-kinesisanalytics-applicationreferencedatasource-referencedatasource
"""
result = self._values.get("reference_data_source")
assert result is not None, "Required property 'reference_data_source' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnApplicationReferenceDataSourceProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
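# Illustrative sketch (not part of the generated bindings): a hedged example of
# wiring up an AWS::KinesisAnalytics::ApplicationReferenceDataSource from the
# nested property classes above. ``stack``, the ARNs, the bucket/key names and
# the application name are placeholders.
#
# stack = ...  # any monocdk-experiment Construct, typically a Stack
# reference_schema = CfnApplicationReferenceDataSource.ReferenceSchemaProperty(
#     record_columns=[
#         CfnApplicationReferenceDataSource.RecordColumnProperty(
#             name="ticker_symbol", sql_type="VARCHAR(4)", mapping="$.symbol",
#         ),
#     ],
#     record_format=CfnApplicationReferenceDataSource.RecordFormatProperty(
#         record_format_type="JSON",
#         mapping_parameters=CfnApplicationReferenceDataSource.MappingParametersProperty(
#             json_mapping_parameters=CfnApplicationReferenceDataSource.JSONMappingParametersProperty(
#                 record_row_path="$",
#             ),
#         ),
#     ),
# )
# reference_data_source = CfnApplicationReferenceDataSource.ReferenceDataSourceProperty(
#     reference_schema=reference_schema,
#     s3_reference_data_source=CfnApplicationReferenceDataSource.S3ReferenceDataSourceProperty(
#         bucket_arn="arn:aws:s3:::example-bucket",  # placeholder bucket ARN
#         file_key="reference/data.json",  # placeholder object key
#         reference_role_arn="arn:aws:iam::123456789012:role/example",  # placeholder role ARN
#     ),
#     table_name="ReferenceTable",
# )
# CfnApplicationReferenceDataSource(
#     stack,
#     "ApplicationReferenceDataSource",
#     application_name="my-kinesis-analytics-app",  # placeholder name
#     reference_data_source=reference_data_source,
# )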
@jsii.implements(_IInspectable_051e6ed8)
class CfnApplicationReferenceDataSourceV2(
_CfnResource_7760e8e4,
metaclass=jsii.JSIIMeta,
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2",
):
"""A CloudFormation ``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html
:cloudformationResource: AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource
"""
def __init__(
self,
scope: _Construct_f50a3f53,
id: builtins.str,
*,
application_name: builtins.str,
reference_data_source: typing.Union["CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty", _IResolvable_9ceae33e],
) -> None:
"""Create a new ``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param application_name: ``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ApplicationName``.
:param reference_data_source: ``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceDataSource``.
"""
props = CfnApplicationReferenceDataSourceV2Props(
application_name=application_name,
reference_data_source=reference_data_source,
)
jsii.create(CfnApplicationReferenceDataSourceV2, self, [scope, id, props])
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_154f5999) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationName")
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-applicationname
"""
return jsii.get(self, "applicationName")
@application_name.setter # type: ignore
def application_name(self, value: builtins.str) -> None:
jsii.set(self, "applicationName", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="referenceDataSource")
def reference_data_source(
self,
) -> typing.Union["CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty", _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource
"""
return jsii.get(self, "referenceDataSource")
@reference_data_source.setter # type: ignore
def reference_data_source(
self,
value: typing.Union["CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty", _IResolvable_9ceae33e],
) -> None:
jsii.set(self, "referenceDataSource", value)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2.CSVMappingParametersProperty",
jsii_struct_bases=[],
name_mapping={
"record_column_delimiter": "recordColumnDelimiter",
"record_row_delimiter": "recordRowDelimiter",
},
)
class CSVMappingParametersProperty:
def __init__(
self,
*,
record_column_delimiter: builtins.str,
record_row_delimiter: builtins.str,
) -> None:
"""
:param record_column_delimiter: ``CfnApplicationReferenceDataSourceV2.CSVMappingParametersProperty.RecordColumnDelimiter``.
:param record_row_delimiter: ``CfnApplicationReferenceDataSourceV2.CSVMappingParametersProperty.RecordRowDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_column_delimiter": record_column_delimiter,
"record_row_delimiter": record_row_delimiter,
}
@builtins.property
def record_column_delimiter(self) -> builtins.str:
"""``CfnApplicationReferenceDataSourceV2.CSVMappingParametersProperty.RecordColumnDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters-recordcolumndelimiter
"""
result = self._values.get("record_column_delimiter")
assert result is not None, "Required property 'record_column_delimiter' is missing"
return result
@builtins.property
def record_row_delimiter(self) -> builtins.str:
"""``CfnApplicationReferenceDataSourceV2.CSVMappingParametersProperty.RecordRowDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-csvmappingparameters-recordrowdelimiter
"""
result = self._values.get("record_row_delimiter")
assert result is not None, "Required property 'record_row_delimiter' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CSVMappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2.JSONMappingParametersProperty",
jsii_struct_bases=[],
name_mapping={"record_row_path": "recordRowPath"},
)
class JSONMappingParametersProperty:
def __init__(self, *, record_row_path: builtins.str) -> None:
"""
:param record_row_path: ``CfnApplicationReferenceDataSourceV2.JSONMappingParametersProperty.RecordRowPath``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-jsonmappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_row_path": record_row_path,
}
@builtins.property
def record_row_path(self) -> builtins.str:
"""``CfnApplicationReferenceDataSourceV2.JSONMappingParametersProperty.RecordRowPath``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-jsonmappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-jsonmappingparameters-recordrowpath
"""
result = self._values.get("record_row_path")
assert result is not None, "Required property 'record_row_path' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "JSONMappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2.MappingParametersProperty",
jsii_struct_bases=[],
name_mapping={
"csv_mapping_parameters": "csvMappingParameters",
"json_mapping_parameters": "jsonMappingParameters",
},
)
class MappingParametersProperty:
def __init__(
self,
*,
csv_mapping_parameters: typing.Optional[typing.Union["CfnApplicationReferenceDataSourceV2.CSVMappingParametersProperty", _IResolvable_9ceae33e]] = None,
json_mapping_parameters: typing.Optional[typing.Union["CfnApplicationReferenceDataSourceV2.JSONMappingParametersProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param csv_mapping_parameters: ``CfnApplicationReferenceDataSourceV2.MappingParametersProperty.CSVMappingParameters``.
:param json_mapping_parameters: ``CfnApplicationReferenceDataSourceV2.MappingParametersProperty.JSONMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if csv_mapping_parameters is not None:
self._values["csv_mapping_parameters"] = csv_mapping_parameters
if json_mapping_parameters is not None:
self._values["json_mapping_parameters"] = json_mapping_parameters
@builtins.property
def csv_mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationReferenceDataSourceV2.CSVMappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationReferenceDataSourceV2.MappingParametersProperty.CSVMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters-csvmappingparameters
"""
result = self._values.get("csv_mapping_parameters")
return result
@builtins.property
def json_mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationReferenceDataSourceV2.JSONMappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationReferenceDataSourceV2.MappingParametersProperty.JSONMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-mappingparameters-jsonmappingparameters
"""
result = self._values.get("json_mapping_parameters")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "MappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2.RecordColumnProperty",
jsii_struct_bases=[],
name_mapping={"name": "name", "sql_type": "sqlType", "mapping": "mapping"},
)
class RecordColumnProperty:
def __init__(
self,
*,
name: builtins.str,
sql_type: builtins.str,
mapping: typing.Optional[builtins.str] = None,
) -> None:
"""
:param name: ``CfnApplicationReferenceDataSourceV2.RecordColumnProperty.Name``.
:param sql_type: ``CfnApplicationReferenceDataSourceV2.RecordColumnProperty.SqlType``.
:param mapping: ``CfnApplicationReferenceDataSourceV2.RecordColumnProperty.Mapping``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn.html
"""
self._values: typing.Dict[str, typing.Any] = {
"name": name,
"sql_type": sql_type,
}
if mapping is not None:
self._values["mapping"] = mapping
@builtins.property
def name(self) -> builtins.str:
"""``CfnApplicationReferenceDataSourceV2.RecordColumnProperty.Name``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn-name
"""
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return result
@builtins.property
def sql_type(self) -> builtins.str:
"""``CfnApplicationReferenceDataSourceV2.RecordColumnProperty.SqlType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn-sqltype
"""
result = self._values.get("sql_type")
assert result is not None, "Required property 'sql_type' is missing"
return result
@builtins.property
def mapping(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationReferenceDataSourceV2.RecordColumnProperty.Mapping``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordcolumn-mapping
"""
result = self._values.get("mapping")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RecordColumnProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2.RecordFormatProperty",
jsii_struct_bases=[],
name_mapping={
"record_format_type": "recordFormatType",
"mapping_parameters": "mappingParameters",
},
)
class RecordFormatProperty:
def __init__(
self,
*,
record_format_type: builtins.str,
mapping_parameters: typing.Optional[typing.Union["CfnApplicationReferenceDataSourceV2.MappingParametersProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param record_format_type: ``CfnApplicationReferenceDataSourceV2.RecordFormatProperty.RecordFormatType``.
:param mapping_parameters: ``CfnApplicationReferenceDataSourceV2.RecordFormatProperty.MappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordformat.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_format_type": record_format_type,
}
if mapping_parameters is not None:
self._values["mapping_parameters"] = mapping_parameters
@builtins.property
def record_format_type(self) -> builtins.str:
"""``CfnApplicationReferenceDataSourceV2.RecordFormatProperty.RecordFormatType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordformat.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordformat-recordformattype
"""
result = self._values.get("record_format_type")
assert result is not None, "Required property 'record_format_type' is missing"
return result
@builtins.property
def mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationReferenceDataSourceV2.MappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationReferenceDataSourceV2.RecordFormatProperty.MappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-recordformat.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-recordformat-mappingparameters
"""
result = self._values.get("mapping_parameters")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RecordFormatProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty",
jsii_struct_bases=[],
name_mapping={
"reference_schema": "referenceSchema",
"s3_reference_data_source": "s3ReferenceDataSource",
"table_name": "tableName",
},
)
class ReferenceDataSourceProperty:
def __init__(
self,
*,
reference_schema: typing.Union["CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty", _IResolvable_9ceae33e],
s3_reference_data_source: typing.Optional[typing.Union["CfnApplicationReferenceDataSourceV2.S3ReferenceDataSourceProperty", _IResolvable_9ceae33e]] = None,
table_name: typing.Optional[builtins.str] = None,
) -> None:
"""
:param reference_schema: ``CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty.ReferenceSchema``.
:param s3_reference_data_source: ``CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty.S3ReferenceDataSource``.
:param table_name: ``CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty.TableName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html
"""
self._values: typing.Dict[str, typing.Any] = {
"reference_schema": reference_schema,
}
if s3_reference_data_source is not None:
self._values["s3_reference_data_source"] = s3_reference_data_source
if table_name is not None:
self._values["table_name"] = table_name
@builtins.property
def reference_schema(
self,
) -> typing.Union["CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty.ReferenceSchema``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-referenceschema
"""
result = self._values.get("reference_schema")
assert result is not None, "Required property 'reference_schema' is missing"
return result
@builtins.property
def s3_reference_data_source(
self,
) -> typing.Optional[typing.Union["CfnApplicationReferenceDataSourceV2.S3ReferenceDataSourceProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty.S3ReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-s3referencedatasource
"""
result = self._values.get("s3_reference_data_source")
return result
@builtins.property
def table_name(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty.TableName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource-tablename
"""
result = self._values.get("table_name")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ReferenceDataSourceProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty",
jsii_struct_bases=[],
name_mapping={
"record_columns": "recordColumns",
"record_format": "recordFormat",
"record_encoding": "recordEncoding",
},
)
class ReferenceSchemaProperty:
def __init__(
self,
*,
record_columns: typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationReferenceDataSourceV2.RecordColumnProperty", _IResolvable_9ceae33e]]],
record_format: typing.Union["CfnApplicationReferenceDataSourceV2.RecordFormatProperty", _IResolvable_9ceae33e],
record_encoding: typing.Optional[builtins.str] = None,
) -> None:
"""
:param record_columns: ``CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty.RecordColumns``.
:param record_format: ``CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty.RecordFormat``.
:param record_encoding: ``CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty.RecordEncoding``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_columns": record_columns,
"record_format": record_format,
}
if record_encoding is not None:
self._values["record_encoding"] = record_encoding
@builtins.property
def record_columns(
self,
) -> typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationReferenceDataSourceV2.RecordColumnProperty", _IResolvable_9ceae33e]]]:
"""``CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty.RecordColumns``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordcolumns
"""
result = self._values.get("record_columns")
assert result is not None, "Required property 'record_columns' is missing"
return result
@builtins.property
def record_format(
self,
) -> typing.Union["CfnApplicationReferenceDataSourceV2.RecordFormatProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty.RecordFormat``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordformat
"""
result = self._values.get("record_format")
assert result is not None, "Required property 'record_format' is missing"
return result
@builtins.property
def record_encoding(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty.RecordEncoding``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-referenceschema.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referenceschema-recordencoding
"""
result = self._values.get("record_encoding")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ReferenceSchemaProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2.S3ReferenceDataSourceProperty",
jsii_struct_bases=[],
name_mapping={"bucket_arn": "bucketArn", "file_key": "fileKey"},
)
class S3ReferenceDataSourceProperty:
def __init__(self, *, bucket_arn: builtins.str, file_key: builtins.str) -> None:
"""
:param bucket_arn: ``CfnApplicationReferenceDataSourceV2.S3ReferenceDataSourceProperty.BucketARN``.
:param file_key: ``CfnApplicationReferenceDataSourceV2.S3ReferenceDataSourceProperty.FileKey``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource.html
"""
self._values: typing.Dict[str, typing.Any] = {
"bucket_arn": bucket_arn,
"file_key": file_key,
}
@builtins.property
def bucket_arn(self) -> builtins.str:
"""``CfnApplicationReferenceDataSourceV2.S3ReferenceDataSourceProperty.BucketARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource-bucketarn
"""
result = self._values.get("bucket_arn")
assert result is not None, "Required property 'bucket_arn' is missing"
return result
@builtins.property
def file_key(self) -> builtins.str:
"""``CfnApplicationReferenceDataSourceV2.S3ReferenceDataSourceProperty.FileKey``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-s3referencedatasource-filekey
"""
result = self._values.get("file_key")
assert result is not None, "Required property 'file_key' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "S3ReferenceDataSourceProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
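# Illustrative sketch (not part of the generated bindings): composing the nested
# property structs above into a reference data source. The column names, SQL
# types, bucket ARN, file key and table name are assumptions made up for the
# example; a MappingParametersProperty may additionally be passed to the record
# format when delimiter or row-path details are needed.
#
#   reference_data_source = CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty(
#       reference_schema=CfnApplicationReferenceDataSourceV2.ReferenceSchemaProperty(
#           record_columns=[
#               CfnApplicationReferenceDataSourceV2.RecordColumnProperty(
#                   name="ticker", sql_type="VARCHAR(8)", mapping="$.ticker"
#               ),
#               CfnApplicationReferenceDataSourceV2.RecordColumnProperty(
#                   name="price", sql_type="DOUBLE", mapping="$.price"
#               ),
#           ],
#           record_format=CfnApplicationReferenceDataSourceV2.RecordFormatProperty(
#               record_format_type="JSON"
#           ),
#           record_encoding="UTF-8",
#       ),
#       s3_reference_data_source=CfnApplicationReferenceDataSourceV2.S3ReferenceDataSourceProperty(
#           bucket_arn="arn:aws:s3:::example-bucket",
#           file_key="reference/data.json",
#       ),
#       table_name="ReferenceTable",
#   )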
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationReferenceDataSourceV2Props",
jsii_struct_bases=[],
name_mapping={
"application_name": "applicationName",
"reference_data_source": "referenceDataSource",
},
)
class CfnApplicationReferenceDataSourceV2Props:
def __init__(
self,
*,
application_name: builtins.str,
reference_data_source: typing.Union[CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty, _IResolvable_9ceae33e],
) -> None:
"""Properties for defining a ``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource``.
:param application_name: ``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ApplicationName``.
:param reference_data_source: ``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html
"""
self._values: typing.Dict[str, typing.Any] = {
"application_name": application_name,
"reference_data_source": reference_data_source,
}
@builtins.property
def application_name(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-applicationname
"""
result = self._values.get("application_name")
assert result is not None, "Required property 'application_name' is missing"
return result
@builtins.property
def reference_data_source(
self,
) -> typing.Union[CfnApplicationReferenceDataSourceV2.ReferenceDataSourceProperty, _IResolvable_9ceae33e]:
"""``AWS::KinesisAnalyticsV2::ApplicationReferenceDataSource.ReferenceDataSource``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-applicationreferencedatasource.html#cfn-kinesisanalyticsv2-applicationreferencedatasource-referencedatasource
"""
result = self._values.get("reference_data_source")
assert result is not None, "Required property 'reference_data_source' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnApplicationReferenceDataSourceV2Props(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
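# Illustrative sketch (not part of the generated bindings): filling in the props
# struct above. ``reference_data_source`` refers to the example value sketched
# after CfnApplicationReferenceDataSourceV2 above; the application name is an
# assumption.
#
#   props = CfnApplicationReferenceDataSourceV2Props(
#       application_name="my-analytics-app",
#       reference_data_source=reference_data_source,
#   )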
@jsii.implements(_IInspectable_051e6ed8)
class CfnApplicationV2(
_CfnResource_7760e8e4,
metaclass=jsii.JSIIMeta,
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2",
):
"""A CloudFormation ``AWS::KinesisAnalyticsV2::Application``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html
:cloudformationResource: AWS::KinesisAnalyticsV2::Application
"""
def __init__(
self,
scope: _Construct_f50a3f53,
id: builtins.str,
*,
runtime_environment: builtins.str,
service_execution_role: builtins.str,
application_configuration: typing.Optional[typing.Union["CfnApplicationV2.ApplicationConfigurationProperty", _IResolvable_9ceae33e]] = None,
application_description: typing.Optional[builtins.str] = None,
application_name: typing.Optional[builtins.str] = None,
tags: typing.Optional[typing.List[_CfnTag_b4661f1a]] = None,
) -> None:
"""Create a new ``AWS::KinesisAnalyticsV2::Application``.
:param scope: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param runtime_environment: ``AWS::KinesisAnalyticsV2::Application.RuntimeEnvironment``.
:param service_execution_role: ``AWS::KinesisAnalyticsV2::Application.ServiceExecutionRole``.
:param application_configuration: ``AWS::KinesisAnalyticsV2::Application.ApplicationConfiguration``.
:param application_description: ``AWS::KinesisAnalyticsV2::Application.ApplicationDescription``.
:param application_name: ``AWS::KinesisAnalyticsV2::Application.ApplicationName``.
:param tags: ``AWS::KinesisAnalyticsV2::Application.Tags``.
"""
props = CfnApplicationV2Props(
runtime_environment=runtime_environment,
service_execution_role=service_execution_role,
application_configuration=application_configuration,
application_description=application_description,
application_name=application_name,
tags=tags,
)
jsii.create(CfnApplicationV2, self, [scope, id, props])
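    # Hedged usage sketch (comment only, not part of the generated bindings):
    # instantiating this L1 resource inside some existing Stack. The ``stack``
    # variable, role ARN, runtime value and application name are illustrative
    # assumptions.
    #
    #   CfnApplicationV2(
    #       stack, "FlinkApp",
    #       runtime_environment="FLINK-1_11",
    #       service_execution_role="arn:aws:iam::123456789012:role/example-role",
    #       application_name="my-flink-app",
    #   )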
@jsii.member(jsii_name="inspect")
def inspect(self, inspector: _TreeInspector_154f5999) -> None:
"""(experimental) Examines the CloudFormation resource and discloses attributes.
:param inspector: - tree inspector to collect and process attributes.
:stability: experimental
"""
return jsii.invoke(self, "inspect", [inspector])
@jsii.member(jsii_name="renderProperties")
def _render_properties(
self,
props: typing.Mapping[builtins.str, typing.Any],
) -> typing.Mapping[builtins.str, typing.Any]:
"""
:param props: -
"""
return jsii.invoke(self, "renderProperties", [props])
@jsii.python.classproperty # type: ignore
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
"""The CloudFormation resource type name for this resource class."""
return jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME")
@builtins.property # type: ignore
@jsii.member(jsii_name="cfnProperties")
def _cfn_properties(self) -> typing.Mapping[builtins.str, typing.Any]:
return jsii.get(self, "cfnProperties")
@builtins.property # type: ignore
@jsii.member(jsii_name="tags")
def tags(self) -> _TagManager_2508893f:
"""``AWS::KinesisAnalyticsV2::Application.Tags``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-tags
"""
return jsii.get(self, "tags")
@builtins.property # type: ignore
@jsii.member(jsii_name="runtimeEnvironment")
def runtime_environment(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::Application.RuntimeEnvironment``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-runtimeenvironment
"""
return jsii.get(self, "runtimeEnvironment")
@runtime_environment.setter # type: ignore
def runtime_environment(self, value: builtins.str) -> None:
jsii.set(self, "runtimeEnvironment", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="serviceExecutionRole")
def service_execution_role(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::Application.ServiceExecutionRole``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-serviceexecutionrole
"""
return jsii.get(self, "serviceExecutionRole")
@service_execution_role.setter # type: ignore
def service_execution_role(self, value: builtins.str) -> None:
jsii.set(self, "serviceExecutionRole", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationConfiguration")
def application_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.ApplicationConfigurationProperty", _IResolvable_9ceae33e]]:
"""``AWS::KinesisAnalyticsV2::Application.ApplicationConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationconfiguration
"""
return jsii.get(self, "applicationConfiguration")
@application_configuration.setter # type: ignore
def application_configuration(
self,
value: typing.Optional[typing.Union["CfnApplicationV2.ApplicationConfigurationProperty", _IResolvable_9ceae33e]],
) -> None:
jsii.set(self, "applicationConfiguration", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationDescription")
def application_description(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalyticsV2::Application.ApplicationDescription``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationdescription
"""
return jsii.get(self, "applicationDescription")
@application_description.setter # type: ignore
def application_description(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "applicationDescription", value)
@builtins.property # type: ignore
@jsii.member(jsii_name="applicationName")
def application_name(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalyticsV2::Application.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationname
"""
return jsii.get(self, "applicationName")
@application_name.setter # type: ignore
def application_name(self, value: typing.Optional[builtins.str]) -> None:
jsii.set(self, "applicationName", value)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.ApplicationCodeConfigurationProperty",
jsii_struct_bases=[],
name_mapping={
"code_content": "codeContent",
"code_content_type": "codeContentType",
},
)
class ApplicationCodeConfigurationProperty:
def __init__(
self,
*,
code_content: typing.Union["CfnApplicationV2.CodeContentProperty", _IResolvable_9ceae33e],
code_content_type: builtins.str,
) -> None:
"""
:param code_content: ``CfnApplicationV2.ApplicationCodeConfigurationProperty.CodeContent``.
:param code_content_type: ``CfnApplicationV2.ApplicationCodeConfigurationProperty.CodeContentType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationcodeconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {
"code_content": code_content,
"code_content_type": code_content_type,
}
@builtins.property
def code_content(
self,
) -> typing.Union["CfnApplicationV2.CodeContentProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationV2.ApplicationCodeConfigurationProperty.CodeContent``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationcodeconfiguration.html#cfn-kinesisanalyticsv2-application-applicationcodeconfiguration-codecontent
"""
result = self._values.get("code_content")
assert result is not None, "Required property 'code_content' is missing"
return result
@builtins.property
def code_content_type(self) -> builtins.str:
"""``CfnApplicationV2.ApplicationCodeConfigurationProperty.CodeContentType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationcodeconfiguration.html#cfn-kinesisanalyticsv2-application-applicationcodeconfiguration-codecontenttype
"""
result = self._values.get("code_content_type")
assert result is not None, "Required property 'code_content_type' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ApplicationCodeConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.ApplicationConfigurationProperty",
jsii_struct_bases=[],
name_mapping={
"application_code_configuration": "applicationCodeConfiguration",
"application_snapshot_configuration": "applicationSnapshotConfiguration",
"environment_properties": "environmentProperties",
"flink_application_configuration": "flinkApplicationConfiguration",
"sql_application_configuration": "sqlApplicationConfiguration",
},
)
class ApplicationConfigurationProperty:
def __init__(
self,
*,
application_code_configuration: typing.Optional[typing.Union["CfnApplicationV2.ApplicationCodeConfigurationProperty", _IResolvable_9ceae33e]] = None,
application_snapshot_configuration: typing.Optional[typing.Union["CfnApplicationV2.ApplicationSnapshotConfigurationProperty", _IResolvable_9ceae33e]] = None,
environment_properties: typing.Optional[typing.Union["CfnApplicationV2.EnvironmentPropertiesProperty", _IResolvable_9ceae33e]] = None,
flink_application_configuration: typing.Optional[typing.Union["CfnApplicationV2.FlinkApplicationConfigurationProperty", _IResolvable_9ceae33e]] = None,
sql_application_configuration: typing.Optional[typing.Union["CfnApplicationV2.SqlApplicationConfigurationProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param application_code_configuration: ``CfnApplicationV2.ApplicationConfigurationProperty.ApplicationCodeConfiguration``.
:param application_snapshot_configuration: ``CfnApplicationV2.ApplicationConfigurationProperty.ApplicationSnapshotConfiguration``.
:param environment_properties: ``CfnApplicationV2.ApplicationConfigurationProperty.EnvironmentProperties``.
:param flink_application_configuration: ``CfnApplicationV2.ApplicationConfigurationProperty.FlinkApplicationConfiguration``.
:param sql_application_configuration: ``CfnApplicationV2.ApplicationConfigurationProperty.SqlApplicationConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if application_code_configuration is not None:
self._values["application_code_configuration"] = application_code_configuration
if application_snapshot_configuration is not None:
self._values["application_snapshot_configuration"] = application_snapshot_configuration
if environment_properties is not None:
self._values["environment_properties"] = environment_properties
if flink_application_configuration is not None:
self._values["flink_application_configuration"] = flink_application_configuration
if sql_application_configuration is not None:
self._values["sql_application_configuration"] = sql_application_configuration
@builtins.property
def application_code_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.ApplicationCodeConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.ApplicationConfigurationProperty.ApplicationCodeConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-applicationcodeconfiguration
"""
result = self._values.get("application_code_configuration")
return result
@builtins.property
def application_snapshot_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.ApplicationSnapshotConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.ApplicationConfigurationProperty.ApplicationSnapshotConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-applicationsnapshotconfiguration
"""
result = self._values.get("application_snapshot_configuration")
return result
@builtins.property
def environment_properties(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.EnvironmentPropertiesProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.ApplicationConfigurationProperty.EnvironmentProperties``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-environmentproperties
"""
result = self._values.get("environment_properties")
return result
@builtins.property
def flink_application_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.FlinkApplicationConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.ApplicationConfigurationProperty.FlinkApplicationConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-flinkapplicationconfiguration
"""
result = self._values.get("flink_application_configuration")
return result
@builtins.property
def sql_application_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.SqlApplicationConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.ApplicationConfigurationProperty.SqlApplicationConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationconfiguration.html#cfn-kinesisanalyticsv2-application-applicationconfiguration-sqlapplicationconfiguration
"""
result = self._values.get("sql_application_configuration")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ApplicationConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.ApplicationSnapshotConfigurationProperty",
jsii_struct_bases=[],
name_mapping={"snapshots_enabled": "snapshotsEnabled"},
)
class ApplicationSnapshotConfigurationProperty:
def __init__(
self,
*,
snapshots_enabled: typing.Union[builtins.bool, _IResolvable_9ceae33e],
) -> None:
"""
:param snapshots_enabled: ``CfnApplicationV2.ApplicationSnapshotConfigurationProperty.SnapshotsEnabled``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationsnapshotconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {
"snapshots_enabled": snapshots_enabled,
}
@builtins.property
def snapshots_enabled(
self,
) -> typing.Union[builtins.bool, _IResolvable_9ceae33e]:
"""``CfnApplicationV2.ApplicationSnapshotConfigurationProperty.SnapshotsEnabled``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-applicationsnapshotconfiguration.html#cfn-kinesisanalyticsv2-application-applicationsnapshotconfiguration-snapshotsenabled
"""
result = self._values.get("snapshots_enabled")
assert result is not None, "Required property 'snapshots_enabled' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ApplicationSnapshotConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.CSVMappingParametersProperty",
jsii_struct_bases=[],
name_mapping={
"record_column_delimiter": "recordColumnDelimiter",
"record_row_delimiter": "recordRowDelimiter",
},
)
class CSVMappingParametersProperty:
def __init__(
self,
*,
record_column_delimiter: builtins.str,
record_row_delimiter: builtins.str,
) -> None:
"""
:param record_column_delimiter: ``CfnApplicationV2.CSVMappingParametersProperty.RecordColumnDelimiter``.
:param record_row_delimiter: ``CfnApplicationV2.CSVMappingParametersProperty.RecordRowDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-csvmappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_column_delimiter": record_column_delimiter,
"record_row_delimiter": record_row_delimiter,
}
@builtins.property
def record_column_delimiter(self) -> builtins.str:
"""``CfnApplicationV2.CSVMappingParametersProperty.RecordColumnDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-csvmappingparameters.html#cfn-kinesisanalyticsv2-application-csvmappingparameters-recordcolumndelimiter
"""
result = self._values.get("record_column_delimiter")
assert result is not None, "Required property 'record_column_delimiter' is missing"
return result
@builtins.property
def record_row_delimiter(self) -> builtins.str:
"""``CfnApplicationV2.CSVMappingParametersProperty.RecordRowDelimiter``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-csvmappingparameters.html#cfn-kinesisanalyticsv2-application-csvmappingparameters-recordrowdelimiter
"""
result = self._values.get("record_row_delimiter")
assert result is not None, "Required property 'record_row_delimiter' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CSVMappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.CheckpointConfigurationProperty",
jsii_struct_bases=[],
name_mapping={
"configuration_type": "configurationType",
"checkpointing_enabled": "checkpointingEnabled",
"checkpoint_interval": "checkpointInterval",
"min_pause_between_checkpoints": "minPauseBetweenCheckpoints",
},
)
class CheckpointConfigurationProperty:
def __init__(
self,
*,
configuration_type: builtins.str,
checkpointing_enabled: typing.Optional[typing.Union[builtins.bool, _IResolvable_9ceae33e]] = None,
checkpoint_interval: typing.Optional[jsii.Number] = None,
min_pause_between_checkpoints: typing.Optional[jsii.Number] = None,
) -> None:
"""
:param configuration_type: ``CfnApplicationV2.CheckpointConfigurationProperty.ConfigurationType``.
:param checkpointing_enabled: ``CfnApplicationV2.CheckpointConfigurationProperty.CheckpointingEnabled``.
:param checkpoint_interval: ``CfnApplicationV2.CheckpointConfigurationProperty.CheckpointInterval``.
:param min_pause_between_checkpoints: ``CfnApplicationV2.CheckpointConfigurationProperty.MinPauseBetweenCheckpoints``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {
"configuration_type": configuration_type,
}
if checkpointing_enabled is not None:
self._values["checkpointing_enabled"] = checkpointing_enabled
if checkpoint_interval is not None:
self._values["checkpoint_interval"] = checkpoint_interval
if min_pause_between_checkpoints is not None:
self._values["min_pause_between_checkpoints"] = min_pause_between_checkpoints
@builtins.property
def configuration_type(self) -> builtins.str:
"""``CfnApplicationV2.CheckpointConfigurationProperty.ConfigurationType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-configurationtype
"""
result = self._values.get("configuration_type")
assert result is not None, "Required property 'configuration_type' is missing"
return result
@builtins.property
def checkpointing_enabled(
self,
) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.CheckpointConfigurationProperty.CheckpointingEnabled``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-checkpointingenabled
"""
result = self._values.get("checkpointing_enabled")
return result
@builtins.property
def checkpoint_interval(self) -> typing.Optional[jsii.Number]:
"""``CfnApplicationV2.CheckpointConfigurationProperty.CheckpointInterval``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-checkpointinterval
"""
result = self._values.get("checkpoint_interval")
return result
@builtins.property
def min_pause_between_checkpoints(self) -> typing.Optional[jsii.Number]:
"""``CfnApplicationV2.CheckpointConfigurationProperty.MinPauseBetweenCheckpoints``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-checkpointconfiguration.html#cfn-kinesisanalyticsv2-application-checkpointconfiguration-minpausebetweencheckpoints
"""
result = self._values.get("min_pause_between_checkpoints")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CheckpointConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
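    # Hedged usage sketch (comment only): a custom checkpoint configuration built
    # from the struct above. The interval and pause values are illustrative
    # assumptions; they are generally only honoured when ``configuration_type``
    # is "CUSTOM" (see the linked CloudFormation docs).
    #
    #   checkpoint_configuration = CfnApplicationV2.CheckpointConfigurationProperty(
    #       configuration_type="CUSTOM",
    #       checkpointing_enabled=True,
    #       checkpoint_interval=60000,
    #       min_pause_between_checkpoints=5000,
    #   )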
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.CodeContentProperty",
jsii_struct_bases=[],
name_mapping={
"s3_content_location": "s3ContentLocation",
"text_content": "textContent",
"zip_file_content": "zipFileContent",
},
)
class CodeContentProperty:
def __init__(
self,
*,
s3_content_location: typing.Optional[typing.Union["CfnApplicationV2.S3ContentLocationProperty", _IResolvable_9ceae33e]] = None,
text_content: typing.Optional[builtins.str] = None,
zip_file_content: typing.Optional[builtins.str] = None,
) -> None:
"""
:param s3_content_location: ``CfnApplicationV2.CodeContentProperty.S3ContentLocation``.
:param text_content: ``CfnApplicationV2.CodeContentProperty.TextContent``.
:param zip_file_content: ``CfnApplicationV2.CodeContentProperty.ZipFileContent``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if s3_content_location is not None:
self._values["s3_content_location"] = s3_content_location
if text_content is not None:
self._values["text_content"] = text_content
if zip_file_content is not None:
self._values["zip_file_content"] = zip_file_content
@builtins.property
def s3_content_location(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.S3ContentLocationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.CodeContentProperty.S3ContentLocation``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-s3contentlocation
"""
result = self._values.get("s3_content_location")
return result
@builtins.property
def text_content(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.CodeContentProperty.TextContent``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-textcontent
"""
result = self._values.get("text_content")
return result
@builtins.property
def zip_file_content(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.CodeContentProperty.ZipFileContent``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-codecontent.html#cfn-kinesisanalyticsv2-application-codecontent-zipfilecontent
"""
result = self._values.get("zip_file_content")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CodeContentProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
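    # Hedged usage sketch (comment only): supplying application code from S3 via
    # the structs above. The bucket ARN and object key are assumptions, and the
    # ``S3ContentLocationProperty`` keyword names are assumed to mirror the
    # reference-data-source S3 struct defined earlier in this module.
    #
    #   code_configuration = CfnApplicationV2.ApplicationCodeConfigurationProperty(
    #       code_content=CfnApplicationV2.CodeContentProperty(
    #           s3_content_location=CfnApplicationV2.S3ContentLocationProperty(
    #               bucket_arn="arn:aws:s3:::example-bucket",
    #               file_key="flink-app.jar",
    #           )
    #       ),
    #       code_content_type="ZIPFILE",
    #   )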
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.EnvironmentPropertiesProperty",
jsii_struct_bases=[],
name_mapping={"property_groups": "propertyGroups"},
)
class EnvironmentPropertiesProperty:
def __init__(
self,
*,
property_groups: typing.Optional[typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationV2.PropertyGroupProperty", _IResolvable_9ceae33e]]]] = None,
) -> None:
"""
:param property_groups: ``CfnApplicationV2.EnvironmentPropertiesProperty.PropertyGroups``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-environmentproperties.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if property_groups is not None:
self._values["property_groups"] = property_groups
@builtins.property
def property_groups(
self,
) -> typing.Optional[typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationV2.PropertyGroupProperty", _IResolvable_9ceae33e]]]]:
"""``CfnApplicationV2.EnvironmentPropertiesProperty.PropertyGroups``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-environmentproperties.html#cfn-kinesisanalyticsv2-application-environmentproperties-propertygroups
"""
result = self._values.get("property_groups")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "EnvironmentPropertiesProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.FlinkApplicationConfigurationProperty",
jsii_struct_bases=[],
name_mapping={
"checkpoint_configuration": "checkpointConfiguration",
"monitoring_configuration": "monitoringConfiguration",
"parallelism_configuration": "parallelismConfiguration",
},
)
class FlinkApplicationConfigurationProperty:
def __init__(
self,
*,
checkpoint_configuration: typing.Optional[typing.Union["CfnApplicationV2.CheckpointConfigurationProperty", _IResolvable_9ceae33e]] = None,
monitoring_configuration: typing.Optional[typing.Union["CfnApplicationV2.MonitoringConfigurationProperty", _IResolvable_9ceae33e]] = None,
parallelism_configuration: typing.Optional[typing.Union["CfnApplicationV2.ParallelismConfigurationProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param checkpoint_configuration: ``CfnApplicationV2.FlinkApplicationConfigurationProperty.CheckpointConfiguration``.
:param monitoring_configuration: ``CfnApplicationV2.FlinkApplicationConfigurationProperty.MonitoringConfiguration``.
:param parallelism_configuration: ``CfnApplicationV2.FlinkApplicationConfigurationProperty.ParallelismConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if checkpoint_configuration is not None:
self._values["checkpoint_configuration"] = checkpoint_configuration
if monitoring_configuration is not None:
self._values["monitoring_configuration"] = monitoring_configuration
if parallelism_configuration is not None:
self._values["parallelism_configuration"] = parallelism_configuration
@builtins.property
def checkpoint_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.CheckpointConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.FlinkApplicationConfigurationProperty.CheckpointConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-checkpointconfiguration
"""
result = self._values.get("checkpoint_configuration")
return result
@builtins.property
def monitoring_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.MonitoringConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.FlinkApplicationConfigurationProperty.MonitoringConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-monitoringconfiguration
"""
result = self._values.get("monitoring_configuration")
return result
@builtins.property
def parallelism_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.ParallelismConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.FlinkApplicationConfigurationProperty.ParallelismConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-flinkapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-flinkapplicationconfiguration-parallelismconfiguration
"""
result = self._values.get("parallelism_configuration")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "FlinkApplicationConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
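    # Hedged usage sketch (comment only): a Flink application configuration that
    # reuses the ``checkpoint_configuration`` sketched after
    # CheckpointConfigurationProperty above. ``monitoring_configuration`` and
    # ``parallelism_configuration`` can be supplied in the same way via the
    # corresponding property structs defined later in this module.
    #
    #   flink_configuration = CfnApplicationV2.FlinkApplicationConfigurationProperty(
    #       checkpoint_configuration=checkpoint_configuration,
    #   )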
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.InputLambdaProcessorProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn"},
)
class InputLambdaProcessorProperty:
def __init__(self, *, resource_arn: builtins.str) -> None:
"""
:param resource_arn: ``CfnApplicationV2.InputLambdaProcessorProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputlambdaprocessor.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationV2.InputLambdaProcessorProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputlambdaprocessor.html#cfn-kinesisanalyticsv2-application-inputlambdaprocessor-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputLambdaProcessorProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.InputParallelismProperty",
jsii_struct_bases=[],
name_mapping={"count": "count"},
)
class InputParallelismProperty:
def __init__(self, *, count: typing.Optional[jsii.Number] = None) -> None:
"""
:param count: ``CfnApplicationV2.InputParallelismProperty.Count``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputparallelism.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if count is not None:
self._values["count"] = count
@builtins.property
def count(self) -> typing.Optional[jsii.Number]:
"""``CfnApplicationV2.InputParallelismProperty.Count``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputparallelism.html#cfn-kinesisanalyticsv2-application-inputparallelism-count
"""
result = self._values.get("count")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputParallelismProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.InputProcessingConfigurationProperty",
jsii_struct_bases=[],
name_mapping={"input_lambda_processor": "inputLambdaProcessor"},
)
class InputProcessingConfigurationProperty:
def __init__(
self,
*,
input_lambda_processor: typing.Optional[typing.Union["CfnApplicationV2.InputLambdaProcessorProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param input_lambda_processor: ``CfnApplicationV2.InputProcessingConfigurationProperty.InputLambdaProcessor``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputprocessingconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if input_lambda_processor is not None:
self._values["input_lambda_processor"] = input_lambda_processor
@builtins.property
def input_lambda_processor(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.InputLambdaProcessorProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.InputProcessingConfigurationProperty.InputLambdaProcessor``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputprocessingconfiguration.html#cfn-kinesisanalyticsv2-application-inputprocessingconfiguration-inputlambdaprocessor
"""
result = self._values.get("input_lambda_processor")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputProcessingConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.InputProperty",
jsii_struct_bases=[],
name_mapping={
"input_schema": "inputSchema",
"name_prefix": "namePrefix",
"input_parallelism": "inputParallelism",
"input_processing_configuration": "inputProcessingConfiguration",
"kinesis_firehose_input": "kinesisFirehoseInput",
"kinesis_streams_input": "kinesisStreamsInput",
},
)
class InputProperty:
def __init__(
self,
*,
input_schema: typing.Union["CfnApplicationV2.InputSchemaProperty", _IResolvable_9ceae33e],
name_prefix: builtins.str,
input_parallelism: typing.Optional[typing.Union["CfnApplicationV2.InputParallelismProperty", _IResolvable_9ceae33e]] = None,
input_processing_configuration: typing.Optional[typing.Union["CfnApplicationV2.InputProcessingConfigurationProperty", _IResolvable_9ceae33e]] = None,
kinesis_firehose_input: typing.Optional[typing.Union["CfnApplicationV2.KinesisFirehoseInputProperty", _IResolvable_9ceae33e]] = None,
kinesis_streams_input: typing.Optional[typing.Union["CfnApplicationV2.KinesisStreamsInputProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param input_schema: ``CfnApplicationV2.InputProperty.InputSchema``.
:param name_prefix: ``CfnApplicationV2.InputProperty.NamePrefix``.
:param input_parallelism: ``CfnApplicationV2.InputProperty.InputParallelism``.
:param input_processing_configuration: ``CfnApplicationV2.InputProperty.InputProcessingConfiguration``.
:param kinesis_firehose_input: ``CfnApplicationV2.InputProperty.KinesisFirehoseInput``.
:param kinesis_streams_input: ``CfnApplicationV2.InputProperty.KinesisStreamsInput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html
"""
self._values: typing.Dict[str, typing.Any] = {
"input_schema": input_schema,
"name_prefix": name_prefix,
}
if input_parallelism is not None:
self._values["input_parallelism"] = input_parallelism
if input_processing_configuration is not None:
self._values["input_processing_configuration"] = input_processing_configuration
if kinesis_firehose_input is not None:
self._values["kinesis_firehose_input"] = kinesis_firehose_input
if kinesis_streams_input is not None:
self._values["kinesis_streams_input"] = kinesis_streams_input
@builtins.property
def input_schema(
self,
) -> typing.Union["CfnApplicationV2.InputSchemaProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationV2.InputProperty.InputSchema``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputschema
"""
result = self._values.get("input_schema")
assert result is not None, "Required property 'input_schema' is missing"
return result
@builtins.property
def name_prefix(self) -> builtins.str:
"""``CfnApplicationV2.InputProperty.NamePrefix``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-nameprefix
"""
result = self._values.get("name_prefix")
assert result is not None, "Required property 'name_prefix' is missing"
return result
@builtins.property
def input_parallelism(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.InputParallelismProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.InputProperty.InputParallelism``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputparallelism
"""
result = self._values.get("input_parallelism")
return result
@builtins.property
def input_processing_configuration(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.InputProcessingConfigurationProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.InputProperty.InputProcessingConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-inputprocessingconfiguration
"""
result = self._values.get("input_processing_configuration")
return result
@builtins.property
def kinesis_firehose_input(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.KinesisFirehoseInputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.InputProperty.KinesisFirehoseInput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-kinesisfirehoseinput
"""
result = self._values.get("kinesis_firehose_input")
return result
@builtins.property
def kinesis_streams_input(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.KinesisStreamsInputProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.InputProperty.KinesisStreamsInput``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-input.html#cfn-kinesisanalyticsv2-application-input-kinesisstreamsinput
"""
result = self._values.get("kinesis_streams_input")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.InputSchemaProperty",
jsii_struct_bases=[],
name_mapping={
"record_columns": "recordColumns",
"record_format": "recordFormat",
"record_encoding": "recordEncoding",
},
)
class InputSchemaProperty:
def __init__(
self,
*,
record_columns: typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationV2.RecordColumnProperty", _IResolvable_9ceae33e]]],
record_format: typing.Union["CfnApplicationV2.RecordFormatProperty", _IResolvable_9ceae33e],
record_encoding: typing.Optional[builtins.str] = None,
) -> None:
"""
:param record_columns: ``CfnApplicationV2.InputSchemaProperty.RecordColumns``.
:param record_format: ``CfnApplicationV2.InputSchemaProperty.RecordFormat``.
:param record_encoding: ``CfnApplicationV2.InputSchemaProperty.RecordEncoding``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_columns": record_columns,
"record_format": record_format,
}
if record_encoding is not None:
self._values["record_encoding"] = record_encoding
@builtins.property
def record_columns(
self,
) -> typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationV2.RecordColumnProperty", _IResolvable_9ceae33e]]]:
"""``CfnApplicationV2.InputSchemaProperty.RecordColumns``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordcolumns
"""
result = self._values.get("record_columns")
assert result is not None, "Required property 'record_columns' is missing"
return result
@builtins.property
def record_format(
self,
) -> typing.Union["CfnApplicationV2.RecordFormatProperty", _IResolvable_9ceae33e]:
"""``CfnApplicationV2.InputSchemaProperty.RecordFormat``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordformat
"""
result = self._values.get("record_format")
assert result is not None, "Required property 'record_format' is missing"
return result
@builtins.property
def record_encoding(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.InputSchemaProperty.RecordEncoding``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-inputschema.html#cfn-kinesisanalyticsv2-application-inputschema-recordencoding
"""
result = self._values.get("record_encoding")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "InputSchemaProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.JSONMappingParametersProperty",
jsii_struct_bases=[],
name_mapping={"record_row_path": "recordRowPath"},
)
class JSONMappingParametersProperty:
def __init__(self, *, record_row_path: builtins.str) -> None:
"""
:param record_row_path: ``CfnApplicationV2.JSONMappingParametersProperty.RecordRowPath``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-jsonmappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_row_path": record_row_path,
}
@builtins.property
def record_row_path(self) -> builtins.str:
"""``CfnApplicationV2.JSONMappingParametersProperty.RecordRowPath``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-jsonmappingparameters.html#cfn-kinesisanalyticsv2-application-jsonmappingparameters-recordrowpath
"""
result = self._values.get("record_row_path")
assert result is not None, "Required property 'record_row_path' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "JSONMappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.KinesisFirehoseInputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn"},
)
class KinesisFirehoseInputProperty:
def __init__(self, *, resource_arn: builtins.str) -> None:
"""
:param resource_arn: ``CfnApplicationV2.KinesisFirehoseInputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisfirehoseinput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationV2.KinesisFirehoseInputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisfirehoseinput.html#cfn-kinesisanalyticsv2-application-kinesisfirehoseinput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "KinesisFirehoseInputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.KinesisStreamsInputProperty",
jsii_struct_bases=[],
name_mapping={"resource_arn": "resourceArn"},
)
class KinesisStreamsInputProperty:
def __init__(self, *, resource_arn: builtins.str) -> None:
"""
:param resource_arn: ``CfnApplicationV2.KinesisStreamsInputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisstreamsinput.html
"""
self._values: typing.Dict[str, typing.Any] = {
"resource_arn": resource_arn,
}
@builtins.property
def resource_arn(self) -> builtins.str:
"""``CfnApplicationV2.KinesisStreamsInputProperty.ResourceARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-kinesisstreamsinput.html#cfn-kinesisanalyticsv2-application-kinesisstreamsinput-resourcearn
"""
result = self._values.get("resource_arn")
assert result is not None, "Required property 'resource_arn' is missing"
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "KinesisStreamsInputProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.MappingParametersProperty",
jsii_struct_bases=[],
name_mapping={
"csv_mapping_parameters": "csvMappingParameters",
"json_mapping_parameters": "jsonMappingParameters",
},
)
class MappingParametersProperty:
def __init__(
self,
*,
csv_mapping_parameters: typing.Optional[typing.Union["CfnApplicationV2.CSVMappingParametersProperty", _IResolvable_9ceae33e]] = None,
json_mapping_parameters: typing.Optional[typing.Union["CfnApplicationV2.JSONMappingParametersProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param csv_mapping_parameters: ``CfnApplicationV2.MappingParametersProperty.CSVMappingParameters``.
:param json_mapping_parameters: ``CfnApplicationV2.MappingParametersProperty.JSONMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mappingparameters.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if csv_mapping_parameters is not None:
self._values["csv_mapping_parameters"] = csv_mapping_parameters
if json_mapping_parameters is not None:
self._values["json_mapping_parameters"] = json_mapping_parameters
@builtins.property
def csv_mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.CSVMappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.MappingParametersProperty.CSVMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mappingparameters.html#cfn-kinesisanalyticsv2-application-mappingparameters-csvmappingparameters
"""
result = self._values.get("csv_mapping_parameters")
return result
@builtins.property
def json_mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.JSONMappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.MappingParametersProperty.JSONMappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-mappingparameters.html#cfn-kinesisanalyticsv2-application-mappingparameters-jsonmappingparameters
"""
result = self._values.get("json_mapping_parameters")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "MappingParametersProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.MonitoringConfigurationProperty",
jsii_struct_bases=[],
name_mapping={
"configuration_type": "configurationType",
"log_level": "logLevel",
"metrics_level": "metricsLevel",
},
)
class MonitoringConfigurationProperty:
def __init__(
self,
*,
configuration_type: builtins.str,
log_level: typing.Optional[builtins.str] = None,
metrics_level: typing.Optional[builtins.str] = None,
) -> None:
"""
:param configuration_type: ``CfnApplicationV2.MonitoringConfigurationProperty.ConfigurationType``.
:param log_level: ``CfnApplicationV2.MonitoringConfigurationProperty.LogLevel``.
:param metrics_level: ``CfnApplicationV2.MonitoringConfigurationProperty.MetricsLevel``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-monitoringconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {
"configuration_type": configuration_type,
}
if log_level is not None:
self._values["log_level"] = log_level
if metrics_level is not None:
self._values["metrics_level"] = metrics_level
@builtins.property
def configuration_type(self) -> builtins.str:
"""``CfnApplicationV2.MonitoringConfigurationProperty.ConfigurationType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-monitoringconfiguration.html#cfn-kinesisanalyticsv2-application-monitoringconfiguration-configurationtype
"""
result = self._values.get("configuration_type")
assert result is not None, "Required property 'configuration_type' is missing"
return result
@builtins.property
def log_level(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.MonitoringConfigurationProperty.LogLevel``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-monitoringconfiguration.html#cfn-kinesisanalyticsv2-application-monitoringconfiguration-loglevel
"""
result = self._values.get("log_level")
return result
@builtins.property
def metrics_level(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.MonitoringConfigurationProperty.MetricsLevel``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-monitoringconfiguration.html#cfn-kinesisanalyticsv2-application-monitoringconfiguration-metricslevel
"""
result = self._values.get("metrics_level")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "MonitoringConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.ParallelismConfigurationProperty",
jsii_struct_bases=[],
name_mapping={
"configuration_type": "configurationType",
"auto_scaling_enabled": "autoScalingEnabled",
"parallelism": "parallelism",
"parallelism_per_kpu": "parallelismPerKpu",
},
)
class ParallelismConfigurationProperty:
def __init__(
self,
*,
configuration_type: builtins.str,
auto_scaling_enabled: typing.Optional[typing.Union[builtins.bool, _IResolvable_9ceae33e]] = None,
parallelism: typing.Optional[jsii.Number] = None,
parallelism_per_kpu: typing.Optional[jsii.Number] = None,
) -> None:
"""
:param configuration_type: ``CfnApplicationV2.ParallelismConfigurationProperty.ConfigurationType``.
:param auto_scaling_enabled: ``CfnApplicationV2.ParallelismConfigurationProperty.AutoScalingEnabled``.
:param parallelism: ``CfnApplicationV2.ParallelismConfigurationProperty.Parallelism``.
:param parallelism_per_kpu: ``CfnApplicationV2.ParallelismConfigurationProperty.ParallelismPerKPU``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {
"configuration_type": configuration_type,
}
if auto_scaling_enabled is not None:
self._values["auto_scaling_enabled"] = auto_scaling_enabled
if parallelism is not None:
self._values["parallelism"] = parallelism
if parallelism_per_kpu is not None:
self._values["parallelism_per_kpu"] = parallelism_per_kpu
@builtins.property
def configuration_type(self) -> builtins.str:
"""``CfnApplicationV2.ParallelismConfigurationProperty.ConfigurationType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html#cfn-kinesisanalyticsv2-application-parallelismconfiguration-configurationtype
"""
result = self._values.get("configuration_type")
assert result is not None, "Required property 'configuration_type' is missing"
return result
@builtins.property
def auto_scaling_enabled(
self,
) -> typing.Optional[typing.Union[builtins.bool, _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.ParallelismConfigurationProperty.AutoScalingEnabled``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html#cfn-kinesisanalyticsv2-application-parallelismconfiguration-autoscalingenabled
"""
result = self._values.get("auto_scaling_enabled")
return result
@builtins.property
def parallelism(self) -> typing.Optional[jsii.Number]:
"""``CfnApplicationV2.ParallelismConfigurationProperty.Parallelism``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html#cfn-kinesisanalyticsv2-application-parallelismconfiguration-parallelism
"""
result = self._values.get("parallelism")
return result
@builtins.property
def parallelism_per_kpu(self) -> typing.Optional[jsii.Number]:
"""``CfnApplicationV2.ParallelismConfigurationProperty.ParallelismPerKPU``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-parallelismconfiguration.html#cfn-kinesisanalyticsv2-application-parallelismconfiguration-parallelismperkpu
"""
result = self._values.get("parallelism_per_kpu")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "ParallelismConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.PropertyGroupProperty",
jsii_struct_bases=[],
name_mapping={
"property_group_id": "propertyGroupId",
"property_map": "propertyMap",
},
)
class PropertyGroupProperty:
def __init__(
self,
*,
property_group_id: typing.Optional[builtins.str] = None,
property_map: typing.Any = None,
) -> None:
"""
:param property_group_id: ``CfnApplicationV2.PropertyGroupProperty.PropertyGroupId``.
:param property_map: ``CfnApplicationV2.PropertyGroupProperty.PropertyMap``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-propertygroup.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if property_group_id is not None:
self._values["property_group_id"] = property_group_id
if property_map is not None:
self._values["property_map"] = property_map
@builtins.property
def property_group_id(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.PropertyGroupProperty.PropertyGroupId``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-propertygroup.html#cfn-kinesisanalyticsv2-application-propertygroup-propertygroupid
"""
result = self._values.get("property_group_id")
return result
@builtins.property
def property_map(self) -> typing.Any:
"""``CfnApplicationV2.PropertyGroupProperty.PropertyMap``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-propertygroup.html#cfn-kinesisanalyticsv2-application-propertygroup-propertymap
"""
result = self._values.get("property_map")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "PropertyGroupProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.RecordColumnProperty",
jsii_struct_bases=[],
name_mapping={"name": "name", "sql_type": "sqlType", "mapping": "mapping"},
)
class RecordColumnProperty:
def __init__(
self,
*,
name: builtins.str,
sql_type: builtins.str,
mapping: typing.Optional[builtins.str] = None,
) -> None:
"""
:param name: ``CfnApplicationV2.RecordColumnProperty.Name``.
:param sql_type: ``CfnApplicationV2.RecordColumnProperty.SqlType``.
:param mapping: ``CfnApplicationV2.RecordColumnProperty.Mapping``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html
"""
self._values: typing.Dict[str, typing.Any] = {
"name": name,
"sql_type": sql_type,
}
if mapping is not None:
self._values["mapping"] = mapping
@builtins.property
def name(self) -> builtins.str:
"""``CfnApplicationV2.RecordColumnProperty.Name``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-name
"""
result = self._values.get("name")
assert result is not None, "Required property 'name' is missing"
return result
@builtins.property
def sql_type(self) -> builtins.str:
"""``CfnApplicationV2.RecordColumnProperty.SqlType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-sqltype
"""
result = self._values.get("sql_type")
assert result is not None, "Required property 'sql_type' is missing"
return result
@builtins.property
def mapping(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.RecordColumnProperty.Mapping``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordcolumn.html#cfn-kinesisanalyticsv2-application-recordcolumn-mapping
"""
result = self._values.get("mapping")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RecordColumnProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.RecordFormatProperty",
jsii_struct_bases=[],
name_mapping={
"record_format_type": "recordFormatType",
"mapping_parameters": "mappingParameters",
},
)
class RecordFormatProperty:
def __init__(
self,
*,
record_format_type: builtins.str,
mapping_parameters: typing.Optional[typing.Union["CfnApplicationV2.MappingParametersProperty", _IResolvable_9ceae33e]] = None,
) -> None:
"""
:param record_format_type: ``CfnApplicationV2.RecordFormatProperty.RecordFormatType``.
:param mapping_parameters: ``CfnApplicationV2.RecordFormatProperty.MappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordformat.html
"""
self._values: typing.Dict[str, typing.Any] = {
"record_format_type": record_format_type,
}
if mapping_parameters is not None:
self._values["mapping_parameters"] = mapping_parameters
@builtins.property
def record_format_type(self) -> builtins.str:
"""``CfnApplicationV2.RecordFormatProperty.RecordFormatType``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordformat.html#cfn-kinesisanalyticsv2-application-recordformat-recordformattype
"""
result = self._values.get("record_format_type")
assert result is not None, "Required property 'record_format_type' is missing"
return result
@builtins.property
def mapping_parameters(
self,
) -> typing.Optional[typing.Union["CfnApplicationV2.MappingParametersProperty", _IResolvable_9ceae33e]]:
"""``CfnApplicationV2.RecordFormatProperty.MappingParameters``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-recordformat.html#cfn-kinesisanalyticsv2-application-recordformat-mappingparameters
"""
result = self._values.get("mapping_parameters")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "RecordFormatProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.S3ContentLocationProperty",
jsii_struct_bases=[],
name_mapping={
"bucket_arn": "bucketArn",
"file_key": "fileKey",
"object_version": "objectVersion",
},
)
class S3ContentLocationProperty:
def __init__(
self,
*,
bucket_arn: typing.Optional[builtins.str] = None,
file_key: typing.Optional[builtins.str] = None,
object_version: typing.Optional[builtins.str] = None,
) -> None:
"""
:param bucket_arn: ``CfnApplicationV2.S3ContentLocationProperty.BucketARN``.
:param file_key: ``CfnApplicationV2.S3ContentLocationProperty.FileKey``.
:param object_version: ``CfnApplicationV2.S3ContentLocationProperty.ObjectVersion``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentlocation.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if bucket_arn is not None:
self._values["bucket_arn"] = bucket_arn
if file_key is not None:
self._values["file_key"] = file_key
if object_version is not None:
self._values["object_version"] = object_version
@builtins.property
def bucket_arn(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.S3ContentLocationProperty.BucketARN``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentlocation.html#cfn-kinesisanalyticsv2-application-s3contentlocation-bucketarn
"""
result = self._values.get("bucket_arn")
return result
@builtins.property
def file_key(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.S3ContentLocationProperty.FileKey``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentlocation.html#cfn-kinesisanalyticsv2-application-s3contentlocation-filekey
"""
result = self._values.get("file_key")
return result
@builtins.property
def object_version(self) -> typing.Optional[builtins.str]:
"""``CfnApplicationV2.S3ContentLocationProperty.ObjectVersion``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-s3contentlocation.html#cfn-kinesisanalyticsv2-application-s3contentlocation-objectversion
"""
result = self._values.get("object_version")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "S3ContentLocationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2.SqlApplicationConfigurationProperty",
jsii_struct_bases=[],
name_mapping={"inputs": "inputs"},
)
class SqlApplicationConfigurationProperty:
def __init__(
self,
*,
inputs: typing.Optional[typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationV2.InputProperty", _IResolvable_9ceae33e]]]] = None,
) -> None:
"""
:param inputs: ``CfnApplicationV2.SqlApplicationConfigurationProperty.Inputs``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-sqlapplicationconfiguration.html
"""
self._values: typing.Dict[str, typing.Any] = {}
if inputs is not None:
self._values["inputs"] = inputs
@builtins.property
def inputs(
self,
) -> typing.Optional[typing.Union[_IResolvable_9ceae33e, typing.List[typing.Union["CfnApplicationV2.InputProperty", _IResolvable_9ceae33e]]]]:
"""``CfnApplicationV2.SqlApplicationConfigurationProperty.Inputs``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-kinesisanalyticsv2-application-sqlapplicationconfiguration.html#cfn-kinesisanalyticsv2-application-sqlapplicationconfiguration-inputs
"""
result = self._values.get("inputs")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "SqlApplicationConfigurationProperty(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
@jsii.data_type(
jsii_type="monocdk-experiment.aws_kinesisanalytics.CfnApplicationV2Props",
jsii_struct_bases=[],
name_mapping={
"runtime_environment": "runtimeEnvironment",
"service_execution_role": "serviceExecutionRole",
"application_configuration": "applicationConfiguration",
"application_description": "applicationDescription",
"application_name": "applicationName",
"tags": "tags",
},
)
class CfnApplicationV2Props:
def __init__(
self,
*,
runtime_environment: builtins.str,
service_execution_role: builtins.str,
application_configuration: typing.Optional[typing.Union[CfnApplicationV2.ApplicationConfigurationProperty, _IResolvable_9ceae33e]] = None,
application_description: typing.Optional[builtins.str] = None,
application_name: typing.Optional[builtins.str] = None,
tags: typing.Optional[typing.List[_CfnTag_b4661f1a]] = None,
) -> None:
"""Properties for defining a ``AWS::KinesisAnalyticsV2::Application``.
:param runtime_environment: ``AWS::KinesisAnalyticsV2::Application.RuntimeEnvironment``.
:param service_execution_role: ``AWS::KinesisAnalyticsV2::Application.ServiceExecutionRole``.
:param application_configuration: ``AWS::KinesisAnalyticsV2::Application.ApplicationConfiguration``.
:param application_description: ``AWS::KinesisAnalyticsV2::Application.ApplicationDescription``.
:param application_name: ``AWS::KinesisAnalyticsV2::Application.ApplicationName``.
:param tags: ``AWS::KinesisAnalyticsV2::Application.Tags``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html
"""
self._values: typing.Dict[str, typing.Any] = {
"runtime_environment": runtime_environment,
"service_execution_role": service_execution_role,
}
if application_configuration is not None:
self._values["application_configuration"] = application_configuration
if application_description is not None:
self._values["application_description"] = application_description
if application_name is not None:
self._values["application_name"] = application_name
if tags is not None:
self._values["tags"] = tags
@builtins.property
def runtime_environment(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::Application.RuntimeEnvironment``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-runtimeenvironment
"""
result = self._values.get("runtime_environment")
assert result is not None, "Required property 'runtime_environment' is missing"
return result
@builtins.property
def service_execution_role(self) -> builtins.str:
"""``AWS::KinesisAnalyticsV2::Application.ServiceExecutionRole``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-serviceexecutionrole
"""
result = self._values.get("service_execution_role")
assert result is not None, "Required property 'service_execution_role' is missing"
return result
@builtins.property
def application_configuration(
self,
) -> typing.Optional[typing.Union[CfnApplicationV2.ApplicationConfigurationProperty, _IResolvable_9ceae33e]]:
"""``AWS::KinesisAnalyticsV2::Application.ApplicationConfiguration``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationconfiguration
"""
result = self._values.get("application_configuration")
return result
@builtins.property
def application_description(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalyticsV2::Application.ApplicationDescription``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationdescription
"""
result = self._values.get("application_description")
return result
@builtins.property
def application_name(self) -> typing.Optional[builtins.str]:
"""``AWS::KinesisAnalyticsV2::Application.ApplicationName``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-applicationname
"""
result = self._values.get("application_name")
return result
@builtins.property
def tags(self) -> typing.Optional[typing.List[_CfnTag_b4661f1a]]:
"""``AWS::KinesisAnalyticsV2::Application.Tags``.
:see: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-kinesisanalyticsv2-application.html#cfn-kinesisanalyticsv2-application-tags
"""
result = self._values.get("tags")
return result
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnApplicationV2Props(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"CfnApplication",
"CfnApplicationCloudWatchLoggingOptionV2",
"CfnApplicationCloudWatchLoggingOptionV2Props",
"CfnApplicationOutput",
"CfnApplicationOutputProps",
"CfnApplicationOutputV2",
"CfnApplicationOutputV2Props",
"CfnApplicationProps",
"CfnApplicationReferenceDataSource",
"CfnApplicationReferenceDataSourceProps",
"CfnApplicationReferenceDataSourceV2",
"CfnApplicationReferenceDataSourceV2Props",
"CfnApplicationV2",
"CfnApplicationV2Props",
]
publication.publish() | PypiClean |
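# Illustrative sketch (not part of the generated bindings): building a SQL input
# schema from the property structs defined above. All values are placeholders.
if __name__ == "__main__":
    _example_schema = CfnApplicationV2.InputSchemaProperty(
        record_columns=[
            CfnApplicationV2.RecordColumnProperty(name="ticker", sql_type="VARCHAR(4)"),
        ],
        record_format=CfnApplicationV2.RecordFormatProperty(
            record_format_type="JSON",
            mapping_parameters=CfnApplicationV2.MappingParametersProperty(
                json_mapping_parameters=CfnApplicationV2.JSONMappingParametersProperty(
                    record_row_path="$",
                ),
            ),
        ),
    )
    print(_example_schema)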
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/eslint-plugin-jsx-a11y/lib/rules/no-noninteractive-element-interactions.js | "use strict";
var _interopRequireDefault = require("@babel/runtime/helpers/interopRequireDefault");
var _toConsumableArray2 = _interopRequireDefault(require("@babel/runtime/helpers/toConsumableArray"));
var _ariaQuery = require("aria-query");
var _jsxAstUtils = require("jsx-ast-utils");
var _arrayIncludes = _interopRequireDefault(require("array-includes"));
var _has = _interopRequireDefault(require("has"));
var _schemas = require("../util/schemas");
var _isAbstractRole = _interopRequireDefault(require("../util/isAbstractRole"));
var _isHiddenFromScreenReader = _interopRequireDefault(require("../util/isHiddenFromScreenReader"));
var _isInteractiveElement = _interopRequireDefault(require("../util/isInteractiveElement"));
var _isInteractiveRole = _interopRequireDefault(require("../util/isInteractiveRole"));
var _isNonInteractiveElement = _interopRequireDefault(require("../util/isNonInteractiveElement"));
var _isNonInteractiveRole = _interopRequireDefault(require("../util/isNonInteractiveRole"));
var _isPresentationRole = _interopRequireDefault(require("../util/isPresentationRole"));
/**
* @fileoverview Enforce non-interactive elements have no interactive handlers.
 * @author Jesse Beach
*
*/
// ----------------------------------------------------------------------------
// Rule Definition
// ----------------------------------------------------------------------------
var errorMessage = 'Non-interactive elements should not be assigned mouse or keyboard event listeners.';
var domElements = (0, _toConsumableArray2["default"])(_ariaQuery.dom.keys());
var defaultInteractiveProps = [].concat((0, _toConsumableArray2["default"])(_jsxAstUtils.eventHandlersByType.focus), (0, _toConsumableArray2["default"])(_jsxAstUtils.eventHandlersByType.image), (0, _toConsumableArray2["default"])(_jsxAstUtils.eventHandlersByType.keyboard), (0, _toConsumableArray2["default"])(_jsxAstUtils.eventHandlersByType.mouse));
var schema = (0, _schemas.generateObjSchema)({
handlers: _schemas.arraySchema
});
module.exports = {
meta: {
docs: {
url: 'https://github.com/evcohen/eslint-plugin-jsx-a11y/tree/master/docs/rules/no-noninteractive-element-interactions.md'
},
schema: [schema]
},
create: function create(context) {
var options = context.options;
return {
JSXOpeningElement: function JSXOpeningElement(node) {
var attributes = node.attributes;
var type = (0, _jsxAstUtils.elementType)(node);
var config = options[0] || {};
var interactiveProps = config.handlers || defaultInteractiveProps; // Allow overrides from rule configuration for specific elements and roles.
if ((0, _has["default"])(config, type)) {
attributes = attributes.filter(function (attr) {
return attr.type !== 'JSXSpreadAttribute' && !(0, _arrayIncludes["default"])(config[type], (0, _jsxAstUtils.propName)(attr));
});
}
var hasInteractiveProps = interactiveProps.some(function (prop) {
return (0, _jsxAstUtils.hasProp)(attributes, prop) && (0, _jsxAstUtils.getPropValue)((0, _jsxAstUtils.getProp)(attributes, prop)) != null;
});
if (!(0, _arrayIncludes["default"])(domElements, type)) {
// Do not test higher level JSX components, as we do not know what
// low-level DOM element this maps to.
return;
}
if (!hasInteractiveProps || (0, _isHiddenFromScreenReader["default"])(type, attributes) || (0, _isPresentationRole["default"])(type, attributes)) {
// Presentation is an intentional signal from the author that this
// element is not meant to be perceivable. For example, a click screen
          // to close a dialog.
return;
}
if ((0, _isInteractiveElement["default"])(type, attributes) || (0, _isInteractiveRole["default"])(type, attributes) || !(0, _isNonInteractiveElement["default"])(type, attributes) && !(0, _isNonInteractiveRole["default"])(type, attributes) || (0, _isAbstractRole["default"])(type, attributes)) {
          // This rule has no opinion about abstract roles.
return;
} // Visible, non-interactive elements should not have an interactive handler.
context.report({
node,
message: errorMessage
});
}
};
}
}; | PypiClean |
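// Illustrative configuration sketch (not part of the original rule source).
// Per the schema above, the rule accepts an optional object whose `handlers`
// array narrows which event props are checked; element-name keys (e.g. "ul")
// list handlers that are explicitly allowed on that element. For example, in
// .eslintrc:
//
//   "jsx-a11y/no-noninteractive-element-interactions": ["error", {
//     "handlers": ["onClick", "onKeyDown", "onKeyPress", "onKeyUp"],
//     "ul": ["onClick"]
//   }]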
/nni_upload_test-0.7.1904290925-py3-none-win_amd64.whl/nni_upload_test-0.7.1904290925.data/data/nni/node_modules/rx/ts/rx.binding.es6.d.ts | declare module Rx {
export interface ConnectableObservable<T> extends Observable<T> {
connect(): IDisposable;
refCount(): Observable<T>;
}
export interface Observable<T> {
/**
* Multicasts the source sequence notifications through an instantiated subject into all uses of the sequence within a selector function. Each
* subscription to the resulting sequence causes a separate multicast invocation, exposing the sequence resulting from the selector function's
* invocation. For specializations with fixed subject types, see Publish, PublishLast, and Replay.
*
* @example
* 1 - res = source.multicast(observable);
* 2 - res = source.multicast(function () { return new Subject(); }, function (x) { return x; });
*
* @param {Function|Subject} subjectOrSubjectSelector
* Factory function to create an intermediate subject through which the source sequence's elements will be multicast to the selector function.
* Or:
* Subject to push source elements into.
*
         * @param {Function} [selector] Optional selector function which can use the multicasted source sequence subject to the policies enforced by the created subject. Specified only if subjectOrSubjectSelector is a factory function.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
multicast(subject: ISubject<T> | (() => ISubject<T>)): ConnectableObservable<T>;
/**
* Multicasts the source sequence notifications through an instantiated subject into all uses of the sequence within a selector function. Each
* subscription to the resulting sequence causes a separate multicast invocation, exposing the sequence resulting from the selector function's
* invocation. For specializations with fixed subject types, see Publish, PublishLast, and Replay.
*
* @example
* 1 - res = source.multicast(observable);
* 2 - res = source.multicast(function () { return new Subject(); }, function (x) { return x; });
*
* @param {Function|Subject} subjectOrSubjectSelector
* Factory function to create an intermediate subject through which the source sequence's elements will be multicast to the selector function.
* Or:
* Subject to push source elements into.
*
         * @param {Function} [selector] Optional selector function which can use the multicasted source sequence subject to the policies enforced by the created subject. Specified only if subjectOrSubjectSelector is a factory function.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
multicast<TResult>(subjectSelector: ISubject<T> | (() => ISubject<T>), selector: (source: ConnectableObservable<T>) => Observable<T>): Observable<T>;
}
export interface Observable<T> {
/**
* Returns an observable sequence that is the result of invoking the selector on a connectable observable sequence that shares a single subscription to the underlying sequence.
* This operator is a specialization of Multicast using a regular Subject.
*
* @example
         * var res = source.publish();
* var res = source.publish(function (x) { return x; });
*
* @param {Function} [selector] Selector function which can use the multicasted source sequence as many times as needed, without causing multiple subscriptions to the source sequence. Subscribers to the given source will receive all notifications of the source from the time of the subscription on.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
publish(): ConnectableObservable<T>;
/**
* Returns an observable sequence that is the result of invoking the selector on a connectable observable sequence that shares a single subscription to the underlying sequence.
* This operator is a specialization of Multicast using a regular Subject.
*
* @example
         * var res = source.publish();
* var res = source.publish(function (x) { return x; });
*
* @param {Function} [selector] Selector function which can use the multicasted source sequence as many times as needed, without causing multiple subscriptions to the source sequence. Subscribers to the given source will receive all notifications of the source from the time of the subscription on.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
publish<TResult>(selector: (source: ConnectableObservable<T>) => Observable<TResult>): Observable<TResult>;
}
export interface Observable<T> {
/**
* Returns an observable sequence that shares a single subscription to the underlying sequence.
* This operator is a specialization of publish which creates a subscription when the number of observers goes from zero to one, then shares that subscription with all subsequent observers until the number of observers returns to zero, at which point the subscription is disposed.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence.
*/
share(): Observable<T>;
}
export interface Observable<T> {
/**
* Returns an observable sequence that is the result of invoking the selector on a connectable observable sequence that shares a single subscription to the underlying sequence containing only the last notification.
* This operator is a specialization of Multicast using a AsyncSubject.
*
* @example
* var res = source.publishLast();
* var res = source.publishLast(function (x) { return x; });
*
* @param selector [Optional] Selector function which can use the multicasted source sequence as many times as needed, without causing multiple subscriptions to the source sequence. Subscribers to the given source will only receive the last notification of the source.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
publishLast(): ConnectableObservable<T>;
/**
* Returns an observable sequence that is the result of invoking the selector on a connectable observable sequence that shares a single subscription to the underlying sequence containing only the last notification.
* This operator is a specialization of Multicast using a AsyncSubject.
*
* @example
* var res = source.publishLast();
* var res = source.publishLast(function (x) { return x; });
*
* @param selector [Optional] Selector function which can use the multicasted source sequence as many times as needed, without causing multiple subscriptions to the source sequence. Subscribers to the given source will only receive the last notification of the source.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
publishLast<TResult>(selector: (source: ConnectableObservable<T>) => Observable<TResult>): Observable<TResult>;
}
export interface Observable<T> {
/**
* Returns an observable sequence that is the result of invoking the selector on a connectable observable sequence that shares a single subscription to the underlying sequence and starts with initialValue.
* This operator is a specialization of Multicast using a BehaviorSubject.
*
* @example
* var res = source.publishValue(42);
* var res = source.publishValue(function (x) { return x.select(function (y) { return y * y; }) }, 42);
*
* @param {Function} [selector] Optional selector function which can use the multicasted source sequence as many times as needed, without causing multiple subscriptions to the source sequence. Subscribers to the given source will receive immediately receive the initial value, followed by all notifications of the source from the time of the subscription on.
* @param {Mixed} initialValue Initial value received by observers upon subscription.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
publishValue(initialValue: T): ConnectableObservable<T>;
/**
* Returns an observable sequence that is the result of invoking the selector on a connectable observable sequence that shares a single subscription to the underlying sequence and starts with initialValue.
* This operator is a specialization of Multicast using a BehaviorSubject.
*
* @example
* var res = source.publishValue(42);
* var res = source.publishValue(function (x) { return x.select(function (y) { return y * y; }) }, 42);
*
* @param {Function} [selector] Optional selector function which can use the multicasted source sequence as many times as needed, without causing multiple subscriptions to the source sequence. Subscribers to the given source will receive immediately receive the initial value, followed by all notifications of the source from the time of the subscription on.
* @param {Mixed} initialValue Initial value received by observers upon subscription.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
publishValue<TResult>(selector: (source: ConnectableObservable<T>) => Observable<TResult>, initialValue: T): Observable<TResult>;
}
export interface Observable<T> {
/**
* Returns an observable sequence that shares a single subscription to the underlying sequence and starts with an initialValue.
* This operator is a specialization of publishValue which creates a subscription when the number of observers goes from zero to one, then shares that subscription with all subsequent observers until the number of observers returns to zero, at which point the subscription is disposed.
* @param {Mixed} initialValue Initial value received by observers upon subscription.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence.
*/
shareValue(initialValue: T): Observable<T>;
}
export interface Observable<T> {
/**
* Returns an observable sequence that is the result of invoking the selector on a connectable observable sequence that shares a single subscription to the underlying sequence replaying notifications subject to a maximum time length for the replay buffer.
* This operator is a specialization of Multicast using a ReplaySubject.
*
* @example
* var res = source.replay(null, 3);
* var res = source.replay(null, 3, 500);
* var res = source.replay(null, 3, 500, scheduler);
* var res = source.replay(function (x) { return x.take(6).repeat(); }, 3, 500, scheduler);
*
* @param selector [Optional] Selector function which can use the multicasted source sequence as many times as needed, without causing multiple subscriptions to the source sequence. Subscribers to the given source will receive all the notifications of the source subject to the specified replay buffer trimming policy.
* @param bufferSize [Optional] Maximum element count of the replay buffer.
* @param windowSize [Optional] Maximum time length of the replay buffer.
* @param scheduler [Optional] Scheduler where connected observers within the selector function will be invoked on.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
replay(selector?: void, bufferSize?: number, window?: number, scheduler?: IScheduler): ConnectableObservable<T>; // hack to catch first omitted parameter
/**
* Returns an observable sequence that is the result of invoking the selector on a connectable observable sequence that shares a single subscription to the underlying sequence replaying notifications subject to a maximum time length for the replay buffer.
* This operator is a specialization of Multicast using a ReplaySubject.
*
* @example
* var res = source.replay(null, 3);
* var res = source.replay(null, 3, 500);
* var res = source.replay(null, 3, 500, scheduler);
* var res = source.replay(function (x) { return x.take(6).repeat(); }, 3, 500, scheduler);
*
* @param selector [Optional] Selector function which can use the multicasted source sequence as many times as needed, without causing multiple subscriptions to the source sequence. Subscribers to the given source will receive all the notifications of the source subject to the specified replay buffer trimming policy.
* @param bufferSize [Optional] Maximum element count of the replay buffer.
* @param windowSize [Optional] Maximum time length of the replay buffer.
* @param scheduler [Optional] Scheduler where connected observers within the selector function will be invoked on.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence within a selector function.
*/
replay(selector: (source: ConnectableObservable<T>) => Observable<T>, bufferSize?: number, window?: number, scheduler?: IScheduler): Observable<T>;
}
export interface Observable<T> {
/**
* Returns an observable sequence that shares a single subscription to the underlying sequence replaying notifications subject to a maximum time length for the replay buffer.
* This operator is a specialization of replay which creates a subscription when the number of observers goes from zero to one, then shares that subscription with all subsequent observers until the number of observers returns to zero, at which point the subscription is disposed.
*
* @example
* var res = source.shareReplay(3);
* var res = source.shareReplay(3, 500);
* var res = source.shareReplay(3, 500, scheduler);
*
* @param bufferSize [Optional] Maximum element count of the replay buffer.
* @param window [Optional] Maximum time length of the replay buffer.
* @param scheduler [Optional] Scheduler where connected observers within the selector function will be invoked on.
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source sequence.
*/
shareReplay(bufferSize?: number, window?: number, scheduler?: IScheduler): Observable<T>;
}
export interface BehaviorSubject<T> extends Subject<T> {
/**
* Gets the current value or throws an exception.
* Value is frozen after onCompleted is called.
* After onError is called always throws the specified exception.
* An exception is always thrown after dispose is called.
* @returns {Mixed} The initial value passed to the constructor until onNext is called; after which, the last value passed to onNext.
*/
getValue(): T;
}
interface BehaviorSubjectStatic {
/**
* Initializes a new instance of the BehaviorSubject class which creates a subject that caches its last value and starts with the specified value.
* @param {Mixed} value Initial value sent to observers when no other value has been received by the subject yet.
*/
new <T>(initialValue: T): BehaviorSubject<T>;
}
/**
* Represents a value that changes over time.
* Observers can subscribe to the subject to receive the last (or initial) value and all subsequent notifications.
*/
export var BehaviorSubject: BehaviorSubjectStatic;
export interface ReplaySubject<T> extends Subject<T> { }
interface ReplaySubjectStatic {
/**
* Initializes a new instance of the ReplaySubject class with the specified buffer size, window size and scheduler.
* @param {Number} [bufferSize] Maximum element count of the replay buffer.
* @param {Number} [windowSize] Maximum time length of the replay buffer.
* @param {Scheduler} [scheduler] Scheduler the observers are invoked on.
*/
new <T>(bufferSize?: number, window?: number, scheduler?: IScheduler): ReplaySubject<T>;
}
/**
* Represents an object that is both an observable sequence as well as an observer.
* Each notification is broadcasted to all subscribed and future observers, subject to buffer trimming policies.
*/
export var ReplaySubject: ReplaySubjectStatic;
export interface Observable<T> {
/**
* Returns an observable sequence that shares a single subscription to the underlying sequence. This observable sequence
* can be resubscribed to, even if all prior subscriptions have ended. (unlike `.publish().refCount()`)
* @returns {Observable} An observable sequence that contains the elements of a sequence produced by multicasting the source.
*/
singleInstance(): Observable<T>;
}
}
declare module "rx.binding" { export = Rx; } | PypiClean |
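// Illustrative usage sketch (comments only; not part of these declarations).
// Assuming an existing Rx.Observable<number> named `source` from the core Rx typings:
//
//   var published = source.publish();        // ConnectableObservable<number>
//   var subscription = published.subscribe(function (x) { console.log(x); });
//   var connection = published.connect();    // start pushing values to subscribers
//   var cached = source.shareReplay(1);      // late subscribers get the last value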
/youtube-series-downloader-1.5.1.tar.gz/youtube-series-downloader-1.5.1/README.md | # youtube-series-downloader
[![python](https://img.shields.io/pypi/pyversions/youtube-series-downloader.svg)](https://pypi.python.org/pypi/youtube-series-downloader)
[![Latest PyPI version](https://img.shields.io/pypi/v/youtube-series-downloader.svg)](https://pypi.python.org/pypi/youtube-series-downloader)
[![Downloads](https://pepy.tech/badge/youtube-series-downloader)](https://pepy.tech/project/youtube-series-downloader?right_color=orange)
[![Total alerts](https://img.shields.io/lgtm/alerts/g/Senth/youtube-series-downloader.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Senth/youtube-series-downloader/alerts/)
[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/Senth/youtube-series-downloader.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/Senth/youtube-series-downloader/context:python)
Downloads new YouTube episodes from configurable channels and adds them in a [plex](https://plex.tv/) friendly format.
## Features
- Download latest episodes from configurable channels. Configurable how many days back to look for an episode.
- Speed up videos. Configurable both globally and separate for each channel.
- Run as a daemon.
- Plex friendly output.
- Include/exclude regex filters to only download episodes matching the title.
## Usage
```usage
usage: youtube-series-downloader [-h] [-v] [-p] [-t THREADS] [-d] [--max-days-back MAX_DAYS_BACK] [--debug]
optional arguments:
-d, --daemon Run the script as a daemon instead of once.
-p, --pretend Only pretend to download, convert, and store files.
-t THREADS, --threads THREADS
How many threads you want to use (overrides config file).
--max-days-back MAX_DAYS_BACK
How many days back we should check for videos (overrides config file).
-h, --help show this help message and exit.
-v, --verbose Prints out helpful messages.
-s, --silent Turn off all messages except errors.
--debug Turn on debug messages. This automatically turns on --verbose as well.
```
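For example (illustrative values), to download once using 4 threads and only check the last 7 days, or to keep the downloader running as a daemon:
```properties
youtube-series-downloader -t 4 --max-days-back 7
youtube-series-downloader --daemon
```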
## Installation
Run the commands below and follow the instructions.
```properties
pip install --user --upgrade youtube-series-downloader
youtube-series-downloader
```
### Requirements
- ffmpeg to be installed and available through the PATH environmental variable.
## Authors
`youtube-series-downloader` was written by `Matteus Magnusson <[email protected]>`.
| PypiClean |
/python-kubernetes-wrapper-0.1.tar.gz/python-kubernetes-wrapper-0.1/kubernetes/_file_cache.py |
from hashlib import md5
import os
import tempfile
class _FileCacheError(Exception):
'''Base exception class for FileCache related errors'''
class _FileCache(object):
DEPTH = 3
def __init__(self,root_directory=None):
self._InitializeRootDirectory(root_directory)
def Get(self, key):
path = self._GetPath(key)
if os.path.exists(path):
return open(path).read()
else:
return None
def Set(self, key, data):
path = self._GetPath(key)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise _FileCacheError('%s exists but is not a directory' % directory)
temp_fd, temp_path = tempfile.mkstemp()
temp_fp = os.fdopen(temp_fd, 'w')
temp_fp.write(data)
temp_fp.close()
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
os.rename(temp_path, path)
def Remove(self, key):
path = self._GetPath(key)
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory ))
if os.path.exists(path):
os.remove(path)
def GetCachedTime(self, key):
path = self._GetPath(key)
if os.path.exists(path):
return os.path.getmtime(path)
else:
return None
def _GetUsername(self):
'''Attempt to find the username in a cross-platform fashion.'''
try:
return os.getenv('USER') or \
os.getenv('LOGNAME') or \
os.getenv('USERNAME') or \
os.getlogin() or \
'nobody'
    except (AttributeError, IOError, OSError):
return 'nobody'
def _GetTmpCachePath(self):
username = self._GetUsername()
cache_directory = 'python.cache_' + username
return os.path.join(tempfile.gettempdir(), cache_directory)
def _InitializeRootDirectory(self, root_directory):
if not root_directory:
root_directory = self._GetTmpCachePath()
root_directory = os.path.abspath(root_directory)
if not os.path.exists(root_directory):
os.mkdir(root_directory)
if not os.path.isdir(root_directory):
raise _FileCacheError('%s exists but is not a directory' %
root_directory)
self._root_directory = root_directory
def _GetPath(self, key):
try:
hashed_key = md5(key).hexdigest()
except TypeError:
hashed_key = md5.new(key).hexdigest()
return os.path.join(self._root_directory,
self._GetPrefix(hashed_key),
hashed_key)
def _GetPrefix(self, hashed_key):
return os.path.sep.join(hashed_key[0:_FileCache.DEPTH]) | PypiClean |
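# Illustrative usage sketch (not part of the original module). The key is passed
# as bytes so hashlib.md5 accepts it on both Python 2 and 3; the cache root
# defaults to the per-user temp directory chosen by _GetTmpCachePath().
if __name__ == '__main__':
  cache = _FileCache()
  cache.Set(b'example-key', 'example-value')
  print(cache.Get(b'example-key'))            # -> 'example-value'
  print(cache.GetCachedTime(b'example-key'))  # -> file modification timestamp
  cache.Remove(b'example-key')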
/beekeeper_chatbot_sdk-0.3.4.tar.gz/beekeeper_chatbot_sdk-0.3.4/beekeeper_chatbot_sdk/handlers.py | import re
from abc import ABC
from abc import abstractmethod
from beekeeper_sdk.conversations import MESSAGE_TYPE_REGULAR
from beekeeper_sdk.conversations import ConversationMessage
from beekeeper_sdk.status import Status
class AbstractHandler(ABC):
@abstractmethod
def matches(self, message) -> bool:
"""Should return true if this handler feels responsible for handling `message`, false otherwise
:param message: beekeeper_sdk.conversations.ConversationMessage object
:return Whether or not this handler wants to handle `message`"""
pass
@abstractmethod
def handle(self, bot, message):
"""Handles a message received by `bot`
:param bot: BeekeeperChatBot
:param message: beekeeper_sdk.conversations.ConversationMessage object"""
pass
class CommandHandler(AbstractHandler):
"""A handler that responds to slash commands of the form `/command`"""
def __init__(self, command, callback_function, message_types=None):
"""
:param command: The command this handler should respond to (not including the preceding slash)
:param callback_function: The function to call when a matching message is received.
The callback function is passed the BeekeeperChatBot and beekeeper_sdk.conversations.ConversationMessage as arguments
:param message_types: List of message types this handler should consider.
"""
self.message_types = message_types or [MESSAGE_TYPE_REGULAR]
self.command = command
self.callback_function = callback_function
def matches(self, message) -> bool:
if isinstance(message, ConversationMessage):
if message.get_type() in self.message_types:
if message.get_text():
if message.get_text().startswith("/{}".format(self.command)):
return True
return False
def handle(self, bot, message):
self.callback_function(bot, message)
class RegexHandler(AbstractHandler):
"""A handler that responds to messages matching a RegExp"""
def __init__(self, regex, callback_function, message_types=None):
"""
:param regex: A regular expression that matches the message texts this handler should respond to.
:param callback_function: The function to call when a matching message is received.
The callback function is passed the BeekeeperChatBot and beekeeper_sdk.conversations.ConversationMessage as arguments
:param message_types: List of message types this handler should consider.
"""
self.message_types = message_types or [MESSAGE_TYPE_REGULAR]
self.regex = re.compile(regex)
self.callback_function = callback_function
def matches(self, message) -> bool:
if isinstance(message, ConversationMessage):
if message.get_type() in self.message_types:
if message.get_text():
if self.regex.search(message.get_text()):
return True
return False
def handle(self, bot, message):
self.callback_function(bot, message)
class MessageHandler(AbstractHandler):
"""A handler that responds to all messages"""
def __init__(self, callback_function, message_types=None):
"""
:param callback_function: The function to call when a matching message is received.
The callback function is passed the BeekeeperChatBot and beekeeper_sdk.conversations.ConversationMessage as arguments
:param message_types: List of message types this handler should consider.
"""
self.message_types = message_types or [MESSAGE_TYPE_REGULAR]
self.callback_function = callback_function
def matches(self, message) -> bool:
if isinstance(message, ConversationMessage):
if message.get_type() in self.message_types:
return True
return False
def handle(self, bot, message):
self.callback_function(bot, message)
class StatusUpdateHandler(AbstractHandler):
"""A handler that responds to status changes"""
def __init__(self, callback_function):
"""
:param callback_function: The function to call when a status update is received.
The callback function is passed the BeekeeperChatBot and beekeeper_sdk.status.Status as arguments
"""
self.callback_function = callback_function
def matches(self, status) -> bool:
if isinstance(status, Status):
return True
return False
def handle(self, bot, message):
self.callback_function(bot, message) | PypiClean |
/aos-cube-0.5.11.tar.gz/aos-cube-0.5.11/aos/constant.py | import os
# Default paths to Mercurial and Git
hg_cmd = 'hg'
git_cmd = 'git'
ignores = [
"make",
"make.exe",
"Makefile",
"build",
".cproject",
".gdbinit",
".openocd_cfg",
".project",
"aos",
".aos",
]
toolchains = {
'arm-none-eabi':{
'name': 'gcc-arm-none-eabi',
'path': 'build/compiler/gcc-arm-none-eabi',
'command': 'arm-none-eabi-gcc',
'version': 'all',
'use_global': True,
'Win32_url':'https://gitee.com/alios-things/gcc-arm-none-eabi-win32.git',
'Linux32_url': 'https://gitee.com/alios-things/gcc-arm-none-eabi-linux.git',
'Linux64_url': 'https://gitee.com/alios-things/gcc-arm-none-eabi-linux.git',
'OSX_url': 'https://gitee.com/alios-things/gcc-arm-none-eabi-osx.git',
},
'xtensa-esp32':{
'name': 'gcc-xtensa-esp32',
'path': 'build/compiler/gcc-xtensa-esp32',
'command': 'xtensa-esp32-elf-gcc',
'version': '5.2.0',
'use_global': True,
'Win32_url': 'https://gitee.com/alios-things/gcc-xtensa-esp32-win32.git',
'Linux32_url': 'https://gitee.com/alios-things/gcc-xtensa-esp32-linux.git',
'Linux64_url': 'https://gitee.com/alios-things/gcc-xtensa-esp32-linux.git',
'OSX_url': 'https://gitee.com/alios-things/gcc-xtensa-esp32-osx.git',
},
'xtensa-lx106':{
'name': 'gcc-xtensa-lx106',
'path': 'build/compiler/gcc-xtensa-lx106',
'command': 'xtensa-lx106-elf-gcc',
'version': '4.8.2',
'use_global': True,
'Win32_url': 'https://gitee.com/alios-things/gcc-xtensa-lx106-win32.git',
'Linux32_url': 'https://gitee.com/alios-things/gcc-xtensa-lx106-linux.git',
'Linux64_url': 'https://gitee.com/alios-things/gcc-xtensa-lx106-linux.git',
'OSX_url': 'https://gitee.com/alios-things/gcc-xtensa-lx106-osx.git',
},
'csky-abiv2': {
'name': 'gcc-csky-abiv2',
'path': 'build/compiler/gcc-csky-abiv2',
'command': 'csky-abiv2-elf-gcc',
'version': 'all',
'use_global': True,
'Win32_url': 'https://gitee.com/alios-things/gcc-csky-abiv2-win32.git',
'Linux32_url': 'https://gitee.com/alios-things/gcc-csky-abiv2-linux.git',
'Linux64_url': 'https://gitee.com/alios-things/gcc-csky-abiv2-linux.git',
'OSX_url': '',
},
'arm-rockchip-linux-gnueabihf': {
'name': 'gcc-arm-rockchip-linux-gnueabihf',
'path': 'build/compiler/usr',
'path_specific': True,
'command': 'arm-rockchip-linux-gnueabihf-gcc',
'version': 'all',
'use_global': True,
'Win32_url': '',
'Linux32_url': 'https://gitee.com/alios-things/arm-rockchip-linux-gnueabihf-linux.git',
'Linux64_url': 'https://gitee.com/alios-things/arm-rockchip-linux-gnueabihf-linux.git',
'OSX_url': '',
},
'nds32le-elf-newlib-v3': {
'name': 'nds32le-elf-newlib-v3',
'path': 'build/compiler/nds32le-elf-newlib-v3',
'path_specific': True,
'command': 'nds32le-elf-gcc',
'version': 'all',
'use_global': True,
'Win32_url': '',
'Linux32_url': 'https://gitee.com/alios-things/gcc-nds32le-linux.git',
'Linux64_url': 'https://gitee.com/alios-things/gcc-nds32le-linux.git',
'OSX_url': '',
},
'openocd': {
'name': 'OpenOCD',
'path': 'build/OpenOCD',
'command': 'openocd',
'version': '0.10.0',
'use_global': False,
'Win32_url': 'https://gitee.com/alios-things/openocd-win32.git',
'Linux32_url': '',
'Linux64_url': 'https://gitee.com/alios-things/openocd-linux64.git',
'OSX_url': 'https://gitee.com/alios-things/openocd-osx.git',
}
}
boards = {
'amebaz_dev':[toolchains['arm-none-eabi']],
'atsame54-xpro':[toolchains['arm-none-eabi']],
'b_l475e':[toolchains['arm-none-eabi']],
'bk7231devkitc':[toolchains['arm-none-eabi']],
'bk7231udevkitc':[toolchains['arm-none-eabi']],
'bk3435devkit':[toolchains['arm-none-eabi']],
'developerkit':[toolchains['arm-none-eabi']],
'eml3047':[toolchains['arm-none-eabi']],
'esp32devkitc':[toolchains['xtensa-esp32']],
'esp8266':[toolchains['xtensa-lx106']],
'frdmkl27z':[toolchains['arm-none-eabi']],
'hobbit1_evb':[toolchains['csky-abiv2']],
'dh5021a_evb':[toolchains['csky-abiv2']],
'cb2201':[toolchains['csky-abiv2']],
'lpcxpresso54102':[toolchains['arm-none-eabi']],
'mk1101':[toolchains['arm-none-eabi']],
'mk3060':[toolchains['arm-none-eabi']],
'mk3080':[toolchains['arm-none-eabi']],
'mk3165':[toolchains['arm-none-eabi']],
'mk3166':[toolchains['arm-none-eabi']],
'mk3239':[toolchains['arm-none-eabi']],
'pca10056':[toolchains['arm-none-eabi']],
'pca10040':[toolchains['arm-none-eabi']],
'starterkit':[toolchains['arm-none-eabi']],
'stm32f769i-discovery':[toolchains['arm-none-eabi']],
'stm32f412zg-nucleo':[toolchains['arm-none-eabi']],
'stm32l073rz-nucleo':[toolchains['arm-none-eabi']],
'stm32l432kc-nucleo':[toolchains['arm-none-eabi']],
'stm32l433rc-nucleo':[toolchains['arm-none-eabi']],
'stm32l476rg-nucleo':[toolchains['arm-none-eabi']],
'stm32l496g-discovery':[toolchains['arm-none-eabi']],
'sv6266_evb':[toolchains['nds32le-elf-newlib-v3']],
'msp432p4111launchpad':[toolchains['arm-none-eabi']],
'xr871evb':[toolchains['arm-none-eabi']],
'rk1108':[toolchains['arm-rockchip-linux-gnueabihf']],
'uno-91h':[toolchains['arm-none-eabi']],
'ch6121evb':[toolchains['arm-none-eabi']],
'tg7100b':[toolchains['arm-none-eabi']],
}
# verbose logging
verbose = False
very_verbose = False
install_requirements = True
cache_repositories = True
# stores current working directory for recursive operations
cwd_root = ""
APP_PATH = 'app_path'
PROGRAM_PATH = 'program_path'
AOS_SDK_PATH = 'AOS_SDK_PATH'
OS_PATH = 'os_path'
OS_NAME = 'AliOS-Things'
PATH_TYPE = 'path_type'
AOS_COMPONENT_BASE_URL = 'https://github.com/AliOS-Things'
CUBE_MAKEFILE = 'cube.mk'
CUBE_MODIFY = 'cube_modify'
REMOTE_PATH = 'remote'
OS_CONFIG = "project.ini"
COMP_INFO_DB_FILE = "component_info_publish.db"
OS_REPO = "http://116.62.245.240/AliOSThings-2-packages/"
#OS_REPO = "http://11.238.148.13:81/2_test/"
OS_CACHE = os.path.join(os.path.expanduser("~"), ".aoscache")
OS_DEF_COMPS = [ "buildsystem", "system_include"]
# aos ota config
OTA_SERVER = "116.62.245.240"
OTA_EMQ_PORT = 17173
OTA_EMQ_TOKEN = "QWxpT1MtVGhpbmdzLXVkZXZ8dWRldiFAIyQl"
OTA_WEBSERVER_PORT = 7001
OTA_UDEBUG_LIB = 'udev.a'
# Path to scripts in OS
CHECK_WRAPPER = os.path.sep.join(["build", "check", "check_wrapper.py"])
GEN_SAL_STAGING = os.path.sep.join(["build", "scripts", "gen_sal_staging.py"])
GEN_MAL_STAGING = os.path.sep.join(["build", "scripts", "gen_mal_staging.py"])
GEN_NEWPROJECT = os.path.sep.join(["build", "scripts", "gen_newproject.py"])
GEN_APPSOURCE = os.path.sep.join(["build", "scripts", "gen_appsource.py"])
GEN_NEW_COMPONENT = os.path.sep.join(["build", "scripts", "gen_new_component.py"])
# App config
APP_CONFIG = ".aos"
APP_UPDATE_MKFILE = os.path.sep.join(["build", "scripts", "app_update_aosmk.py"])
APP_GEN_INCLUDES = os.path.sep.join(["build", "scripts", "app_gen_comp_index.py"])
APP_INCLUDES = "aos_comp_index.json"
# File to store user's choice of whether or not to participate in the tool improvement plan.
AOS_INVESTIGATION_FILE = os.path.join(os.path.expanduser("~"), ".aos", ".ucubeplan")
# AOS query/report server
AOS_SERVER_URL = "https://os-activation.iot.aliyun.com/cube"
AOS_HTTP_HEADER = "Content-Type:application/json"
AOS_HTTP_METHOD = "POST"
# print debug message or not, bool value
DEBUG_PRINT = False
# No SDK/SRC messages, widely used
_HINT_COMMON = "No AliOS Things source directory found. To make things work, please:\n\n"
_SET_SDK_HINT = "-> Set AOS_SDK_PATH environment variable to a valid\n"\
" AliOS-Things source directory as below:\n\n"\
" * Linux/MacOS/Git-Bash:\n"\
" $ export AOS_SDK_PATH=<path_to_AliOS_Things_src>\n"\
" * Windows CMD:\n"\
" > set AOS_SDK_PATH=<path_to_AliOS_Things_src>\n\n"\
" Please set it on system level if you want so.\n"
_RUN_INSIDE_SDK_HINT = "-> Run this command in AliOS Things source directory.\n"
NO_SDK_HINT = _HINT_COMMON + _SET_SDK_HINT
NOT_INSIDE_SDK_HINT = _HINT_COMMON + _RUN_INSIDE_SDK_HINT
NO_AOSSRC_HINT = _HINT_COMMON + _RUN_INSIDE_SDK_HINT + "\nOr,\n\n" + _SET_SDK_HINT | PypiClean |
/jupyter_contrib_nbextensions-0.7.0.tar.gz/jupyter_contrib_nbextensions-0.7.0/src/jupyter_contrib_nbextensions/nbextensions/spellchecker/typo/typo.js | define(['require'], function (requirejs) {
'use strict';
/**
* Typo is a JavaScript implementation of a spellchecker using hunspell-style
* dictionaries.
*/
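    // Illustrative usage (the dictionary name, data variables and options below are
    // assumptions for this example, not values defined in this file):
    //   var dict = new Typo("en_US", affData, wordsData, {dictionaryPath: "typo/dictionaries"});
    //   dict.check("color");          // true if the word (or a case variant) is in the dictionary
    //   dict.suggest("speling", 3);   // up to 3 correction suggestions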
/**
* Typo constructor.
*
* @param {String} [dictionary] The locale code of the dictionary being used. e.g.,
* "en_US". This is only used to auto-load dictionaries.
* @param {String} [affData] The data from the dictionary's .aff file. If omitted
* and Typo.js is being used in a Chrome extension, the .aff
* file will be loaded automatically from
* lib/typo/dictionaries/[dictionary]/[dictionary].aff
* In other environments, it will be loaded from
* [settings.dictionaryPath]/dictionaries/[dictionary]/[dictionary].aff
* @param {String} [wordsData] The data from the dictionary's .dic file. If omitted
* and Typo.js is being used in a Chrome extension, the .dic
* file will be loaded automatically from
* lib/typo/dictionaries/[dictionary]/[dictionary].dic
* In other environments, it will be loaded from
* [settings.dictionaryPath]/dictionaries/[dictionary]/[dictionary].dic
* @param {Object} [settings] Constructor settings. Available properties are:
* {String} [dictionaryPath]: path to load dictionary from in non-chrome
* environment.
* {Object} [flags]: flag information.
*
*
* @returns {Typo} A Typo object.
*/
var Typo = function (dictionary, affData, wordsData, settings) {
settings = settings || {};
this.dictionary = null;
this.rules = {};
this.dictionaryTable = {};
this.compoundRules = [];
this.compoundRuleCodes = {};
this.replacementTable = [];
this.flags = settings.flags || {};
if (dictionary) {
this.dictionary = dictionary;
if (typeof window !== 'undefined' && 'chrome' in window && 'extension' in window.chrome && 'getURL' in window.chrome.extension) {
if (!affData) affData = this._readFile(chrome.extension.getURL("lib/typo/dictionaries/" + dictionary + "/" + dictionary + ".aff"));
if (!wordsData) wordsData = this._readFile(chrome.extension.getURL("lib/typo/dictionaries/" + dictionary + "/" + dictionary + ".dic"));
} else {
if (settings.dictionaryPath) {
var path = settings.dictionaryPath;
}
else if (typeof __dirname !== 'undefined') {
var path = __dirname + '/dictionaries';
}
else {
var path = './dictionaries';
}
if (!affData) affData = this._readFile(path + "/" + dictionary + "/" + dictionary + ".aff");
if (!wordsData) wordsData = this._readFile(path + "/" + dictionary + "/" + dictionary + ".dic");
}
this.rules = this._parseAFF(affData);
// Save the rule codes that are used in compound rules.
this.compoundRuleCodes = {};
for (var i = 0, _len = this.compoundRules.length; i < _len; i++) {
var rule = this.compoundRules[i];
for (var j = 0, _jlen = rule.length; j < _jlen; j++) {
this.compoundRuleCodes[rule[j]] = [];
}
}
// If we add this ONLYINCOMPOUND flag to this.compoundRuleCodes, then _parseDIC
// will do the work of saving the list of words that are compound-only.
if ("ONLYINCOMPOUND" in this.flags) {
this.compoundRuleCodes[this.flags.ONLYINCOMPOUND] = [];
}
this.dictionaryTable = this._parseDIC(wordsData);
// Get rid of any codes from the compound rule codes that are never used
// (or that were special regex characters). Not especially necessary...
for (var i in this.compoundRuleCodes) {
if (this.compoundRuleCodes[i].length == 0) {
delete this.compoundRuleCodes[i];
}
}
// Build the full regular expressions for each compound rule.
// I have a feeling (but no confirmation yet) that this method of
// testing for compound words is probably slow.
for (var i = 0, _len = this.compoundRules.length; i < _len; i++) {
var ruleText = this.compoundRules[i];
var expressionText = "";
for (var j = 0, _jlen = ruleText.length; j < _jlen; j++) {
var character = ruleText[j];
if (character in this.compoundRuleCodes) {
expressionText += "(" + this.compoundRuleCodes[character].join("|") + ")";
}
else {
expressionText += character;
}
}
this.compoundRules[i] = new RegExp(expressionText, "i");
}
}
return this;
};
Typo.prototype = {
/**
* Loads a Typo instance from a hash of all of the Typo properties.
*
* @param object obj A hash of Typo properties, probably gotten from a JSON.parse(JSON.stringify(typo_instance)).
*/
load : function (obj) {
for (var i in obj) {
this[i] = obj[i];
}
return this;
},
/**
* Read the contents of a file.
*
* @param {String} path The path (relative) to the file.
     * @param {String} [charset="utf8"] The expected charset of the file
* @returns string The file data.
*/
_readFile : function (path, charset) {
if (!charset) charset = "utf8";
path = requirejs.toUrl(path);
if (typeof XMLHttpRequest !== 'undefined') {
var req = new XMLHttpRequest();
req.open("GET", path, false);
if (req.overrideMimeType)
req.overrideMimeType("text/plain; charset=" + charset);
req.send(null);
return req.responseText;
}
else if (typeof requirejs !== 'undefined') {
// Node.js
var fs = requirejs("fs");
try {
if (fs.existsSync(path)) {
var stats = fs.statSync(path);
var fileDescriptor = fs.openSync(path, 'r');
var buffer = new Buffer(stats.size);
fs.readSync(fileDescriptor, buffer, 0, buffer.length, null);
return buffer.toString(charset, 0, buffer.length);
}
else {
console.log("Path " + path + " does not exist.");
}
} catch (e) {
console.log(e);
return '';
}
}
},
/**
* Parse the rules out from a .aff file.
*
* @param {String} data The contents of the affix file.
* @returns object The rules from the file.
*/
_parseAFF : function (data) {
var rules = {};
// Remove comment lines
data = this._removeAffixComments(data);
var lines = data.split("\n");
for (var i = 0, _len = lines.length; i < _len; i++) {
var line = lines[i];
var definitionParts = line.split(/\s+/);
var ruleType = definitionParts[0];
if (ruleType == "PFX" || ruleType == "SFX") {
var ruleCode = definitionParts[1];
var combineable = definitionParts[2];
var numEntries = parseInt(definitionParts[3], 10);
var entries = [];
for (var j = i + 1, _jlen = i + 1 + numEntries; j < _jlen; j++) {
var line = lines[j];
var lineParts = line.split(/\s+/);
var charactersToRemove = lineParts[2];
var additionParts = lineParts[3].split("/");
var charactersToAdd = additionParts[0];
if (charactersToAdd === "0") charactersToAdd = "";
var continuationClasses = this.parseRuleCodes(additionParts[1]);
var regexToMatch = lineParts[4];
var entry = {};
entry.add = charactersToAdd;
if (continuationClasses.length > 0) entry.continuationClasses = continuationClasses;
if (regexToMatch !== ".") {
if (ruleType === "SFX") {
entry.match = new RegExp(regexToMatch + "$");
}
else {
entry.match = new RegExp("^" + regexToMatch);
}
}
if (charactersToRemove != "0") {
if (ruleType === "SFX") {
entry.remove = new RegExp(charactersToRemove + "$");
}
else {
entry.remove = charactersToRemove;
}
}
entries.push(entry);
}
rules[ruleCode] = { "type" : ruleType, "combineable" : (combineable == "Y"), "entries" : entries };
i += numEntries;
}
else if (ruleType === "COMPOUNDRULE") {
var numEntries = parseInt(definitionParts[1], 10);
for (var j = i + 1, _jlen = i + 1 + numEntries; j < _jlen; j++) {
var line = lines[j];
var lineParts = line.split(/\s+/);
this.compoundRules.push(lineParts[1]);
}
i += numEntries;
}
else if (ruleType === "REP") {
var lineParts = line.split(/\s+/);
if (lineParts.length === 3) {
this.replacementTable.push([ lineParts[1], lineParts[2] ]);
}
}
else {
// ONLYINCOMPOUND
// COMPOUNDMIN
// FLAG
// KEEPCASE
// NEEDAFFIX
this.flags[ruleType] = definitionParts[1];
}
}
return rules;
},
/**
* Removes comment lines and then cleans up blank lines and trailing whitespace.
*
* @param {String} data The data from an affix file.
* @return {String} The cleaned-up data.
*/
_removeAffixComments : function (data) {
// Remove comments
data = data.replace(/#.*$/mg, "");
// Trim each line
data = data.replace(/^\s\s*/m, '').replace(/\s\s*$/m, '');
// Remove blank lines.
data = data.replace(/\n{2,}/g, "\n");
// Trim the entire string
data = data.replace(/^\s\s*/, '').replace(/\s\s*$/, '');
return data;
},
/**
* Parses the words out from the .dic file.
*
* @param {String} data The data from the dictionary file.
* @returns object The lookup table containing all of the words and
* word forms from the dictionary.
*/
_parseDIC : function (data) {
data = this._removeDicComments(data);
var lines = data.split("\n");
var dictionaryTable = {};
function addWord(word, rules) {
// Some dictionaries will list the same word multiple times with different rule sets.
if (!(word in dictionaryTable) || typeof dictionaryTable[word] != 'object') {
dictionaryTable[word] = [];
}
dictionaryTable[word].push(rules);
}
// The first line is the number of words in the dictionary.
for (var i = 1, _len = lines.length; i < _len; i++) {
var line = lines[i];
var parts = line.split("/", 2);
var word = parts[0];
// Now for each affix rule, generate that form of the word.
if (parts.length > 1) {
var ruleCodesArray = this.parseRuleCodes(parts[1]);
// Save the ruleCodes for compound word situations.
if (!("NEEDAFFIX" in this.flags) || ruleCodesArray.indexOf(this.flags.NEEDAFFIX) == -1) {
addWord(word, ruleCodesArray);
}
for (var j = 0, _jlen = ruleCodesArray.length; j < _jlen; j++) {
var code = ruleCodesArray[j];
var rule = this.rules[code];
if (rule) {
var newWords = this._applyRule(word, rule);
for (var ii = 0, _iilen = newWords.length; ii < _iilen; ii++) {
var newWord = newWords[ii];
addWord(newWord, []);
if (rule.combineable) {
for (var k = j + 1; k < _jlen; k++) {
var combineCode = ruleCodesArray[k];
var combineRule = this.rules[combineCode];
if (combineRule) {
if (combineRule.combineable && (rule.type != combineRule.type)) {
var otherNewWords = this._applyRule(newWord, combineRule);
for (var iii = 0, _iiilen = otherNewWords.length; iii < _iiilen; iii++) {
var otherNewWord = otherNewWords[iii];
addWord(otherNewWord, []);
}
}
}
}
}
}
}
if (code in this.compoundRuleCodes) {
this.compoundRuleCodes[code].push(word);
}
}
}
else {
addWord(word.trim(), []);
}
}
return dictionaryTable;
},
/**
* Removes comment lines and then cleans up blank lines and trailing whitespace.
*
* @param {String} data The data from a .dic file.
* @return {String} The cleaned-up data.
*/
_removeDicComments : function (data) {
// I can't find any official documentation on it, but at least the de_DE
// dictionary uses tab-indented lines as comments.
// Remove comments
data = data.replace(/^\t.*$/mg, "");
return data;
},
parseRuleCodes : function (textCodes) {
if (!textCodes) {
return [];
}
else if (!("FLAG" in this.flags)) {
return textCodes.split("");
}
else if (this.flags.FLAG === "long") {
var flags = [];
for (var i = 0, _len = textCodes.length; i < _len; i += 2) {
flags.push(textCodes.substr(i, 2));
}
return flags;
}
else if (this.flags.FLAG === "num") {
            return textCodes.split(",");
}
},
/**
* Applies an affix rule to a word.
*
* @param {String} word The base word.
* @param {Object} rule The affix rule.
* @returns {String[]} The new words generated by the rule.
*/
_applyRule : function (word, rule) {
var entries = rule.entries;
var newWords = [];
for (var i = 0, _len = entries.length; i < _len; i++) {
var entry = entries[i];
if (!entry.match || word.match(entry.match)) {
var newWord = word;
if (entry.remove) {
newWord = newWord.replace(entry.remove, "");
}
if (rule.type === "SFX") {
newWord = newWord + entry.add;
}
else {
newWord = entry.add + newWord;
}
newWords.push(newWord);
if ("continuationClasses" in entry) {
for (var j = 0, _jlen = entry.continuationClasses.length; j < _jlen; j++) {
var continuationRule = this.rules[entry.continuationClasses[j]];
if (continuationRule) {
newWords = newWords.concat(this._applyRule(newWord, continuationRule));
}
/*
else {
// This shouldn't happen, but it does, at least in the de_DE dictionary.
// I think the author mistakenly supplied lower-case rule codes instead
// of upper-case.
}
*/
}
}
}
}
return newWords;
},
/**
* Checks whether a word or a capitalization variant exists in the current dictionary.
* The word is trimmed and several variations of capitalizations are checked.
* If you want to check a word without any changes made to it, call checkExact()
*
* @see http://blog.stevenlevithan.com/archives/faster-trim-javascript re:trimming function
*
* @param {String} aWord The word to check.
* @returns {Boolean}
*/
check : function (aWord) {
// Remove leading and trailing whitespace
var trimmedWord = aWord.replace(/^\s\s*/, '').replace(/\s\s*$/, '');
if (this.checkExact(trimmedWord)) {
return true;
}
// The exact word is not in the dictionary.
if (trimmedWord.toUpperCase() === trimmedWord) {
// The word was supplied in all uppercase.
// Check for a capitalized form of the word.
var capitalizedWord = trimmedWord[0] + trimmedWord.substring(1).toLowerCase();
if (this.hasFlag(capitalizedWord, "KEEPCASE")) {
// Capitalization variants are not allowed for this word.
return false;
}
if (this.checkExact(capitalizedWord)) {
return true;
}
}
var lowercaseWord = trimmedWord.toLowerCase();
if (lowercaseWord !== trimmedWord) {
if (this.hasFlag(lowercaseWord, "KEEPCASE")) {
// Capitalization variants are not allowed for this word.
return false;
}
// Check for a lowercase form
if (this.checkExact(lowercaseWord)) {
return true;
}
}
return false;
},
/**
* Checks whether a word exists in the current dictionary.
*
* @param {String} word The word to check.
* @returns {Boolean}
*/
checkExact : function (word) {
var ruleCodes = this.dictionaryTable[word];
if (typeof ruleCodes === 'undefined') {
// Check if this might be a compound word.
if ("COMPOUNDMIN" in this.flags && word.length >= this.flags.COMPOUNDMIN) {
for (var i = 0, _len = this.compoundRules.length; i < _len; i++) {
if (word.match(this.compoundRules[i])) {
return true;
}
}
}
return false;
}
else if (typeof ruleCodes === 'object') { // this.dictionary['hasOwnProperty'] will be a function.
for (var i = 0, _len = ruleCodes.length; i < _len; i++) {
if (!this.hasFlag(word, "ONLYINCOMPOUND", ruleCodes[i])) {
return true;
}
}
return false;
}
},
/**
* Looks up whether a given word is flagged with a given flag.
*
* @param {String} word The word in question.
* @param {String} flag The flag in question.
* @return {Boolean}
*/
hasFlag : function (word, flag, wordFlags) {
if (flag in this.flags) {
if (typeof wordFlags === 'undefined') {
var wordFlags = Array.prototype.concat.apply([], this.dictionaryTable[word]);
}
if (wordFlags && wordFlags.indexOf(this.flags[flag]) !== -1) {
return true;
}
}
return false;
},
/**
* Returns a list of suggestions for a misspelled word.
*
* @see http://www.norvig.com/spell-correct.html for the basis of this suggestor.
* This suggestor is primitive, but it works.
*
* @param {String} word The misspelling.
* @param {Number} [limit=5] The maximum number of suggestions to return.
* @returns {String[]} The array of suggestions.
*/
alphabet : "",
suggest : function (word, limit) {
if (!limit) limit = 5;
if (this.check(word)) return [];
// Check the replacement table.
for (var i = 0, _len = this.replacementTable.length; i < _len; i++) {
var replacementEntry = this.replacementTable[i];
if (word.indexOf(replacementEntry[0]) !== -1) {
var correctedWord = word.replace(replacementEntry[0], replacementEntry[1]);
if (this.check(correctedWord)) {
return [ correctedWord ];
}
}
}
var self = this;
self.alphabet = "abcdefghijklmnopqrstuvwxyz";
/*
if (!self.alphabet) {
// Use the alphabet as implicitly defined by the words in the dictionary.
var alphaHash = {};
for (var i in self.dictionaryTable) {
for (var j = 0, _len = i.length; j < _len; j++) {
alphaHash[i[j]] = true;
}
}
for (var i in alphaHash) {
self.alphabet += i;
}
var alphaArray = self.alphabet.split("");
alphaArray.sort();
self.alphabet = alphaArray.join("");
}
*/
function edits1(words) {
var rv = [];
for (var ii = 0, _iilen = words.length; ii < _iilen; ii++) {
var word = words[ii];
var splits = [];
for (var i = 0, _len = word.length + 1; i < _len; i++) {
splits.push([ word.substring(0, i), word.substring(i, word.length) ]);
}
var deletes = [];
for (var i = 0, _len = splits.length; i < _len; i++) {
var s = splits[i];
if (s[1]) {
deletes.push(s[0] + s[1].substring(1));
}
}
var transposes = [];
for (var i = 0, _len = splits.length; i < _len; i++) {
var s = splits[i];
if (s[1].length > 1) {
transposes.push(s[0] + s[1][1] + s[1][0] + s[1].substring(2));
}
}
var replaces = [];
for (var i = 0, _len = splits.length; i < _len; i++) {
var s = splits[i];
if (s[1]) {
for (var j = 0, _jlen = self.alphabet.length; j < _jlen; j++) {
replaces.push(s[0] + self.alphabet[j] + s[1].substring(1));
}
}
}
var inserts = [];
for (var i = 0, _len = splits.length; i < _len; i++) {
var s = splits[i];
if (s[1]) {
for (var j = 0, _jlen = self.alphabet.length; j < _jlen; j++) {
                            inserts.push(s[0] + self.alphabet[j] + s[1]);
}
}
}
rv = rv.concat(deletes);
rv = rv.concat(transposes);
rv = rv.concat(replaces);
rv = rv.concat(inserts);
}
return rv;
}
function known(words) {
var rv = [];
for (var i = 0; i < words.length; i++) {
if (self.check(words[i])) {
rv.push(words[i]);
}
}
return rv;
}
function correct(word) {
// Get the edit-distance-1 and edit-distance-2 forms of this word.
var ed1 = edits1([word]);
var ed2 = edits1(ed1);
var corrections = known(ed1).concat(known(ed2));
// Sort the edits based on how many different ways they were created.
var weighted_corrections = {};
for (var i = 0, _len = corrections.length; i < _len; i++) {
if (!(corrections[i] in weighted_corrections)) {
weighted_corrections[corrections[i]] = 1;
}
else {
weighted_corrections[corrections[i]] += 1;
}
}
var sorted_corrections = [];
for (var i in weighted_corrections) {
sorted_corrections.push([ i, weighted_corrections[i] ]);
}
function sorter(a, b) {
if (a[1] < b[1]) {
return -1;
}
return 1;
}
sorted_corrections.sort(sorter).reverse();
var rv = [];
for (var i = 0, _len = Math.min(limit, sorted_corrections.length); i < _len; i++) {
if (!self.hasFlag(sorted_corrections[i][0], "NOSUGGEST")) {
rv.push(sorted_corrections[i][0]);
}
}
return rv;
}
return correct(word);
}
};
// Support for use as a node.js module.
if (typeof module !== 'undefined') {
module.exports = Typo;
}
return Typo;
}); | PypiClean |
/aRez-0.2.4.tar.gz/aRez-0.2.4/arez/mixins.py | from __future__ import annotations
from math import nan, floor
from functools import wraps
from datetime import datetime
from abc import abstractmethod
from typing import (
Optional, Union, List, Tuple, Generator, Awaitable, TypeVar, Literal, cast, TYPE_CHECKING
)
from . import responses
from .enums import Queue, Region
if TYPE_CHECKING:
from .items import Device
from .champion import Champion, Skin
from .cache import DataCache, CacheEntry
from .player import PartialPlayer, Player
__all__ = [
"CacheClient",
"CacheObject",
"Expandable",
"WinLoseMixin",
"KDAMixin",
"MatchMixin",
"MatchPlayerMixin",
]
_A = TypeVar("_A")
class CacheClient:
"""
    Abstract base class that most (if not all) objects interacting with the API inherit from.
    Provides access to the core of this wrapper, that is, the `.request` method
    and the cache system.
"""
def __init__(self, api: DataCache):
self._api = api
class CacheObject:
"""
Base class representing objects that can be returned from the data cache.
You will sometimes find these on objects returned from the API, when the cache was either
incomplete or disabled.
Attributes
----------
id : int
The object's ID.\n
Defaults to ``0`` if not set.
name : str
The object's name.\n
Defaults to ``Unknown`` if not set.
"""
def __init__(self, *, id: int = 0, name: str = "Unknown"):
self._id: int = id
self._name: str = name
self._hash: Optional[int] = None
@property
def id(self) -> int:
return self._id
@property
def name(self) -> str:
return self._name
def __repr__(self) -> str:
return f"{self.__class__.__name__}: {self._name}({self._id})"
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
if self._id != 0 and other._id != 0:
return self._id == other._id
elif self._name != "Unknown" and other._name != "Unknown":
return self._name == other._name
return NotImplemented
def __hash__(self) -> int:
if self._hash is None:
self._hash = hash((self.__class__.__name__, self._name, self._id))
return self._hash
class Expandable(Awaitable[_A]):
"""
An abstract class that can be used to make partial objects "expandable" to their full version.
Subclasses should overwrite the `_expand` method with proper implementation, returning
the full expanded object.
"""
# Subclasses will have their `_expand` method doc linked as the `__await__` doc.
def __init_subclass__(cls):
# Create a new await method
# Copy over the docstring and annotations
@wraps(cls._expand)
def __await__(self: Expandable[_A]):
return self._expand().__await__()
# Attach the method to the subclass
setattr(cls, "__await__", __await__)
# solely to satisfy MyPy
def __await__(self) -> Generator[_A, None, _A]:
raise NotImplementedError
@abstractmethod
async def _expand(self) -> _A:
raise NotImplementedError
class WinLoseMixin:
"""
Represents player's wins and losses. Contains useful helper attributes.
Attributes
----------
wins : int
The amount of wins.
losses : int
The amount of losses.
"""
def __init__(self, *, wins: int, losses: int):
self.wins = wins
self.losses = losses
@property
def matches_played(self) -> int:
"""
The amount of matches played. This is just ``wins + losses``.
:type: int
"""
return self.wins + self.losses
@property
def winrate(self) -> float:
"""
The calculated winrate as a fraction.\n
`nan` is returned if there was no matches played.
:type: float
"""
return self.wins / self.matches_played if self.matches_played > 0 else nan
@property
def winrate_text(self) -> str:
"""
The calculated winrate as a percentage string of up to 3 decimal places accuracy.\n
The format is: ``"48.213%"``\n
``"N/A"`` is returned if there was no matches played.
:type: str
"""
return f"{round(self.winrate * 100, 3)}%" if self.matches_played > 0 else "N/A"
class KDAMixin:
"""
Represents player's kills, deaths and assists. Contains useful helper attributes.
Attributes
----------
kills : int
The amount of kills.
deaths : int
The amount of deaths.
assists : int
The amount of assists.
"""
def __init__(self, *, kills: int, deaths: int, assists: int):
self.kills: int = kills
self.deaths: int = deaths
self.assists: int = assists
@property
def kda(self) -> float:
"""
The calculated KDA.\n
The formula is: ``(kills + assists / 2) / deaths``.\n
`nan` is returned if there was no deaths.
:type: float
"""
return (self.kills + self.assists / 2) / self.deaths if self.deaths > 0 else nan
@property
def kda2(self) -> float:
"""
The calculated KDA.\n
The formula is: ``(kills + assists / 2) / max(deaths, 1)``, treating 0 and 1 deaths
the same, meaning this will never return `nan`.
:type: float
"""
return (self.kills + self.assists / 2) / max(self.deaths, 1)
@property
def df(self) -> int:
"""
The Dominance Factor.\n
The formula is: ``kills * 2 + deaths * -3 + assists``.\n
The value signifies how "useful" the person was to the team overall.
Best used when scaled and compared between team members in a match (allied and enemy).
:type: int
"""
return self.kills * 2 + self.deaths * -3 + self.assists
@property
def kda_text(self) -> str:
"""
Kills, deaths and assists as a slash-delimited string.\n
The format is: ``kills/deaths/assists``, or ``1/2/3``.
:type: str
"""
return f"{self.kills}/{self.deaths}/{self.assists}"
class MatchMixin:
"""
Represents basic information about a match.
Attributes
----------
id : int
The match ID.
queue : Queue
The queue this match was played in.
region : Region
The region this match was played in.
timestamp : datetime.datetime
A timestamp of when this match happened.
duration : Duration
The duration of the match.
map_name : str
The name of the map played.
score : Tuple[int, int]
The match's ending score.
winning_team : Literal[1, 2]
The winning team of this match.
"""
def __init__(
self, match_data: Union[responses.MatchPlayerObject, responses.HistoryMatchObject]
):
self.id: int = match_data["Match"]
if "hasReplay" in match_data:
# we're in a full match data
match_data = cast(responses.MatchPlayerObject, match_data)
stamp = match_data["Entry_Datetime"]
queue = match_data["match_queue_id"]
score = (match_data["Team1Score"], match_data["Team2Score"])
else:
# we're in a partial (player history) match data
match_data = cast(responses.HistoryMatchObject, match_data)
stamp = match_data["Match_Time"]
queue = match_data["Match_Queue_Id"]
my_team = match_data["TaskForce"]
other_team = 1 if my_team == 2 else 2
score = (
match_data[f"Team{my_team}Score"], # type: ignore[misc]
match_data[f"Team{other_team}Score"], # type: ignore[misc]
)
self.queue = Queue(queue, _return_default=True)
self.region = Region(match_data["Region"], _return_default=True)
from .utils import _convert_timestamp, _convert_map_name, Duration # circular imports
self.timestamp: datetime = _convert_timestamp(stamp)
self.duration = Duration(seconds=match_data["Time_In_Match_Seconds"])
self.map_name: str = _convert_map_name(match_data["Map_Game"])
if self.queue.is_tdm():
# Score correction for TDM matches
score = (score[0] + 36, score[1] + 36)
self.score: Tuple[int, int] = score
self.winning_team: Literal[1, 2] = match_data["Winning_TaskForce"]
class MatchPlayerMixin(KDAMixin, CacheClient):
"""
Represents basic information about a player in a match.
Attributes
----------
player : Union[PartialPlayer, Player]
The player who participated in this match.\n
This is usually a new partial player object.\n
All attributes, Name, ID and Platform, should be present.
champion : Union[Champion, CacheObject]
The champion used by the player in this match.\n
With incomplete cache, this will be a `CacheObject` with the name and ID set.
loadout : MatchLoadout
The loadout used by the player in this match.
items : List[MatchItem]
A list of items bought by the player during this match.
credits : int
The amount of credits earned this match.
experience : int
The base amount of experience gained from this match.
kills : int
The amount of player kills.
deaths : int
The amount of deaths.
assists : int
The amount of assists.
damage_done : int
The amount of damage dealt.
damage_bot : int
The amount of damage done by the player's bot after they disconnected.
damage_taken : int
The amount of damage taken.
damage_mitigated : int
The amount of damage mitigated (shielding).
healing_done : int
The amount of healing done to other players.
healing_bot : int
The amount of healing done by the player's bot after they disconnected.
healing_self : int
The amount of healing done to self (self-sustain).
objective_time : int
The amount of objective time the player got, in seconds.
multikill_max : int
The maximum multikill player did during the match.
skin : Union[Skin, CacheObject]
The skin the player had equipped for this match.\n
With incomplete cache, this will be a `CacheObject` with the name and ID set.
team_number : Literal[1, 2]
The team this player belongs to.
team_score : int
The score of the player's team.
winner : bool
`True` if the player won this match, `False` otherwise.
"""
def __init__(
self,
player: Union[Player, PartialPlayer],
cache_entry: Optional[CacheEntry],
match_data: Union[responses.MatchPlayerObject, responses.HistoryMatchObject],
):
CacheClient.__init__(self, player._api)
if "hasReplay" in match_data:
# we're in a full match data
match_data = cast(responses.MatchPlayerObject, match_data)
creds = match_data["Gold_Earned"]
kills = match_data["Kills_Player"]
damage = match_data["Damage_Player"]
champion_name = match_data["Reference_Name"]
else:
# we're in a partial (player history) match data
match_data = cast(responses.HistoryMatchObject, match_data)
creds = match_data["Gold"]
kills = match_data["Kills"]
damage = match_data["Damage"]
champion_name = match_data["Champion"]
KDAMixin.__init__(
self, kills=kills, deaths=match_data["Deaths"], assists=match_data["Assists"]
)
# Champion
champion_id = match_data["ChampionId"]
champion: Optional[Union[Champion, CacheObject]] = None
if cache_entry is not None:
champion = cache_entry.champions.get(champion_id)
if champion is None:
champion = CacheObject(id=champion_id, name=champion_name)
self.champion: Union[Champion, CacheObject] = champion
# Skin
skin_id = match_data["SkinId"]
skin: Optional[Union[Skin, CacheObject]] = None
if cache_entry is not None:
skin = cache_entry.skins.get(skin_id)
if skin is None: # pragma: no cover
skin = CacheObject(id=skin_id, name=match_data["Skin"])
self.skin: Union[Skin, CacheObject] = skin
# Other
self.player: Union[Player, PartialPlayer] = player
self.credits: int = creds
self.damage_done: int = damage
self.damage_bot: int = match_data["Damage_Bot"]
self.damage_taken: int = match_data["Damage_Taken"]
self.damage_mitigated: int = match_data["Damage_Mitigated"]
self.healing_done: int = match_data["Healing"]
self.healing_bot: int = match_data["Healing_Bot"]
self.healing_self: int = match_data["Healing_Player_Self"]
self.objective_time: int = match_data["Objective_Assists"]
self.multikill_max: int = match_data["Multi_kill_Max"]
self.team_number: Literal[1, 2] = match_data["TaskForce"]
self.team_score: int = match_data[f"Team{self.team_number}Score"] # type: ignore[misc]
self.winner: bool = self.team_number == match_data["Winning_TaskForce"]
seconds: int = match_data["Time_In_Match_Seconds"]
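        # Base experience scales with match length (roughly 275 XP per 6 seconds),
        # plus a flat 15000 XP bonus for winning.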
self.experience: int = floor(seconds * (275/6) + (15000 if self.winner else 0))
from .items import MatchLoadout, MatchItem # cyclic imports
self.items: List[MatchItem] = []
for i in range(1, 5):
item_id = match_data[f"ActiveId{i}"] # type: ignore[misc]
if not item_id:
continue
item: Optional[Union[Device, CacheObject]] = None
if cache_entry is not None:
item = cache_entry.items.get(item_id)
if item is None:
if "hasReplay" in match_data:
# we're in a full match data
item_name = match_data[f"Item_Active_{i}"] # type: ignore[misc]
else:
# we're in a partial (player history) match data
item_name = match_data[f"Active_{i}"] # type: ignore[misc]
item = CacheObject(id=item_id, name=item_name)
if "hasReplay" in match_data:
# we're in a full match data
level = match_data[f"ActiveLevel{i}"] + 1 # type: ignore[misc]
else:
# we're in a partial (player history) match data
level = match_data[f"ActiveLevel{i}"] // 4 + 1 # type: ignore[misc]
self.items.append(MatchItem(item, level))
self.loadout = MatchLoadout(cache_entry, match_data)
@property
def shielding(self) -> int:
"""
This is an alias for the `damage_mitigated` attribute.
:type: int
"""
return self.damage_mitigated | PypiClean |
/flask_more-0.2.1.tar.gz/flask_more-0.2.1/docs/api.md | # @api
Flask-More does most of its work through the `@api` decorator, which leaves your existing route views untouched. The decorator adds validation of request data, parses the request body automatically, and lets you describe each API endpoint in more detail.
## Validation
```python
from flask import Flask
from flask_more import More, api
from pydantic import BaseModel
from models import User
app = Flask(__name__)
More(app)
class UserSchema(BaseModel):
name: str
age: int
@app.post('/users')
@api
def add_user(user: UserSchema):
new_user = user.dict()
    User.create(**new_user)
return new_user
```
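With the example above, request bodies are parsed and validated against `UserSchema` before `add_user` runs. The snippet below is only an illustrative client-side check; the host, port, and the exact error response returned for invalid data are assumptions that depend on your setup and Flask-More version.
```python
import requests  # illustrative client, not part of Flask-More

# Valid body: passes UserSchema validation and reaches add_user().
ok = requests.post("http://localhost:5000/users", json={"name": "Ada", "age": 36})
print(ok.status_code, ok.json())

# Invalid body: "age" is not an int, so validation fails before the view runs
# (the exact status code and error payload depend on the Flask-More version).
bad = requests.post("http://localhost:5000/users", json={"name": "Ada", "age": "old"})
print(bad.status_code)
```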
## OpenAPI
```python
from flask import Flask
from flask_more import More, api
from pydantic import BaseModel
from models import User
app = Flask(__name__)
More(app)
class UserSchema(BaseModel):
name: str
age: int
@app.get('/users')
@api(
tags=["users"],
summary="get all users",
description="get all or query users",
)
def get_users(start: int = 0, limit: int = 10):
pass
@app.get('/others')
@api(tags=["others"])
def others():
pass
```
| PypiClean |
/smartpip-1.11.6.tar.gz/smartpip-1.11.6/airflow/hooks/http_hook.py |
from builtins import str
import requests
import tenacity
from airflow.hooks.base_hook import BaseHook
from airflow.exceptions import AirflowException
class HttpHook(BaseHook):
"""
Interact with HTTP servers.
:param http_conn_id: connection that has the base API url i.e https://www.google.com/
and optional authentication credentials. Default headers can also be specified in
the Extra field in json format.
:type http_conn_id: str
:param method: the API method to be called
:type method: str
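    Example (endpoint and payload below are illustrative): ::
        hook = HttpHook(method='GET', http_conn_id='http_default')
        response = hook.run('api/v1/resource', data={'q': 'value'})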
"""
def __init__(
self,
method='POST',
http_conn_id='http_default'
):
self.http_conn_id = http_conn_id
self.method = method
self.base_url = None
self._retry_obj = None
# headers may be passed through directly or in the "extra" field in the connection
# definition
def get_conn(self, headers=None):
"""
Returns http session for use with requests
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
"""
conn = self.get_connection(self.http_conn_id)
session = requests.Session()
if "://" in conn.host:
self.base_url = conn.host
else:
# schema defaults to HTTP
schema = conn.schema if conn.schema else "http"
self.base_url = schema + "://" + conn.host
if conn.port:
self.base_url = self.base_url + ":" + str(conn.port)
if conn.login:
session.auth = (conn.login, conn.password)
if conn.extra:
try:
session.headers.update(conn.extra_dejson)
except TypeError:
self.log.warn('Connection to {} has invalid extra field.'.format(
conn.host))
if headers:
session.headers.update(headers)
return session
def run(self, endpoint, data=None, headers=None, extra_options=None):
"""
Performs the request
:param endpoint: the endpoint to be called i.e. resource/v1/query?
:type endpoint: str
:param data: payload to be uploaded or request parameters
:type data: dict
:param headers: additional headers to be passed through as a dictionary
:type headers: dict
:param extra_options: additional options to be used when executing the request
i.e. {'check_response': False} to avoid checking raising exceptions on non
2XX or 3XX status codes
:type extra_options: dict
"""
extra_options = extra_options or {}
session = self.get_conn(headers)
if not self.base_url.endswith('/') and not endpoint.startswith('/'):
url = self.base_url + '/' + endpoint
else:
url = self.base_url + endpoint
req = None
if self.method == 'GET':
# GET uses params
req = requests.Request(self.method,
url,
params=data,
headers=headers)
elif self.method == 'HEAD':
# HEAD doesn't use params
req = requests.Request(self.method,
url,
headers=headers)
else:
# Others use data
req = requests.Request(self.method,
url,
data=data,
headers=headers)
prepped_request = session.prepare_request(req)
self.log.info("Sending '%s' to url: %s", self.method, url)
return self.run_and_check(session, prepped_request, extra_options)
def check_response(self, response):
"""
Checks the status code and raise an AirflowException exception on non 2XX or 3XX
status codes
:param response: A requests response object
:type response: requests.response
"""
try:
response.raise_for_status()
except requests.exceptions.HTTPError:
self.log.error("HTTP error: %s", response.reason)
if self.method not in ['GET', 'HEAD']:
self.log.error(response.text)
raise AirflowException(str(response.status_code) + ":" + response.reason)
def run_and_check(self, session, prepped_request, extra_options):
"""
Grabs extra options like timeout and actually runs the request,
checking for the result
:param session: the session to be used to execute the request
:type session: requests.Session
:param prepped_request: the prepared request generated in run()
:type prepped_request: session.prepare_request
:param extra_options: additional options to be used when executing the request
i.e. {'check_response': False} to avoid checking raising exceptions on non 2XX
or 3XX status codes
:type extra_options: dict
"""
extra_options = extra_options or {}
try:
response = session.send(
prepped_request,
stream=extra_options.get("stream", False),
verify=extra_options.get("verify", False),
proxies=extra_options.get("proxies", {}),
cert=extra_options.get("cert"),
timeout=extra_options.get("timeout"),
allow_redirects=extra_options.get("allow_redirects", True))
if extra_options.get('check_response', True):
self.check_response(response)
return response
except requests.exceptions.ConnectionError as ex:
self.log.warn(str(ex) + ' Tenacity will retry to execute the operation')
raise ex
def run_with_advanced_retry(self, _retry_args, *args, **kwargs):
"""
Runs Hook.run() with a Tenacity decorator attached to it. This is useful for
connectors which might be disturbed by intermittent issues and should not
instantly fail.
:param _retry_args: Arguments which define the retry behaviour.
See Tenacity documentation at https://github.com/jd/tenacity
:type _retry_args: dict
Example: ::
hook = HttpHook(http_conn_id='my_conn',method='GET')
retry_args = dict(
wait=tenacity.wait_exponential(),
stop=tenacity.stop_after_attempt(10),
retry=requests.exceptions.ConnectionError
)
hook.run_with_advanced_retry(
endpoint='v1/test',
_retry_args=retry_args
)
"""
self._retry_obj = tenacity.Retrying(
**_retry_args
)
self._retry_obj(self.run, *args, **kwargs) | PypiClean |
/alipay_sdk_python-3.6.740-py3-none-any.whl/alipay/aop/api/domain/TaskDetailResponse.py | import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.PrizeResponse import PrizeResponse
class TaskDetailResponse(object):
def __init__(self):
self._creator = None
self._creator_open_id = None
self._merchant_pid = None
self._prize_response = None
self._second_merchant_appid = None
self._task_condition = None
self._task_contract_period = None
self._task_end = None
self._task_id = None
self._task_join_count = None
self._task_name = None
self._task_start = None
self._task_status = None
self._task_title = None
@property
def creator(self):
return self._creator
@creator.setter
def creator(self, value):
self._creator = value
@property
def creator_open_id(self):
return self._creator_open_id
@creator_open_id.setter
def creator_open_id(self, value):
self._creator_open_id = value
@property
def merchant_pid(self):
return self._merchant_pid
@merchant_pid.setter
def merchant_pid(self, value):
self._merchant_pid = value
@property
def prize_response(self):
return self._prize_response
@prize_response.setter
def prize_response(self, value):
if isinstance(value, PrizeResponse):
self._prize_response = value
else:
self._prize_response = PrizeResponse.from_alipay_dict(value)
@property
def second_merchant_appid(self):
return self._second_merchant_appid
@second_merchant_appid.setter
def second_merchant_appid(self, value):
self._second_merchant_appid = value
@property
def task_condition(self):
return self._task_condition
@task_condition.setter
def task_condition(self, value):
self._task_condition = value
@property
def task_contract_period(self):
return self._task_contract_period
@task_contract_period.setter
def task_contract_period(self, value):
self._task_contract_period = value
@property
def task_end(self):
return self._task_end
@task_end.setter
def task_end(self, value):
self._task_end = value
@property
def task_id(self):
return self._task_id
@task_id.setter
def task_id(self, value):
self._task_id = value
@property
def task_join_count(self):
return self._task_join_count
@task_join_count.setter
def task_join_count(self, value):
self._task_join_count = value
@property
def task_name(self):
return self._task_name
@task_name.setter
def task_name(self, value):
self._task_name = value
@property
def task_start(self):
return self._task_start
@task_start.setter
def task_start(self, value):
self._task_start = value
@property
def task_status(self):
return self._task_status
@task_status.setter
def task_status(self, value):
self._task_status = value
@property
def task_title(self):
return self._task_title
@task_title.setter
def task_title(self, value):
self._task_title = value
def to_alipay_dict(self):
params = dict()
if self.creator:
if hasattr(self.creator, 'to_alipay_dict'):
params['creator'] = self.creator.to_alipay_dict()
else:
params['creator'] = self.creator
if self.creator_open_id:
if hasattr(self.creator_open_id, 'to_alipay_dict'):
params['creator_open_id'] = self.creator_open_id.to_alipay_dict()
else:
params['creator_open_id'] = self.creator_open_id
if self.merchant_pid:
if hasattr(self.merchant_pid, 'to_alipay_dict'):
params['merchant_pid'] = self.merchant_pid.to_alipay_dict()
else:
params['merchant_pid'] = self.merchant_pid
if self.prize_response:
if hasattr(self.prize_response, 'to_alipay_dict'):
params['prize_response'] = self.prize_response.to_alipay_dict()
else:
params['prize_response'] = self.prize_response
if self.second_merchant_appid:
if hasattr(self.second_merchant_appid, 'to_alipay_dict'):
params['second_merchant_appid'] = self.second_merchant_appid.to_alipay_dict()
else:
params['second_merchant_appid'] = self.second_merchant_appid
if self.task_condition:
if hasattr(self.task_condition, 'to_alipay_dict'):
params['task_condition'] = self.task_condition.to_alipay_dict()
else:
params['task_condition'] = self.task_condition
if self.task_contract_period:
if hasattr(self.task_contract_period, 'to_alipay_dict'):
params['task_contract_period'] = self.task_contract_period.to_alipay_dict()
else:
params['task_contract_period'] = self.task_contract_period
if self.task_end:
if hasattr(self.task_end, 'to_alipay_dict'):
params['task_end'] = self.task_end.to_alipay_dict()
else:
params['task_end'] = self.task_end
if self.task_id:
if hasattr(self.task_id, 'to_alipay_dict'):
params['task_id'] = self.task_id.to_alipay_dict()
else:
params['task_id'] = self.task_id
if self.task_join_count:
if hasattr(self.task_join_count, 'to_alipay_dict'):
params['task_join_count'] = self.task_join_count.to_alipay_dict()
else:
params['task_join_count'] = self.task_join_count
if self.task_name:
if hasattr(self.task_name, 'to_alipay_dict'):
params['task_name'] = self.task_name.to_alipay_dict()
else:
params['task_name'] = self.task_name
if self.task_start:
if hasattr(self.task_start, 'to_alipay_dict'):
params['task_start'] = self.task_start.to_alipay_dict()
else:
params['task_start'] = self.task_start
if self.task_status:
if hasattr(self.task_status, 'to_alipay_dict'):
params['task_status'] = self.task_status.to_alipay_dict()
else:
params['task_status'] = self.task_status
if self.task_title:
if hasattr(self.task_title, 'to_alipay_dict'):
params['task_title'] = self.task_title.to_alipay_dict()
else:
params['task_title'] = self.task_title
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = TaskDetailResponse()
if 'creator' in d:
o.creator = d['creator']
if 'creator_open_id' in d:
o.creator_open_id = d['creator_open_id']
if 'merchant_pid' in d:
o.merchant_pid = d['merchant_pid']
if 'prize_response' in d:
o.prize_response = d['prize_response']
if 'second_merchant_appid' in d:
o.second_merchant_appid = d['second_merchant_appid']
if 'task_condition' in d:
o.task_condition = d['task_condition']
if 'task_contract_period' in d:
o.task_contract_period = d['task_contract_period']
if 'task_end' in d:
o.task_end = d['task_end']
if 'task_id' in d:
o.task_id = d['task_id']
if 'task_join_count' in d:
o.task_join_count = d['task_join_count']
if 'task_name' in d:
o.task_name = d['task_name']
if 'task_start' in d:
o.task_start = d['task_start']
if 'task_status' in d:
o.task_status = d['task_status']
if 'task_title' in d:
o.task_title = d['task_title']
return o | PypiClean |
/instauto2-1.0.0.tar.gz/instauto2-1.0.0/instauto/api/actions/profile.py | from requests import Session, Response
from typing import Callable, Union, Dict
from instauto.api.actions.stubs import _request
from ..structs import IGProfile, State, DeviceProfile, Method
from .structs.profile import SetGender, SetBiography, Update, Info
class ProfileMixin:
"""Handles everything related to updating an Instagram profile."""
_session: Session
ig_profile: IGProfile
state: State
device_profile: DeviceProfile
_request: _request
_gen_uuid: Callable
_generate_user_breadcrumb: Callable
def _profile_act(self, obj: Union[Update, SetBiography, SetGender]) -> Response:
# retrieve the existing data for all profile data fields
current_data = self._request('accounts/current_user/', Method.GET, query={'edit': 'true'}).json()
# ensure we don't overwrite existing data to nothing
if obj.phone_number is None: obj.phone_number = current_data['user']['phone_number']
if obj.first_name is None: obj.first_name = current_data['user']['full_name']
if obj.external_url is None: obj.external_url = current_data['user']['external_url']
if obj.email is None: obj.email = current_data['user']['email']
if obj.biography is None: obj.biography = current_data['user']['biography']
if obj.username is None: obj.username = current_data['user']['trusted_username']
endpoint = 'accounts/edit_profile/'
return self._request(endpoint, Method.POST, data=obj.to_dict(), signed=True)
def profile_set_biography(self, obj: SetBiography) -> Response:
"""Sets the biography of the currently logged in user"""
return self._request('accounts/set_biography/', Method.POST, data=obj.to_dict())
def profile_set_gender(self, obj: SetGender) -> Response:
"""Sets the gender of the currently logged in user"""
return self._request('accounts/set_gender/', Method.POST, data=obj.to_dict(), signed=False)
def profile_update(self, obj: Update):
"""Updates the name, username, email, phone number and url for the currently logged in user."""
self._profile_act(obj)
def profile_info(self, obj: Info) -> Union[Dict, int]:
if obj.user_id is None:
obj.user_id = self.state.user_id
data = self._request(f'users/{obj.user_id}/info/', Method.GET).json()
if data['status'] == 'ok':
return data['user']
return data['status'] | PypiClean |
/DI_engine-0.4.9-py3-none-any.whl/ding/rl_utils/td.py | import copy
import numpy as np
from collections import namedtuple
from typing import Union, Optional, Callable
import torch
import torch.nn as nn
import torch.nn.functional as F
from ding.hpc_rl import hpc_wrapper
from ding.rl_utils.value_rescale import value_transform, value_inv_transform
from ding.torch_utils import to_tensor
q_1step_td_data = namedtuple('q_1step_td_data', ['q', 'next_q', 'act', 'next_act', 'reward', 'done', 'weight'])
def discount_cumsum(x, gamma: float = 1.0) -> np.ndarray:
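    """
    Overview:
        Compute the discounted cumulative sum (returns-to-go) of a 1D array:
        ``y[t] = x[t] + gamma * y[t + 1]``, with ``y[-1] = x[-1]``.
    """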
    assert abs(gamma - 1.) < 1e-5, "gamma should equal 1.0, as in the original Decision Transformer paper"
disc_cumsum = np.zeros_like(x)
disc_cumsum[-1] = x[-1]
for t in reversed(range(x.shape[0] - 1)):
disc_cumsum[t] = x[t] + gamma * disc_cumsum[t + 1]
return disc_cumsum
def q_1step_td_error(
data: namedtuple,
gamma: float,
criterion: torch.nn.modules = nn.MSELoss(reduction='none') # noqa
) -> torch.Tensor:
q, next_q, act, next_act, reward, done, weight = data
assert len(act.shape) == 1, act.shape
assert len(reward.shape) == 1, reward.shape
batch_range = torch.arange(act.shape[0])
if weight is None:
weight = torch.ones_like(reward)
q_s_a = q[batch_range, act]
target_q_s_a = next_q[batch_range, next_act]
target_q_s_a = gamma * (1 - done) * target_q_s_a + reward
return (criterion(q_s_a, target_q_s_a.detach()) * weight).mean()
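# --- Illustrative usage sketch (added for clarity; not part of the original
# module). The helper name and all sizes below are arbitrary assumptions; it
# simply builds random tensors with the shapes documented above and evaluates
# the 1-step TD loss.
def _example_q_1step_td_error():
    batch_size, action_dim = 4, 3
    data = q_1step_td_data(
        q=torch.randn(batch_size, action_dim),
        next_q=torch.randn(batch_size, action_dim),
        act=torch.randint(0, action_dim, (batch_size, )),
        next_act=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(batch_size),
        done=torch.zeros(batch_size),
        weight=None,
    )
    # returns a 0-dim loss tensor
    return q_1step_td_error(data, gamma=0.99)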
m_q_1step_td_data = namedtuple('m_q_1step_td_data', ['q', 'target_q', 'next_q', 'act', 'reward', 'done', 'weight'])
def m_q_1step_td_error(
data: namedtuple,
gamma: float,
tau: float,
alpha: float,
criterion: torch.nn.modules = nn.MSELoss(reduction='none') # noqa
) -> torch.Tensor:
"""
Overview:
Munchausen td_error for DQN algorithm, support 1 step td error.
Arguments:
- data (:obj:`m_q_1step_td_data`): The input data, m_q_1step_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- tau (:obj:`float`): Entropy factor for Munchausen DQN
- alpha (:obj:`float`): Discount factor for Munchausen term
- criterion (:obj:`torch.nn.modules`): Loss function criterion
Returns:
- loss (:obj:`torch.Tensor`): 1step td error, 0-dim tensor
Shapes:
- data (:obj:`m_q_1step_td_data`): the m_q_1step_td_data containing\
['q', 'target_q', 'next_q', 'act', 'reward', 'done', 'weight']
- q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- target_q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- next_q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- act (:obj:`torch.LongTensor`): :math:`(B, )`
        - reward (:obj:`torch.FloatTensor`): :math:`(B, )`
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- weight (:obj:`torch.FloatTensor` or None): :math:`(B, )`, the training sample weight
"""
q, target_q, next_q, act, reward, done, weight = data
lower_bound = -1
assert len(act.shape) == 1, act.shape
assert len(reward.shape) == 1, reward.shape
batch_range = torch.arange(act.shape[0])
if weight is None:
weight = torch.ones_like(reward)
q_s_a = q[batch_range, act]
    # calculate munchausen addon
# replay_log_policy
target_v_s = target_q[batch_range].max(1)[0].unsqueeze(-1)
logsum = torch.logsumexp((target_q - target_v_s) / tau, 1).unsqueeze(-1)
log_pi = target_q - target_v_s - tau * logsum
act_get = act.unsqueeze(-1)
# same to the last second tau_log_pi_a
munchausen_addon = log_pi.gather(1, act_get)
muchausen_term = alpha * torch.clamp(munchausen_addon, min=lower_bound, max=1)
# replay_next_log_policy
target_v_s_next = next_q[batch_range].max(1)[0].unsqueeze(-1)
logsum_next = torch.logsumexp((next_q - target_v_s_next) / tau, 1).unsqueeze(-1)
tau_log_pi_next = next_q - target_v_s_next - tau * logsum_next
# do stable softmax == replay_next_policy
    pi_target = F.softmax((next_q - target_v_s_next) / tau, dim=1)
target_q_s_a = (gamma * (pi_target * (next_q - tau_log_pi_next) * (1 - done.unsqueeze(-1))).sum(1)).unsqueeze(-1)
target_q_s_a = reward.unsqueeze(-1) + muchausen_term + target_q_s_a
td_error_per_sample = criterion(q_s_a.unsqueeze(-1), target_q_s_a.detach()).squeeze(-1)
# calculate action_gap and clipfrac
with torch.no_grad():
top2_q_s = target_q[batch_range].topk(2, dim=1, largest=True, sorted=True)[0]
action_gap = (top2_q_s[:, 0] - top2_q_s[:, 1]).mean()
clipped = munchausen_addon.gt(1) | munchausen_addon.lt(lower_bound)
clipfrac = torch.as_tensor(clipped).float()
return (td_error_per_sample * weight).mean(), td_error_per_sample, action_gap, clipfrac
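# --- Illustrative usage sketch (added; not part of the original module). The
# tau and alpha values are arbitrary assumptions, not recommended
# hyper-parameters.
def _example_m_q_1step_td_error():
    batch_size, action_dim = 4, 3
    data = m_q_1step_td_data(
        q=torch.randn(batch_size, action_dim),
        target_q=torch.randn(batch_size, action_dim),
        next_q=torch.randn(batch_size, action_dim),
        act=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(batch_size),
        done=torch.zeros(batch_size),
        weight=None,
    )
    loss, td_error_per_sample, action_gap, clipfrac = m_q_1step_td_error(
        data, gamma=0.99, tau=0.03, alpha=0.9
    )
    return loss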
q_v_1step_td_data = namedtuple('q_v_1step_td_data', ['q', 'v', 'act', 'reward', 'done', 'weight'])
def q_v_1step_td_error(
data: namedtuple, gamma: float, criterion: torch.nn.modules = nn.MSELoss(reduction='none')
) -> torch.Tensor:
# we will use this function in discrete sac algorithm to calculate td error between q and v value.
"""
Overview:
td_error between q and v value for SAC algorithm, support 1 step td error.
Arguments:
- data (:obj:`q_v_1step_td_data`): The input data, q_v_1step_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- criterion (:obj:`torch.nn.modules`): Loss function criterion
Returns:
- loss (:obj:`torch.Tensor`): 1step td error, 0-dim tensor
Shapes:
- data (:obj:`q_v_1step_td_data`): the q_v_1step_td_data containing\
['q', 'v', 'act', 'reward', 'done', 'weight']
- q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- v (:obj:`torch.FloatTensor`): :math:`(B, )`
- act (:obj:`torch.LongTensor`): :math:`(B, )`
        - reward (:obj:`torch.FloatTensor`): :math:`(B, )`
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- weight (:obj:`torch.FloatTensor` or None): :math:`(B, )`, the training sample weight
"""
q, v, act, reward, done, weight = data
if len(act.shape) == 1:
assert len(reward.shape) == 1, reward.shape
batch_range = torch.arange(act.shape[0])
if weight is None:
weight = torch.ones_like(reward)
q_s_a = q[batch_range, act]
target_q_s_a = gamma * (1 - done) * v + reward
else:
assert len(reward.shape) == 1, reward.shape
batch_range = torch.arange(act.shape[0])
actor_range = torch.arange(act.shape[1])
batch_actor_range = torch.arange(act.shape[0] * act.shape[1])
if weight is None:
weight = torch.ones_like(act)
temp_q = q.reshape(act.shape[0] * act.shape[1], -1)
temp_act = act.reshape(act.shape[0] * act.shape[1])
q_s_a = temp_q[batch_actor_range, temp_act]
q_s_a = q_s_a.reshape(act.shape[0], act.shape[1])
target_q_s_a = gamma * (1 - done).unsqueeze(1) * v + reward.unsqueeze(1)
td_error_per_sample = criterion(q_s_a, target_q_s_a.detach())
return (td_error_per_sample * weight).mean(), td_error_per_sample
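# --- Illustrative usage sketch (added; not part of the original module),
# showing the single-agent branch of the discrete-SAC style Q-vs-V TD error.
# All sizes are arbitrary assumptions.
def _example_q_v_1step_td_error():
    batch_size, action_dim = 4, 3
    data = q_v_1step_td_data(
        q=torch.randn(batch_size, action_dim),
        v=torch.randn(batch_size),
        act=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(batch_size),
        done=torch.zeros(batch_size),
        weight=None,
    )
    loss, td_error_per_sample = q_v_1step_td_error(data, gamma=0.99)
    return loss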
def view_similar(x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
size = list(x.shape) + [1 for _ in range(len(target.shape) - len(x.shape))]
return x.view(*size)
nstep_return_data = namedtuple('nstep_return_data', ['reward', 'next_value', 'done'])
def nstep_return(data: namedtuple, gamma: Union[float, list], nstep: int, value_gamma: Optional[torch.Tensor] = None):
reward, next_value, done = data
assert reward.shape[0] == nstep
device = reward.device
if isinstance(gamma, float):
reward_factor = torch.ones(nstep).to(device)
for i in range(1, nstep):
reward_factor[i] = gamma * reward_factor[i - 1]
reward_factor = view_similar(reward_factor, reward)
return_tmp = reward.mul(reward_factor).sum(0)
if value_gamma is None:
return_ = return_tmp + (gamma ** nstep) * next_value * (1 - done)
else:
return_ = return_tmp + value_gamma * next_value * (1 - done)
elif isinstance(gamma, list):
# if gamma is list, for NGU policy case
reward_factor = torch.ones([nstep + 1, done.shape[0]]).to(device)
for i in range(1, nstep + 1):
reward_factor[i] = torch.stack(gamma, dim=0).to(device) * reward_factor[i - 1]
reward_factor = view_similar(reward_factor, reward)
return_tmp = reward.mul(reward_factor[:nstep]).sum(0)
return_ = return_tmp + reward_factor[nstep] * next_value * (1 - done)
else:
raise TypeError("The type of gamma should be float or list")
return return_
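# --- Illustrative usage sketch (added; not part of the original module): a
# 3-step return for a batch of 4 transitions with a scalar gamma. reward is
# (nstep, B); next_value and done are (B, ). Sizes are arbitrary assumptions.
def _example_nstep_return():
    nstep, batch_size = 3, 4
    data = nstep_return_data(
        reward=torch.randn(nstep, batch_size),
        next_value=torch.randn(batch_size),
        done=torch.zeros(batch_size),
    )
    # returns a (batch_size, ) tensor of n-step returns
    return nstep_return(data, gamma=0.99, nstep=nstep)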
dist_1step_td_data = namedtuple(
'dist_1step_td_data', ['dist', 'next_dist', 'act', 'next_act', 'reward', 'done', 'weight']
)
def dist_1step_td_error(
data: namedtuple,
gamma: float,
v_min: float,
v_max: float,
n_atom: int,
) -> torch.Tensor:
"""
Overview:
        1 step td_error for distributional q-learning based algorithm
Arguments:
- data (:obj:`dist_1step_td_data`): The input data, dist_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
Shapes:
- data (:obj:`dist_1step_td_data`): the dist_1step_td_data containing\
            ['dist', 'next_dist', 'act', 'next_act', 'reward', 'done', 'weight']
- dist (:obj:`torch.FloatTensor`): :math:`(B, N, n_atom)` i.e. [batch_size, action_dim, n_atom]
- next_dist (:obj:`torch.FloatTensor`): :math:`(B, N, n_atom)`
- act (:obj:`torch.LongTensor`): :math:`(B, )`
- next_act (:obj:`torch.LongTensor`): :math:`(B, )`
        - reward (:obj:`torch.FloatTensor`): :math:`(B, )`
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- weight (:obj:`torch.FloatTensor` or None): :math:`(B, )`, the training sample weight
"""
dist, next_dist, act, next_act, reward, done, weight = data
device = reward.device
assert len(reward.shape) == 1, reward.shape
support = torch.linspace(v_min, v_max, n_atom).to(device)
delta_z = (v_max - v_min) / (n_atom - 1)
if len(act.shape) == 1:
reward = reward.unsqueeze(-1)
done = done.unsqueeze(-1)
batch_size = act.shape[0]
batch_range = torch.arange(batch_size)
if weight is None:
weight = torch.ones_like(reward)
next_dist = next_dist[batch_range, next_act].detach()
else:
reward = reward.unsqueeze(-1).repeat(1, act.shape[1])
done = done.unsqueeze(-1).repeat(1, act.shape[1])
batch_size = act.shape[0] * act.shape[1]
batch_range = torch.arange(act.shape[0] * act.shape[1])
action_dim = dist.shape[2]
dist = dist.reshape(act.shape[0] * act.shape[1], action_dim, -1)
reward = reward.reshape(act.shape[0] * act.shape[1], -1)
done = done.reshape(act.shape[0] * act.shape[1], -1)
next_dist = next_dist.reshape(act.shape[0] * act.shape[1], action_dim, -1)
next_act = next_act.reshape(act.shape[0] * act.shape[1])
next_dist = next_dist[batch_range, next_act].detach()
next_dist = next_dist.reshape(act.shape[0] * act.shape[1], -1)
act = act.reshape(act.shape[0] * act.shape[1])
if weight is None:
weight = torch.ones_like(reward)
target_z = reward + (1 - done) * gamma * support
target_z = target_z.clamp(min=v_min, max=v_max)
b = (target_z - v_min) / delta_z
l = b.floor().long()
u = b.ceil().long()
# Fix disappearing probability mass when l = b = u (b is int)
l[(u > 0) * (l == u)] -= 1
u[(l < (n_atom - 1)) * (l == u)] += 1
proj_dist = torch.zeros_like(next_dist)
offset = torch.linspace(0, (batch_size - 1) * n_atom, batch_size).unsqueeze(1).expand(batch_size,
n_atom).long().to(device)
proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_dist * (u.float() - b)).view(-1))
proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_dist * (b - l.float())).view(-1))
log_p = torch.log(dist[batch_range, act])
loss = -(log_p * proj_dist * weight).sum(-1).mean()
return loss
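# --- Illustrative usage sketch (added; not part of the original module): a
# categorical (C51-style) 1-step loss. The softmax turns random logits into
# valid probability vectors; v_min/v_max/n_atom are arbitrary assumptions.
def _example_dist_1step_td_error():
    batch_size, action_dim, n_atom = 4, 3, 51
    data = dist_1step_td_data(
        dist=torch.softmax(torch.randn(batch_size, action_dim, n_atom), dim=-1),
        next_dist=torch.softmax(torch.randn(batch_size, action_dim, n_atom), dim=-1),
        act=torch.randint(0, action_dim, (batch_size, )),
        next_act=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(batch_size),
        done=torch.zeros(batch_size),
        weight=None,
    )
    return dist_1step_td_error(data, gamma=0.99, v_min=-10., v_max=10., n_atom=n_atom)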
dist_nstep_td_data = namedtuple(
    'dist_nstep_td_data', ['dist', 'next_n_dist', 'act', 'next_n_act', 'reward', 'done', 'weight']
)
def shape_fn_dntd(args, kwargs):
r"""
Overview:
Return dntd shape for hpc
Returns:
shape: [T, B, N, n_atom]
"""
if len(args) <= 0:
tmp = [kwargs['data'].reward.shape[0]]
tmp.extend(list(kwargs['data'].dist.shape))
else:
tmp = [args[0].reward.shape[0]]
tmp.extend(list(args[0].dist.shape))
return tmp
@hpc_wrapper(
shape_fn=shape_fn_dntd,
namedtuple_data=True,
include_args=[0, 1, 2, 3],
include_kwargs=['data', 'gamma', 'v_min', 'v_max']
)
def dist_nstep_td_error(
data: namedtuple,
gamma: float,
v_min: float,
v_max: float,
n_atom: int,
nstep: int = 1,
value_gamma: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Overview:
        Multistep (1 step or n step) td_error for distributional q-learning based algorithm, support single\
agent case and multi agent case.
Arguments:
- data (:obj:`dist_nstep_td_data`): The input data, dist_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- nstep (:obj:`int`): nstep num, default set to 1
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
Shapes:
- data (:obj:`dist_nstep_td_data`): the dist_nstep_td_data containing\
            ['dist', 'next_n_dist', 'act', 'next_n_act', 'reward', 'done', 'weight']
- dist (:obj:`torch.FloatTensor`): :math:`(B, N, n_atom)` i.e. [batch_size, action_dim, n_atom]
- next_n_dist (:obj:`torch.FloatTensor`): :math:`(B, N, n_atom)`
- act (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_act (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
"""
dist, next_n_dist, act, next_n_act, reward, done, weight = data
device = reward.device
reward_factor = torch.ones(nstep).to(device)
for i in range(1, nstep):
reward_factor[i] = gamma * reward_factor[i - 1]
reward = torch.matmul(reward_factor, reward)
support = torch.linspace(v_min, v_max, n_atom).to(device)
delta_z = (v_max - v_min) / (n_atom - 1)
if len(act.shape) == 1:
reward = reward.unsqueeze(-1)
done = done.unsqueeze(-1)
batch_size = act.shape[0]
batch_range = torch.arange(batch_size)
if weight is None:
weight = torch.ones_like(reward)
elif isinstance(weight, float):
weight = torch.tensor(weight)
next_n_dist = next_n_dist[batch_range, next_n_act].detach()
else:
reward = reward.unsqueeze(-1).repeat(1, act.shape[1])
done = done.unsqueeze(-1).repeat(1, act.shape[1])
batch_size = act.shape[0] * act.shape[1]
batch_range = torch.arange(act.shape[0] * act.shape[1])
action_dim = dist.shape[2]
dist = dist.reshape(act.shape[0] * act.shape[1], action_dim, -1)
reward = reward.reshape(act.shape[0] * act.shape[1], -1)
done = done.reshape(act.shape[0] * act.shape[1], -1)
next_n_dist = next_n_dist.reshape(act.shape[0] * act.shape[1], action_dim, -1)
next_n_act = next_n_act.reshape(act.shape[0] * act.shape[1])
next_n_dist = next_n_dist[batch_range, next_n_act].detach()
next_n_dist = next_n_dist.reshape(act.shape[0] * act.shape[1], -1)
act = act.reshape(act.shape[0] * act.shape[1])
if weight is None:
weight = torch.ones_like(reward)
elif isinstance(weight, float):
weight = torch.tensor(weight)
if value_gamma is None:
target_z = reward + (1 - done) * (gamma ** nstep) * support
elif isinstance(value_gamma, float):
value_gamma = torch.tensor(value_gamma).unsqueeze(-1)
target_z = reward + (1 - done) * value_gamma * support
else:
value_gamma = value_gamma.unsqueeze(-1)
target_z = reward + (1 - done) * value_gamma * support
target_z = target_z.clamp(min=v_min, max=v_max)
b = (target_z - v_min) / delta_z
l = b.floor().long()
u = b.ceil().long()
# Fix disappearing probability mass when l = b = u (b is int)
l[(u > 0) * (l == u)] -= 1
u[(l < (n_atom - 1)) * (l == u)] += 1
proj_dist = torch.zeros_like(next_n_dist)
offset = torch.linspace(0, (batch_size - 1) * n_atom, batch_size).unsqueeze(1).expand(batch_size,
n_atom).long().to(device)
proj_dist.view(-1).index_add_(0, (l + offset).view(-1), (next_n_dist * (u.float() - b)).view(-1))
proj_dist.view(-1).index_add_(0, (u + offset).view(-1), (next_n_dist * (b - l.float())).view(-1))
assert (dist[batch_range, act] > 0.0).all(), ("dist act", dist[batch_range, act], "dist:", dist)
log_p = torch.log(dist[batch_range, act])
if len(weight.shape) == 1:
weight = weight.unsqueeze(-1)
td_error_per_sample = -(log_p * proj_dist).sum(-1)
loss = -(log_p * proj_dist * weight).sum(-1).mean()
return loss, td_error_per_sample
v_1step_td_data = namedtuple('v_1step_td_data', ['v', 'next_v', 'reward', 'done', 'weight'])
def v_1step_td_error(
data: namedtuple,
gamma: float,
criterion: torch.nn.modules = nn.MSELoss(reduction='none') # noqa
) -> torch.Tensor:
v, next_v, reward, done, weight = data
if weight is None:
weight = torch.ones_like(v)
if len(v.shape) == len(reward.shape):
if done is not None:
target_v = gamma * (1 - done) * next_v + reward
else:
target_v = gamma * next_v + reward
else:
if done is not None:
target_v = gamma * (1 - done).unsqueeze(1) * next_v + reward.unsqueeze(1)
else:
target_v = gamma * next_v + reward.unsqueeze(1)
td_error_per_sample = criterion(v, target_v.detach())
return (td_error_per_sample * weight).mean(), td_error_per_sample
v_nstep_td_data = namedtuple('v_nstep_td_data', ['v', 'next_n_v', 'reward', 'done', 'weight', 'value_gamma'])
def v_nstep_td_error(
data: namedtuple,
gamma: float,
nstep: int = 1,
criterion: torch.nn.modules = nn.MSELoss(reduction='none') # noqa
) -> torch.Tensor:
r"""
Overview:
Multistep (n step) td_error for distributed value based algorithm
Arguments:
        - data (:obj:`v_nstep_td_data`): The input data, v_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- nstep (:obj:`int`): nstep num, default set to 1
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
Shapes:
        - data (:obj:`v_nstep_td_data`): The v_nstep_td_data containing\
['v', 'next_n_v', 'reward', 'done', 'weight', 'value_gamma']
- v (:obj:`torch.FloatTensor`): :math:`(B, )` i.e. [batch_size, ]
        - next_n_v (:obj:`torch.FloatTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- weight (:obj:`torch.FloatTensor` or None): :math:`(B, )`, the training sample weight
- value_gamma (:obj:`torch.Tensor`): If the remaining data in the buffer is less than n_step\
we use value_gamma as the gamma discount value for next_v rather than gamma**n_step
"""
v, next_n_v, reward, done, weight, value_gamma = data
if weight is None:
weight = torch.ones_like(v)
target_v = nstep_return(nstep_return_data(reward, next_n_v, done), gamma, nstep, value_gamma)
td_error_per_sample = criterion(v, target_v.detach())
return (td_error_per_sample * weight).mean(), td_error_per_sample
q_nstep_td_data = namedtuple(
'q_nstep_td_data', ['q', 'next_n_q', 'action', 'next_n_action', 'reward', 'done', 'weight']
)
dqfd_nstep_td_data = namedtuple(
'dqfd_nstep_td_data', [
'q', 'next_n_q', 'action', 'next_n_action', 'reward', 'done', 'done_one_step', 'weight', 'new_n_q_one_step',
'next_n_action_one_step', 'is_expert'
]
)
def shape_fn_qntd(args, kwargs):
r"""
Overview:
Return qntd shape for hpc
Returns:
shape: [T, B, N]
"""
if len(args) <= 0:
tmp = [kwargs['data'].reward.shape[0]]
tmp.extend(list(kwargs['data'].q.shape))
else:
tmp = [args[0].reward.shape[0]]
tmp.extend(list(args[0].q.shape))
return tmp
@hpc_wrapper(shape_fn=shape_fn_qntd, namedtuple_data=True, include_args=[0, 1], include_kwargs=['data', 'gamma'])
def q_nstep_td_error(
data: namedtuple,
gamma: Union[float, list],
nstep: int = 1,
cum_reward: bool = False,
value_gamma: Optional[torch.Tensor] = None,
criterion: torch.nn.modules = nn.MSELoss(reduction='none'),
) -> torch.Tensor:
"""
Overview:
Multistep (1 step or n step) td_error for q-learning based algorithm
Arguments:
- data (:obj:`q_nstep_td_data`): The input data, q_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- cum_reward (:obj:`bool`): Whether to use cumulative nstep reward, which is figured out when collecting data
- value_gamma (:obj:`torch.Tensor`): Gamma discount value for target q_value
- criterion (:obj:`torch.nn.modules`): Loss function criterion
- nstep (:obj:`int`): nstep num, default set to 1
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
- td_error_per_sample (:obj:`torch.Tensor`): nstep td error, 1-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): The q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'reward', 'done']
- q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- next_n_q (:obj:`torch.FloatTensor`): :math:`(B, N)`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- td_error_per_sample (:obj:`torch.FloatTensor`): :math:`(B, )`
"""
q, next_n_q, action, next_n_action, reward, done, weight = data
if weight is None:
weight = torch.ones_like(reward)
if len(action.shape) > 1: # MARL case
reward = reward.unsqueeze(-1)
weight = weight.unsqueeze(-1)
done = done.unsqueeze(-1)
if value_gamma is not None:
value_gamma = value_gamma.unsqueeze(-1)
q_s_a = q.gather(-1, action.unsqueeze(-1)).squeeze(-1)
target_q_s_a = next_n_q.gather(-1, next_n_action.unsqueeze(-1)).squeeze(-1)
if cum_reward:
if value_gamma is None:
target_q_s_a = reward + (gamma ** nstep) * target_q_s_a * (1 - done)
else:
target_q_s_a = reward + value_gamma * target_q_s_a * (1 - done)
else:
target_q_s_a = nstep_return(nstep_return_data(reward, target_q_s_a, done), gamma, nstep, value_gamma)
td_error_per_sample = criterion(q_s_a, target_q_s_a.detach())
return (td_error_per_sample * weight).mean(), td_error_per_sample
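# --- Illustrative usage sketch (added; not part of the original module): a
# 3-step TD loss for plain DQN-style Q values, mirroring how policies usually
# call this function. Sizes are arbitrary assumptions.
def _example_q_nstep_td_error():
    nstep, batch_size, action_dim = 3, 4, 5
    data = q_nstep_td_data(
        q=torch.randn(batch_size, action_dim),
        next_n_q=torch.randn(batch_size, action_dim),
        action=torch.randint(0, action_dim, (batch_size, )),
        next_n_action=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(nstep, batch_size),
        done=torch.zeros(batch_size),
        weight=None,
    )
    loss, td_error_per_sample = q_nstep_td_error(data, 0.99, nstep=nstep)
    return loss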
def bdq_nstep_td_error(
data: namedtuple,
gamma: Union[float, list],
nstep: int = 1,
cum_reward: bool = False,
value_gamma: Optional[torch.Tensor] = None,
criterion: torch.nn.modules = nn.MSELoss(reduction='none'),
) -> torch.Tensor:
"""
Overview:
Multistep (1 step or n step) td_error for BDQ algorithm, \
referenced paper Action Branching Architectures for Deep Reinforcement Learning \
<https://arxiv.org/pdf/1711.08946>
In fact, the original paper only provides the 1-step TD-error calculation method, \
and here we extend the calculation method of n-step.
TD-error:
        y_d = \sum_{t=0}^{nstep-1} \gamma^t * r_t + \gamma^{nstep} * Q_d'(s', argmax Q_d(s', a_d))
TD-error = \frac{1}{D} * (y_d - Q_d(s, a_d))^2
Loss = mean(TD-error)
Arguments:
- data (:obj:`q_nstep_td_data`): The input data, q_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- cum_reward (:obj:`bool`): Whether to use cumulative nstep reward, which is figured out when collecting data
- value_gamma (:obj:`torch.Tensor`): Gamma discount value for target q_value
- criterion (:obj:`torch.nn.modules`): Loss function criterion
- nstep (:obj:`int`): nstep num, default set to 1
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
- td_error_per_sample (:obj:`torch.Tensor`): nstep td error, 1-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): The q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'reward', 'done']
- q (:obj:`torch.FloatTensor`): :math:`(B, D, N)` i.e. [batch_size, branch_num, action_bins_per_branch]
- next_n_q (:obj:`torch.FloatTensor`): :math:`(B, D, N)`
- action (:obj:`torch.LongTensor`): :math:`(B, D)`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, D)`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- td_error_per_sample (:obj:`torch.FloatTensor`): :math:`(B, )`
"""
q, next_n_q, action, next_n_action, reward, done, weight = data
if weight is None:
weight = torch.ones_like(reward)
reward = reward.unsqueeze(-1)
done = done.unsqueeze(-1)
if value_gamma is not None:
value_gamma = value_gamma.unsqueeze(-1)
q_s_a = q.gather(-1, action.unsqueeze(-1)).squeeze(-1)
target_q_s_a = next_n_q.gather(-1, next_n_action.unsqueeze(-1)).squeeze(-1)
if cum_reward:
if value_gamma is None:
target_q_s_a = reward + (gamma ** nstep) * target_q_s_a * (1 - done)
else:
target_q_s_a = reward + value_gamma * target_q_s_a * (1 - done)
else:
target_q_s_a = nstep_return(nstep_return_data(reward, target_q_s_a, done), gamma, nstep, value_gamma)
td_error_per_sample = criterion(q_s_a, target_q_s_a.detach())
td_error_per_sample = td_error_per_sample.mean(-1)
return (td_error_per_sample * weight).mean(), td_error_per_sample
def shape_fn_qntd_rescale(args, kwargs):
r"""
Overview:
Return qntd_rescale shape for hpc
Returns:
shape: [T, B, N]
"""
if len(args) <= 0:
tmp = [kwargs['data'].reward.shape[0]]
tmp.extend(list(kwargs['data'].q.shape))
else:
tmp = [args[0].reward.shape[0]]
tmp.extend(list(args[0].q.shape))
return tmp
@hpc_wrapper(
shape_fn=shape_fn_qntd_rescale, namedtuple_data=True, include_args=[0, 1], include_kwargs=['data', 'gamma']
)
def q_nstep_td_error_with_rescale(
data: namedtuple,
gamma: Union[float, list],
nstep: int = 1,
value_gamma: Optional[torch.Tensor] = None,
criterion: torch.nn.modules = nn.MSELoss(reduction='none'),
trans_fn: Callable = value_transform,
inv_trans_fn: Callable = value_inv_transform,
) -> torch.Tensor:
"""
Overview:
Multistep (1 step or n step) td_error with value rescaling
Arguments:
- data (:obj:`q_nstep_td_data`): The input data, q_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- nstep (:obj:`int`): nstep num, default set to 1
- criterion (:obj:`torch.nn.modules`): Loss function criterion
        - trans_fn (:obj:`Callable`): Value transform function, default to value_transform\
(refer to rl_utils/value_rescale.py)
        - inv_trans_fn (:obj:`Callable`): Value inverse transform function, default to value_inv_transform\
(refer to rl_utils/value_rescale.py)
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): The q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'reward', 'done']
- q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- next_n_q (:obj:`torch.FloatTensor`): :math:`(B, N)`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
"""
q, next_n_q, action, next_n_action, reward, done, weight = data
assert len(action.shape) == 1, action.shape
if weight is None:
weight = torch.ones_like(action)
batch_range = torch.arange(action.shape[0])
q_s_a = q[batch_range, action]
target_q_s_a = next_n_q[batch_range, next_n_action]
target_q_s_a = inv_trans_fn(target_q_s_a)
target_q_s_a = nstep_return(nstep_return_data(reward, target_q_s_a, done), gamma, nstep, value_gamma)
target_q_s_a = trans_fn(target_q_s_a)
td_error_per_sample = criterion(q_s_a, target_q_s_a.detach())
return (td_error_per_sample * weight).mean(), td_error_per_sample
def dqfd_nstep_td_error(
data: namedtuple,
gamma: float,
lambda_n_step_td: float,
lambda_supervised_loss: float,
margin_function: float,
lambda_one_step_td: float = 1.,
nstep: int = 1,
cum_reward: bool = False,
value_gamma: Optional[torch.Tensor] = None,
criterion: torch.nn.modules = nn.MSELoss(reduction='none'),
) -> torch.Tensor:
"""
Overview:
        Multistep n step td_error + 1 step td_error + supervised margin loss for DQfD
Arguments:
- data (:obj:`dqfd_nstep_td_data`): The input data, dqfd_nstep_td_data to calculate loss
- gamma (:obj:`float`): discount factor
- cum_reward (:obj:`bool`): Whether to use cumulative nstep reward, which is figured out when collecting data
- value_gamma (:obj:`torch.Tensor`): Gamma discount value for target q_value
- criterion (:obj:`torch.nn.modules`): Loss function criterion
        - nstep (:obj:`int`): nstep num, default set to 1 (DQfD typically uses 10)
Returns:
- loss (:obj:`torch.Tensor`): Multistep n step td_error + 1 step td_error + supervised margin loss, 0-dim tensor
- td_error_per_sample (:obj:`torch.Tensor`): Multistep n step td_error + 1 step td_error\
+ supervised margin loss, 1-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): the q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'next_n_action', 'reward', 'done', 'weight'\
, 'new_n_q_one_step', 'next_n_action_one_step', 'is_expert']
- q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- next_n_q (:obj:`torch.FloatTensor`): :math:`(B, N)`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- td_error_per_sample (:obj:`torch.FloatTensor`): :math:`(B, )`
- new_n_q_one_step (:obj:`torch.FloatTensor`): :math:`(B, N)`
- next_n_action_one_step (:obj:`torch.LongTensor`): :math:`(B, )`
- is_expert (:obj:`int`) : 0 or 1
"""
q, next_n_q, action, next_n_action, reward, done, done_one_step, weight, new_n_q_one_step, next_n_action_one_step, \
is_expert = data # set is_expert flag(expert 1, agent 0)
assert len(action.shape) == 1, action.shape
if weight is None:
weight = torch.ones_like(action)
batch_range = torch.arange(action.shape[0])
q_s_a = q[batch_range, action]
target_q_s_a = next_n_q[batch_range, next_n_action]
target_q_s_a_one_step = new_n_q_one_step[batch_range, next_n_action_one_step]
# calculate n-step TD-loss
if cum_reward:
if value_gamma is None:
target_q_s_a = reward + (gamma ** nstep) * target_q_s_a * (1 - done)
else:
target_q_s_a = reward + value_gamma * target_q_s_a * (1 - done)
else:
target_q_s_a = nstep_return(nstep_return_data(reward, target_q_s_a, done), gamma, nstep, value_gamma)
td_error_per_sample = criterion(q_s_a, target_q_s_a.detach())
# calculate 1-step TD-loss
nstep = 1
reward = reward[0].unsqueeze(0) # get the one-step reward
value_gamma = None
if cum_reward:
if value_gamma is None:
target_q_s_a_one_step = reward + (gamma ** nstep) * target_q_s_a_one_step * (1 - done_one_step)
else:
target_q_s_a_one_step = reward + value_gamma * target_q_s_a_one_step * (1 - done_one_step)
else:
target_q_s_a_one_step = nstep_return(
nstep_return_data(reward, target_q_s_a_one_step, done_one_step), gamma, nstep, value_gamma
)
td_error_one_step_per_sample = criterion(q_s_a, target_q_s_a_one_step.detach())
device = q_s_a.device
device_cpu = torch.device('cpu')
# calculate the supervised loss
l = margin_function * torch.ones_like(q).to(device_cpu) # q shape (B, A), action shape (B, )
l.scatter_(1, torch.LongTensor(action.unsqueeze(1).to(device_cpu)), torch.zeros_like(q, device=device_cpu))
    # scatter along dim 1 (the action dimension): for each sample, set the entry of the chosen action in l to 0
JE = is_expert * (torch.max(q + l.to(device), dim=1)[0] - q_s_a)
return (
(
(
lambda_n_step_td * td_error_per_sample + lambda_one_step_td * td_error_one_step_per_sample +
lambda_supervised_loss * JE
) * weight
).mean(), lambda_n_step_td * td_error_per_sample.abs() +
lambda_one_step_td * td_error_one_step_per_sample.abs() + lambda_supervised_loss * JE.abs(),
(td_error_per_sample.mean(), td_error_one_step_per_sample.mean(), JE.mean())
)
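# --- Illustrative usage sketch (added; not part of the original module): the
# DQfD loss combining the n-step TD term, the 1-step TD term and the
# supervised margin term. All sizes, lambda weights and the margin value are
# arbitrary assumptions.
def _example_dqfd_nstep_td_error():
    nstep, batch_size, action_dim = 3, 4, 5
    data = dqfd_nstep_td_data(
        q=torch.randn(batch_size, action_dim),
        next_n_q=torch.randn(batch_size, action_dim),
        action=torch.randint(0, action_dim, (batch_size, )),
        next_n_action=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(nstep, batch_size),
        done=torch.zeros(batch_size),
        done_one_step=torch.zeros(batch_size),
        weight=None,
        new_n_q_one_step=torch.randn(batch_size, action_dim),
        next_n_action_one_step=torch.randint(0, action_dim, (batch_size, )),
        is_expert=torch.ones(batch_size),  # treat the whole batch as expert data
    )
    loss, td_abs_per_sample, (nstep_td, onestep_td, margin) = dqfd_nstep_td_error(
        data, 0.99, lambda_n_step_td=1.0, lambda_supervised_loss=1.0,
        margin_function=0.8, nstep=nstep
    )
    return loss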
def dqfd_nstep_td_error_with_rescale(
data: namedtuple,
gamma: float,
lambda_n_step_td: float,
lambda_supervised_loss: float,
lambda_one_step_td: float,
margin_function: float,
nstep: int = 1,
cum_reward: bool = False,
value_gamma: Optional[torch.Tensor] = None,
criterion: torch.nn.modules = nn.MSELoss(reduction='none'),
trans_fn: Callable = value_transform,
inv_trans_fn: Callable = value_inv_transform,
) -> torch.Tensor:
"""
Overview:
        Multistep n step td_error + 1 step td_error + supervised margin loss for DQfD
Arguments:
- data (:obj:`dqfd_nstep_td_data`): The input data, dqfd_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- cum_reward (:obj:`bool`): Whether to use cumulative nstep reward, which is figured out when collecting data
- value_gamma (:obj:`torch.Tensor`): Gamma discount value for target q_value
- criterion (:obj:`torch.nn.modules`): Loss function criterion
        - nstep (:obj:`int`): nstep num, default set to 1 (DQfD typically uses 10)
Returns:
- loss (:obj:`torch.Tensor`): Multistep n step td_error + 1 step td_error + supervised margin loss, 0-dim tensor
- td_error_per_sample (:obj:`torch.Tensor`): Multistep n step td_error + 1 step td_error\
+ supervised margin loss, 1-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): The q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'next_n_action', 'reward', 'done', 'weight'\
, 'new_n_q_one_step', 'next_n_action_one_step', 'is_expert']
- q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- next_n_q (:obj:`torch.FloatTensor`): :math:`(B, N)`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- td_error_per_sample (:obj:`torch.FloatTensor`): :math:`(B, )`
- new_n_q_one_step (:obj:`torch.FloatTensor`): :math:`(B, N)`
- next_n_action_one_step (:obj:`torch.LongTensor`): :math:`(B, )`
- is_expert (:obj:`int`) : 0 or 1
"""
q, next_n_q, action, next_n_action, reward, done, done_one_step, weight, new_n_q_one_step, next_n_action_one_step, \
is_expert = data # set is_expert flag(expert 1, agent 0)
assert len(action.shape) == 1, action.shape
if weight is None:
weight = torch.ones_like(action)
batch_range = torch.arange(action.shape[0])
q_s_a = q[batch_range, action]
target_q_s_a = next_n_q[batch_range, next_n_action]
target_q_s_a = inv_trans_fn(target_q_s_a) # rescale
target_q_s_a_one_step = new_n_q_one_step[batch_range, next_n_action_one_step]
target_q_s_a_one_step = inv_trans_fn(target_q_s_a_one_step) # rescale
# calculate n-step TD-loss
if cum_reward:
if value_gamma is None:
target_q_s_a = reward + (gamma ** nstep) * target_q_s_a * (1 - done)
else:
target_q_s_a = reward + value_gamma * target_q_s_a * (1 - done)
else:
# to use value_gamma in n-step TD-loss
target_q_s_a = nstep_return(nstep_return_data(reward, target_q_s_a, done), gamma, nstep, value_gamma)
target_q_s_a = trans_fn(target_q_s_a) # rescale
td_error_per_sample = criterion(q_s_a, target_q_s_a.detach())
# calculate 1-step TD-loss
nstep = 1
reward = reward[0].unsqueeze(0) # get the one-step reward
value_gamma = None # This is very important, to use gamma in 1-step TD-loss
if cum_reward:
if value_gamma is None:
target_q_s_a_one_step = reward + (gamma ** nstep) * target_q_s_a_one_step * (1 - done_one_step)
else:
target_q_s_a_one_step = reward + value_gamma * target_q_s_a_one_step * (1 - done_one_step)
else:
target_q_s_a_one_step = nstep_return(
nstep_return_data(reward, target_q_s_a_one_step, done_one_step), gamma, nstep, value_gamma
)
target_q_s_a_one_step = trans_fn(target_q_s_a_one_step) # rescale
td_error_one_step_per_sample = criterion(q_s_a, target_q_s_a_one_step.detach())
device = q_s_a.device
device_cpu = torch.device('cpu')
# calculate the supervised loss
l = margin_function * torch.ones_like(q).to(device_cpu) # q shape (B, A), action shape (B, )
l.scatter_(1, torch.LongTensor(action.unsqueeze(1).to(device_cpu)), torch.zeros_like(q, device=device_cpu))
    # scatter along dim 1 (the action dimension): for each sample, set the entry of the chosen action in l to 0
JE = is_expert * (torch.max(q + l.to(device), dim=1)[0] - q_s_a)
return (
(
(
lambda_n_step_td * td_error_per_sample + lambda_one_step_td * td_error_one_step_per_sample +
lambda_supervised_loss * JE
) * weight
).mean(), lambda_n_step_td * td_error_per_sample.abs() +
lambda_one_step_td * td_error_one_step_per_sample.abs() + lambda_supervised_loss * JE.abs(),
(td_error_per_sample.mean(), td_error_one_step_per_sample.mean(), JE.mean())
)
qrdqn_nstep_td_data = namedtuple(
'qrdqn_nstep_td_data', ['q', 'next_n_q', 'action', 'next_n_action', 'reward', 'done', 'tau', 'weight']
)
def qrdqn_nstep_td_error(
data: namedtuple,
gamma: float,
nstep: int = 1,
value_gamma: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Overview:
        Multistep (1 step or n step) td_error in QRDQN
Arguments:
        - data (:obj:`qrdqn_nstep_td_data`): The input data, qrdqn_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- nstep (:obj:`int`): nstep num, default set to 1
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): The q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'reward', 'done']
        - q (:obj:`torch.FloatTensor`): :math:`(B, N, tau)` i.e. [batch_size, action_dim, num_quantiles]
        - next_n_q (:obj:`torch.FloatTensor`): :math:`(B, N, tau')`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
"""
q, next_n_q, action, next_n_action, reward, done, tau, weight = data
assert len(action.shape) == 1, action.shape
assert len(next_n_action.shape) == 1, next_n_action.shape
assert len(done.shape) == 1, done.shape
assert len(q.shape) == 3, q.shape
assert len(next_n_q.shape) == 3, next_n_q.shape
assert len(reward.shape) == 2, reward.shape
if weight is None:
weight = torch.ones_like(action)
batch_range = torch.arange(action.shape[0])
# shape: batch_size x num x 1
q_s_a = q[batch_range, action, :].unsqueeze(2)
# shape: batch_size x 1 x num
target_q_s_a = next_n_q[batch_range, next_n_action, :].unsqueeze(1)
assert reward.shape[0] == nstep
reward_factor = torch.ones(nstep).to(reward)
for i in range(1, nstep):
reward_factor[i] = gamma * reward_factor[i - 1]
# shape: batch_size
reward = torch.matmul(reward_factor, reward)
# shape: batch_size x 1 x num
if value_gamma is None:
target_q_s_a = reward.unsqueeze(-1).unsqueeze(-1) + (gamma ** nstep
) * target_q_s_a * (1 - done).unsqueeze(-1).unsqueeze(-1)
else:
target_q_s_a = reward.unsqueeze(-1).unsqueeze(
-1
) + value_gamma.unsqueeze(-1).unsqueeze(-1) * target_q_s_a * (1 - done).unsqueeze(-1).unsqueeze(-1)
# shape: batch_size x num x num
u = F.smooth_l1_loss(target_q_s_a, q_s_a, reduction="none")
# shape: batch_size
loss = (u * (tau - (target_q_s_a - q_s_a).detach().le(0.).float()).abs()).sum(-1).mean(1)
return (loss * weight).mean(), loss
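# --- Illustrative usage sketch (added; not part of the original module). The
# quantile fractions are assumed to be per-sample midpoints with shape
# (B, num_quantiles, 1); that layout and all sizes are assumptions.
def _example_qrdqn_nstep_td_error():
    nstep, batch_size, action_dim, num_quantiles = 3, 4, 5, 8
    tau_hat = (torch.arange(num_quantiles, dtype=torch.float32) + 0.5) / num_quantiles
    tau_hat = tau_hat.view(1, num_quantiles, 1).repeat(batch_size, 1, 1)
    data = qrdqn_nstep_td_data(
        q=torch.randn(batch_size, action_dim, num_quantiles),
        next_n_q=torch.randn(batch_size, action_dim, num_quantiles),
        action=torch.randint(0, action_dim, (batch_size, )),
        next_n_action=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(nstep, batch_size),
        done=torch.zeros(batch_size),
        tau=tau_hat,
        weight=None,
    )
    loss, per_sample_loss = qrdqn_nstep_td_error(data, 0.99, nstep=nstep)
    return loss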
def q_nstep_sql_td_error(
data: namedtuple,
gamma: float,
alpha: float,
nstep: int = 1,
cum_reward: bool = False,
value_gamma: Optional[torch.Tensor] = None,
criterion: torch.nn.modules = nn.MSELoss(reduction='none'),
) -> torch.Tensor:
"""
Overview:
Multistep (1 step or n step) td_error for q-learning based algorithm
Arguments:
- data (:obj:`q_nstep_td_data`): The input data, q_nstep_sql_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
        - alpha (:obj:`float`): A parameter weighting the entropy term in the soft Q-learning objective
- cum_reward (:obj:`bool`): Whether to use cumulative nstep reward, which is figured out when collecting data
- value_gamma (:obj:`torch.Tensor`): Gamma discount value for target soft_q_value
- criterion (:obj:`torch.nn.modules`): Loss function criterion
- nstep (:obj:`int`): nstep num, default set to 1
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
- td_error_per_sample (:obj:`torch.Tensor`): nstep td error, 1-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): The q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'reward', 'done']
- q (:obj:`torch.FloatTensor`): :math:`(B, N)` i.e. [batch_size, action_dim]
- next_n_q (:obj:`torch.FloatTensor`): :math:`(B, N)`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- td_error_per_sample (:obj:`torch.FloatTensor`): :math:`(B, )`
"""
q, next_n_q, action, next_n_action, reward, done, weight = data
assert len(action.shape) == 1, action.shape
if weight is None:
weight = torch.ones_like(action)
batch_range = torch.arange(action.shape[0])
q_s_a = q[batch_range, action]
# target_q_s_a = next_n_q[batch_range, next_n_action]
target_v = alpha * torch.logsumexp(
next_n_q / alpha, 1
) # target_v = alpha * torch.log(torch.sum(torch.exp(next_n_q / alpha), 1))
target_v[target_v == float("Inf")] = 20
target_v[target_v == float("-Inf")] = -20
    # With an appropriate hyper-parameter alpha these hard-coded clamps could be removed,
    # but for other alpha values the soft value target may explode;
    # the clamps above guard against that situation.
record_target_v = copy.deepcopy(target_v)
# print(target_v)
if cum_reward:
if value_gamma is None:
target_v = reward + (gamma ** nstep) * target_v * (1 - done)
else:
target_v = reward + value_gamma * target_v * (1 - done)
else:
target_v = nstep_return(nstep_return_data(reward, target_v, done), gamma, nstep, value_gamma)
td_error_per_sample = criterion(q_s_a, target_v.detach())
return (td_error_per_sample * weight).mean(), td_error_per_sample, record_target_v
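# --- Illustrative usage sketch (added; not part of the original module): the
# soft Q-learning n-step target. alpha=0.1 and all sizes are arbitrary
# assumptions.
def _example_q_nstep_sql_td_error():
    nstep, batch_size, action_dim = 3, 4, 5
    data = q_nstep_td_data(
        q=torch.randn(batch_size, action_dim),
        next_n_q=torch.randn(batch_size, action_dim),
        action=torch.randint(0, action_dim, (batch_size, )),
        next_n_action=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(nstep, batch_size),
        done=torch.zeros(batch_size),
        weight=None,
    )
    loss, td_error_per_sample, target_v = q_nstep_sql_td_error(
        data, 0.99, alpha=0.1, nstep=nstep
    )
    return loss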
iqn_nstep_td_data = namedtuple(
'iqn_nstep_td_data', ['q', 'next_n_q', 'action', 'next_n_action', 'reward', 'done', 'replay_quantiles', 'weight']
)
def iqn_nstep_td_error(
data: namedtuple,
gamma: float,
nstep: int = 1,
kappa: float = 1.0,
value_gamma: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Overview:
        Multistep (1 step or n step) td_error in IQN, \
referenced paper Implicit Quantile Networks for Distributional Reinforcement Learning \
<https://arxiv.org/pdf/1806.06923.pdf>
Arguments:
- data (:obj:`iqn_nstep_td_data`): The input data, iqn_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- nstep (:obj:`int`): nstep num, default set to 1
- criterion (:obj:`torch.nn.modules`): Loss function criterion
- beta_function (:obj:`Callable`): The risk function
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): The q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'reward', 'done']
- q (:obj:`torch.FloatTensor`): :math:`(tau, B, N)` i.e. [tau x batch_size, action_dim]
- next_n_q (:obj:`torch.FloatTensor`): :math:`(tau', B, N)`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
"""
q, next_n_q, action, next_n_action, reward, done, replay_quantiles, weight = data
assert len(action.shape) == 1, action.shape
assert len(next_n_action.shape) == 1, next_n_action.shape
assert len(done.shape) == 1, done.shape
assert len(q.shape) == 3, q.shape
assert len(next_n_q.shape) == 3, next_n_q.shape
assert len(reward.shape) == 2, reward.shape
if weight is None:
weight = torch.ones_like(action)
batch_size = done.shape[0]
tau = q.shape[0]
tau_prime = next_n_q.shape[0]
action = action.repeat([tau, 1]).unsqueeze(-1)
next_n_action = next_n_action.repeat([tau_prime, 1]).unsqueeze(-1)
# shape: batch_size x tau x a
q_s_a = torch.gather(q, -1, action).permute([1, 0, 2])
    # shape: batch_size x tau_prime x 1
target_q_s_a = torch.gather(next_n_q, -1, next_n_action).permute([1, 0, 2])
assert reward.shape[0] == nstep
device = torch.device("cuda" if reward.is_cuda else "cpu")
reward_factor = torch.ones(nstep).to(device)
for i in range(1, nstep):
reward_factor[i] = gamma * reward_factor[i - 1]
reward = torch.matmul(reward_factor, reward)
if value_gamma is None:
target_q_s_a = reward.unsqueeze(-1) + (gamma ** nstep) * target_q_s_a.squeeze(-1) * (1 - done).unsqueeze(-1)
else:
target_q_s_a = reward.unsqueeze(-1) + value_gamma.unsqueeze(-1) * target_q_s_a.squeeze(-1) * (1 - done
).unsqueeze(-1)
target_q_s_a = target_q_s_a.unsqueeze(-1)
# shape: batch_size x tau' x tau x 1.
bellman_errors = (target_q_s_a[:, :, None, :] - q_s_a[:, None, :, :])
# The huber loss (see Section 2.3 of the paper) is defined via two cases:
huber_loss = torch.where(
bellman_errors.abs() <= kappa, 0.5 * bellman_errors ** 2, kappa * (bellman_errors.abs() - 0.5 * kappa)
)
# Reshape replay_quantiles to batch_size x num_tau_samples x 1
replay_quantiles = replay_quantiles.reshape([tau, batch_size, 1]).permute([1, 0, 2])
# shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.
replay_quantiles = replay_quantiles[:, None, :, :].repeat([1, tau_prime, 1, 1])
# shape: batch_size x tau_prime x tau x 1.
quantile_huber_loss = (torch.abs(replay_quantiles - ((bellman_errors < 0).float()).detach()) * huber_loss) / kappa
# shape: batch_size
loss = quantile_huber_loss.sum(dim=2).mean(dim=1)[:, 0]
return (loss * weight).mean(), loss
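# --- Illustrative usage sketch (added; not part of the original module). The
# (num_quantiles, B) layout of replay_quantiles is an assumption made only so
# that the reshape inside the function succeeds; all sizes are assumptions.
def _example_iqn_nstep_td_error():
    nstep, batch_size, action_dim, num_quantiles = 3, 4, 5, 8
    data = iqn_nstep_td_data(
        q=torch.randn(num_quantiles, batch_size, action_dim),
        next_n_q=torch.randn(num_quantiles, batch_size, action_dim),
        action=torch.randint(0, action_dim, (batch_size, )),
        next_n_action=torch.randint(0, action_dim, (batch_size, )),
        reward=torch.randn(nstep, batch_size),
        done=torch.zeros(batch_size),
        replay_quantiles=torch.rand(num_quantiles, batch_size),
        weight=None,
    )
    loss, per_sample_loss = iqn_nstep_td_error(data, 0.99, nstep=nstep)
    return loss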
fqf_nstep_td_data = namedtuple(
'fqf_nstep_td_data', ['q', 'next_n_q', 'action', 'next_n_action', 'reward', 'done', 'quantiles_hats', 'weight']
)
def fqf_nstep_td_error(
data: namedtuple,
gamma: float,
nstep: int = 1,
kappa: float = 1.0,
value_gamma: Optional[torch.Tensor] = None,
) -> torch.Tensor:
"""
Overview:
        Multistep (1 step or n step) td_error in FQF, \
referenced paper Fully Parameterized Quantile Function for Distributional Reinforcement Learning \
<https://arxiv.org/pdf/1911.02140.pdf>
Arguments:
- data (:obj:`fqf_nstep_td_data`): The input data, fqf_nstep_td_data to calculate loss
- gamma (:obj:`float`): Discount factor
- nstep (:obj:`int`): nstep num, default set to 1
- criterion (:obj:`torch.nn.modules`): Loss function criterion
- beta_function (:obj:`Callable`): The risk function
Returns:
- loss (:obj:`torch.Tensor`): nstep td error, 0-dim tensor
Shapes:
- data (:obj:`q_nstep_td_data`): The q_nstep_td_data containing\
['q', 'next_n_q', 'action', 'reward', 'done']
- q (:obj:`torch.FloatTensor`): :math:`(B, tau, N)` i.e. [batch_size, tau, action_dim]
- next_n_q (:obj:`torch.FloatTensor`): :math:`(B, tau', N)`
- action (:obj:`torch.LongTensor`): :math:`(B, )`
- next_n_action (:obj:`torch.LongTensor`): :math:`(B, )`
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, where T is timestep(nstep)
- done (:obj:`torch.BoolTensor`) :math:`(B, )`, whether done in last timestep
- quantiles_hats (:obj:`torch.FloatTensor`): :math:`(B, tau)`
"""
q, next_n_q, action, next_n_action, reward, done, quantiles_hats, weight = data
assert len(action.shape) == 1, action.shape
assert len(next_n_action.shape) == 1, next_n_action.shape
assert len(done.shape) == 1, done.shape
assert len(q.shape) == 3, q.shape
assert len(next_n_q.shape) == 3, next_n_q.shape
assert len(reward.shape) == 2, reward.shape
if weight is None:
weight = torch.ones_like(action)
batch_size = done.shape[0]
tau = q.shape[1]
tau_prime = next_n_q.shape[1]
# shape: batch_size x tau x 1
q_s_a = evaluate_quantile_at_action(q, action)
# shape: batch_size x tau_prime x 1
target_q_s_a = evaluate_quantile_at_action(next_n_q, next_n_action)
assert reward.shape[0] == nstep
reward_factor = torch.ones(nstep).to(reward.device)
for i in range(1, nstep):
reward_factor[i] = gamma * reward_factor[i - 1]
reward = torch.matmul(reward_factor, reward) # [batch_size]
if value_gamma is None:
target_q_s_a = reward.unsqueeze(-1) + (gamma ** nstep) * target_q_s_a.squeeze(-1) * (1 - done).unsqueeze(-1)
else:
target_q_s_a = reward.unsqueeze(-1) + value_gamma.unsqueeze(-1) * target_q_s_a.squeeze(-1) * (1 - done
).unsqueeze(-1)
target_q_s_a = target_q_s_a.unsqueeze(-1)
# shape: batch_size x tau' x tau x 1.
bellman_errors = (target_q_s_a.unsqueeze(2) - q_s_a.unsqueeze(1))
# shape: batch_size x tau' x tau x 1
huber_loss = F.smooth_l1_loss(target_q_s_a.unsqueeze(2), q_s_a.unsqueeze(1), reduction="none")
# shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.
quantiles_hats = quantiles_hats[:, None, :, None].repeat([1, tau_prime, 1, 1])
# shape: batch_size x tau_prime x tau x 1.
quantile_huber_loss = (torch.abs(quantiles_hats - ((bellman_errors < 0).float()).detach()) * huber_loss) / kappa
# shape: batch_size
loss = quantile_huber_loss.sum(dim=2).mean(dim=1)[:, 0]
return (loss * weight).mean(), loss
def evaluate_quantile_at_action(q_s, actions):
assert q_s.shape[0] == actions.shape[0]
batch_size, num_quantiles = q_s.shape[:2]
# Expand actions into (batch_size, num_quantiles, 1).
action_index = actions[:, None, None].expand(batch_size, num_quantiles, 1)
# Calculate quantile values at specified actions.
q_s_a = q_s.gather(dim=2, index=action_index)
return q_s_a
def fqf_calculate_fraction_loss(q_tau_i, q_value, quantiles, actions):
"""
Shapes:
- q_tau_i (:obj:`torch.FloatTensor`) :math:`(batch_size, num_quantiles-1, action_dim)`
- q_value (:obj:`torch.FloatTensor`) :math:`(batch_size, num_quantiles, action_dim)`
- quantiles (:obj:`torch.FloatTensor`) :math:`(batch_size, num_quantiles+1)`
- actions (:obj:`torch.LongTensor`) :math:`(batch_size, )`
"""
assert q_value.requires_grad
batch_size = q_value.shape[0]
num_quantiles = q_value.shape[1]
with torch.no_grad():
sa_quantiles = evaluate_quantile_at_action(q_tau_i, actions)
assert sa_quantiles.shape == (batch_size, num_quantiles - 1, 1)
q_s_a_hats = evaluate_quantile_at_action(q_value, actions) # [batch_size, num_quantiles, 1]
assert q_s_a_hats.shape == (batch_size, num_quantiles, 1)
assert not q_s_a_hats.requires_grad
    # NOTE: Proposition 1 in the paper requires F^{-1} to be non-decreasing.
    # We relax this requirement and calculate gradients of the quantiles even when
    # F^{-1} is not non-decreasing.
values_1 = sa_quantiles - q_s_a_hats[:, :-1]
signs_1 = sa_quantiles > torch.cat([q_s_a_hats[:, :1], sa_quantiles[:, :-1]], dim=1)
assert values_1.shape == signs_1.shape
values_2 = sa_quantiles - q_s_a_hats[:, 1:]
signs_2 = sa_quantiles < torch.cat([sa_quantiles[:, 1:], q_s_a_hats[:, -1:]], dim=1)
assert values_2.shape == signs_2.shape
gradient_of_taus = (torch.where(signs_1, values_1, -values_1) +
torch.where(signs_2, values_2, -values_2)).view(batch_size, num_quantiles - 1)
assert not gradient_of_taus.requires_grad
assert gradient_of_taus.shape == quantiles[:, 1:-1].shape
# Gradients of the network parameters and corresponding loss
# are calculated using chain rule.
fraction_loss = (gradient_of_taus * quantiles[:, 1:-1]).sum(dim=1).mean()
return fraction_loss
td_lambda_data = namedtuple('td_lambda_data', ['value', 'reward', 'weight'])
def shape_fn_td_lambda(args, kwargs):
r"""
Overview:
Return td_lambda shape for hpc
Returns:
shape: [T, B]
"""
if len(args) <= 0:
tmp = kwargs['data'].reward.shape[0]
else:
tmp = args[0].reward.shape
return tmp
@hpc_wrapper(
shape_fn=shape_fn_td_lambda,
namedtuple_data=True,
include_args=[0, 1, 2],
include_kwargs=['data', 'gamma', 'lambda_']
)
def td_lambda_error(data: namedtuple, gamma: float = 0.9, lambda_: float = 0.8) -> torch.Tensor:
"""
Overview:
Computing TD(lambda) loss given constant gamma and lambda.
        There is no special handling for terminal state values;
        if a state has reached the terminal, simply fill in zeros for the values and rewards beyond the terminal
        (*including the terminal state*, i.e. values[terminal] should also be 0).
Arguments:
- data (:obj:`namedtuple`): td_lambda input data with fields ['value', 'reward', 'weight']
- gamma (:obj:`float`): Constant discount factor gamma, should be in [0, 1], defaults to 0.9
- lambda (:obj:`float`): Constant lambda, should be in [0, 1], defaults to 0.8
Returns:
- loss (:obj:`torch.Tensor`): Computed MSE loss, averaged over the batch
Shapes:
- value (:obj:`torch.FloatTensor`): :math:`(T+1, B)`, where T is trajectory length and B is batch,\
which is the estimation of the state value at step 0 to T
- reward (:obj:`torch.FloatTensor`): :math:`(T, B)`, the returns from time step 0 to T-1
- weight (:obj:`torch.FloatTensor` or None): :math:`(B, )`, the training sample weight
- loss (:obj:`torch.FloatTensor`): :math:`()`, 0-dim tensor
"""
value, reward, weight = data
if weight is None:
weight = torch.ones_like(reward)
with torch.no_grad():
return_ = generalized_lambda_returns(value, reward, gamma, lambda_)
# discard the value at T as it should be considered in the next slice
loss = 0.5 * (F.mse_loss(return_, value[:-1], reduction='none') * weight).mean()
return loss
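# --- Illustrative usage sketch (added; not part of the original module):
# value has one more timestep than reward, as the docstring requires. The
# trajectory length, batch size, gamma and lambda are arbitrary assumptions.
def _example_td_lambda_error():
    traj_len, batch_size = 5, 4
    data = td_lambda_data(
        value=torch.randn(traj_len + 1, batch_size),
        reward=torch.randn(traj_len, batch_size),
        weight=None,
    )
    # returns a 0-dim loss tensor
    return td_lambda_error(data, 0.99, 0.95)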
def generalized_lambda_returns(
bootstrap_values: torch.Tensor,
rewards: torch.Tensor,
gammas: float,
lambda_: float,
done: Optional[torch.Tensor] = None
) -> torch.Tensor:
r"""
Overview:
Functional equivalent to trfl.value_ops.generalized_lambda_returns
https://github.com/deepmind/trfl/blob/2c07ac22512a16715cc759f0072be43a5d12ae45/trfl/value_ops.py#L74
Passing in a number instead of tensor to make the value constant for all samples in batch
Arguments:
- bootstrap_values (:obj:`torch.Tensor` or :obj:`float`):
estimation of the value at step 0 to *T*, of size [T_traj+1, batchsize]
- rewards (:obj:`torch.Tensor`): The returns from 0 to T-1, of size [T_traj, batchsize]
- gammas (:obj:`torch.Tensor` or :obj:`float`):
Discount factor for each step (from 0 to T-1), of size [T_traj, batchsize]
- lambda (:obj:`torch.Tensor` or :obj:`float`): Determining the mix of bootstrapping
vs further accumulation of multistep returns at each timestep, of size [T_traj, batchsize]
- done (:obj:`torch.Tensor` or :obj:`float`):
Whether the episode done at current step (from 0 to T-1), of size [T_traj, batchsize]
Returns:
- return (:obj:`torch.Tensor`): Computed lambda return value
for each state from 0 to T-1, of size [T_traj, batchsize]
"""
if not isinstance(gammas, torch.Tensor):
gammas = gammas * torch.ones_like(rewards)
if not isinstance(lambda_, torch.Tensor):
lambda_ = lambda_ * torch.ones_like(rewards)
bootstrap_values_tp1 = bootstrap_values[1:, :]
return multistep_forward_view(bootstrap_values_tp1, rewards, gammas, lambda_, done)
def multistep_forward_view(
bootstrap_values: torch.Tensor,
rewards: torch.Tensor,
gammas: float,
lambda_: float,
done: Optional[torch.Tensor] = None
) -> torch.Tensor:
r"""
Overview:
Same as trfl.sequence_ops.multistep_forward_view
Implementing (12.18) in Sutton & Barto
```
result[T-1] = rewards[T-1] + gammas[T-1] * bootstrap_values[T]
for t in 0...T-2 :
result[t] = rewards[t] + gammas[t]*(lambdas[t]*result[t+1] + (1-lambdas[t])*bootstrap_values[t+1])
```
Assuming the first dim of input tensors correspond to the index in batch
Arguments:
- bootstrap_values (:obj:`torch.Tensor`): Estimation of the value at *step 1 to T*, of size [T_traj, batchsize]
- rewards (:obj:`torch.Tensor`): The returns from 0 to T-1, of size [T_traj, batchsize]
- gammas (:obj:`torch.Tensor`): Discount factor for each step (from 0 to T-1), of size [T_traj, batchsize]
- lambda (:obj:`torch.Tensor`): Determining the mix of bootstrapping vs further accumulation of \
multistep returns at each timestep of size [T_traj, batchsize], the element for T-1 is ignored \
and effectively set to 0, as there is no information about future rewards.
- done (:obj:`torch.Tensor` or :obj:`float`):
Whether the episode done at current step (from 0 to T-1), of size [T_traj, batchsize]
Returns:
- ret (:obj:`torch.Tensor`): Computed lambda return value \
for each state from 0 to T-1, of size [T_traj, batchsize]
"""
result = torch.empty_like(rewards)
if done is None:
done = torch.zeros_like(rewards)
# Forced cutoff at the last one
result[-1, :] = rewards[-1, :] + (1 - done[-1, :]) * gammas[-1, :] * bootstrap_values[-1, :]
discounts = gammas * lambda_
for t in reversed(range(rewards.size()[0] - 1)):
result[t, :] = rewards[t, :] + (1 - done[t, :]) * \
(
discounts[t, :] * result[t + 1, :] +
(gammas[t, :] - discounts[t, :]) * bootstrap_values[t, :]
)
return result | PypiClean |
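# --- Illustrative usage sketch (added; not part of the original module):
# computing lambda returns directly from bootstrapped values and rewards with
# scalar gamma and lambda. Sizes and constants are arbitrary assumptions.
def _example_generalized_lambda_returns():
    traj_len, batch_size = 5, 4
    bootstrap_values = torch.randn(traj_len + 1, batch_size)  # values for step 0..T
    rewards = torch.randn(traj_len, batch_size)  # rewards for step 0..T-1
    # returns a (traj_len, batch_size) tensor of lambda returns
    return generalized_lambda_returns(bootstrap_values, rewards, 0.99, 0.95)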
/nonebot_plugin_docs-2.0.1-py3-none-any.whl/nonebot_plugin_docs/dist/assets/js/cee8f112.8439d3bb.js | "use strict";(self.webpackChunknonebot=self.webpackChunknonebot||[]).push([[2889],{35318:function(e,n,r){r.r(n),r.d(n,{MDXContext:function(){return p},MDXProvider:function(){return s},mdx:function(){return x},useMDXComponents:function(){return u},withMDXComponents:function(){return m}});var t=r(27378);function a(e,n,r){return n in e?Object.defineProperty(e,n,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[n]=r,e}function d(){return d=Object.assign||function(e){for(var n=1;n<arguments.length;n++){var r=arguments[n];for(var t in r)Object.prototype.hasOwnProperty.call(r,t)&&(e[t]=r[t])}return e},d.apply(this,arguments)}function l(e,n){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var t=Object.getOwnPropertySymbols(e);n&&(t=t.filter((function(n){return Object.getOwnPropertyDescriptor(e,n).enumerable}))),r.push.apply(r,t)}return r}function i(e){for(var n=1;n<arguments.length;n++){var r=null!=arguments[n]?arguments[n]:{};n%2?l(Object(r),!0).forEach((function(n){a(e,n,r[n])})):Object.getOwnPropertyDescriptors?Object.defineProperties(e,Object.getOwnPropertyDescriptors(r)):l(Object(r)).forEach((function(n){Object.defineProperty(e,n,Object.getOwnPropertyDescriptor(r,n))}))}return e}function o(e,n){if(null==e)return{};var r,t,a=function(e,n){if(null==e)return{};var r,t,a={},d=Object.keys(e);for(t=0;t<d.length;t++)r=d[t],n.indexOf(r)>=0||(a[r]=e[r]);return a}(e,n);if(Object.getOwnPropertySymbols){var d=Object.getOwnPropertySymbols(e);for(t=0;t<d.length;t++)r=d[t],n.indexOf(r)>=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(a[r]=e[r])}return a}var p=t.createContext({}),m=function(e){return function(n){var r=u(n.components);return t.createElement(e,d({},n,{components:r}))}},u=function(e){var n=t.useContext(p),r=n;return e&&(r="function"==typeof e?e(n):i(i({},n),e)),r},s=function(e){var n=u(e.components);return t.createElement(p.Provider,{value:n},e.children)},c={inlineCode:"code",wrapper:function(e){var n=e.children;return t.createElement(t.Fragment,{},n)}},h=t.forwardRef((function(e,n){var r=e.components,a=e.mdxType,d=e.originalType,l=e.parentName,p=o(e,["components","mdxType","originalType","parentName"]),m=u(r),s=a,h=m["".concat(l,".").concat(s)]||m[s]||c[s]||d;return r?t.createElement(h,i(i({ref:n},p),{},{components:r})):t.createElement(h,i({ref:n},p))}));function x(e,n){var r=arguments,a=n&&n.mdxType;if("string"==typeof e||a){var d=r.length,l=new Array(d);l[0]=h;var i={};for(var o in n)hasOwnProperty.call(n,o)&&(i[o]=n[o]);i.originalType=e,i.mdxType="string"==typeof e?e:a,l[1]=i;for(var p=2;p<d;p++)l[p]=r[p];return t.createElement.apply(null,l)}return t.createElement.apply(null,r)}h.displayName="MDXCreateElement"},94262:function(e,n,r){r.r(n),r.d(n,{frontMatter:function(){return i},contentTitle:function(){return o},metadata:function(){return p},toc:function(){return m},default:function(){return s}});var 
t=r(25773),a=r(30808),d=(r(27378),r(35318)),l=["components"],i={sidebar_position:0,description:"\u5b9a\u65f6\u6267\u884c\u4efb\u52a1"},o="\u5b9a\u65f6\u4efb\u52a1",p={unversionedId:"best-practice/scheduler",id:"version-2.0.1/best-practice/scheduler",isDocsHomePage:!1,title:"\u5b9a\u65f6\u4efb\u52a1",description:"\u5b9a\u65f6\u6267\u884c\u4efb\u52a1",source:"@site/versioned_docs/version-2.0.1/best-practice/scheduler.md",sourceDirName:"best-practice",slug:"/best-practice/scheduler",permalink:"/website/docs/best-practice/scheduler",editUrl:"https://github.com/nonebot/nonebot2/edit/master/website/versioned_docs/version-2.0.1/best-practice/scheduler.md",tags:[],version:"2.0.1",lastUpdatedBy:"noneflow[bot]",lastUpdatedAt:1690100659,formattedLastUpdatedAt:"2023/7/23",sidebarPosition:0,frontMatter:{sidebar_position:0,description:"\u5b9a\u65f6\u6267\u884c\u4efb\u52a1"},sidebar:"version-2.0.1/tutorial",previous:{title:"\u4e8b\u4ef6\u54cd\u5e94\u5668\u5b58\u50a8",permalink:"/website/docs/advanced/matcher-provider"},next:{title:"\u6570\u636e\u5b58\u50a8",permalink:"/website/docs/best-practice/data-storing"}},m=[{value:"\u5b89\u88c5\u63d2\u4ef6",id:"\u5b89\u88c5\u63d2\u4ef6",children:[],level:2},{value:"\u4f7f\u7528\u63d2\u4ef6",id:"\u4f7f\u7528\u63d2\u4ef6",children:[{value:"\u5bfc\u5165\u8c03\u5ea6\u5668",id:"\u5bfc\u5165\u8c03\u5ea6\u5668",children:[],level:3},{value:"\u6dfb\u52a0\u5b9a\u65f6\u4efb\u52a1",id:"\u6dfb\u52a0\u5b9a\u65f6\u4efb\u52a1",children:[],level:3},{value:"\u914d\u7f6e\u9879",id:"\u914d\u7f6e\u9879",children:[{value:"apscheduler_autostart",id:"apscheduler_autostart",children:[],level:4},{value:"apscheduler_log_level",id:"apscheduler_log_level",children:[],level:4},{value:"apscheduler_config",id:"apscheduler_config",children:[],level:4}],level:3}],level:2}],u={toc:m};function s(e){var n=e.components,r=(0,a.Z)(e,l);return(0,d.mdx)("wrapper",(0,t.Z)({},u,r,{components:n,mdxType:"MDXLayout"}),(0,d.mdx)("h1",{id:"\u5b9a\u65f6\u4efb\u52a1"},"\u5b9a\u65f6\u4efb\u52a1"),(0,d.mdx)("p",null,(0,d.mdx)("a",{parentName:"p",href:"https://apscheduler.readthedocs.io/en/3.x/"},"APScheduler")," (Advanced Python Scheduler) \u662f\u4e00\u4e2a Python \u7b2c\u4e09\u65b9\u5e93\uff0c\u5176\u5f3a\u5927\u7684\u5b9a\u65f6\u4efb\u52a1\u529f\u80fd\u88ab\u5e7f\u6cdb\u5e94\u7528\u4e8e\u5404\u4e2a\u573a\u666f\u3002\u5728 NoneBot \u4e2d\uff0c\u5b9a\u65f6\u4efb\u52a1\u4f5c\u4e3a\u4e00\u4e2a\u989d\u5916\u529f\u80fd\uff0c\u4f9d\u8d56\u4e8e\u57fa\u4e8e APScheduler \u5f00\u53d1\u7684 ",(0,d.mdx)("a",{parentName:"p",href:"https://github.com/nonebot/plugin-apscheduler"},(0,d.mdx)("inlineCode",{parentName:"a"},"nonebot-plugin-apscheduler"))," \u63d2\u4ef6\u8fdb\u884c\u652f\u6301\u3002"),(0,d.mdx)("h2",{id:"\u5b89\u88c5\u63d2\u4ef6"},"\u5b89\u88c5\u63d2\u4ef6"),(0,d.mdx)("p",null,"\u5728\u4f7f\u7528\u524d\u8bf7\u5148\u5b89\u88c5 ",(0,d.mdx)("inlineCode",{parentName:"p"},"nonebot-plugin-apscheduler")," \u63d2\u4ef6\u81f3\u9879\u76ee\u73af\u5883\u4e2d\uff0c\u53ef\u53c2\u8003",(0,d.mdx)("a",{parentName:"p",href:"/website/docs/tutorial/store#%E5%AE%89%E8%A3%85%E6%8F%92%E4%BB%B6"},"\u83b7\u53d6\u5546\u5e97\u63d2\u4ef6"),"\u6765\u4e86\u89e3\u5e76\u9009\u62e9\u5b89\u88c5\u63d2\u4ef6\u7684\u65b9\u5f0f\u3002\u5982\uff1a"),(0,d.mdx)("p",null,"\u5728",(0,d.mdx)("strong",{parentName:"p"},"\u9879\u76ee\u76ee\u5f55"),"\u4e0b\u6267\u884c\u4ee5\u4e0b\u547d\u4ee4\uff1a"),(0,d.mdx)("pre",null,(0,d.mdx)("code",{parentName:"pre",className:"language-bash"},"nb plugin install 
nonebot-plugin-apscheduler\n")),(0,d.mdx)("h2",{id:"\u4f7f\u7528\u63d2\u4ef6"},"\u4f7f\u7528\u63d2\u4ef6"),(0,d.mdx)("p",null,(0,d.mdx)("inlineCode",{parentName:"p"},"nonebot-plugin-apscheduler")," \u672c\u8d28\u4e0a\u662f\u5bf9 ",(0,d.mdx)("a",{parentName:"p",href:"https://apscheduler.readthedocs.io/en/3.x/"},"APScheduler")," \u8fdb\u884c\u4e86\u5c01\u88c5\u4ee5\u9002\u7528\u4e8e NoneBot \u5f00\u53d1\uff0c\u56e0\u6b64\u5176\u4f7f\u7528\u65b9\u5f0f\u4e0e APScheduler \u672c\u8eab\u5e76\u65e0\u663e\u8457\u533a\u522b\u3002\u5728\u6b64\u6211\u4eec\u4f1a\u7b80\u8981\u4ecb\u7ecd\u5176\u8c03\u7528\u65b9\u6cd5\uff0c\u66f4\u591a\u7684\u4f7f\u7528\u65b9\u9762\u7684\u529f\u80fd\u8bf7\u53c2\u8003",(0,d.mdx)("a",{parentName:"p",href:"https://apscheduler.readthedocs.io/en/3.x/userguide.html"},"APScheduler \u5b98\u65b9\u6587\u6863"),"\u3002"),(0,d.mdx)("h3",{id:"\u5bfc\u5165\u8c03\u5ea6\u5668"},"\u5bfc\u5165\u8c03\u5ea6\u5668"),(0,d.mdx)("p",null,"\u7531\u4e8e ",(0,d.mdx)("inlineCode",{parentName:"p"},"nonebot_plugin_apscheduler")," \u4f5c\u4e3a\u63d2\u4ef6\uff0c\u56e0\u6b64\u9700\u8981\u5728\u4f7f\u7528\u524d\u5bf9\u5176\u8fdb\u884c",(0,d.mdx)("strong",{parentName:"p"},"\u52a0\u8f7d"),"\u5e76",(0,d.mdx)("strong",{parentName:"p"},"\u5bfc\u5165"),"\u5176\u4e2d\u7684 ",(0,d.mdx)("inlineCode",{parentName:"p"},"scheduler")," \u8c03\u5ea6\u5668\u6765\u521b\u5efa\u5b9a\u65f6\u4efb\u52a1\u3002\u4f7f\u7528 ",(0,d.mdx)("inlineCode",{parentName:"p"},"require")," \u65b9\u6cd5\u53ef\u8f7b\u677e\u5b8c\u6210\u8fd9\u4e00\u8fc7\u7a0b\uff0c\u53ef\u53c2\u8003 ",(0,d.mdx)("a",{parentName:"p",href:"/website/docs/advanced/requiring"},"\u8de8\u63d2\u4ef6\u8bbf\u95ee")," \u4e00\u8282\u8fdb\u884c\u4e86\u89e3\u3002"),(0,d.mdx)("pre",null,(0,d.mdx)("code",{parentName:"pre",className:"language-python"},'from nonebot import require\n\nrequire("nonebot_plugin_apscheduler")\n\nfrom nonebot_plugin_apscheduler import scheduler\n')),(0,d.mdx)("h3",{id:"\u6dfb\u52a0\u5b9a\u65f6\u4efb\u52a1"},"\u6dfb\u52a0\u5b9a\u65f6\u4efb\u52a1"),(0,d.mdx)("p",null,"\u5728 ",(0,d.mdx)("a",{parentName:"p",href:"https://apscheduler.readthedocs.io/en/3.x/userguide.html#adding-jobs"},"APScheduler \u5b98\u65b9\u6587\u6863")," \u4e2d\u63d0\u4f9b\u4e86\u4ee5\u4e0b\u4e24\u79cd\u76f4\u63a5\u6dfb\u52a0\u4efb\u52a1\u7684\u65b9\u5f0f\uff1a"),(0,d.mdx)("pre",null,(0,d.mdx)("code",{parentName:"pre",className:"language-python"},'from nonebot import require\n\nrequire("nonebot_plugin_apscheduler")\n\nfrom nonebot_plugin_apscheduler import scheduler\n\n# \u57fa\u4e8e\u88c5\u9970\u5668\u7684\u65b9\u5f0f\[email protected]_job("cron", hour="*/2", id="job_0", args=[1], kwargs={arg2: 2})\nasync def run_every_2_hour(arg1: int, arg2: int):\n pass\n\n# \u57fa\u4e8e add_job \u65b9\u6cd5\u7684\u65b9\u5f0f\ndef run_every_day(arg1: int, arg2: int):\n pass\n\nscheduler.add_job(\n run_every_day, "interval", days=1, id="job_1", args=[1], kwargs={arg2: 2}\n)\n')),(0,d.mdx)("div",{className:"admonition admonition-warning alert alert--danger"},(0,d.mdx)("div",{parentName:"div",className:"admonition-heading"},(0,d.mdx)("h5",{parentName:"div"},(0,d.mdx)("span",{parentName:"h5",className:"admonition-icon"},(0,d.mdx)("svg",{parentName:"span",xmlns:"http://www.w3.org/2000/svg",width:"12",height:"16",viewBox:"0 0 12 16"},(0,d.mdx)("path",{parentName:"svg",fillRule:"evenodd",d:"M5.05.31c.81 2.17.41 3.38-.52 4.31C3.55 5.67 1.98 6.45.9 7.98c-1.45 2.05-1.7 6.53 3.53 7.7-2.2-1.16-2.67-4.52-.3-6.61-.61 2.03.53 3.33 1.94 2.86 1.39-.47 2.3.53 2.27 1.67-.02.78-.31 1.44-1.13 1.81 3.42-.59 4.78-3.42 
4.78-5.56 0-2.84-2.53-3.22-1.25-5.61-1.52.13-2.03 1.13-1.89 2.75.09 1.08-1.02 1.8-1.86 1.33-.67-.41-.66-1.19-.06-1.78C8.18 5.31 8.68 2.45 5.05.32L5.03.3l.02.01z"}))),"\u6ce8\u610f")),(0,d.mdx)("div",{parentName:"div",className:"admonition-content"},(0,d.mdx)("p",{parentName:"div"},"\u7531\u4e8e APScheduler \u7684\u5b9a\u65f6\u4efb\u52a1\u5e76\u4e0d\u662f",(0,d.mdx)("strong",{parentName:"p"},"\u7531\u4e8b\u4ef6\u54cd\u5e94\u5668\u6240\u89e6\u53d1\u7684\u4e8b\u4ef6"),"\uff0c\u56e0\u6b64\u5176\u4efb\u52a1\u51fd\u6570\u65e0\u6cd5\u540c",(0,d.mdx)("a",{parentName:"p",href:"/website/docs/tutorial/handler#%E4%BA%8B%E4%BB%B6%E5%A4%84%E7%90%86%E5%87%BD%E6%95%B0"},"\u4e8b\u4ef6\u5904\u7406\u51fd\u6570"),"\u4e00\u6837\u901a\u8fc7",(0,d.mdx)("a",{parentName:"p",href:"/website/docs/tutorial/event-data#%E8%AE%A4%E8%AF%86%E4%BE%9D%E8%B5%96%E6%B3%A8%E5%85%A5"},"\u4f9d\u8d56\u6ce8\u5165"),"\u83b7\u53d6\u4e0a\u4e0b\u6587\u4fe1\u606f\uff0c\u4e5f\u65e0\u6cd5\u901a\u8fc7\u4e8b\u4ef6\u54cd\u5e94\u5668\u5bf9\u8c61\u7684\u65b9\u6cd5\u8fdb\u884c\u4efb\u4f55\u64cd\u4f5c\uff0c\u56e0\u6b64\u6211\u4eec\u9700\u8981\u4f7f\u7528",(0,d.mdx)("a",{parentName:"p",href:"/website/docs/appendices/api-calling#%E8%B0%83%E7%94%A8%E5%B9%B3%E5%8F%B0-api"},"\u8c03\u7528\u5e73\u53f0 API"),"\u7684\u65b9\u5f0f\u6765\u83b7\u53d6\u4fe1\u606f\u6216\u6536\u53d1\u6d88\u606f\u3002"),(0,d.mdx)("p",{parentName:"div"},"\u76f8\u5bf9\u4e8e\u4e8b\u4ef6\u5904\u7406\u4f9d\u8d56\u800c\u8a00\uff0c\u7f16\u5199\u5b9a\u65f6\u4efb\u52a1\u66f4\u50cf\u662f\u7f16\u5199\u666e\u901a\u7684\u51fd\u6570\uff0c\u9700\u8981\u6211\u4eec\u81ea\u884c\u83b7\u53d6\u4fe1\u606f\u4ee5\u53ca\u53d1\u9001\u4fe1\u606f\uff0c\u8bf7",(0,d.mdx)("strong",{parentName:"p"},"\u4e0d\u8981"),"\u5c06\u4e8b\u4ef6\u5904\u7406\u4f9d\u8d56\u7684\u7279\u6b8a\u8bed\u6cd5\u7528\u4e8e\u5b9a\u65f6\u4efb\u52a1\uff01"))),(0,d.mdx)("p",null,"\u5173\u4e8e APScheduler \u7684\u66f4\u591a\u4f7f\u7528\u65b9\u6cd5\uff0c\u53ef\u4ee5\u53c2\u8003 ",(0,d.mdx)("a",{parentName:"p",href:"https://apscheduler.readthedocs.io/en/3.x/index.html"},"APScheduler \u5b98\u65b9\u6587\u6863")," \u8fdb\u884c\u4e86\u89e3\u3002"),(0,d.mdx)("h3",{id:"\u914d\u7f6e\u9879"},"\u914d\u7f6e\u9879"),(0,d.mdx)("h4",{id:"apscheduler_autostart"},"apscheduler_autostart"),(0,d.mdx)("ul",null,(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("strong",{parentName:"li"},"\u7c7b\u578b"),": ",(0,d.mdx)("inlineCode",{parentName:"li"},"bool")),(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("strong",{parentName:"li"},"\u9ed8\u8ba4\u503c"),": ",(0,d.mdx)("inlineCode",{parentName:"li"},"True"))),(0,d.mdx)("p",null,"\u662f\u5426\u81ea\u52a8\u542f\u52a8 ",(0,d.mdx)("inlineCode",{parentName:"p"},"scheduler")," \uff0c\u82e5\u4e0d\u542f\u52a8\u9700\u8981\u81ea\u884c\u8c03\u7528 ",(0,d.mdx)("inlineCode",{parentName:"p"},"scheduler.start()"),"\u3002"),(0,d.mdx)("h4",{id:"apscheduler_log_level"},"apscheduler_log_level"),(0,d.mdx)("ul",null,(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("strong",{parentName:"li"},"\u7c7b\u578b"),": ",(0,d.mdx)("inlineCode",{parentName:"li"},"int")),(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("strong",{parentName:"li"},"\u9ed8\u8ba4\u503c"),": ",(0,d.mdx)("inlineCode",{parentName:"li"},"30"))),(0,d.mdx)("p",null,"apscheduler \u8f93\u51fa\u7684\u65e5\u5fd7\u7b49\u7ea7"),(0,d.mdx)("ul",null,(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("inlineCode",{parentName:"li"},"WARNING")," = ",(0,d.mdx)("inlineCode",{parentName:"li"},"30")," (\u9ed8\u8ba4)"),(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("inlineCode",{parentName:"li"},"INFO")," = 
",(0,d.mdx)("inlineCode",{parentName:"li"},"20")),(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("inlineCode",{parentName:"li"},"DEBUG")," = ",(0,d.mdx)("inlineCode",{parentName:"li"},"10")," (\u53ea\u6709\u5728\u5f00\u542f nonebot \u7684 debug \u6a21\u5f0f\u624d\u4f1a\u663e\u793a debug \u65e5\u5fd7)")),(0,d.mdx)("h4",{id:"apscheduler_config"},"apscheduler_config"),(0,d.mdx)("ul",null,(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("strong",{parentName:"li"},"\u7c7b\u578b"),": ",(0,d.mdx)("inlineCode",{parentName:"li"},"dict")),(0,d.mdx)("li",{parentName:"ul"},(0,d.mdx)("strong",{parentName:"li"},"\u9ed8\u8ba4\u503c"),": ",(0,d.mdx)("inlineCode",{parentName:"li"},'{ "apscheduler.timezone": "Asia/Shanghai" }'))),(0,d.mdx)("p",null,(0,d.mdx)("inlineCode",{parentName:"p"},"apscheduler")," \u7684\u76f8\u5173\u914d\u7f6e\u3002\u53c2\u8003",(0,d.mdx)("a",{parentName:"p",href:"https://apscheduler.readthedocs.io/en/latest/userguide.html#scheduler-config"},"\u914d\u7f6e\u8c03\u5ea6\u5668"),", ",(0,d.mdx)("a",{parentName:"p",href:"https://apscheduler.readthedocs.io/en/latest/modules/schedulers/base.html#apscheduler.schedulers.base.BaseScheduler"},"\u914d\u7f6e\u53c2\u6570")),(0,d.mdx)("p",null,"\u914d\u7f6e\u9700\u8981\u5305\u542b ",(0,d.mdx)("inlineCode",{parentName:"p"},"apscheduler.")," \u4f5c\u4e3a\u524d\u7f00\uff0c\u4f8b\u5982 ",(0,d.mdx)("inlineCode",{parentName:"p"},"apscheduler.timezone"),"\u3002"))}s.isMDXComponent=!0}}]); | PypiClean |
/retro_data_structures-0.23.0-py3-none-any.whl/retro_data_structures/properties/prime_remastered/objects/PointOfInterestMP1.py | import dataclasses
import struct
import typing
from retro_data_structures.game_check import Game
from retro_data_structures.properties.base_property import BaseProperty
import base64
@dataclasses.dataclass()
class PointOfInterestMP1(BaseProperty):
unknown_properties: dict[int, bytes] = dataclasses.field(default_factory=dict)
@classmethod
def game(cls) -> Game:
return Game.PRIME_REMASTER
@classmethod
def from_stream(cls, data: typing.BinaryIO, size: typing.Optional[int] = None, default_override: typing.Optional[dict] = None):
property_count = struct.unpack("<H", data.read(2))[0]
if default_override is None and (result := _fast_decode(data, property_count)) is not None:
return result
present_fields = default_override or {}
present_fields["unknown_properties"] = {}
for _ in range(property_count):
property_id, property_size = struct.unpack("<LH", data.read(6))
start = data.tell()
try:
property_name, decoder = _property_decoder[property_id]
present_fields[property_name] = decoder(data, property_size)
except KeyError:
present_fields["unknown_properties"][property_id] = data.read(property_size)
assert data.tell() - start == property_size
return cls(**present_fields)
def to_stream(self, data: typing.BinaryIO, default_override: typing.Optional[dict] = None):
default_override = default_override or {}
data.write(struct.pack("<H", 0 + len(self.unknown_properties)))
for property_id, property_data in self.unknown_properties.items():
data.write(struct.pack("<LH", property_id, len(property_data)))
data.write(property_data)
@classmethod
def from_json(cls, data: dict):
return cls(
unknown_properties={
int(property_id, 16): base64.b64decode(property_data)
for property_id, property_data in data["unknown_properties"].items()
},
)
def to_json(self) -> dict:
return {
'unknown_properties': {
                hex(property_id): base64.b64encode(property_data).decode("utf-8")
for property_id, property_data in self.unknown_properties.items()
}
}
_FAST_FORMAT = None
_FAST_IDS = ()
def _fast_decode(data: typing.BinaryIO, property_count: int) -> typing.Optional[PointOfInterestMP1]:
if property_count != 0:
return None
global _FAST_FORMAT
if _FAST_FORMAT is None:
_FAST_FORMAT = struct.Struct('<')
before = data.tell()
dec = _FAST_FORMAT.unpack(data.read(0))
if () != _FAST_IDS:
data.seek(before)
return None
return PointOfInterestMP1(
)
_property_decoder: typing.Dict[int, typing.Tuple[str, typing.Callable[[typing.BinaryIO, int], typing.Any]]] = {
} | PypiClean |
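# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): round-tripping a
# PointOfInterestMP1 through its binary property stream. The import path
# follows the wheel layout shown above; the property id and payload are
# made-up illustrative values.
# ---------------------------------------------------------------------------
import io
from retro_data_structures.properties.prime_remastered.objects.PointOfInterestMP1 import (
    PointOfInterestMP1,
)

poi = PointOfInterestMP1(unknown_properties={0x1234ABCD: b"\x01\x02"})
buffer = io.BytesIO()
poi.to_stream(buffer)          # writes the property count, then id/size/payload triples

buffer.seek(0)
parsed = PointOfInterestMP1.from_stream(buffer)
assert parsed.unknown_properties == poi.unknown_properties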
/cdk_cloudformation_datadog_monitors_downtime-3.1.0a7-py3-none-any.whl/cdk_cloudformation_datadog_monitors_downtime/__init__.py | import abc
import builtins
import datetime
import enum
import typing
import jsii
import publication
import typing_extensions
from typeguard import check_type
from ._jsii import *
import aws_cdk as _aws_cdk_ceddda9d
import constructs as _constructs_77d1e7e8
class CfnDowntime(
_aws_cdk_ceddda9d.CfnResource,
metaclass=jsii.JSIIMeta,
jsii_type="@cdk-cloudformation/datadog-monitors-downtime.CfnDowntime",
):
'''A CloudFormation ``Datadog::Monitors::Downtime``.
:cloudformationResource: Datadog::Monitors::Downtime
:link: http://unknown-url
'''
def __init__(
self,
scope_: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
scope: typing.Sequence[builtins.str],
disabled: typing.Optional[builtins.bool] = None,
end: typing.Optional[jsii.Number] = None,
message: typing.Optional[builtins.str] = None,
monitor_id: typing.Optional[jsii.Number] = None,
monitor_tags: typing.Optional[typing.Sequence[builtins.str]] = None,
start: typing.Optional[jsii.Number] = None,
timezone: typing.Optional[builtins.str] = None,
) -> None:
'''Create a new ``Datadog::Monitors::Downtime``.
:param scope_: - scope in which this resource is defined.
:param id: - scoped id of the resource.
:param scope: The scope(s) to which the downtime applies.
:param disabled: Whether or not this downtime is disabled.
:param end: POSIX timestamp to end the downtime. If not provided, the downtime is in effect indefinitely (i.e. until you cancel it).
:param message: Message on the downtime.
:param monitor_id: A single monitor to which the downtime applies. If not provided, the downtime applies to all monitors.
:param monitor_tags: A comma-separated list of monitor tags, to which the downtime applies. The resulting downtime applies to monitors that match ALL provided monitor tags.
:param start: POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created.
:param timezone: The timezone for the downtime.
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__401b47c4e5e781ce81560cef6085e141285dc5ae53879fedb7a3d3e375b987ad)
check_type(argname="argument scope_", value=scope_, expected_type=type_hints["scope_"])
check_type(argname="argument id", value=id, expected_type=type_hints["id"])
props = CfnDowntimeProps(
scope=scope,
disabled=disabled,
end=end,
message=message,
monitor_id=monitor_id,
monitor_tags=monitor_tags,
start=start,
timezone=timezone,
)
jsii.create(self.__class__, self, [scope_, id, props])
@jsii.python.classproperty
@jsii.member(jsii_name="CFN_RESOURCE_TYPE_NAME")
def CFN_RESOURCE_TYPE_NAME(cls) -> builtins.str:
'''The CloudFormation resource type name for this resource class.'''
return typing.cast(builtins.str, jsii.sget(cls, "CFN_RESOURCE_TYPE_NAME"))
@builtins.property
@jsii.member(jsii_name="attrActive")
def attr_active(self) -> _aws_cdk_ceddda9d.IResolvable:
'''Attribute ``Datadog::Monitors::Downtime.Active``.
:link: http://unknown-url
'''
return typing.cast(_aws_cdk_ceddda9d.IResolvable, jsii.get(self, "attrActive"))
@builtins.property
@jsii.member(jsii_name="attrCanceled")
def attr_canceled(self) -> jsii.Number:
'''Attribute ``Datadog::Monitors::Downtime.Canceled``.
:link: http://unknown-url
'''
return typing.cast(jsii.Number, jsii.get(self, "attrCanceled"))
@builtins.property
@jsii.member(jsii_name="attrCreatorId")
def attr_creator_id(self) -> jsii.Number:
'''Attribute ``Datadog::Monitors::Downtime.CreatorId``.
:link: http://unknown-url
'''
return typing.cast(jsii.Number, jsii.get(self, "attrCreatorId"))
@builtins.property
@jsii.member(jsii_name="attrDowntimeType")
def attr_downtime_type(self) -> jsii.Number:
'''Attribute ``Datadog::Monitors::Downtime.DowntimeType``.
:link: http://unknown-url
'''
return typing.cast(jsii.Number, jsii.get(self, "attrDowntimeType"))
@builtins.property
@jsii.member(jsii_name="attrId")
def attr_id(self) -> jsii.Number:
'''Attribute ``Datadog::Monitors::Downtime.Id``.
:link: http://unknown-url
'''
return typing.cast(jsii.Number, jsii.get(self, "attrId"))
@builtins.property
@jsii.member(jsii_name="attrParentId")
def attr_parent_id(self) -> jsii.Number:
'''Attribute ``Datadog::Monitors::Downtime.ParentId``.
:link: http://unknown-url
'''
return typing.cast(jsii.Number, jsii.get(self, "attrParentId"))
@builtins.property
@jsii.member(jsii_name="attrUpdaterId")
def attr_updater_id(self) -> jsii.Number:
'''Attribute ``Datadog::Monitors::Downtime.UpdaterId``.
:link: http://unknown-url
'''
return typing.cast(jsii.Number, jsii.get(self, "attrUpdaterId"))
@builtins.property
@jsii.member(jsii_name="props")
def props(self) -> "CfnDowntimeProps":
'''Resource props.'''
return typing.cast("CfnDowntimeProps", jsii.get(self, "props"))
@jsii.data_type(
jsii_type="@cdk-cloudformation/datadog-monitors-downtime.CfnDowntimeProps",
jsii_struct_bases=[],
name_mapping={
"scope": "scope",
"disabled": "disabled",
"end": "end",
"message": "message",
"monitor_id": "monitorId",
"monitor_tags": "monitorTags",
"start": "start",
"timezone": "timezone",
},
)
class CfnDowntimeProps:
def __init__(
self,
*,
scope: typing.Sequence[builtins.str],
disabled: typing.Optional[builtins.bool] = None,
end: typing.Optional[jsii.Number] = None,
message: typing.Optional[builtins.str] = None,
monitor_id: typing.Optional[jsii.Number] = None,
monitor_tags: typing.Optional[typing.Sequence[builtins.str]] = None,
start: typing.Optional[jsii.Number] = None,
timezone: typing.Optional[builtins.str] = None,
) -> None:
'''Datadog Monitors Downtime 3.1.0.
:param scope: The scope(s) to which the downtime applies.
:param disabled: Whether or not this downtime is disabled.
:param end: POSIX timestamp to end the downtime. If not provided, the downtime is in effect indefinitely (i.e. until you cancel it).
:param message: Message on the downtime.
:param monitor_id: A single monitor to which the downtime applies. If not provided, the downtime applies to all monitors.
:param monitor_tags: A comma-separated list of monitor tags, to which the downtime applies. The resulting downtime applies to monitors that match ALL provided monitor tags.
:param start: POSIX timestamp to start the downtime. If not provided, the downtime starts the moment it is created.
:param timezone: The timezone for the downtime.
:schema: CfnDowntimeProps
'''
if __debug__:
type_hints = typing.get_type_hints(_typecheckingstub__dc9758a4ea882bbedf1786d023260c8d3059e66155da2abe67683f21106408ca)
check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"])
check_type(argname="argument disabled", value=disabled, expected_type=type_hints["disabled"])
check_type(argname="argument end", value=end, expected_type=type_hints["end"])
check_type(argname="argument message", value=message, expected_type=type_hints["message"])
check_type(argname="argument monitor_id", value=monitor_id, expected_type=type_hints["monitor_id"])
check_type(argname="argument monitor_tags", value=monitor_tags, expected_type=type_hints["monitor_tags"])
check_type(argname="argument start", value=start, expected_type=type_hints["start"])
check_type(argname="argument timezone", value=timezone, expected_type=type_hints["timezone"])
self._values: typing.Dict[builtins.str, typing.Any] = {
"scope": scope,
}
if disabled is not None:
self._values["disabled"] = disabled
if end is not None:
self._values["end"] = end
if message is not None:
self._values["message"] = message
if monitor_id is not None:
self._values["monitor_id"] = monitor_id
if monitor_tags is not None:
self._values["monitor_tags"] = monitor_tags
if start is not None:
self._values["start"] = start
if timezone is not None:
self._values["timezone"] = timezone
@builtins.property
def scope(self) -> typing.List[builtins.str]:
'''The scope(s) to which the downtime applies.
:schema: CfnDowntimeProps#Scope
'''
result = self._values.get("scope")
assert result is not None, "Required property 'scope' is missing"
return typing.cast(typing.List[builtins.str], result)
@builtins.property
def disabled(self) -> typing.Optional[builtins.bool]:
'''Whether or not this downtime is disabled.
:schema: CfnDowntimeProps#Disabled
'''
result = self._values.get("disabled")
return typing.cast(typing.Optional[builtins.bool], result)
@builtins.property
def end(self) -> typing.Optional[jsii.Number]:
'''POSIX timestamp to end the downtime.
If not provided, the downtime is in effect indefinitely (i.e. until you cancel it).
:schema: CfnDowntimeProps#End
'''
result = self._values.get("end")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def message(self) -> typing.Optional[builtins.str]:
'''Message on the downtime.
:schema: CfnDowntimeProps#Message
'''
result = self._values.get("message")
return typing.cast(typing.Optional[builtins.str], result)
@builtins.property
def monitor_id(self) -> typing.Optional[jsii.Number]:
'''A single monitor to which the downtime applies.
If not provided, the downtime applies to all monitors.
:schema: CfnDowntimeProps#MonitorId
'''
result = self._values.get("monitor_id")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def monitor_tags(self) -> typing.Optional[typing.List[builtins.str]]:
'''A comma-separated list of monitor tags, to which the downtime applies.
The resulting downtime applies to monitors that match ALL provided monitor tags.
:schema: CfnDowntimeProps#MonitorTags
'''
result = self._values.get("monitor_tags")
return typing.cast(typing.Optional[typing.List[builtins.str]], result)
@builtins.property
def start(self) -> typing.Optional[jsii.Number]:
'''POSIX timestamp to start the downtime.
If not provided, the downtime starts the moment it is created.
:schema: CfnDowntimeProps#Start
'''
result = self._values.get("start")
return typing.cast(typing.Optional[jsii.Number], result)
@builtins.property
def timezone(self) -> typing.Optional[builtins.str]:
'''The timezone for the downtime.
:schema: CfnDowntimeProps#Timezone
'''
result = self._values.get("timezone")
return typing.cast(typing.Optional[builtins.str], result)
def __eq__(self, rhs: typing.Any) -> builtins.bool:
return isinstance(rhs, self.__class__) and rhs._values == self._values
def __ne__(self, rhs: typing.Any) -> builtins.bool:
return not (rhs == self)
def __repr__(self) -> str:
return "CfnDowntimeProps(%s)" % ", ".join(
k + "=" + repr(v) for k, v in self._values.items()
)
__all__ = [
"CfnDowntime",
"CfnDowntimeProps",
]
publication.publish()
def _typecheckingstub__401b47c4e5e781ce81560cef6085e141285dc5ae53879fedb7a3d3e375b987ad(
scope_: _constructs_77d1e7e8.Construct,
id: builtins.str,
*,
scope: typing.Sequence[builtins.str],
disabled: typing.Optional[builtins.bool] = None,
end: typing.Optional[jsii.Number] = None,
message: typing.Optional[builtins.str] = None,
monitor_id: typing.Optional[jsii.Number] = None,
monitor_tags: typing.Optional[typing.Sequence[builtins.str]] = None,
start: typing.Optional[jsii.Number] = None,
timezone: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass
def _typecheckingstub__dc9758a4ea882bbedf1786d023260c8d3059e66155da2abe67683f21106408ca(
*,
scope: typing.Sequence[builtins.str],
disabled: typing.Optional[builtins.bool] = None,
end: typing.Optional[jsii.Number] = None,
message: typing.Optional[builtins.str] = None,
monitor_id: typing.Optional[jsii.Number] = None,
monitor_tags: typing.Optional[typing.Sequence[builtins.str]] = None,
start: typing.Optional[jsii.Number] = None,
timezone: typing.Optional[builtins.str] = None,
) -> None:
"""Type checking stubs"""
pass | PypiClean |
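# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): declaring a
# Datadog::Monitors::Downtime resource inside a CDK app. The stack name,
# scope tag, and monitor tags are illustrative values only.
# ---------------------------------------------------------------------------
import aws_cdk as cdk
from constructs import Construct
from cdk_cloudformation_datadog_monitors_downtime import CfnDowntime


class MaintenanceStack(cdk.Stack):
    def __init__(self, scope: Construct, construct_id: str, **kwargs) -> None:
        super().__init__(scope, construct_id, **kwargs)
        # Mute all monitors tagged service:checkout in the staging environment.
        CfnDowntime(
            self, "WeekendDowntime",
            scope=["env:staging"],
            message="Scheduled maintenance window",
            monitor_tags=["service:checkout"],
            timezone="UTC",
        )


app = cdk.App()
MaintenanceStack(app, "maintenance-stack")
app.synth()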
/distributions_lex-0.1.tar.gz/distributions_lex-0.1/distributions_lex/Binomialdistribution.py | import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Binomial(Distribution):
""" Binomial distribution class for calculating and
visualizing a Binomial distribution.
Attributes:
mean (float) representing the mean value of the distribution
stdev (float) representing the standard deviation of the distribution
data_list (list of floats) a list of floats to be extracted from the data file
p (float) representing the probability of an event occurring
n (int) number of trials
"""
def __init__(self, prob=.5, size=20):
self.n = size
self.p = prob
Distribution.__init__(self, self.calculate_mean(), self.calculate_stdev())
def calculate_mean(self):
"""Function to calculate the mean from p and n
Args:
None
Returns:
float: mean of the data set
"""
self.mean = self.p * self.n
return self.mean
def calculate_stdev(self):
"""Function to calculate the standard deviation from p and n.
Args:
None
Returns:
float: standard deviation of the data set
"""
self.stdev = math.sqrt(self.n * self.p * (1 - self.p))
return self.stdev
def replace_stats_with_data(self):
"""Function to calculate p and n from the data set
Args:
None
Returns:
float: the p value
float: the n value
"""
self.n = len(self.data)
self.p = 1.0 * sum(self.data) / len(self.data)
self.mean = self.calculate_mean()
        self.stdev = self.calculate_stdev()
        return self.p, self.n
def plot_bar(self):
"""Function to output a histogram of the instance variable data using
matplotlib pyplot library.
Args:
None
Returns:
None
"""
plt.bar(x = ['0', '1'], height = [(1 - self.p) * self.n, self.p * self.n])
plt.title('Bar Chart of Data')
plt.xlabel('outcome')
plt.ylabel('count')
def pdf(self, k):
"""Probability density function calculator for the gaussian distribution.
Args:
x (float): point for calculating the probability density function
Returns:
float: probability density function output
"""
a = math.factorial(self.n) / (math.factorial(k) * (math.factorial(self.n - k)))
b = (self.p ** k) * (1 - self.p) ** (self.n - k)
return a * b
def plot_bar_pdf(self):
"""Function to plot the pdf of the binomial distribution
Args:
None
Returns:
list: x values for the pdf plot
list: y values for the pdf plot
"""
x = []
y = []
# calculate the x values to visualize
for i in range(self.n + 1):
x.append(i)
y.append(self.pdf(i))
# make the plots
plt.bar(x, y)
plt.title('Distribution of Outcomes')
plt.ylabel('Probability')
plt.xlabel('Outcome')
plt.show()
return x, y
def __add__(self, other):
"""Function to add together two Binomial distributions with equal p
Args:
other (Binomial): Binomial instance
Returns:
Binomial: Binomial distribution
"""
try:
assert self.p == other.p, 'p values are not equal'
except AssertionError as error:
raise
result = Binomial()
result.n = self.n + other.n
result.p = self.p
result.calculate_mean()
result.calculate_stdev()
return result
def __repr__(self):
"""Function to output the characteristics of the Binomial instance
Args:
None
Returns:
            string: characteristics of the Binomial instance
"""
return "mean {}, standard deviation {}, p {}, n {}".\
format(self.mean, self.stdev, self.p, self.n) | PypiClean |
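# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): basic use of the
# Binomial class defined above. The module path mirrors the package layout.
# ---------------------------------------------------------------------------
from distributions_lex.Binomialdistribution import Binomial

coin = Binomial(prob=0.4, size=25)
print(coin)              # mean 10.0, standard deviation ~2.449, p 0.4, n 25
print(coin.pdf(10))      # probability of exactly 10 successes in 25 trials

# Distributions with the same p can be added; the trial counts are summed.
combined = coin + Binomial(prob=0.4, size=60)
print(combined.n, combined.p)   # 85 0.4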
/pyocd_hx-0.0.2.tar.gz/pyocd_hx-0.0.2/pyocd/core/options_manager.py |
import logging
from functools import partial
from typing import (Any, Callable, Dict, List, Mapping, NamedTuple, Optional)
from .options import OPTIONS_INFO
from ..utility.notification import Notifier
LOG = logging.getLogger(__name__)
class OptionChangeInfo(NamedTuple):
"""@brief Data for an option value change notification.
Instances of this class are used for the data attribute of the @ref
pyocd.utility.notification.Notification "Notification" sent to subscribers when an option's value
is changed.
An instance of this class has two attributes:
- `new_value`: The new, current value of the option.
- `old_value`: The previous value of the option.
"""
new_value: Any
old_value: Any
class OptionsManager(Notifier):
"""@brief Handles session option management for a session.
The options manager supports multiple layers of option priority. When an option's value is
accessed, the highest priority layer that contains a value for the option is used. This design
makes it easy to load options from multiple sources. The default value specified for an option
in the OPTIONS_INFO dictionary provides a layer with an infinitely low priority.
Users can subscribe to notifications for changes to option values by calling the subscribe()
method. The notification events are the option names themselves. The source for notifications is
always the options manager instance. The notification data is an instance of OptionChangeInfo
with `new_value` and `old_value` attributes. If the option was not previously set, then the
old value is the option's default.
"""
LayerType = Mapping[str, Any]
_layers: List[Dict[str, Any]]
def __init__(self) -> None:
"""@brief Option manager constructor.
"""
super().__init__()
self._layers = []
def _update_layers(self, new_options: Optional[LayerType], update_operation: Callable[[LayerType], None]) -> None:
"""@brief Internal method to add a new layer dictionary.
@param self
@param new_options Dictionary of option values.
@param update_operation Callable to add the layer. Must accept a single parameter, which is
the filtered _new_options_ dictionary.
"""
if new_options is None:
return
filtered_options = self._convert_options(new_options)
previous_values = {name: self.get(name) for name in filtered_options.keys()}
update_operation(filtered_options)
new_values = {name: self.get(name) for name in filtered_options.keys()}
self._notify_changes(previous_values, new_values)
def add_front(self, new_options: Optional[LayerType]) -> None:
"""@brief Add a new highest priority layer of option values.
@param self
@param new_options Dictionary of option values.
"""
self._update_layers(new_options, partial(self._layers.insert, 0))
def add_back(self, new_options: Optional[LayerType]) -> None:
"""@brief Add a new lowest priority layer of option values.
@param self
@param new_options Dictionary of option values.
"""
self._update_layers(new_options, self._layers.append)
def _convert_options(self, new_options: LayerType) -> LayerType:
"""@brief Prepare a dictionary of session options for use by the manager.
1. Strip dictionary entries with a value of None.
2. Replace double-underscores ("__") with a dot (".").
3. Convert option names to all-lowercase.
"""
output = {}
for name, value in new_options.items():
if value is None:
continue
else:
name = name.replace("__", ".").lower()
output[name] = value
return output
def is_set(self, key: str) -> bool:
"""@brief Return whether a value is set for the specified option.
This method returns True as long as any layer has a value set for the option, even if the
value is the same as the default value. If the option is not set in any layer, then False is
returned regardless of whether the default value is None.
"""
for layer in self._layers:
if key in layer:
return True
return False
def get_default(self, key: str) -> Any:
"""@brief Return the default value for the specified option."""
if key in OPTIONS_INFO:
return OPTIONS_INFO[key].default
else:
return None
def get(self, key: str) -> Any:
"""@brief Return the highest priority value for the option, or its default."""
for layer in self._layers:
if key in layer:
return layer[key]
return self.get_default(key)
def set(self, key: str, value: Any) -> None:
"""@brief Set an option in the current highest priority layer."""
self.update({key: value})
def update(self, new_options: LayerType) -> None:
"""@brief Set multiple options in the current highest priority layer."""
filtered_options = self._convert_options(new_options)
previous_values = {name: self.get(name) for name in filtered_options.keys()}
self._layers[0].update(filtered_options)
self._notify_changes(previous_values, filtered_options)
def _notify_changes(self, previous: LayerType, options: LayerType) -> None:
"""@brief Send notifications that the specified options have changed."""
for name, new_value in options.items():
previous_value = previous[name]
if new_value != previous_value:
self.notify(name, data=OptionChangeInfo(new_value, previous_value))
def __contains__(self, key: str) -> bool:
"""@brief Returns whether the named option has a non-default value."""
return self.is_set(key)
def __getitem__(self, key: str) -> Any:
"""@brief Return the highest priority value for the option, or its default."""
return self.get(key)
def __setitem__(self, key: str, value: Any) -> None:
"""@brief Set an option in the current highest priority layer."""
self.set(key, value) | PypiClean |
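# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): layering option
# values and observing change notifications. It assumes the Notifier base
# class exposes subscribe(callback, event) and that notifications carry the
# OptionChangeInfo described above; the "frequency" option name is used only
# as an example.
# ---------------------------------------------------------------------------
from pyocd.core.options_manager import OptionsManager

mgr = OptionsManager()
mgr.add_back({"frequency": 1_000_000})    # low-priority layer, e.g. config-file defaults
mgr.add_front({"frequency": 4_000_000})   # higher-priority layer, e.g. command-line overrides

def on_change(note):
    print(note.event, note.data.old_value, "->", note.data.new_value)

mgr.subscribe(on_change, "frequency")
mgr.set("frequency", 8_000_000)           # updates the highest-priority layer and notifies
print(mgr.get("frequency"))               # 8000000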
/address-net-1.0.tar.gz/address-net-1.0/addressnet/dataset.py | from typing import Optional, Union, Callable, List
from collections import OrderedDict
import random
import tensorflow as tf
import numpy as np
import string
import addressnet.lookups as lookups
from addressnet.typo import generate_typo
# Schema used to decode data from the TFRecord file
_features = OrderedDict([
('building_name', tf.FixedLenFeature([], tf.string)),
('lot_number_prefix', tf.FixedLenFeature([], tf.string)),
('lot_number', tf.FixedLenFeature([], tf.string)),
('lot_number_suffix', tf.FixedLenFeature([], tf.string)),
('flat_number_prefix', tf.FixedLenFeature([], tf.string)),
('flat_number_suffix', tf.FixedLenFeature([], tf.string)),
('level_number_prefix', tf.FixedLenFeature([], tf.string)),
('level_number_suffix', tf.FixedLenFeature([], tf.string)),
('number_first_prefix', tf.FixedLenFeature([], tf.string)),
('number_first_suffix', tf.FixedLenFeature([], tf.string)),
('number_last_prefix', tf.FixedLenFeature([], tf.string)),
('number_last_suffix', tf.FixedLenFeature([], tf.string)),
('street_name', tf.FixedLenFeature([], tf.string)),
('locality_name', tf.FixedLenFeature([], tf.string)),
('postcode', tf.FixedLenFeature([], tf.string)),
('flat_number', tf.FixedLenFeature([], tf.int64)),
('level_number', tf.FixedLenFeature([], tf.int64)),
('number_first', tf.FixedLenFeature([], tf.int64)),
('number_last', tf.FixedLenFeature([], tf.int64)),
('flat_type', tf.FixedLenFeature([], tf.int64)),
('level_type', tf.FixedLenFeature([], tf.int64)),
('street_type_code', tf.FixedLenFeature([], tf.int64)),
('street_suffix_code', tf.FixedLenFeature([], tf.int64)),
('state_abbreviation', tf.FixedLenFeature([], tf.int64)),
('latitude', tf.FixedLenFeature([], tf.float32)),
('longitude', tf.FixedLenFeature([], tf.float32))
])
# List of fields used as labels in the training data
labels_list = [
'building_name', # 1
'level_number_prefix', # 2
'level_number', # 3
'level_number_suffix', # 4
'level_type', # 5
'flat_number_prefix', # 6
'flat_number', # 7
'flat_number_suffix', # 8
'flat_type', # 9
'number_first_prefix', # 10
'number_first', # 11
'number_first_suffix', # 12
'number_last_prefix', # 13
'number_last', # 14
'number_last_suffix', # 15
'street_name', # 16
'street_suffix_code', # 17
'street_type_code', # 18
'locality_name', # 19
'state_abbreviation', # 20
'postcode' # 21
]
# Number of labels in total (+1 for the blank category)
n_labels = len(labels_list) + 1
# Allowable characters for the encoded representation
vocab = list(string.digits + string.ascii_lowercase + string.punctuation + string.whitespace)
def vocab_lookup(characters: str) -> (int, np.ndarray):
"""
Converts a string into a list of vocab indices
:param characters: the string to convert
:return: the string length and an array of vocab indices
"""
result = list()
for c in characters.lower():
try:
result.append(vocab.index(c) + 1)
except ValueError:
result.append(0)
return len(characters), np.array(result, dtype=np.int64)
def decode_data(record: List[Union[str, int, float]]) -> Union[str, int, float]:
"""
Decodes a record from the tfrecord file by converting all strings to UTF-8 encoding, and any numeric field with
a value of -1 to None.
:param record: the record to decode
:return: an iterator for yielding the decoded fields
"""
for item in record:
try:
# Attempt to treat the item in the record as a string
yield item.decode("UTF-8")
except AttributeError:
# Treat the item as a number and encode -1 as None (see generate_tf_records.py)
yield item if item != -1 else None
def labels(text: Union[str, int], field_name: Optional[str], mutate: bool = True) -> (str, np.ndarray):
"""
Generates a numpy matrix labelling each character by field type. Strings have artificial typos introduced if
mutate == True
:param text: the text to label
:param field_name: the name of the field to which the text belongs, or None if the label is blank
:param mutate: introduce artificial typos
:return: the original text and the numpy matrix of labels
"""
# Ensure the input is a string, encoding None to an empty to string
if text is None:
text = ''
else:
# Introduce artificial typos if mutate == True
text = generate_typo(str(text)) if mutate else str(text)
    labels_matrix = np.zeros((len(text), n_labels), dtype=bool)
# If no field is supplied, then encode the label using the blank category
if field_name is None:
labels_matrix[:, 0] = True
else:
labels_matrix[:, labels_list.index(field_name) + 1] = True
return text, labels_matrix
def random_separator(min_length: int = 1, max_length: int = 3, possible_sep_chars: Optional[str] = r",./\ ") -> str:
"""
Generates a space-padded separator of random length using a random character from possible_sep_chars
:param min_length: minimum length of the separator
:param max_length: maximum length of the separator
:param possible_sep_chars: string of possible characters to use for the separator
:return: the separator string
"""
chars = [" "] * random.randint(min_length, max_length)
if len(chars) > 0 and possible_sep_chars:
sep_char = random.choice(possible_sep_chars)
chars[random.randrange(len(chars))] = sep_char
return ''.join(chars)
def join_labels(lbls: [np.ndarray], sep: Union[str, Callable[..., str]] = " ") -> np.ndarray:
"""
Concatenates a series of label matrices with a separator
:param lbls: a list of numpy matrices
:param sep: the separator string or function that returns the sep string
:return: the concatenated labels
"""
if len(lbls) < 2:
return lbls
joined_labels = None
sep_str = None
# if `sep` is not a function, set the separator (`sep_str`) to `sep`, otherwise leave as None
if not callable(sep):
sep_str = sep
for l in lbls:
if joined_labels is None:
joined_labels = l
else:
# If `sep` is a function, call it on each iteration
if callable(sep):
sep_str = sep()
# Skip zero-length labels
if l.shape[0] == 0:
continue
elif sep_str is not None and len(sep_str) > 0 and joined_labels.shape[0] > 0:
# Join using sep_str if it's present and non-zero in length
joined_labels = np.concatenate([joined_labels, labels(sep_str, None, mutate=False)[1], l], axis=0)
else:
# Otherwise, directly concatenate the labels
joined_labels = np.concatenate([joined_labels, l], axis=0)
assert joined_labels is not None, "No labels were joined!"
assert joined_labels.shape[1] == n_labels, "The number of labels generated was unexpected: got %i but wanted %i" % (
joined_labels.shape[1], n_labels)
return joined_labels
def join_str_and_labels(parts: [(str, np.ndarray)], sep: Union[str, Callable[..., str]] = " ") -> (str, np.ndarray):
"""
Joins the strings and labels using the given separator
:param parts: a list of string/label tuples
:param sep: a string or function that returns the string to be used as a separator
:return: the joined string and labels
"""
# Keep only the parts with strings of length > 0
parts = [p for p in parts if len(p[0]) > 0]
# If there are no parts at all, return an empty string an array of shape (0, n_labels)
if len(parts) == 0:
return '', np.zeros((0, n_labels))
# If there's only one part, just give it back as-is
elif len(parts) == 1:
return parts[0]
# Pre-generate the separators - this is important if `sep` is a function returning non-deterministic results
n_sep = len(parts) - 1
if callable(sep):
seps = [sep() for _ in range(n_sep)]
else:
seps = [sep] * n_sep
seps += ['']
# Join the strings using the list of separators
strings = ''.join(sum([(s[0][0], s[1]) for s in zip(parts, seps)], ()))
# Join the labels using an iterator function
sep_iter = iter(seps)
lbls = join_labels([s[1] for s in parts], sep=lambda: next(sep_iter))
assert len(strings) == lbls.shape[0], "string length %i (%s), label length %i using sep %s" % (
len(strings), strings, lbls.shape[0], seps)
return strings, lbls
def choose(option1: Callable = lambda: None, option2: Callable = lambda: None):
"""
Randomly run either option 1 or option 2
:param option1: a possible function to run
:param option2: another possible function to run
:return: the result of the function
"""
if random.getrandbits(1):
return option1()
else:
return option2()
def synthesise_address(*record) -> (int, np.ndarray, np.ndarray):
"""
Uses the record information to construct a formatted address with labels. The addresses generated involve
semi-random permutations and corruptions to help avoid over-fitting.
:param record: the decoded item from the TFRecord file
:return: the address string length, encoded text and labels
"""
fields = dict(zip(_features.keys(), decode_data(record)))
# Generate the individual address components:
if fields['level_type'] > 0:
level = generate_level_number(fields['level_type'], fields['level_number_prefix'], fields['level_number'],
fields['level_number_suffix'])
else:
level = ('', np.zeros((0, n_labels)))
if fields['flat_type'] > 0:
flat_number = generate_flat_number(
fields['flat_type'], fields['flat_number_prefix'], fields['flat_number'], fields['flat_number_suffix'])
else:
flat_number = ('', np.zeros((0, n_labels)))
street_number = generate_street_number(fields['number_first_prefix'], fields['number_first'],
fields['number_first_suffix'], fields['number_last_prefix'],
fields['number_last'], fields['number_last_suffix'])
street = generate_street_name(fields['street_name'], fields['street_suffix_code'], fields['street_type_code'])
suburb = labels(fields['locality_name'], 'locality_name')
state = generate_state(fields['state_abbreviation'])
postcode = labels(fields['postcode'], 'postcode')
building_name = labels(fields['building_name'], 'building_name')
# Begin composing the formatted address, building up the `parts` variable...
suburb_state_postcode = list()
# Keep the suburb?
choose(lambda: suburb_state_postcode.append(suburb))
# Keep state?
choose(lambda: suburb_state_postcode.append(state))
# Keep postcode?
choose(lambda: suburb_state_postcode.append(postcode))
random.shuffle(suburb_state_postcode)
parts = [[building_name], [level]]
# Keep the street number? (If street number is dropped, the flat number is also dropped)
def keep_street_number():
# force flat number to be next to street number only if the flat number is only digits (i.e. does not have a
# flat type)
if flat_number[0].isdigit():
parts.append([flat_number, street_number, street])
else:
parts.append([flat_number])
parts.append([street_number, street])
choose(keep_street_number, lambda: parts.append([street]))
random.shuffle(parts)
# Suburb, state, postcode is always at the end of an address
parts.append(suburb_state_postcode)
# Flatten the address components into an unnested list
parts = sum(parts, [])
# Join each address component/label with a random separator
address, address_lbl = join_str_and_labels(parts, sep=lambda: random_separator(1, 3))
# Encode
length, text_encoded = vocab_lookup(address)
return length, text_encoded, address_lbl
def generate_state(state_abbreviation: int) -> (str, np.ndarray):
"""
Generates the string and labels for the state, randomly abbreviated
:param state_abbreviation: the state code
:return: string and labels
"""
state = lookups.lookup_state(state_abbreviation, reverse_lookup=True)
return labels(choose(lambda: lookups.expand_state(state), lambda: state), 'state_abbreviation')
def generate_level_number(level_type: int, level_number_prefix: str, level_number: int, level_number_suffix: str) -> (
str, np.ndarray):
"""
Generates the level number for the address
:param level_type: level type code
:param level_number_prefix: number prefix
:param level_number: level number
:param level_number_suffix: level number suffix
:return: string and labels
"""
level_type = labels(lookups.lookup_level_type(level_type, reverse_lookup=True), 'level_type')
# Decide whether to transform the level number
def do_transformation():
if not level_number_prefix and not level_number_suffix and level_type[0]:
# If there is no prefix/suffix, decide whether to convert to ordinal numbers (1st, 2nd, etc.)
def use_ordinal_numbers(lvl_num, lvl_type):
# Use ordinal words (first, second, third) or numbers (1st, 2nd, 3rd)?
lvl_num = choose(lambda: lookups.num2word(lvl_num, output='ordinal_words'),
lambda: lookups.num2word(lvl_num, output='ordinal'))
lvl_num = labels(lvl_num, 'level_number')
return join_str_and_labels([lvl_num, lvl_type],
sep=lambda: random_separator(1, 3, possible_sep_chars=None))
def use_cardinal_numbers(lvl_num, lvl_type):
# Treat level 1 as GROUND?
if lvl_num == 1:
lvl_num = choose(lambda: "GROUND", lambda: 1)
else:
lvl_num = lookups.num2word(lvl_num, output='cardinal')
lvl_num = labels(lvl_num, 'level_number')
return join_str_and_labels([lvl_type, lvl_num],
sep=lambda: random_separator(1, 3, possible_sep_chars=None))
return choose(lambda: use_ordinal_numbers(level_number, level_type),
lambda: use_cardinal_numbers(level_number, level_type))
transformed_value = choose(do_transformation)
if transformed_value:
return transformed_value
else:
level_number_prefix = labels(level_number_prefix, 'level_number_prefix')
level_number = labels(level_number, 'level_number')
level_number_suffix = labels(level_number_suffix, 'level_number_suffix')
return join_str_and_labels([level_type, level_number_prefix, level_number, level_number_suffix],
sep=lambda: random_separator(1, 3, possible_sep_chars=None))
def generate_flat_number(
flat_type: int, flat_number_prefix: str, flat_number: int, flat_number_suffix: str) -> (str, np.ndarray):
"""
Generates the flat number for the address
:param flat_type: flat type code
:param flat_number_prefix: number prefix
:param flat_number: number
:param flat_number_suffix: number suffix
:return: string and labels
"""
flat_type = labels(lookups.lookup_flat_type(flat_type, reverse_lookup=True), 'flat_type')
flat_number_prefix = labels(flat_number_prefix, 'flat_number_prefix')
flat_number = labels(flat_number, 'flat_number')
flat_number_suffix = labels(flat_number_suffix, 'flat_number_suffix')
flat_number = join_str_and_labels([flat_number_prefix, flat_number, flat_number_suffix],
sep=lambda: random_separator(0, 2, possible_sep_chars=None))
return choose(
lambda: join_str_and_labels([flat_type, flat_number], sep=random_separator(0, 2, possible_sep_chars=None)),
lambda: flat_number)
def generate_street_number(number_first_prefix: str, number_first: int, number_first_suffix,
number_last_prefix, number_last, number_last_suffix) -> (str, np.ndarray):
"""
Generates a street number using the prefix, suffix, first and last number components
:param number_first_prefix: prefix to the first street number
:param number_first: first street number
:param number_first_suffix: suffix to the first street number
:param number_last_prefix: prefix to the last street number
:param number_last: last street number
:param number_last_suffix: suffix to the last street number
:return: the street number
"""
number_first_prefix = labels(number_first_prefix, 'number_first_prefix')
number_first = labels(number_first, 'number_first')
number_first_suffix = labels(number_first_suffix, 'number_first_suffix')
number_last_prefix = labels(number_last_prefix, 'number_last_prefix')
number_last = labels(number_last, 'number_last')
number_last_suffix = labels(number_last_suffix, 'number_last_suffix')
a = join_str_and_labels([number_first_prefix, number_first, number_first_suffix],
lambda: random_separator(0, 2, possible_sep_chars=None))
b = join_str_and_labels([number_last_prefix, number_last, number_last_suffix],
lambda: random_separator(0, 2, possible_sep_chars=None))
return join_str_and_labels([a, b], sep=random_separator(1, 3, possible_sep_chars=r"---- \/"))
def generate_street_name(street_name: str, street_suffix_code: int, street_type_code: int) -> (str, np.ndarray):
"""
Generates a possible street name variation
:param street_name: the street's name
:param street_suffix_code: the street suffix code
:param street_type_code: the street type code
:return: string and labels
"""
street_name, street_name_lbl = labels(street_name, 'street_name')
street_type = lookups.lookup_street_type(street_type_code, reverse_lookup=True)
street_type = choose(lambda: lookups.abbreviate_street_type(street_type), lambda: street_type)
street_type, street_type_lbl = labels(street_type, 'street_type_code')
street_suffix = lookups.lookup_street_suffix(street_suffix_code, reverse_lookup=True)
street_suffix = choose(lambda: lookups.expand_street_type_suffix(street_suffix), lambda: street_suffix)
street_suffix, street_suffix_lbl = labels(street_suffix, 'street_suffix_code')
return choose(lambda: join_str_and_labels([
(street_name, street_name_lbl),
(street_suffix, street_suffix_lbl),
(street_type, street_type_lbl)
]), lambda: join_str_and_labels([
(street_name, street_name_lbl),
(street_type, street_type_lbl),
(street_suffix, street_suffix_lbl)
]))
def dataset(filenames: [str], batch_size: int = 10, shuffle_buffer: int = 1000, prefetch_buffer_size: int = 10000,
num_parallel_calls: int = 8) -> Callable:
"""
Creates a Tensorflow dataset and iterator operations
:param filenames: the tfrecord filenames
:param batch_size: training batch size
:param shuffle_buffer: shuffle buffer size
:param prefetch_buffer_size: size of the prefetch buffer
:param num_parallel_calls: number of parallel calls for the mapping functions
:return: the input_fn
"""
def input_fn() -> tf.data.Dataset:
ds = tf.data.TFRecordDataset(filenames, compression_type="GZIP")
ds = ds.shuffle(buffer_size=shuffle_buffer)
        ds = ds.map(lambda record: tf.parse_single_example(record, features=_features),
                    num_parallel_calls=num_parallel_calls)
ds = ds.map(
lambda record: tf.py_func(synthesise_address, [record[k] for k in _features.keys()],
[tf.int64, tf.int64, tf.bool],
stateful=False),
num_parallel_calls=num_parallel_calls
)
ds = ds.padded_batch(batch_size, ([], [None], [None, n_labels]))
ds = ds.map(
lambda _lengths, _encoded_text, _labels: ({'lengths': _lengths, 'encoded_text': _encoded_text}, _labels),
num_parallel_calls=num_parallel_calls
)
ds = ds.prefetch(buffer_size=prefetch_buffer_size)
return ds
return input_fn
def predict_input_fn(input_text: str) -> Callable:
"""
An input function for one prediction example
:param input_text: the input text
:return:
"""
def input_fn() -> tf.data.Dataset:
length, text = vocab_lookup(input_text)
text = np.expand_dims(text, 0)
length = np.array([length])
predict_ds = tf.data.Dataset.from_tensor_slices((length, text))
predict_ds = predict_ds.batch(1)
predict_ds = predict_ds.map(
lambda lengths, encoded_text: {'lengths': lengths, 'encoded_text': encoded_text}
)
return predict_ds
return input_fn | PypiClean |
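# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module): building the training
# input pipeline and inspecting one synthesised batch (TensorFlow 1.x graph
# mode, matching the tf.py_func/parse_single_example API used above).
# "addresses.tfrecord" is a placeholder for a real GZIP-compressed record file.
# ---------------------------------------------------------------------------
import tensorflow as tf
from addressnet.dataset import dataset

input_fn = dataset(["addresses.tfrecord"], batch_size=4, shuffle_buffer=100)
features, lbls = input_fn().make_one_shot_iterator().get_next()

with tf.Session() as sess:
    batch_features, batch_labels = sess.run([features, lbls])
    print(batch_features["lengths"])              # per-address character counts
    print(batch_features["encoded_text"].shape)   # (4, max_len) vocab indices
    print(batch_labels.shape)                     # (4, max_len, 22) one-hot labels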
/v1/model/extracted_data.py |
import pprint
import re
import six
class ExtractedData:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'math_info': 'MathInfo'
}
attribute_map = {
'math_info': 'math_info'
}
def __init__(self, math_info=None):
"""ExtractedData - a model defined in huaweicloud sdk"""
self._math_info = None
self.discriminator = None
if math_info is not None:
self.math_info = math_info
@property
def math_info(self):
"""Gets the math_info of this ExtractedData.
:return: The math_info of this ExtractedData.
:rtype: MathInfo
"""
return self._math_info
@math_info.setter
def math_info(self, math_info):
"""Sets the math_info of this ExtractedData.
:param math_info: The math_info of this ExtractedData.
:type: MathInfo
"""
self._math_info = math_info
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ExtractedData):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
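# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original module; the package prefix of
# this file's path is truncated above, so no absolute import is shown). It
# simply exercises the generated serialisation and equality helpers.
# ---------------------------------------------------------------------------
data = ExtractedData(math_info=None)
print(data.to_dict())            # {'math_info': None}
print(data == ExtractedData())   # True: equality compares the attribute dicts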
/rpi_torchvision-0.7.0-cp37-cp37m-linux_armv7l.whl/torchvision/datasets/sbd.py | import os
import shutil
from .vision import VisionDataset
import numpy as np
from PIL import Image
from .utils import download_url, verify_str_arg
from .voc import download_extract
class SBDataset(VisionDataset):
"""`Semantic Boundaries Dataset <http://home.bharathh.info/pubs/codes/SBD/download.html>`_
The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset.
.. note ::
Please note that the train and val splits included with this dataset are different from
the splits in the PASCAL VOC dataset. In particular some "train" images might be part of
VOC2012 val.
If you are interested in testing on VOC 2012 val, then use `image_set='train_noval'`,
which excludes all val images.
.. warning::
This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format.
Args:
root (string): Root directory of the Semantic Boundaries Dataset
image_set (string, optional): Select the image_set to use, ``train``, ``val`` or ``train_noval``.
Image set ``train_noval`` excludes VOC 2012 val images.
mode (string, optional): Select target type. Possible values 'boundaries' or 'segmentation'.
In case of 'boundaries', the target is an array of shape `[num_classes, H, W]`,
where `num_classes=20`.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transforms (callable, optional): A function/transform that takes input sample and its target as entry
and returns a transformed version. Input sample is PIL image and target is a numpy array
if `mode='boundaries'` or PIL image if `mode='segmentation'`.
"""
url = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz"
md5 = "82b4d87ceb2ed10f6038a1cba92111cb"
filename = "benchmark.tgz"
voc_train_url = "http://home.bharathh.info/pubs/codes/SBD/train_noval.txt"
voc_split_filename = "train_noval.txt"
voc_split_md5 = "79bff800c5f0b1ec6b21080a3c066722"
def __init__(self,
root,
image_set='train',
mode='boundaries',
download=False,
transforms=None):
try:
from scipy.io import loadmat
self._loadmat = loadmat
except ImportError:
raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: "
"pip install scipy")
super(SBDataset, self).__init__(root, transforms)
self.image_set = verify_str_arg(image_set, "image_set",
("train", "val", "train_noval"))
self.mode = verify_str_arg(mode, "mode", ("segmentation", "boundaries"))
self.num_classes = 20
sbd_root = self.root
image_dir = os.path.join(sbd_root, 'img')
mask_dir = os.path.join(sbd_root, 'cls')
if download:
download_extract(self.url, self.root, self.filename, self.md5)
extracted_ds_root = os.path.join(self.root, "benchmark_RELEASE", "dataset")
for f in ["cls", "img", "inst", "train.txt", "val.txt"]:
old_path = os.path.join(extracted_ds_root, f)
shutil.move(old_path, sbd_root)
download_url(self.voc_train_url, sbd_root, self.voc_split_filename,
self.voc_split_md5)
if not os.path.isdir(sbd_root):
raise RuntimeError('Dataset not found or corrupted.' +
' You can use download=True to download it')
split_f = os.path.join(sbd_root, image_set.rstrip('\n') + '.txt')
with open(os.path.join(split_f), "r") as f:
file_names = [x.strip() for x in f.readlines()]
self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
self.masks = [os.path.join(mask_dir, x + ".mat") for x in file_names]
assert (len(self.images) == len(self.masks))
self._get_target = self._get_segmentation_target \
if self.mode == "segmentation" else self._get_boundaries_target
def _get_segmentation_target(self, filepath):
mat = self._loadmat(filepath)
return Image.fromarray(mat['GTcls'][0]['Segmentation'][0])
def _get_boundaries_target(self, filepath):
mat = self._loadmat(filepath)
return np.concatenate([np.expand_dims(mat['GTcls'][0]['Boundaries'][0][i][0].toarray(), axis=0)
for i in range(self.num_classes)], axis=0)
def __getitem__(self, index):
img = Image.open(self.images[index]).convert('RGB')
target = self._get_target(self.masks[index])
if self.transforms is not None:
img, target = self.transforms(img, target)
return img, target
def __len__(self):
return len(self.images)
def extra_repr(self):
lines = ["Image set: {image_set}", "Mode: {mode}"]
return '\n'.join(lines).format(**self.__dict__) | PypiClean |
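# --- Usage sketch (added illustration, not part of the original module) ---
# Hedged example of loading the Semantic Boundaries Dataset described above.
# The root path is a placeholder; download=True fetches and rearranges the
# benchmark archive on first use, and `mode` picks between a per-class
# boundary array and a PIL segmentation mask as the target.
if __name__ == "__main__":
    sbd = SBDataset("/path/to/sbd", image_set="train_noval",
                    mode="segmentation", download=True)
    image, target = sbd[0]        # PIL image, PIL segmentation mask
    print(len(sbd), image.size, target.size)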
/iroin-python-pptx-0.6.20.tar.gz/iroin-python-pptx-0.6.20/pptx/shapes/freeform.py | from __future__ import absolute_import, division, print_function, unicode_literals
from pptx.compat import Sequence
from pptx.util import lazyproperty
class FreeformBuilder(Sequence):
"""Allows a freeform shape to be specified and created.
The initial pen position is provided on construction. From there, drawing
proceeds using successive calls to draw line segments. The freeform shape
may be closed by calling the :meth:`close` method.
A shape may have more than one contour, in which case overlapping areas
are "subtracted". A contour is a sequence of line segments beginning with
a "move-to" operation. A move-to operation is automatically inserted in
each new freeform; additional move-to ops can be inserted with the
`.move_to()` method.
"""
def __init__(self, shapes, start_x, start_y, x_scale, y_scale):
super(FreeformBuilder, self).__init__()
self._shapes = shapes
self._start_x = start_x
self._start_y = start_y
self._x_scale = x_scale
self._y_scale = y_scale
def __getitem__(self, idx):
return self._drawing_operations.__getitem__(idx)
def __iter__(self):
return self._drawing_operations.__iter__()
def __len__(self):
return self._drawing_operations.__len__()
@classmethod
def new(cls, shapes, start_x, start_y, x_scale, y_scale):
"""Return a new |FreeformBuilder| object.
The initial pen location is specified (in local coordinates) by
(*start_x*, *start_y*).
"""
return cls(shapes, int(round(start_x)), int(round(start_y)), x_scale, y_scale)
def add_line_segments(self, vertices, close=True):
"""Add a straight line segment to each point in *vertices*.
*vertices* must be an iterable of (x, y) pairs (2-tuples). Each x and
y value is rounded to the nearest integer before use. The optional
*close* parameter determines whether the resulting contour is
*closed* or left *open*.
Returns this |FreeformBuilder| object so it can be used in chained
calls.
"""
for x, y in vertices:
self._add_line_segment(x, y)
if close:
self._add_close()
return self
def convert_to_shape(self, origin_x=0, origin_y=0):
"""Return new freeform shape positioned relative to specified offset.
*origin_x* and *origin_y* locate the origin of the local coordinate
system in slide coordinates (EMU), perhaps most conveniently by use
of a |Length| object.
Note that this method may be called more than once to add multiple
shapes of the same geometry in different locations on the slide.
"""
sp = self._add_freeform_sp(origin_x, origin_y)
path = self._start_path(sp)
for drawing_operation in self:
drawing_operation.apply_operation_to(path)
return self._shapes._shape_factory(sp)
def move_to(self, x, y):
"""Move pen to (x, y) (local coordinates) without drawing line.
Returns this |FreeformBuilder| object so it can be used in chained
calls.
"""
self._drawing_operations.append(_MoveTo.new(self, x, y))
return self
@property
def shape_offset_x(self):
"""Return x distance of shape origin from local coordinate origin.
The returned integer represents the leftmost extent of the freeform
shape, in local coordinates. Note that the bounding box of the shape
need not start at the local origin.
"""
min_x = self._start_x
for drawing_operation in self:
if hasattr(drawing_operation, "x"):
min_x = min(min_x, drawing_operation.x)
return min_x
@property
def shape_offset_y(self):
"""Return y distance of shape origin from local coordinate origin.
The returned integer represents the topmost extent of the freeform
shape, in local coordinates. Note that the bounding box of the shape
need not start at the local origin.
"""
min_y = self._start_y
for drawing_operation in self:
if hasattr(drawing_operation, "y"):
min_y = min(min_y, drawing_operation.y)
return min_y
def _add_close(self):
"""Add a close |_Close| operation to the drawing sequence."""
self._drawing_operations.append(_Close.new())
def _add_freeform_sp(self, origin_x, origin_y):
"""Add a freeform `p:sp` element having no drawing elements.
*origin_x* and *origin_y* are specified in slide coordinates, and
represent the location of the local coordinates origin on the slide.
"""
spTree = self._shapes._spTree
return spTree.add_freeform_sp(
origin_x + self._left, origin_y + self._top, self._width, self._height
)
def _add_line_segment(self, x, y):
"""Add a |_LineSegment| operation to the drawing sequence."""
self._drawing_operations.append(_LineSegment.new(self, x, y))
@lazyproperty
def _drawing_operations(self):
"""Return the sequence of drawing operation objects for freeform."""
return []
@property
def _dx(self):
"""Return integer width of this shape's path in local units."""
min_x = max_x = self._start_x
for drawing_operation in self:
if hasattr(drawing_operation, "x"):
min_x = min(min_x, drawing_operation.x)
max_x = max(max_x, drawing_operation.x)
return max_x - min_x
@property
def _dy(self):
"""Return integer height of this shape's path in local units."""
min_y = max_y = self._start_y
for drawing_operation in self:
if hasattr(drawing_operation, "y"):
min_y = min(min_y, drawing_operation.y)
max_y = max(max_y, drawing_operation.y)
return max_y - min_y
@property
def _height(self):
"""Return vertical size of this shape's path in slide coordinates.
This value is based on the actual extents of the shape and does not
include any positioning offset.
"""
return int(round(self._dy * self._y_scale))
@property
def _left(self):
"""Return leftmost extent of this shape's path in slide coordinates.
Note that this value does not include any positioning offset; it
assumes the drawing (local) coordinate origin is at (0, 0) on the
slide.
"""
return int(round(self.shape_offset_x * self._x_scale))
def _local_to_shape(self, local_x, local_y):
"""Translate local coordinates point to shape coordinates.
Shape coordinates have the same unit as local coordinates, but are
offset such that the origin of the shape coordinate system (0, 0) is
located at the top-left corner of the shape bounding box.
"""
return (local_x - self.shape_offset_x, local_y - self.shape_offset_y)
def _start_path(self, sp):
"""Return a newly created `a:path` element added to *sp*.
The returned `a:path` element has an `a:moveTo` element representing
the shape starting point as its only child.
"""
path = sp.add_path(w=self._dx, h=self._dy)
path.add_moveTo(*self._local_to_shape(self._start_x, self._start_y))
return path
@property
def _top(self):
"""Return topmost extent of this shape's path in slide coordinates.
Note that this value does not include any positioning offset; it
assumes the drawing (local) coordinate origin is located at slide
coordinates (0, 0) (top-left corner of slide).
"""
return int(round(self.shape_offset_y * self._y_scale))
@property
def _width(self):
"""Return width of this shape's path in slide coordinates.
This value is based on the actual extents of the shape path and does
not include any positioning offset.
"""
return int(round(self._dx * self._x_scale))
class _BaseDrawingOperation(object):
"""Base class for freeform drawing operations.
A drawing operation has at least one location (x, y) in local
coordinates.
"""
def __init__(self, freeform_builder, x, y):
super(_BaseDrawingOperation, self).__init__()
self._freeform_builder = freeform_builder
self._x = x
self._y = y
def apply_operation_to(self, path):
"""Add the XML element(s) implementing this operation to *path*.
Must be implemented by each subclass.
"""
raise NotImplementedError("must be implemented by each subclass")
@property
def x(self):
"""Return the horizontal (x) target location of this operation.
The returned value is an integer in local coordinates.
"""
return self._x
@property
def y(self):
"""Return the vertical (y) target location of this operation.
The returned value is an integer in local coordinates.
"""
return self._y
class _Close(object):
"""Specifies adding a `<a:close/>` element to the current contour."""
@classmethod
def new(cls):
"""Return a new _Close object."""
return cls()
def apply_operation_to(self, path):
"""Add `a:close` element to *path*."""
return path.add_close()
class _LineSegment(_BaseDrawingOperation):
"""Specifies a straight line segment ending at the specified point."""
@classmethod
def new(cls, freeform_builder, x, y):
"""Return a new _LineSegment object ending at point *(x, y)*.
Both *x* and *y* are rounded to the nearest integer before use.
"""
return cls(freeform_builder, int(round(x)), int(round(y)))
def apply_operation_to(self, path):
"""Add `a:lnTo` element to *path* for this line segment.
Returns the `a:lnTo` element newly added to the path.
"""
return path.add_lnTo(
self._x - self._freeform_builder.shape_offset_x,
self._y - self._freeform_builder.shape_offset_y,
)
class _MoveTo(_BaseDrawingOperation):
"""Specifies a new pen position."""
@classmethod
def new(cls, freeform_builder, x, y):
"""Return a new _MoveTo object for move to point *(x, y)*.
Both *x* and *y* are rounded to the nearest integer before use.
"""
return cls(freeform_builder, int(round(x)), int(round(y)))
def apply_operation_to(self, path):
"""Add `a:moveTo` element to *path* for this line segment."""
return path.add_moveTo(
self._x - self._freeform_builder.shape_offset_x,
self._y - self._freeform_builder.shape_offset_y,
) | PypiClean |
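# --- Usage sketch (added illustration, not part of the original module) ---
# Hedged example of the workflow described in the FreeformBuilder docstring:
# start a builder at an initial pen position, add straight line segments
# (optionally closing the contour), then convert the recorded operations into
# a shape. `shapes` is assumed to be a slide's shape collection (slide.shapes)
# obtained elsewhere; the coordinates and the EMU-per-local-unit scales are
# placeholders.
def _draw_triangle(shapes):
    builder = FreeformBuilder.new(shapes, start_x=0, start_y=0,
                                  x_scale=9525.0, y_scale=9525.0)
    builder.add_line_segments([(100, 0), (50, 100)], close=True)
    return builder.convert_to_shape(origin_x=914400, origin_y=914400)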
/mlx9064x-driver-1.3.0.tar.gz/mlx9064x-driver-1.3.0/mlx/hw_i2c_hal.py | import struct
class HwI2cHalMlx90640:
support_buffer = False
sensor_type = None
def connect(self):
print("connect function not implemented")
def i2c_read(self, i2c_addr, addr, count=1):
print("i2c_read function not implemented")
return bytes([0] * count), 0
def i2c_write(self, i2c_addr, addr, data):
print("i2c_write function not implemented")
return 0
def get_sensor_type(self, i2c_addr):
sensor, stat = self.i2c_read(i2c_addr, 0x240A, 2)
sensor = struct.unpack(">H",sensor)[0]
self.sensor_type = (sensor & 0x40) >> 6
return self.sensor_type
def read_frame(self, i2c_addr):
# 1. wait until new data is available
# 2. read frame data
# 3. clear new data available bit.
# 1. wait until new data is available
new_data_available = False
sub_page = 0
while not new_data_available:
status_reg, status = self.i2c_read(i2c_addr, 0x8000, 2)
status_reg = struct.unpack(">H", status_reg[0:2])[0]
if status_reg & 0x0008:
new_data_available = True
sub_page = status_reg & 0x0001
# 2. read frame data
self.i2c_write(i2c_addr, 0x8000, struct.pack("<H", 0x0030))
if self.sensor_type is None:
self.get_sensor_type(i2c_addr)
if self.sensor_type == 0:
frame_data, status = self.i2c_read(i2c_addr, 0x0400, 832*2) # 32 * 26 * 2
frame_data = list(struct.unpack(">832h", frame_data))
else:
frame_data, status = self.i2c_read(i2c_addr, 0x0400, 256*2) # 16 * 16 * 2
frame_data = list(struct.unpack(">256h", frame_data))
# 3. clear new data available bit.
self.i2c_write(i2c_addr, 0x8000, struct.pack("<H", status_reg & ~0x0008))
control_reg1, status = self.i2c_read(i2c_addr, 0x800D, 2)
control_reg1 = struct.unpack(">H", control_reg1[0:2])[0]
return frame_data + [control_reg1, status_reg]
def get_hardware_id(self):
return "HardWare Abstraction Layer (dummy)" | PypiClean |
/Flask-CAS-1.0.2.tar.gz/Flask-CAS-1.0.2/flask_cas/routing.py | import flask
from xmltodict import parse
from flask import current_app
from .cas_urls import create_cas_login_url
from .cas_urls import create_cas_logout_url
from .cas_urls import create_cas_validate_url
try:
from urllib import urlopen
except ImportError:
from urllib.request import urlopen
blueprint = flask.Blueprint('cas', __name__)
@blueprint.route('/login/')
def login():
"""
This route has two purposes. First, it is used by the user
to login. Second, it is used by the CAS to respond with the
`ticket` after the user logs in successfully.
When the user accesses this url, they are redirected to the CAS
to login. If the login was successful, the CAS will respond to this
route with the ticket in the url. The ticket is then validated.
If validation was successful the logged in username is saved in
the user's session under the key `CAS_USERNAME_SESSION_KEY` and
    the user's attributes are saved under the key
    `CAS_ATTRIBUTES_SESSION_KEY`.
"""
cas_token_session_key = current_app.config['CAS_TOKEN_SESSION_KEY']
redirect_url = create_cas_login_url(
current_app.config['CAS_SERVER'],
current_app.config['CAS_LOGIN_ROUTE'],
flask.url_for('.login', origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'), _external=True))
if 'ticket' in flask.request.args:
flask.session[cas_token_session_key] = flask.request.args['ticket']
if cas_token_session_key in flask.session:
if validate(flask.session[cas_token_session_key]):
if 'CAS_AFTER_LOGIN_SESSION_URL' in flask.session:
redirect_url = flask.session.pop('CAS_AFTER_LOGIN_SESSION_URL')
elif flask.request.args.get('origin'):
redirect_url = flask.request.args['origin']
else:
redirect_url = flask.url_for(
current_app.config['CAS_AFTER_LOGIN'])
else:
del flask.session[cas_token_session_key]
current_app.logger.debug('Redirecting to: {0}'.format(redirect_url))
return flask.redirect(redirect_url)
@blueprint.route('/logout/')
def logout():
"""
When the user accesses this route they are logged out.
"""
cas_username_session_key = current_app.config['CAS_USERNAME_SESSION_KEY']
cas_attributes_session_key = current_app.config['CAS_ATTRIBUTES_SESSION_KEY']
if cas_username_session_key in flask.session:
del flask.session[cas_username_session_key]
if cas_attributes_session_key in flask.session:
del flask.session[cas_attributes_session_key]
if(current_app.config['CAS_AFTER_LOGOUT'] is not None):
redirect_url = create_cas_logout_url(
current_app.config['CAS_SERVER'],
current_app.config['CAS_LOGOUT_ROUTE'],
current_app.config['CAS_AFTER_LOGOUT'])
else:
redirect_url = create_cas_logout_url(
current_app.config['CAS_SERVER'],
current_app.config['CAS_LOGOUT_ROUTE'])
current_app.logger.debug('Redirecting to: {0}'.format(redirect_url))
return flask.redirect(redirect_url)
def validate(ticket):
"""
Will attempt to validate the ticket. If validation fails, then False
is returned. If validation is successful, then True is returned
and the validated username is saved in the session under the
    key `CAS_USERNAME_SESSION_KEY` while the validated attributes dictionary
is saved under the key 'CAS_ATTRIBUTES_SESSION_KEY'.
"""
cas_username_session_key = current_app.config['CAS_USERNAME_SESSION_KEY']
cas_attributes_session_key = current_app.config['CAS_ATTRIBUTES_SESSION_KEY']
current_app.logger.debug("validating token {0}".format(ticket))
cas_validate_url = create_cas_validate_url(
current_app.config['CAS_SERVER'],
current_app.config['CAS_VALIDATE_ROUTE'],
flask.url_for('.login', origin=flask.session.get('CAS_AFTER_LOGIN_SESSION_URL'), _external=True),
ticket)
current_app.logger.debug("Making GET request to {0}".format(
cas_validate_url))
xml_from_dict = {}
isValid = False
try:
xmldump = urlopen(cas_validate_url).read().strip().decode('utf8', 'ignore')
xml_from_dict = parse(xmldump)
isValid = True if "cas:authenticationSuccess" in xml_from_dict["cas:serviceResponse"] else False
except ValueError:
current_app.logger.error("CAS returned unexpected result")
if isValid:
current_app.logger.debug("valid")
xml_from_dict = xml_from_dict["cas:serviceResponse"]["cas:authenticationSuccess"]
username = xml_from_dict["cas:user"]
flask.session[cas_username_session_key] = username
if "cas:attributes" in xml_from_dict:
attributes = xml_from_dict["cas:attributes"]
if "cas:memberOf" in attributes:
if not isinstance(attributes["cas:memberOf"], list):
attributes["cas:memberOf"] = attributes["cas:memberOf"].lstrip('[').rstrip(']').split(',')
for group_number in range(0, len(attributes['cas:memberOf'])):
attributes['cas:memberOf'][group_number] = attributes['cas:memberOf'][group_number].lstrip(' ').rstrip(' ')
else:
for index in range(0, len(attributes['cas:memberOf'])):
attributes["cas:memberOf"][index] = attributes["cas:memberOf"][index].lstrip('[').rstrip(']').split(',')
for group_number in range(0, len(attributes['cas:memberOf'][index])):
attributes['cas:memberOf'][index][group_number] = attributes['cas:memberOf'][index][group_number].lstrip(' ').rstrip(' ')
flask.session[cas_attributes_session_key] = attributes
else:
current_app.logger.debug("invalid")
return isValid | PypiClean |
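# --- Usage sketch (added illustration, not part of the original module) ---
# Hedged example of registering this blueprint on a Flask app by hand. The
# configuration keys below are exactly the ones read in the routes above; the
# values are placeholders, and an 'index' view matching CAS_AFTER_LOGIN is
# assumed to exist. In normal use the flask_cas.CAS extension performs this
# setup instead.
def _create_example_app():
    app = flask.Flask(__name__)
    app.secret_key = "change-me"          # required for flask.session
    app.config.update(
        CAS_SERVER="https://cas.example.com",
        CAS_LOGIN_ROUTE="/cas/login",
        CAS_LOGOUT_ROUTE="/cas/logout",
        CAS_VALIDATE_ROUTE="/cas/serviceValidate",
        CAS_AFTER_LOGIN="index",
        CAS_AFTER_LOGOUT=None,
        CAS_TOKEN_SESSION_KEY="_CAS_TOKEN",
        CAS_USERNAME_SESSION_KEY="CAS_USERNAME",
        CAS_ATTRIBUTES_SESSION_KEY="CAS_ATTRIBUTES",
    )
    app.register_blueprint(blueprint, url_prefix="/cas")
    return app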
/tensorflow_macos-2.14.0rc0-cp311-cp311-macosx_12_0_arm64.whl/tensorflow/python/saved_model/loader_impl.py | import os
import sys
from google.protobuf import message
from google.protobuf import text_format
from tensorflow.core.framework import graph_debug_info_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saved_model_pb2
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import path_helpers
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import utils_impl as saved_model_utils
# Placeholder for protosplitter merger import.
from tensorflow.python.saved_model.pywrap_saved_model import metrics
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
# API label for SavedModel metrics.
_LOADER_LABEL = "loader"
def parse_saved_model_with_debug_info(export_dir):
"""Reads the savedmodel as well as the graph debug info.
Args:
export_dir: Directory containing the SavedModel and GraphDebugInfo files.
Returns:
`SavedModel` and `GraphDebugInfo` protocol buffers.
Raises:
IOError: If the saved model file does not exist, or cannot be successfully
parsed. Missing graph debug info file is fine.
"""
saved_model = parse_saved_model(export_dir)
debug_info_path = file_io.join(
path_helpers.get_debug_dir(export_dir),
constants.DEBUG_INFO_FILENAME_PB)
debug_info = graph_debug_info_pb2.GraphDebugInfo()
if file_io.file_exists(debug_info_path):
with file_io.FileIO(debug_info_path, "rb") as debug_file:
try:
debug_info.ParseFromString(debug_file.read())
except message.DecodeError as e:
raise IOError(f"Cannot parse file {debug_info_path}: {e}.")
return (saved_model, debug_info)
@tf_export("__internal__.saved_model.parse_saved_model", v1=[])
def parse_saved_model(export_dir):
"""Reads the savedmodel.pb or savedmodel.pbtxt file containing `SavedModel`.
Args:
export_dir: String or Pathlike, path to the directory containing the
SavedModel file.
Returns:
A `SavedModel` protocol buffer.
Raises:
IOError: If the file does not exist, or cannot be successfully parsed.
"""
# Build the path to the SavedModel in pbtxt format.
path_to_pbtxt = file_io.join(
compat.as_bytes(compat.path_to_str(export_dir)),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT))
# Build the path to the SavedModel in pb format.
path_to_pb = file_io.join(
compat.as_bytes(compat.path_to_str(export_dir)),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB))
# Build the path to the SavedModel in cpb format.
path_to_cpb = file_io.join(
compat.as_bytes(compat.path_to_str(export_dir)),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_CPB))
# Parse the SavedModel protocol buffer.
saved_model = saved_model_pb2.SavedModel()
if file_io.file_exists(path_to_pb):
with file_io.FileIO(path_to_pb, "rb") as f:
file_content = f.read()
try:
saved_model.ParseFromString(file_content)
except message.DecodeError as e:
raise IOError(f"Cannot parse file {path_to_pb}: {str(e)}.") from e
elif file_io.file_exists(path_to_pbtxt):
with file_io.FileIO(path_to_pbtxt, "rb") as f:
file_content = f.read()
try:
text_format.Parse(file_content.decode("utf-8"), saved_model)
except text_format.ParseError as e:
raise IOError(f"Cannot parse file {path_to_pbtxt}: {str(e)}.") from e
else:
raise IOError(
f"SavedModel file does not exist at: {export_dir}{os.path.sep}"
f"{{{constants.SAVED_MODEL_FILENAME_PBTXT}|"
f"{constants.SAVED_MODEL_FILENAME_PB}}}")
return saved_model
def get_asset_tensors(export_dir, meta_graph_def_to_load, import_scope=None):
"""Gets the asset tensors, if defined in the meta graph def to load.
Args:
export_dir: Directory where the SavedModel is located.
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
import_scope: Optional `string` -- if specified, prepend this followed by
'/' to all returned asset tensor names.
Returns:
A dictionary of asset tensors, keyed by the name of the asset tensor. The
value in the map corresponds to the absolute path of the asset file.
"""
# Collection-def that may contain the assets key.
collection_def = meta_graph_def_to_load.collection_def
asset_tensor_dict = {}
asset_protos = []
if meta_graph_def_to_load.asset_file_def:
asset_protos = meta_graph_def_to_load.asset_file_def
elif constants.ASSETS_KEY in collection_def:
assets_any_proto = collection_def[constants.ASSETS_KEY].any_list.value
for asset_any_proto in assets_any_proto:
asset_proto = meta_graph_pb2.AssetFileDef()
asset_any_proto.Unpack(asset_proto)
asset_protos.append(asset_proto)
# Location of the assets for SavedModel.
assets_directory = file_io.join(
compat.as_bytes(export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY))
# Process each asset and add it to the asset tensor dictionary.
for asset_proto in asset_protos:
tensor_name = asset_proto.tensor_info.name
if import_scope:
tensor_name = "%s/%s" % (import_scope, tensor_name)
asset_tensor_dict[tensor_name] = file_io.join(
compat.as_bytes(assets_directory),
compat.as_bytes(asset_proto.filename))
return asset_tensor_dict
def _get_main_op_tensor(
meta_graph_def_to_load, init_op_key=constants.MAIN_OP_KEY):
"""Gets the main op tensor, if one exists.
Args:
meta_graph_def_to_load: The meta graph def from the SavedModel to be loaded.
init_op_key: name of the collection to check; should be one of MAIN_OP_KEY
or the deprecated LEGACY_INIT_OP_KEY
Returns:
The main op tensor, if it exists and `None` otherwise.
Raises:
RuntimeError: If the collection def corresponding to the main op key has
other than exactly one tensor.
"""
# TODO(kathywu): Rename this method to _get_op_from_collection when
# dependency from SavedModelEstimator is removed.
collection_def = meta_graph_def_to_load.collection_def
init_op = None
if init_op_key in collection_def:
init_op_list = collection_def[init_op_key].node_list.value
if len(init_op_list) != 1:
raise RuntimeError("Expected exactly one SavedModel init op. "
f"Found {len(init_op_list)}: {init_op_list}.")
init_op = ops.get_collection(init_op_key)[0]
return init_op
def _get_op_from_collection(meta_graph_def, op_key):
return _get_main_op_tensor(meta_graph_def, op_key)
def _get_op_from_signature_def(meta_graph_def, op_signature_key, import_scope):
"""Retrieve op stored in the imported meta graph's signature def."""
if op_signature_key in meta_graph_def.signature_def:
return signature_def_utils.load_op_from_signature_def(
meta_graph_def.signature_def[op_signature_key], op_signature_key,
import_scope)
else:
return None
def get_init_op(meta_graph_def, import_scope=None):
return (_get_op_from_signature_def(
meta_graph_def, constants.INIT_OP_SIGNATURE_KEY, import_scope) or
_get_op_from_collection(meta_graph_def, constants.MAIN_OP_KEY) or
_get_op_from_collection(meta_graph_def, constants.LEGACY_INIT_OP_KEY))
def get_train_op(meta_graph_def, import_scope=None):
train_op = _get_op_from_signature_def(
meta_graph_def, constants.TRAIN_OP_SIGNATURE_KEY, import_scope)
if train_op is None:
train_op = _get_op_from_collection(meta_graph_def, constants.TRAIN_OP_KEY)
return train_op
@tf_export(v1=[
"saved_model.contains_saved_model",
"saved_model.maybe_saved_model_directory",
"saved_model.loader.maybe_saved_model_directory"
])
@deprecation.deprecated_endpoints(
"saved_model.loader.maybe_saved_model_directory")
def maybe_saved_model_directory(export_dir):
"""Checks whether the provided export directory could contain a SavedModel.
Note that the method does not load any data by itself. If the method returns
`false`, the export directory definitely does not contain a SavedModel. If the
method returns `true`, the export directory may contain a SavedModel but
provides no guarantee that it can be loaded.
Args:
export_dir: Absolute string path to possible export location. For example,
'/my/foo/model'.
Returns:
True if the export directory contains SavedModel files, False otherwise.
"""
txt_path = file_io.join(export_dir, constants.SAVED_MODEL_FILENAME_PBTXT)
pb_path = file_io.join(export_dir, constants.SAVED_MODEL_FILENAME_PB)
return file_io.file_exists(txt_path) or file_io.file_exists(pb_path)
@tf_export("saved_model.contains_saved_model", v1=[])
def contains_saved_model(export_dir):
"""Checks whether the provided export directory could contain a SavedModel.
Note that the method does not load any data by itself. If the method returns
`false`, the export directory definitely does not contain a SavedModel. If the
method returns `true`, the export directory may contain a SavedModel but
provides no guarantee that it can be loaded.
Args:
export_dir: Absolute path to possible export location. For example,
'/my/foo/model'.
Returns:
True if the export directory contains SavedModel files, False otherwise.
"""
if isinstance(export_dir, os.PathLike):
export_dir = os.fspath(export_dir)
return maybe_saved_model_directory(export_dir)
@tf_export(v1=["saved_model.load", "saved_model.loader.load"])
@deprecation.deprecated(
None,
"Use `tf.saved_model.load` instead.")
def load(sess, tags, export_dir, import_scope=None, **saver_kwargs):
"""Loads the model from a SavedModel as specified by tags.
Args:
sess: The TensorFlow session to restore the variables.
tags: Set of string tags to identify the required MetaGraphDef. These should
correspond to the tags used when saving the variables using the
SavedModel `save()` API.
export_dir: Directory in which the SavedModel protocol buffer and variables
to be loaded are located.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: Optional keyword arguments passed through to Saver.
Returns:
The `MetaGraphDef` protocol buffer loaded in the provided session. This
can be used to further extract signature-defs, collection-defs, etc.
Raises:
RuntimeError: MetaGraphDef associated with the tags cannot be found.
@compatibility(TF2)
`tf.compat.v1.saved_model.load` or `tf.compat.v1.saved_model.loader.load` is
not compatible with eager execution. Please use `tf.saved_model.load` instead
to load your model. You can refer to the [SavedModel guide]
(https://www.tensorflow.org/guide/saved_model) for more information as well as
"Importing SavedModels from TensorFlow 1.x" in the [`tf.saved_model.load`]
(https://www.tensorflow.org/api_docs/python/tf/saved_model/load) docstring.
#### How to Map Arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| :-------------------- | :-------------- | :------------------------- |
| `sess` | Not supported | - |
| `tags` | `tags` | - |
| `export_dir` | `export_dir` | - |
| `import_scope` | Not supported | Name scopes are not needed.
: : : By default, variables are :
: : : associated with the loaded :
: : : object and function names :
: : : are deduped. :
| `saver_kwargs` | Not supported | - |
#### Before & After Usage Example
Before:
```
with tf.compat.v1.Session(graph=tf.Graph()) as sess:
tf.compat.v1.saved_model.loader.load(sess, ["foo-tag"], export_dir)
```
After:
```
model = tf.saved_model.load(export_dir, tags=["foo-tag"])
```
@end_compatibility
"""
loader = SavedModelLoader(export_dir)
return loader.load(sess, tags, import_scope, **saver_kwargs)
class SavedModelLoader(object):
"""Load graphs and restore variable values from a `SavedModel`."""
def __init__(self, export_dir):
"""Creates a `SavedModelLoader`.
Args:
export_dir: Directory in which the SavedModel protocol buffer and
variables to be loaded are located.
"""
self._export_dir = export_dir
self._variables_path = path_helpers.get_variables_path(export_dir)
self._saved_model = parse_saved_model(export_dir)
@property
def export_dir(self):
"""Directory containing the SavedModel."""
return self._export_dir
@property
def variables_path(self):
"""Path to variable checkpoint files."""
return self._variables_path
@property
def saved_model(self):
"""SavedModel object parsed from the export directory."""
return self._saved_model
def get_meta_graph_def_from_tags(self, tags):
"""Return MetaGraphDef with the exact specified tags.
Args:
tags: A list or set of string tags that identify the MetaGraphDef.
Returns:
MetaGraphDef with the same tags.
Raises:
RuntimeError: if no metagraphs were found with the associated tags.
"""
found_match = False
meta_graph_def_to_load = None
available_tags = []
for meta_graph_def in self._saved_model.meta_graphs:
available_tags.append(set(meta_graph_def.meta_info_def.tags))
if set(meta_graph_def.meta_info_def.tags) == set(tags):
meta_graph_def_to_load = meta_graph_def
found_match = True
break
if not found_match:
raise RuntimeError(
f"MetaGraphDef associated with tags {str(tags).strip('[]')} "
"could not be found in SavedModel, with available tags "
f"'{available_tags}'. To inspect available tag-sets in"
" the SavedModel, please use the SavedModel CLI: `saved_model_cli`.")
return meta_graph_def_to_load
def load_graph(self, graph, tags, import_scope=None, **saver_kwargs):
"""Load ops and nodes from SavedModel MetaGraph into graph.
Args:
graph: tf.Graph object.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
A tuple of
* Saver defined by the MetaGraph, which can be used to restore the
variable values.
* List of `Operation`/`Tensor` objects returned from
`tf.import_graph_def` (may be `None`).
"""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
if sys.byteorder == "big":
saved_model_utils.swap_function_tensor_content(meta_graph_def, "little",
"big")
with graph.as_default():
return tf_saver._import_meta_graph_with_return_elements( # pylint: disable=protected-access
meta_graph_def, import_scope=import_scope, **saver_kwargs)
def restore_variables(self, sess, saver, import_scope=None):
"""Restore SavedModel variable values into the session.
Args:
sess: tf.compat.v1.Session to restore variable values.
saver: a tf.compat.v1.train.Saver object. Can be None if there are no
variables in graph. This may be the saver returned by the load_graph()
function, or a default `tf.compat.v1.train.Saver()`.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
Raises:
ValueError: if no saver was passed to the saver argument, and there are
variables in the graph.
"""
with sess.graph.as_default():
if (saver is None and
not variables._all_saveable_objects(scope=import_scope)): # pylint: disable=protected-access
tf_logging.info("The specified SavedModel has no variables; no "
"checkpoints were restored.")
elif isinstance(saver, tf_saver.Saver):
saver.restore(sess, self._variables_path)
else:
raise ValueError(
"No tf.train.Saver object was passed to the function "
"`SavedModelLoader.restore_variables`. Since there are variables in"
" the graph, a saver is required.")
def run_init_ops(self, sess, tags, import_scope=None):
"""Run initialization ops defined in the `MetaGraphDef`.
Args:
sess: tf.compat.v1.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
"""
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
with sess.graph.as_default():
# Get asset tensors, if any.
asset_tensors_dictionary = get_asset_tensors(
self._export_dir, meta_graph_def, import_scope=import_scope)
init_op = get_init_op(meta_graph_def, import_scope)
if init_op is not None:
sess.run(fetches=[init_op], feed_dict=asset_tensors_dictionary)
def load(self, sess, tags, import_scope=None, **saver_kwargs):
"""Load the MetaGraphDef graph and restore variable values into the session.
Args:
sess: tf.compat.v1.Session to restore variable values.
tags: a set of string tags identifying a MetaGraphDef.
import_scope: Optional `string` -- if specified, prepend this string
followed by '/' to all loaded tensor names. This scope is applied to
tensor instances loaded into the passed session, but it is *not* written
through to the static `MetaGraphDef` protocol buffer that is returned.
**saver_kwargs: keyword arguments to pass to tf.train.import_meta_graph.
Returns:
`MetagraphDef` proto of the graph that was loaded.
"""
saved_model_proto = parse_saved_model(self._export_dir)
metrics.IncrementReadApi(_LOADER_LABEL)
with sess.graph.as_default():
saver, _ = self.load_graph(sess.graph, tags, import_scope,
**saver_kwargs)
self.restore_variables(sess, saver, import_scope)
self.run_init_ops(sess, tags, import_scope)
meta_graph_def = self.get_meta_graph_def_from_tags(tags)
if (len(saved_model_proto.meta_graphs) == 1 and
saved_model_proto.meta_graphs[0].HasField("object_graph_def")):
metrics.IncrementRead(write_version="2")
else:
metrics.IncrementRead(write_version="1")
return meta_graph_def | PypiClean |
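# --- Usage sketch (added illustration, not part of the original module) ---
# Hedged TF1-style example of the loader above: build a fresh graph-mode
# session and restore a SavedModel's graph, variables and init ops in one
# call. The export directory and tag set are placeholders; tf.saved_model.load
# is the recommended replacement in TF2, as the deprecation notice states.
def _load_v1_saved_model(export_dir, tags=("serve",)):
    from tensorflow.python.client import session
    with session.Session(graph=ops.Graph()) as sess:
        meta_graph_def = load(sess, set(tags), export_dir)
        # Signatures are now available via meta_graph_def.signature_def and
        # the restored tensors via sess.graph.
        return meta_graph_def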
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/redhatopenshift/v20220904/machine_pool.py |
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = ['MachinePoolArgs', 'MachinePool']
@pulumi.input_type
class MachinePoolArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
resource_name: pulumi.Input[str],
child_resource_name: Optional[pulumi.Input[str]] = None,
resources: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a MachinePool resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] resource_name: The name of the OpenShift cluster resource.
:param pulumi.Input[str] child_resource_name: The name of the MachinePool resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "resource_name", resource_name)
if child_resource_name is not None:
pulumi.set(__self__, "child_resource_name", child_resource_name)
if resources is not None:
pulumi.set(__self__, "resources", resources)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group. The name is case insensitive.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="resourceName")
def resource_name(self) -> pulumi.Input[str]:
"""
The name of the OpenShift cluster resource.
"""
return pulumi.get(self, "resource_name")
@resource_name.setter
def resource_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_name", value)
@property
@pulumi.getter(name="childResourceName")
def child_resource_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the MachinePool resource.
"""
return pulumi.get(self, "child_resource_name")
@child_resource_name.setter
def child_resource_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "child_resource_name", value)
@property
@pulumi.getter
def resources(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "resources")
@resources.setter
def resources(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resources", value)
class MachinePool(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
child_resource_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
resources: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
MachinePool represents a MachinePool
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] child_resource_name: The name of the MachinePool resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group. The name is case insensitive.
:param pulumi.Input[str] resource_name_: The name of the OpenShift cluster resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: MachinePoolArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
MachinePool represents a MachinePool
:param str resource_name: The name of the resource.
:param MachinePoolArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MachinePoolArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
child_resource_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
resource_name_: Optional[pulumi.Input[str]] = None,
resources: Optional[pulumi.Input[str]] = None,
__props__=None):
opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts)
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = MachinePoolArgs.__new__(MachinePoolArgs)
__props__.__dict__["child_resource_name"] = child_resource_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if resource_name_ is None and not opts.urn:
raise TypeError("Missing required property 'resource_name_'")
__props__.__dict__["resource_name"] = resource_name_
__props__.__dict__["resources"] = resources
__props__.__dict__["name"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:redhatopenshift:MachinePool"), pulumi.Alias(type_="azure-native:redhatopenshift/v20230401:MachinePool"), pulumi.Alias(type_="azure-native:redhatopenshift/v20230701preview:MachinePool")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(MachinePool, __self__).__init__(
'azure-native:redhatopenshift/v20220904:MachinePool',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'MachinePool':
"""
Get an existing MachinePool resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = MachinePoolArgs.__new__(MachinePoolArgs)
__props__.__dict__["name"] = None
__props__.__dict__["resources"] = None
__props__.__dict__["system_data"] = None
__props__.__dict__["type"] = None
return MachinePool(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def resources(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "resources")
@property
@pulumi.getter(name="systemData")
def system_data(self) -> pulumi.Output['outputs.SystemDataResponse']:
"""
Azure Resource Manager metadata containing createdBy and modifiedBy information.
"""
return pulumi.get(self, "system_data")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type") | PypiClean |
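# --- Usage sketch (added illustration, not part of the original module) ---
# Hedged example of declaring a MachinePool in a Pulumi program (normally
# module-level code in __main__.py). Resource group, cluster and pool names
# are placeholders; `resources` would carry the provider-specific payload and
# is omitted here.
def _example_machine_pool():
    return MachinePool(
        "exampleMachinePool",
        resource_group_name="example-rg",
        resource_name_="example-aro-cluster",
        child_resource_name="example-pool",
    )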
/ChessAnalysisPipeline-0.0.11-py3-none-any.whl/CHAP/edd/processor.py | # system modules
from json import dumps
import os
# third party modules
import numpy as np
# local modules
from CHAP.processor import Processor
class DiffractionVolumeLengthProcessor(Processor):
"""A Processor using a steel foil raster scan to calculate the
length of the diffraction volume for an EDD setup.
"""
def process(self,
data,
config=None,
save_figures=False,
outputdir='.',
inputdir='.',
interactive=False):
"""Return calculated value of the DV length.
:param data: input configuration for the raw scan data & DVL
calculation procedure.
:type data: list[PipelineData]
:param config: initialization parameters for an instance of
CHAP.edd.models.DiffractionVolumeLengthConfig, defaults to
None
:type config: dict, optional
:param save_figures: save .pngs of plots for checking inputs &
outputs of this Processor, defaults to False
:type save_figures: bool, optional
:param outputdir: directory to which any output figures will
be saved, defaults to '.'
:type outputdir: str, optional
        :param inputdir: input directory, used only if files in the
            input configuration are not absolute paths, defaults to '.'
:type inputdir: str, optional
:param interactive: allow for user interactions, defaults to
False
:type interactive: bool, optional
        :return: complete DVL configuration dictionary
:rtype: dict
"""
try:
dvl_config = self.get_config(
data, 'edd.models.DiffractionVolumeLengthConfig',
inputdir=inputdir)
except Exception as data_exc:
self.logger.info('No valid DVL config in input pipeline data, '
+ 'using config parameter instead.')
try:
from CHAP.edd.models import DiffractionVolumeLengthConfig
dvl_config = DiffractionVolumeLengthConfig(
**config, inputdir=inputdir)
except Exception as dict_exc:
self.logger.error('Could not get a valid DVL config')
raise RuntimeError from dict_exc
for detector in dvl_config.detectors:
dvl = self.measure_dvl(dvl_config, detector,
save_figures=save_figures,
interactive=interactive,
outputdir=outputdir)
detector.dvl_measured = dvl
return dvl_config.dict()
def measure_dvl(self,
dvl_config,
detector,
save_figures=False,
outputdir='.',
interactive=False):
"""Return a measured value for the length of the diffraction
volume. Use the iron foil raster scan data provided in
`dvl_config` and fit a gaussian to the sum of all MCA channel
counts vs scanned motor position in the raster scan. The
computed diffraction volume length is approximately equal to
the standard deviation of the fitted peak.
:param dvl_config: configuration for the DVL calculation
procedure
:type dvl_config: DiffractionVolumeLengthConfig
:param detector: A single MCA detector element configuration
:type detector: CHAP.edd.models.MCAElementDiffractionVolumeLengthConfig
:param save_figures: save .pngs of plots for checking inputs &
outputs of this Processor, defaults to False
:type save_figures: bool, optional
:param outputdir: directory to which any output figures will
be saved, defaults to '.'
:type outputdir: str, optional
:param interactive: allow for user interactions, defaults to
False
:type interactive: bool, optional
:return: calculated diffraction volume length
:rtype: float
"""
from CHAP.utils.fit import Fit
from CHAP.utils.general import draw_mask_1d
# Get raw MCA data from raster scan
mca_data = dvl_config.mca_data(detector)
# Interactively set mask, if needed & possible.
if interactive or save_figures:
self.logger.info(
'Interactively select a mask in the matplotlib figure')
mask, include_bin_ranges, figure = draw_mask_1d(
np.sum(mca_data, axis=0),
xdata = np.arange(detector.num_bins),
current_index_ranges=detector.include_bin_ranges,
label='sum of MCA spectra over all scan points',
title='Click and drag to select ranges of MCA data to\n'
+ 'include when measuring the diffraction volume length.',
xlabel='MCA channel (index)',
ylabel='MCA intensity (counts)',
test_mode=not interactive,
return_figure=True
)
detector.include_bin_ranges = include_bin_ranges
self.logger.debug('Mask selected. Including detector bin ranges: '
+ str(detector.include_bin_ranges))
if save_figures:
figure.savefig(os.path.join(
outputdir, f'{detector.detector_name}_dvl_mask.png'))
import matplotlib.pyplot as plt
plt.close()
if detector.include_bin_ranges is None:
raise ValueError(
'No value provided for include_bin_ranges. '
+ 'Provide them in the Diffraction Volume Length '
+ 'Measurement Configuration, or re-run the pipeline '
+ 'with the --interactive flag.')
# Reduce the raw MCA data in 3 ways:
# 1) sum of intensities in all detector bins
# 2) max of intensities in detector bins after mask is applied
# 3) sum of intensities in detector bins after mask is applied
unmasked_sum = np.sum(mca_data, axis=1)
mask = detector.mca_mask()
masked_mca_data = np.empty(
(mca_data.shape[0], *mca_data[0][mask].shape))
for i in range(mca_data.shape[0]):
masked_mca_data[i] = mca_data[i][mask]
masked_max = np.amax(masked_mca_data, axis=1)
masked_sum = np.sum(masked_mca_data, axis=1)
# Find the motor position corresponding roughly to the center
# of the diffraction volume
scanned_vals = dvl_config.scanned_vals
scan_center = np.sum(scanned_vals * masked_sum) / np.sum(masked_sum)
x = scanned_vals - scan_center
# "Normalize" the masked summed data and fit a gaussian to it
y = (masked_sum - min(masked_sum)) / max(masked_sum)
fit = Fit.fit_data(y, 'gaussian', x=x, normalize=False)
# Calculate / manually select diffraction volume length
dvl = fit.best_values['sigma'] * detector.sigma_to_dvl_factor
if detector.measurement_mode == 'manual':
if interactive:
mask, dvl_bounds = draw_mask_1d(
y, xdata=x,
label='total (masked & normalized)',
ref_data=[
((x, fit.best_fit),
{'label': 'gaussian fit (to total)'}),
((x, masked_max / max(masked_max)),
{'label': 'maximum (masked)'}),
((x, unmasked_sum / max(unmasked_sum)),
{'label': 'total (unmasked)'})
],
num_index_ranges_max=1,
title=('Click and drag to indicate the\n'
+ 'boundary of the diffraction volume'),
xlabel=(dvl_config.scanned_dim_lbl
+ ' (offset from scan "center")'),
ylabel='MCA intensity (normalized)')
dvl_bounds = dvl_bounds[0]
dvl = abs(x[dvl_bounds[1]] - x[dvl_bounds[0]])
else:
self.logger.warning(
'Cannot manually indicate DVL when running CHAP '
+ 'non-interactively. '
                    + 'Using default DVL calculation instead.')
if interactive or save_figures:
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.set_title(f'Diffraction Volume ({detector.detector_name})')
ax.set_xlabel(dvl_config.scanned_dim_lbl \
+ ' (offset from scan "center")')
ax.set_ylabel('MCA intensity (normalized)')
ax.plot(x, y, label='total (masked & normalized)')
ax.plot(x, fit.best_fit, label='gaussian fit (to total)')
ax.plot(x, masked_max / max(masked_max),
label='maximum (masked)')
ax.plot(x, unmasked_sum / max(unmasked_sum),
label='total (unmasked)')
ax.axvspan(-dvl / 2., dvl / 2.,
color='gray', alpha=0.5,
label='diffraction volume'
+ f' ({detector.measurement_mode})')
ax.legend()
if save_figures:
figfile = os.path.join(outputdir,
f'{detector.detector_name}_dvl.png')
plt.savefig(figfile)
self.logger.info(f'Saved figure to {figfile}')
if interactive:
plt.show()
return dvl
class MCACeriaCalibrationProcessor(Processor):
"""A Processor using a CeO2 scan to obtain tuned values for the
bragg diffraction angle and linear correction parameters for MCA
channel energies for an EDD experimental setup.
"""
def process(self,
data,
config=None,
save_figures=False,
outputdir='.',
inputdir='.',
interactive=False):
"""Return tuned values for 2&theta and linear correction
parameters for the MCA channel energies.
:param data: input configuration for the raw data & tuning
procedure
:type data: list[dict[str,object]]
:param config: initialization parameters for an instance of
CHAP.edd.models.MCACeriaCalibrationConfig, defaults to
None
:type config: dict, optional
:param save_figures: save .pngs of plots for checking inputs &
outputs of this Processor, defaults to False
:type save_figures: bool, optional
:param outputdir: directory to which any output figures will
be saved, defaults to '.'
:type outputdir: str, optional
        :param inputdir: input directory, used only if files in the
            input configuration are not absolute paths, defaults to '.'
:type inputdir: str, optional
:param interactive: allow for user interactions, defaults to
False
:type interactive: bool, optional
:return: original configuration dictionary with tuned values
added
:rtype: dict[str,float]
"""
try:
calibration_config = self.get_config(
data, 'edd.models.MCACeriaCalibrationConfig',
inputdir=inputdir)
except Exception as data_exc:
self.logger.info('No valid calibration config in input pipeline '
+ 'data, using config parameter instead.')
try:
from CHAP.edd.models import MCACeriaCalibrationConfig
calibration_config = MCACeriaCalibrationConfig(
**config, inputdir=inputdir)
except Exception as dict_exc:
raise RuntimeError from dict_exc
for detector in calibration_config.detectors:
tth, slope, intercept = self.calibrate(
calibration_config, detector,
save_figures=save_figures,
interactive=interactive, outputdir=outputdir)
detector.tth_calibrated = tth
detector.slope_calibrated = slope
detector.intercept_calibrated = intercept
return calibration_config.dict()
def calibrate(self,
calibration_config,
detector,
save_figures=False,
outputdir='.',
interactive=False):
"""Iteratively calibrate 2&theta by fitting selected peaks of
an MCA spectrum until the computed strain is sufficiently
small. Use the fitted peak locations to determine linear
correction parameters for the MCA's channel energies.
:param calibration_config: object configuring the CeO2
calibration procedure
:type calibration_config: MCACeriaCalibrationConfig
:param detector: a single MCA detector element configuration
:type detector: CHAP.edd.models.MCAElementCalibrationConfig
:param save_figures: save .pngs of plots for checking inputs &
outputs of this Processor, defaults to False
:type save_figures: bool, optional
:param outputdir: directory to which any output figures will
be saved, defaults to '.'
:type outputdir: str, optional
:param interactive: allow for user interactions, defaults to
False
:type interactive: bool, optional
:return: calibrated values of 2&theta and linear correction
parameters for MCA channel energies : tth, slope,
intercept
:rtype: float, float, float
"""
from CHAP.edd.utils import hc
from CHAP.utils.fit import Fit
# Collect raw MCA data of interest
mca_data = calibration_config.mca_data(detector)
mca_bin_energies = np.arange(0, detector.num_bins) \
* (detector.max_energy_kev / detector.num_bins)
if interactive:
# Interactively adjust initial tth guess
from CHAP.edd.utils import select_tth_initial_guess
select_tth_initial_guess(detector, calibration_config.material,
mca_data, mca_bin_energies)
self.logger.debug(f'tth_initial_guess = {detector.tth_initial_guess}')
# Mask out the corrected MCA data for fitting
if interactive:
from CHAP.utils.general import draw_mask_1d
self.logger.info(
'Interactively select a mask in the matplotlib figure')
mask, include_bin_ranges = draw_mask_1d(
mca_data,
xdata=mca_bin_energies,
current_index_ranges=detector.include_bin_ranges,
title='Click and drag to select ranges of Ceria'
+' calibration data to include',
xlabel='MCA channel energy (keV)',
ylabel='MCA intensity (counts)')
detector.include_bin_ranges = include_bin_ranges
self.logger.debug('Mask selected. Including detector bin ranges: '
+ str(detector.include_bin_ranges))
if detector.include_bin_ranges is None:
raise ValueError(
'No value provided for include_bin_ranges. '
'Provide them in the MCA Ceria Calibration Configuration, '
'or re-run the pipeline with the --interactive flag.')
mca_mask = detector.mca_mask()
fit_mca_energies = mca_bin_energies[mca_mask]
fit_mca_intensities = mca_data[mca_mask]
# Correct raw MCA data for variable flux at different energies
flux_correct = \
calibration_config.flux_correction_interpolation_function()
mca_intensity_weights = flux_correct(fit_mca_energies)
fit_mca_intensities = fit_mca_intensities / mca_intensity_weights
# Get the HKLs and lattice spacings that will be used for
# fitting
tth = detector.tth_initial_guess
if interactive or save_figures:
import matplotlib.pyplot as plt
from CHAP.edd.utils import select_hkls
fig = select_hkls(detector, [calibration_config.material], tth,
mca_data, mca_bin_energies, interactive)
if save_figures:
fig.savefig(os.path.join(
outputdir,
f'{detector.detector_name}_calibration_hkls.png'))
plt.close()
self.logger.debug(f'HKLs selected: {detector.fit_hkls}')
if detector.fit_hkls is None:
raise ValueError(
'No value provided for fit_hkls. Provide them in '
'the detector\'s MCA Ceria Calibration Configuration, or'
' re-run the pipeline with the --interactive flag.')
fit_hkls, fit_ds = detector.fit_ds(calibration_config.material)
c_1 = fit_hkls[:,0]**2 + fit_hkls[:,1]**2 + fit_hkls[:,2]**2
for iter_i in range(calibration_config.max_iter):
self.logger.debug(f'Tuning tth: iteration no. {iter_i}, '
+ f'starting tth value = {tth} ')
# Perform the uniform fit first
# Get expected peak energy locations for this iteration's
# starting value of tth
fit_lambda = 2.0*fit_ds*np.sin(0.5*np.radians(tth))
fit_E0 = hc / fit_lambda
# Run the uniform fit
uniform_fit = Fit(fit_mca_intensities, x=fit_mca_energies)
uniform_fit.create_multipeak_model(fit_E0, fit_type='uniform')
uniform_fit.fit()
# Extract values of interest from the best values for the
# uniform fit parameters
uniform_fit_centers = [
uniform_fit.best_values[f'peak{i+1}_center']
for i in range(len(detector.fit_hkls))]
uniform_a = uniform_fit.best_values['scale_factor']
uniform_strain = np.log(
(uniform_a
/ calibration_config.material.lattice_parameters)) # CeO2 is cubic, so this is fine here.
# Next, perform the unconstrained fit
# Use the peak locations found in the uniform fit as the
# initial guesses for peak locations in the unconstrained
# fit
unconstrained_fit = Fit(fit_mca_intensities, x=fit_mca_energies)
unconstrained_fit.create_multipeak_model(
uniform_fit_centers, fit_type='unconstrained')
unconstrained_fit.fit()
# Extract values of interest from the best values for the
# unconstrained fit parameters
unconstrained_fit_centers = np.array(
[unconstrained_fit.best_values[f'peak{i+1}_center']
for i in range(len(detector.fit_hkls))])
unconstrained_a = 0.5*hc*np.sqrt(c_1) \
/ (unconstrained_fit_centers*abs(np.sin(0.5*np.radians(tth))))
unconstrained_strains = np.log(
(unconstrained_a
/ calibration_config.material.lattice_parameters))
unconstrained_strain = np.mean(unconstrained_strains)
unconstrained_tth = tth * (1.0 + unconstrained_strain)
# Update tth for the next iteration of tuning
prev_tth = tth
tth = unconstrained_tth
# Stop tuning tth at this iteration if differences are
# small enough
if abs(tth - prev_tth) < calibration_config.tune_tth_tol:
break
# Fit line to expected / computed peak locations from the last
# unconstrained fit.
fit = Fit.fit_data(
fit_E0,
'linear',
x=unconstrained_fit_centers,
nan_policy='omit')
slope = fit.best_values['slope']
intercept = fit.best_values['intercept']
if interactive or save_figures:
fig, axs = plt.subplots(2, 2, sharex='all', figsize=(11, 8.5))
# Upper left axes: Input data & best fits
axs[0,0].set_title('Ceria Calibration Fits')
axs[0,0].set_xlabel('Energy (keV)')
axs[0,0].set_ylabel('Intensity (a.u)')
for i, hkl_E in enumerate(fit_E0):
# KLS: annotate indicated HKLs with Miller indices
axs[0,0].axvline(hkl_E, color='k', linestyle='--')
axs[0,0].text(hkl_E, 1, str(fit_hkls[i])[1:-1],
ha='right', va='top', rotation=90,
transform=axs[0,0].get_xaxis_transform())
axs[0,0].plot(fit_mca_energies, uniform_fit.best_fit,
label='Single Strain')
axs[0,0].plot(fit_mca_energies, unconstrained_fit.best_fit,
label='Unconstrained')
#axs[0,0].plot(fit_mca_energies, MISSING?, label='least squares')
axs[0,0].plot(fit_mca_energies, fit_mca_intensities,
label='Flux-Corrected & Masked MCA Data')
axs[0,0].legend()
# Lower left axes: fit residuals
axs[1,0].set_title('Fit Residuals')
axs[1,0].set_xlabel('Energy (keV)')
axs[1,0].set_ylabel('Residual (a.u)')
axs[1,0].plot(fit_mca_energies,
uniform_fit.residual,
label='Single Strain')
axs[1,0].plot(fit_mca_energies,
unconstrained_fit.residual,
label='Unconstrained')
axs[1,0].legend()
# Upper right axes: E vs strain for each fit
axs[0,1].set_title('HKL Energy vs. Microstrain')
axs[0,1].set_xlabel('Energy (keV)')
axs[0,1].set_ylabel('Strain (\u03BC\u03B5)')
axs[0,1].axhline(uniform_strain * 1e6,
linestyle='--', label='Single Strain')
axs[0,1].plot(fit_E0, unconstrained_strains * 1e6,
color='C1', marker='s', label='Unconstrained')
axs[0,1].axhline(unconstrained_strain * 1e6,
color='C1', linestyle='--',
label='Unconstrained: Unweighted Mean')
axs[0,1].legend()
# Lower right axes: theoretical HKL E vs fit HKL E for
# each fit
axs[1,1].set_title('Theoretical vs. Fit HKL Energies')
axs[1,1].set_xlabel('Energy (keV)')
axs[1,1].set_ylabel('Energy (keV)')
axs[1,1].plot(fit_E0, uniform_fit_centers,
marker='o', label='Single Strain')
axs[1,1].plot(fit_E0, unconstrained_fit_centers,
linestyle='', marker='o', label='Unconstrained')
axs[1,1].plot(slope * unconstrained_fit_centers + intercept, fit_E0,
color='C1', label='Unconstrained: Linear Fit')
axs[1,1].legend()
fig.tight_layout()
if save_figures:
figfile = os.path.join(outputdir, 'ceria_calibration_fits.png')
plt.savefig(figfile)
self.logger.info(f'Saved figure to {figfile}')
if interactive:
plt.show()
return float(tth), float(slope), float(intercept)
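# Illustrative sketch (not part of the CHAP API): the tuning loop above
# updates 2theta from the mean log-strain of the unconstrained peak centers.
# Expected peak energies follow Bragg's law, E = hc / (2 d sin(tth/2)), and
# tth is rescaled by (1 + mean strain). The helper below is a self-contained,
# numpy-only restatement of that update rule; hc is approximated in
# keV*Angstrom and all inputs are placeholders rather than real fit output.
def _demo_tth_update(tth, d_spacings, fitted_centers, hc=12.398):
    """Return (expected peak energies in keV, updated tth in degrees)."""
    import numpy as np
    d_spacings = np.asarray(d_spacings, dtype=float)
    fitted_centers = np.asarray(fitted_centers, dtype=float)
    # Bragg's law: lambda = 2 d sin(tth/2), so E = hc / (2 d sin(tth/2))
    expected_energies = hc / (2.0 * d_spacings
                              * np.sin(0.5 * np.radians(tth)))
    # Log-strain of each fitted center relative to its expected energy
    strains = np.log(expected_energies / fitted_centers)
    # Rescale tth by the mean strain, as in the tuning loop above
    return expected_energies, tth * (1.0 + np.mean(strains))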
class MCADataProcessor(Processor):
"""A Processor to return data from an MCA, restuctured to
incorporate the shape & metadata associated with a map
configuration to which the MCA data belongs, and linearly
transformed according to the results of a ceria calibration.
"""
def process(self, data):
"""Process configurations for a map and MCA detector(s), and
return the calibrated MCA data collected over the map.
:param data: input map configuration and results of ceria
calibration
:type data: list[dict[str,object]]
:return: calibrated and flux-corrected MCA data
:rtype: nexusformat.nexus.NXentry
"""
map_config = self.get_config(
data, 'common.models.map.MapConfig')
calibration_config = self.get_config(
data, 'edd.models.MCACeriaCalibrationConfig')
nxroot = self.get_nxroot(map_config, calibration_config)
return nxroot
def get_nxroot(self, map_config, calibration_config):
"""Get a map of the MCA data collected by the scans in
`map_config`. The MCA data will be calibrated and
flux-corrected according to the parameters included in
`calibration_config`. The data will be returned along with
relevant metadata in the form of a NeXus structure.
:param map_config: the map configuration
:type map_config: MapConfig
:param calibration_config: the calibration configuration
:type calibration_config: MCACeriaCalibrationConfig
:return: a map of the calibrated and flux-corrected MCA data
:rtype: nexusformat.nexus.NXroot
"""
# third party modules
from nexusformat.nexus import (NXdata,
NXdetector,
NXinstrument,
NXroot)
# local modules
from CHAP.common import MapProcessor
nxroot = NXroot()
nxroot[map_config.title] = MapProcessor.get_nxentry(map_config)
nxentry = nxroot[map_config.title]
for detector in calibration_config.detectors:
nxentry.instrument = NXinstrument()
nxentry.instrument[detector.detector_name] = NXdetector()
nxentry.instrument[detector.detector_name].calibration = dumps(
detector.dict())
nxentry.instrument[detector.detector_name].data = NXdata()
nxdata = nxentry.instrument[detector.detector_name].data
nxdata.raw = np.empty((*map_config.shape,
detector.num_bins))
nxdata.raw.attrs['units'] = 'counts'
nxdata.channel_energy = detector.slope_calibrated \
* np.arange(0, detector.num_bins) \
* (detector.max_energy_kev / detector.num_bins) \
+ detector.intercept_calibrated
nxdata.channel_energy.attrs['units'] = 'keV'
for map_index in np.ndindex(map_config.shape):
scans, scan_number, scan_step_index = \
map_config.get_scan_step_index(map_index)
scanparser = scans.get_scanparser(scan_number)
nxdata.raw[map_index] = scanparser.get_detector_data(
detector.detector_name,
scan_step_index)
nxentry.data.makelink(nxdata.raw, name=detector.detector_name)
nxentry.data.makelink(
nxdata.channel_energy,
name=f'{detector.detector_name}_channel_energy')
if isinstance(nxentry.data.attrs['axes'], str):
nxentry.data.attrs['axes'] = [
nxentry.data.attrs['axes'],
f'{detector.detector_name}_channel_energy']
else:
nxentry.data.attrs['axes'] += [
f'{detector.detector_name}_channel_energy']
nxentry.data.attrs['signal'] = detector.detector_name
return nxroot
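# Illustrative sketch (not part of the CHAP API): get_nxroot() above converts
# nominal MCA bin energies to calibrated channel energies with the linear
# correction obtained from the ceria calibration:
# energy = slope * (bin_index * max_energy / num_bins) + intercept.
# The helper below restates that mapping on its own; the example values in
# the trailing comment are assumptions, not real calibration results.
def _demo_channel_energies(num_bins, max_energy_kev, slope, intercept):
    """Return calibrated channel energies (keV) for each MCA bin."""
    import numpy as np
    nominal = np.arange(0, num_bins) * (max_energy_kev / num_bins)
    return slope * nominal + intercept

# Example usage (made-up numbers): a 4096-bin detector spanning 0-200 keV
# with a near-unity gain correction and a small offset.
# energies = _demo_channel_energies(4096, 200.0, 1.002, -0.035)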
class StrainAnalysisProcessor(Processor):
"""Processor that takes a map of MCA data and returns a map of
sample strains
"""
def process(self,
data,
config=None,
save_figures=False,
outputdir='.',
inputdir='.',
interactive=False):
"""Return strain analysis maps & associated metadata in an NXprocess.
:param data: input data containing configurations for a map,
completed ceria calibration, and parameters for strain
analysis
:type data: list[PipelineData]
:param config: initialization parameters for an instance of
CHAP.edd.models.StrainAnalysisConfig, defaults to
None
:type config: dict, optional
:param save_figures: save .pngs of plots for checking inputs &
outputs of this Processor, defaults to False
:type save_figures: bool, optional
:param outputdir: directory to which any output figures will
be saved, defaults to '.'
:type outputdir: str, optional
:param inputdir: input directory, used only if files in the
input configuration are not absolute paths, defaults to '.'
:type inputdir: str, optional
:param interactive: allow for user interactions, defaults to
False
:type interactive: bool, optional
:return: NXprocess containing metadata about strain analysis
processing parameters and empty datasets for strain maps
to be filled in later.
:rtype: nexusformat.nexus.NXprocess
"""
# Get required configuration models from input data
# map_config = self.get_config(
# data, 'common.models.map.MapConfig')
ceria_calibration_config = self.get_config(
data, 'edd.models.MCACeriaCalibrationConfig', inputdir=inputdir)
try:
strain_analysis_config = self.get_config(
data, 'edd.models.StrainAnalysisConfig')
except Exception as data_exc:
self.logger.info('No valid strain analysis config in input '
+ 'pipeline data, using config parameter instead')
from CHAP.edd.models import StrainAnalysisConfig
try:
strain_analysis_config = StrainAnalysisConfig(
**config, inputdir=inputdir)
except Exception as dict_exc:
raise RuntimeError(
'Could not construct a valid StrainAnalysisConfig from the '
'config argument') from dict_exc
nxroot = self.get_nxroot(
#map_config,
strain_analysis_config.map_config,
ceria_calibration_config,
strain_analysis_config,
save_figures=save_figures,
outputdir=outputdir,
interactive=interactive)
self.logger.debug(nxroot.tree)
return nxroot
def get_nxroot(self,
map_config,
ceria_calibration_config,
strain_analysis_config,
save_figures=False,
outputdir='.',
interactive=False):
"""Return NXroot containing strain maps.
:param map_config: Input map configuration
:type map_config: CHAP.common.models.map.MapConfig
:param ceria_calibration_config: Results of ceria calibration
:type ceria_calibration_config:
'CHAP.edd.models.MCACeriaCalibrationConfig'
:param strain_analysis_config: Strain analysis processing
configuration
:type strain_analysis_config: CHAP.edd.models.StrainAnalysisConfig
:param save_figures: save .pngs of plots for checking inputs &
outputs of this Processor, defaults to False
:type save_figures: bool, optional
:param outputdir: directory to which any output figures will
be saved, defaults to '.'
:type outputdir: str, optional
:param interactive: allow for user interactions, defaults to
False
:type interactive: bool, optional
:return: NXroot containing strain maps
:rtype: nexusformat.nexus.NXroot
"""
from nexusformat.nexus import (NXcollection,
NXdata,
NXdetector,
NXfield,
NXparameters,
NXprocess,
NXroot)
import numpy as np
from CHAP.common import MapProcessor
from CHAP.edd.utils import hc
from CHAP.utils.fit import FitMap
for detector in strain_analysis_config.detectors:
calibration = [
d for d in ceria_calibration_config.detectors \
if d.detector_name == detector.detector_name][0]
detector.add_calibration(calibration)
nxroot = NXroot()
nxroot[map_config.title] = MapProcessor.get_nxentry(map_config)
nxentry = nxroot[map_config.title]
nxroot[f'{map_config.title}_strainanalysis'] = NXprocess()
nxprocess = nxroot[f'{map_config.title}_strainanalysis']
nxprocess.strain_analysis_config = dumps(strain_analysis_config.dict())
# Setup plottable data group
nxprocess.data = NXdata()
nxprocess.default = 'data'
nxdata = nxprocess.data
nxdata.attrs['axes'] = map_config.dims
def linkdims(nxgroup):
for dim in map_config.dims:
nxgroup.makelink(nxentry.data[dim])
nxgroup.attrs[f'{dim}_indices'] = \
nxentry.data.attrs[f'{dim}_indices']
linkdims(nxdata)
# Select interactive params / save figures
if save_figures or interactive:
import matplotlib.pyplot as plt
from CHAP.edd.utils import select_hkls
from CHAP.utils.general import draw_mask_1d
for detector in strain_analysis_config.detectors:
x = np.linspace(detector.intercept_calibrated,
detector.max_energy_kev \
* detector.slope_calibrated,
detector.num_bins)
y = strain_analysis_config.mca_data(
detector,
(0,) * len(strain_analysis_config.map_config.shape))
fig = select_hkls(detector,
strain_analysis_config.materials,
detector.tth_calibrated,
y, x, interactive)
if save_figures:
fig.savefig(os.path.join(
outputdir,
f'{detector.detector_name}_strainanalysis_hkls.png'))
plt.close()
if interactive:
self.logger.info(
'Interactively select a mask in the matplotlib figure')
mask, include_bin_ranges, figure = draw_mask_1d(
y, xdata=x,
current_index_ranges=detector.include_bin_ranges,
label='reference spectrum',
title='Click and drag to select ranges of MCA data to\n'
+ 'include when analyzing strain.',
xlabel='MCA channel (index)',
ylabel='MCA intensity (counts)',
test_mode=not interactive,
return_figure=True
)
detector.include_bin_ranges = include_bin_ranges
if save_figures:
figure.savefig(os.path.join(
outputdir,
f'{detector.detector_name}_strainanalysis_mask.png'))
plt.close()
if interactive:
from CHAP.edd.utils import select_material_params
x = np.linspace(
strain_analysis_config.detectors[0].intercept_calibrated,
detector.max_energy_kev \
* detector.slope_calibrated,
detector.num_bins)
y = strain_analysis_config.mca_data(
strain_analysis_config.detectors[0],
(0,) * len(strain_analysis_config.map_config.shape))
tth = strain_analysis_config.detectors[0].tth_calibrated
strain_analysis_config.materials = select_material_params(
x, y, tth, materials=strain_analysis_config.materials)
for detector in strain_analysis_config.detectors:
# Setup NXdata group
self.logger.debug(
f'Setting up NXdata group for {detector.detector_name}')
nxprocess[detector.detector_name] = NXdetector()
nxdetector = nxprocess[detector.detector_name]
nxdetector.local_name = detector.detector_name
nxdetector.detector_config = dumps(detector.dict())
nxdetector.data = NXdata()
det_nxdata = nxdetector.data
det_nxdata.attrs['axes'] = map_config.dims + ['energy']
linkdims(det_nxdata)
all_energies = np.arange(0, detector.num_bins) \
* (detector.max_energy_kev / detector.num_bins) \
* detector.slope_calibrated \
+ detector.intercept_calibrated
mask = detector.mca_mask()
energies = all_energies[mask]
det_nxdata.energy = NXfield(value=energies,
attrs={'units': 'keV'})
det_nxdata.attrs['energy_indices'] = len(map_config.dims)
det_nxdata.intensity = NXfield(
dtype='uint16',
shape=(*map_config.shape, len(energies)),
attrs={'units': 'counts'})
det_nxdata.microstrain = NXfield(
dtype='float64',
shape=map_config.shape,
attrs={'long_name': 'Strain (\u03BC\u03B5)'})
# Gather detector data
self.logger.debug(
f'Gathering detector data for {detector.detector_name}')
for map_index in np.ndindex(map_config.shape):
try:
scans, scan_number, scan_step_index = \
map_config.get_scan_step_index(map_index)
except Exception:
continue
scanparser = scans.get_scanparser(scan_number)
intensity = scanparser.get_detector_data(
detector.detector_name, scan_step_index)\
.astype('uint16')[mask]
det_nxdata.intensity[map_index] = intensity
det_nxdata.summed_intensity = det_nxdata.intensity.sum(axis=-1)
# Perform strain analysis
self.logger.debug(
f'Beginning strain analysis for {detector.detector_name}')
fit_hkls, fit_ds = detector.fit_ds(
strain_analysis_config.materials)
peak_locations = hc / (
2. * fit_ds * np.sin(0.5*np.radians(detector.tth_calibrated)))
# KLS: Use the below def of peak_locations when
# FitMap.create_multipeak_model can accept a list of maps
# for centers.
# tth = np.radians(detector.map_tth(map_config))
# peak_locations = [hc / (2. * d0 * np.sin(0.5*tth)) \
# for d0 in fit_ds]
# Perform initial fit: assume uniform strain for all HKLs
self.logger.debug('Performing uniform fit')
fit = FitMap(det_nxdata.intensity.nxdata, x=energies)
fit.create_multipeak_model(
peak_locations,
fit_type='uniform',
peak_models=detector.peak_models,
background=detector.background)
fit.fit()
uniform_fit_centers = [
fit.best_values[
fit.best_parameters().index(f'peak{i+1}_center')]
for i in range(len(peak_locations))]
uniform_fit_errors = [
fit.best_errors[
fit.best_parameters().index(f'peak{i+1}_center')]
for i in range(len(peak_locations))]
# Add uniform fit results to the NeXus structure
nxdetector.uniform_fit = NXcollection()
fit_nxgroup = nxdetector.uniform_fit
# Full map of results
fit_nxgroup.results = NXdata()
fit_nxdata = fit_nxgroup.results
fit_nxdata.attrs['axes'] = map_config.dims + ['energy']
linkdims(fit_nxdata)
fit_nxdata.makelink(det_nxdata.energy)
fit_nxdata.attrs['energy_indices'] = len(map_config.dims)
for d in fit.best_results:
if d.endswith('_fit'):
fit_nxdata.fits = fit.best_results[d]
fit_nxdata.residuals = fit.residual
# Peak-by-peak results
fit_nxgroup.fit_hkl_centers = NXdata()
fit_nxdata = fit_nxgroup.fit_hkl_centers
fit_nxdata.attrs['axes'] = map_config.dims
linkdims(fit_nxdata)
for hkl, center_guessed, centers_fit, centers_errors in \
zip(fit_hkls, peak_locations,
uniform_fit_centers, uniform_fit_errors):
hkl_name = '_'.join(str(hkl)[1:-1].split(' '))
fit_nxgroup[hkl_name] = NXparameters()
fit_nxgroup[hkl_name].initial_guess = center_guessed
fit_nxgroup[hkl_name].initial_guess.attrs['units'] = 'keV'
fit_nxgroup[hkl_name].centers = NXdata()
fit_nxgroup[hkl_name].centers.attrs['axes'] = map_config.dims
linkdims(fit_nxgroup[hkl_name].centers)
fit_nxgroup[hkl_name].centers.values = NXfield(
value=centers_fit, attrs={'units': 'keV'})
fit_nxgroup[hkl_name].centers.errors = NXfield(
value=centers_errors)
fit_nxdata.makelink(fit_nxgroup[f'{hkl_name}/centers/values'],
name=hkl_name)
# Perform second fit: do not assume uniform strain for all
# HKLs, and use the fit peak centers from the uniform fit
# as initial guesses
self.logger.debug('Performing unconstrained fit')
fit.create_multipeak_model(fit_type='unconstrained')
fit.fit()
unconstrained_fit_centers = np.array(
[fit.best_values[
fit.best_parameters()\
.index(f'peak{i+1}_center')]
for i in range(len(peak_locations))])
unconstrained_fit_errors = np.array(
[fit.best_errors[
fit.best_parameters()\
.index(f'peak{i+1}_center')]
for i in range(len(peak_locations))])
unconstrained_strains = np.empty_like(unconstrained_fit_centers)
for i, peak_loc in enumerate(peak_locations):
unconstrained_strains[i] = np.log(
peak_loc / unconstrained_fit_centers[i])
unconstrained_strain = np.mean(unconstrained_strains, axis=0)
det_nxdata.microstrain.nxdata = unconstrained_strain * 1e6
# Add unconstrained fit results to the NeXus structure
nxdetector.unconstrained_fit = NXcollection()
fit_nxgroup = nxdetector.unconstrained_fit
# Full map of results
fit_nxgroup.data = NXdata()
fit_nxdata = fit_nxgroup.data
fit_nxdata.attrs['axes'] = map_config.dims + ['energy']
linkdims(fit_nxdata)
fit_nxdata.makelink(det_nxdata.energy)
fit_nxdata.attrs['energy_indices'] = len(map_config.dims)
for d in fit.best_results:
if d.endswith('_fit'):
fit_nxdata.fits = fit.best_results[d]
fit_nxdata.residuals = fit.residual
# Peak-by-peak results
fit_nxgroup.fit_hkl_centers = NXdata()
fit_nxdata = fit_nxgroup.fit_hkl_centers
fit_nxdata.attrs['axes'] = map_config.dims
linkdims(fit_nxdata)
for (hkl, center_guessed,
centers_fit, centers_errors) in \
zip(fit_hkls, peak_locations,
unconstrained_fit_centers, unconstrained_fit_errors):
hkl_name = '_'.join(str(hkl)[1:-1].split(' '))
fit_nxgroup[hkl_name] = NXparameters()
fit_nxgroup[hkl_name].initial_guess = center_guessed
fit_nxgroup[hkl_name].initial_guess.attrs['units'] = 'keV'
fit_nxgroup[hkl_name].centers = NXdata()
fit_nxgroup[hkl_name].centers.attrs['axes'] = map_config.dims
linkdims(fit_nxgroup[hkl_name].centers)
fit_nxgroup[hkl_name].centers.values = NXfield(
value=centers_fit, attrs={'units': 'keV'})
fit_nxgroup[hkl_name].centers.errors = NXfield(
value=centers_errors)
fit_nxdata.makelink(fit_nxgroup[f'{hkl_name}/centers/values'],
name=hkl_name)
return nxroot
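# Illustrative sketch (not part of the CHAP API): the unconstrained fit above
# reports strain per map point as the mean, over HKL peaks, of
# log(theoretical peak energy / fitted peak center), scaled to microstrain.
# The helper below restates that reduction for a stack of fitted-center maps;
# the shapes mirror the FitMap output used above, but any numbers passed in
# are placeholders.
def _demo_microstrain(peak_locations, fitted_centers):
    """Return microstrain averaged over HKL peaks.

    `peak_locations` holds one theoretical energy (keV) per HKL;
    `fitted_centers` has shape (n_hkl, *map_shape) with fitted centers.
    """
    import numpy as np
    peak_locations = np.asarray(peak_locations, dtype=float)
    fitted_centers = np.asarray(fitted_centers, dtype=float)
    # Broadcast the per-HKL theoretical energies over the map dimensions
    shape = (-1,) + (1,) * (fitted_centers.ndim - 1)
    strains = np.log(peak_locations.reshape(shape) / fitted_centers)
    return np.mean(strains, axis=0) * 1e6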
if __name__ == '__main__':
# local modules
from CHAP.processor import main
main()
/slixmppfix-1.4.3.tar.gz/slixmppfix-1.4.3/slixmpp/plugins/xep_0077/register.py

import logging
import ssl
from slixmpp.stanza import StreamFeatures, Iq
from slixmpp.xmlstream import register_stanza_plugin, JID
from slixmpp.plugins import BasePlugin
from slixmpp.plugins.xep_0077 import stanza, Register, RegisterFeature
log = logging.getLogger(__name__)
class XEP_0077(BasePlugin):
"""
XEP-0077: In-Band Registration
"""
name = 'xep_0077'
description = 'XEP-0077: In-Band Registration'
dependencies = {'xep_0004', 'xep_0066'}
stanza = stanza
default_config = {
'create_account': True,
'force_registration': False,
'order': 50
}
def plugin_init(self):
register_stanza_plugin(StreamFeatures, RegisterFeature)
register_stanza_plugin(Iq, Register)
if not self.xmpp.is_component:
self.xmpp.register_feature('register',
self._handle_register_feature,
restart=False,
order=self.order)
register_stanza_plugin(Register, self.xmpp['xep_0004'].stanza.Form)
register_stanza_plugin(Register, self.xmpp['xep_0066'].stanza.OOB)
self.xmpp.add_event_handler('connected', self._force_registration)
def plugin_end(self):
if not self.xmpp.is_component:
self.xmpp.unregister_feature('register', self.order)
def _force_registration(self, event):
if self.force_registration:
self.xmpp.add_filter('in', self._force_stream_feature)
def _force_stream_feature(self, stanza):
if isinstance(stanza, StreamFeatures):
if not self.xmpp.disable_starttls:
if 'starttls' not in self.xmpp.features:
return stanza
elif not isinstance(self.xmpp.socket, ssl.SSLSocket):
return stanza
if 'mechanisms' not in self.xmpp.features:
log.debug('Forced adding in-band registration stream feature')
stanza.enable('register')
self.xmpp.del_filter('in', self._force_stream_feature)
return stanza
def _handle_register_feature(self, features):
if 'mechanisms' in self.xmpp.features:
# We have already logged in with an account
return False
if self.create_account and self.xmpp.event_handled('register'):
form = self.get_registration()
self.xmpp.event('register', form)
return True
return False
def get_registration(self, jid=None, ifrom=None,
timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'get'
iq['to'] = jid
iq['from'] = ifrom
iq.enable('register')
return iq.send(timeout=timeout, callback=callback)
def cancel_registration(self, jid=None, ifrom=None,
timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['to'] = jid
iq['from'] = ifrom
iq['register']['remove'] = True
return iq.send(timeout=timeout, callback=callback)
def change_password(self, password, jid=None, ifrom=None,
timeout=None, callback=None):
iq = self.xmpp.Iq()
iq['type'] = 'set'
iq['to'] = jid
iq['from'] = ifrom
if self.xmpp.is_component:
ifrom = JID(ifrom)
iq['register']['username'] = ifrom.user
else:
iq['register']['username'] = self.xmpp.boundjid.user
iq['register']['password'] = password
return iq.send(timeout=timeout, callback=callback)
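# Illustrative usage sketch (not part of this plugin module): a client script
# might drive in-band registration roughly as follows. It registers this
# plugin, listens for the 'register' event emitted by
# _handle_register_feature(), and submits username/password in a set Iq.
# The JID and password are placeholders and error handling is minimal.
from slixmpp import ClientXMPP
from slixmpp.exceptions import IqError, IqTimeout

class RegisterBot(ClientXMPP):
    def __init__(self, jid, password):
        super().__init__(jid, password)
        self._password = password
        self.register_plugin('xep_0077')  # In-Band Registration
        self.add_event_handler('register', self.on_register)

    async def on_register(self, event):
        # Fill in and submit the registration form advertised by the server
        resp = self.Iq()
        resp['type'] = 'set'
        resp['register']['username'] = self.boundjid.user
        resp['register']['password'] = self._password
        try:
            await resp.send()
        except (IqError, IqTimeout):
            self.disconnect()

# bot = RegisterBot('newuser@example.org', 'hunter2')
# bot.connect()
# bot.process(forever=False)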
/boot-synth-1.2.0.tar.gz/boot-synth-1.2.0/synth/projects_master/nginx_router/frontend/react/node_modules/@jest/source-map/node_modules/source-map/dist/source-map.min.js | !function(e,n){"object"==typeof exports&&"object"==typeof module?module.exports=n():"function"==typeof define&&define.amd?define([],n):"object"==typeof exports?exports.sourceMap=n():e.sourceMap=n()}(this,function(){return function(e){function n(t){if(r[t])return r[t].exports;var o=r[t]={exports:{},id:t,loaded:!1};return e[t].call(o.exports,o,o.exports,n),o.loaded=!0,o.exports}var r={};return n.m=e,n.c=r,n.p="",n(0)}([function(e,n,r){n.SourceMapGenerator=r(1).SourceMapGenerator,n.SourceMapConsumer=r(7).SourceMapConsumer,n.SourceNode=r(10).SourceNode},function(e,n,r){function t(e){e||(e={}),this._file=i.getArg(e,"file",null),this._sourceRoot=i.getArg(e,"sourceRoot",null),this._skipValidation=i.getArg(e,"skipValidation",!1),this._sources=new s,this._names=new s,this._mappings=new a,this._sourcesContents=null}var o=r(2),i=r(4),s=r(5).ArraySet,a=r(6).MappingList;t.prototype._version=3,t.fromSourceMap=function(e){var n=e.sourceRoot,r=new t({file:e.file,sourceRoot:n});return e.eachMapping(function(e){var t={generated:{line:e.generatedLine,column:e.generatedColumn}};null!=e.source&&(t.source=e.source,null!=n&&(t.source=i.relative(n,t.source)),t.original={line:e.originalLine,column:e.originalColumn},null!=e.name&&(t.name=e.name)),r.addMapping(t)}),e.sources.forEach(function(t){var o=t;null!==n&&(o=i.relative(n,t)),r._sources.has(o)||r._sources.add(o);var s=e.sourceContentFor(t);null!=s&&r.setSourceContent(t,s)}),r},t.prototype.addMapping=function(e){var n=i.getArg(e,"generated"),r=i.getArg(e,"original",null),t=i.getArg(e,"source",null),o=i.getArg(e,"name",null);this._skipValidation||this._validateMapping(n,r,t,o),null!=t&&(t=String(t),this._sources.has(t)||this._sources.add(t)),null!=o&&(o=String(o),this._names.has(o)||this._names.add(o)),this._mappings.add({generatedLine:n.line,generatedColumn:n.column,originalLine:null!=r&&r.line,originalColumn:null!=r&&r.column,source:t,name:o})},t.prototype.setSourceContent=function(e,n){var r=e;null!=this._sourceRoot&&(r=i.relative(this._sourceRoot,r)),null!=n?(this._sourcesContents||(this._sourcesContents=Object.create(null)),this._sourcesContents[i.toSetString(r)]=n):this._sourcesContents&&(delete this._sourcesContents[i.toSetString(r)],0===Object.keys(this._sourcesContents).length&&(this._sourcesContents=null))},t.prototype.applySourceMap=function(e,n,r){var t=n;if(null==n){if(null==e.file)throw new Error('SourceMapGenerator.prototype.applySourceMap requires either an explicit source file, or the source map\'s "file" property. 
Both were omitted.');t=e.file}var o=this._sourceRoot;null!=o&&(t=i.relative(o,t));var a=new s,u=new s;this._mappings.unsortedForEach(function(n){if(n.source===t&&null!=n.originalLine){var s=e.originalPositionFor({line:n.originalLine,column:n.originalColumn});null!=s.source&&(n.source=s.source,null!=r&&(n.source=i.join(r,n.source)),null!=o&&(n.source=i.relative(o,n.source)),n.originalLine=s.line,n.originalColumn=s.column,null!=s.name&&(n.name=s.name))}var l=n.source;null==l||a.has(l)||a.add(l);var c=n.name;null==c||u.has(c)||u.add(c)},this),this._sources=a,this._names=u,e.sources.forEach(function(n){var t=e.sourceContentFor(n);null!=t&&(null!=r&&(n=i.join(r,n)),null!=o&&(n=i.relative(o,n)),this.setSourceContent(n,t))},this)},t.prototype._validateMapping=function(e,n,r,t){if(n&&"number"!=typeof n.line&&"number"!=typeof n.column)throw new Error("original.line and original.column are not numbers -- you probably meant to omit the original mapping entirely and only map the generated position. If so, pass null for the original mapping instead of an object with empty or null values.");if((!(e&&"line"in e&&"column"in e&&e.line>0&&e.column>=0)||n||r||t)&&!(e&&"line"in e&&"column"in e&&n&&"line"in n&&"column"in n&&e.line>0&&e.column>=0&&n.line>0&&n.column>=0&&r))throw new Error("Invalid mapping: "+JSON.stringify({generated:e,source:r,original:n,name:t}))},t.prototype._serializeMappings=function(){for(var e,n,r,t,s=0,a=1,u=0,l=0,c=0,g=0,p="",h=this._mappings.toArray(),f=0,d=h.length;f<d;f++){if(n=h[f],e="",n.generatedLine!==a)for(s=0;n.generatedLine!==a;)e+=";",a++;else if(f>0){if(!i.compareByGeneratedPositionsInflated(n,h[f-1]))continue;e+=","}e+=o.encode(n.generatedColumn-s),s=n.generatedColumn,null!=n.source&&(t=this._sources.indexOf(n.source),e+=o.encode(t-g),g=t,e+=o.encode(n.originalLine-1-l),l=n.originalLine-1,e+=o.encode(n.originalColumn-u),u=n.originalColumn,null!=n.name&&(r=this._names.indexOf(n.name),e+=o.encode(r-c),c=r)),p+=e}return p},t.prototype._generateSourcesContent=function(e,n){return e.map(function(e){if(!this._sourcesContents)return null;null!=n&&(e=i.relative(n,e));var r=i.toSetString(e);return Object.prototype.hasOwnProperty.call(this._sourcesContents,r)?this._sourcesContents[r]:null},this)},t.prototype.toJSON=function(){var e={version:this._version,sources:this._sources.toArray(),names:this._names.toArray(),mappings:this._serializeMappings()};return null!=this._file&&(e.file=this._file),null!=this._sourceRoot&&(e.sourceRoot=this._sourceRoot),this._sourcesContents&&(e.sourcesContent=this._generateSourcesContent(e.sources,e.sourceRoot)),e},t.prototype.toString=function(){return JSON.stringify(this.toJSON())},n.SourceMapGenerator=t},function(e,n,r){function t(e){return e<0?(-e<<1)+1:(e<<1)+0}function o(e){var n=1===(1&e),r=e>>1;return n?-r:r}var i=r(3),s=5,a=1<<s,u=a-1,l=a;n.encode=function(e){var n,r="",o=t(e);do n=o&u,o>>>=s,o>0&&(n|=l),r+=i.encode(n);while(o>0);return r},n.decode=function(e,n,r){var t,a,c=e.length,g=0,p=0;do{if(n>=c)throw new Error("Expected more digits in base 64 VLQ value.");if(a=i.decode(e.charCodeAt(n++)),a===-1)throw new Error("Invalid base64 digit: "+e.charAt(n-1));t=!!(a&l),a&=u,g+=a<<p,p+=s}while(t);r.value=o(g),r.rest=n}},function(e,n){var r="ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/".split("");n.encode=function(e){if(0<=e&&e<r.length)return r[e];throw new TypeError("Must be between 0 and 63: "+e)},n.decode=function(e){var n=65,r=90,t=97,o=122,i=48,s=57,a=43,u=47,l=26,c=52;return 
n<=e&&e<=r?e-n:t<=e&&e<=o?e-t+l:i<=e&&e<=s?e-i+c:e==a?62:e==u?63:-1}},function(e,n){function r(e,n,r){if(n in e)return e[n];if(3===arguments.length)return r;throw new Error('"'+n+'" is a required argument.')}function t(e){var n=e.match(v);return n?{scheme:n[1],auth:n[2],host:n[3],port:n[4],path:n[5]}:null}function o(e){var n="";return e.scheme&&(n+=e.scheme+":"),n+="//",e.auth&&(n+=e.auth+"@"),e.host&&(n+=e.host),e.port&&(n+=":"+e.port),e.path&&(n+=e.path),n}function i(e){var r=e,i=t(e);if(i){if(!i.path)return e;r=i.path}for(var s,a=n.isAbsolute(r),u=r.split(/\/+/),l=0,c=u.length-1;c>=0;c--)s=u[c],"."===s?u.splice(c,1):".."===s?l++:l>0&&(""===s?(u.splice(c+1,l),l=0):(u.splice(c,2),l--));return r=u.join("/"),""===r&&(r=a?"/":"."),i?(i.path=r,o(i)):r}function s(e,n){""===e&&(e="."),""===n&&(n=".");var r=t(n),s=t(e);if(s&&(e=s.path||"/"),r&&!r.scheme)return s&&(r.scheme=s.scheme),o(r);if(r||n.match(y))return n;if(s&&!s.host&&!s.path)return s.host=n,o(s);var a="/"===n.charAt(0)?n:i(e.replace(/\/+$/,"")+"/"+n);return s?(s.path=a,o(s)):a}function a(e,n){""===e&&(e="."),e=e.replace(/\/$/,"");for(var r=0;0!==n.indexOf(e+"/");){var t=e.lastIndexOf("/");if(t<0)return n;if(e=e.slice(0,t),e.match(/^([^\/]+:\/)?\/*$/))return n;++r}return Array(r+1).join("../")+n.substr(e.length+1)}function u(e){return e}function l(e){return g(e)?"$"+e:e}function c(e){return g(e)?e.slice(1):e}function g(e){if(!e)return!1;var n=e.length;if(n<9)return!1;if(95!==e.charCodeAt(n-1)||95!==e.charCodeAt(n-2)||111!==e.charCodeAt(n-3)||116!==e.charCodeAt(n-4)||111!==e.charCodeAt(n-5)||114!==e.charCodeAt(n-6)||112!==e.charCodeAt(n-7)||95!==e.charCodeAt(n-8)||95!==e.charCodeAt(n-9))return!1;for(var r=n-10;r>=0;r--)if(36!==e.charCodeAt(r))return!1;return!0}function p(e,n,r){var t=f(e.source,n.source);return 0!==t?t:(t=e.originalLine-n.originalLine,0!==t?t:(t=e.originalColumn-n.originalColumn,0!==t||r?t:(t=e.generatedColumn-n.generatedColumn,0!==t?t:(t=e.generatedLine-n.generatedLine,0!==t?t:f(e.name,n.name)))))}function h(e,n,r){var t=e.generatedLine-n.generatedLine;return 0!==t?t:(t=e.generatedColumn-n.generatedColumn,0!==t||r?t:(t=f(e.source,n.source),0!==t?t:(t=e.originalLine-n.originalLine,0!==t?t:(t=e.originalColumn-n.originalColumn,0!==t?t:f(e.name,n.name)))))}function f(e,n){return e===n?0:null===e?1:null===n?-1:e>n?1:-1}function d(e,n){var r=e.generatedLine-n.generatedLine;return 0!==r?r:(r=e.generatedColumn-n.generatedColumn,0!==r?r:(r=f(e.source,n.source),0!==r?r:(r=e.originalLine-n.originalLine,0!==r?r:(r=e.originalColumn-n.originalColumn,0!==r?r:f(e.name,n.name)))))}function m(e){return JSON.parse(e.replace(/^\)]}'[^\n]*\n/,""))}function _(e,n,r){if(n=n||"",e&&("/"!==e[e.length-1]&&"/"!==n[0]&&(e+="/"),n=e+n),r){var a=t(r);if(!a)throw new Error("sourceMapURL could not be parsed");if(a.path){var u=a.path.lastIndexOf("/");u>=0&&(a.path=a.path.substring(0,u+1))}n=s(o(a),n)}return i(n)}n.getArg=r;var v=/^(?:([\w+\-.]+):)?\/\/(?:(\w+:\w+)@)?([\w.-]*)(?::(\d+))?(.*)$/,y=/^data:.+\,.+$/;n.urlParse=t,n.urlGenerate=o,n.normalize=i,n.join=s,n.isAbsolute=function(e){return"/"===e.charAt(0)||v.test(e)},n.relative=a;var C=function(){var e=Object.create(null);return!("__proto__"in e)}();n.toSetString=C?u:l,n.fromSetString=C?u:c,n.compareByOriginalPositions=p,n.compareByGeneratedPositionsDeflated=h,n.compareByGeneratedPositionsInflated=d,n.parseSourceMapInput=m,n.computeSourceURL=_},function(e,n,r){function t(){this._array=[],this._set=s?new Map:Object.create(null)}var o=r(4),i=Object.prototype.hasOwnProperty,s="undefined"!=typeof 
Map;t.fromArray=function(e,n){for(var r=new t,o=0,i=e.length;o<i;o++)r.add(e[o],n);return r},t.prototype.size=function(){return s?this._set.size:Object.getOwnPropertyNames(this._set).length},t.prototype.add=function(e,n){var r=s?e:o.toSetString(e),t=s?this.has(e):i.call(this._set,r),a=this._array.length;t&&!n||this._array.push(e),t||(s?this._set.set(e,a):this._set[r]=a)},t.prototype.has=function(e){if(s)return this._set.has(e);var n=o.toSetString(e);return i.call(this._set,n)},t.prototype.indexOf=function(e){if(s){var n=this._set.get(e);if(n>=0)return n}else{var r=o.toSetString(e);if(i.call(this._set,r))return this._set[r]}throw new Error('"'+e+'" is not in the set.')},t.prototype.at=function(e){if(e>=0&&e<this._array.length)return this._array[e];throw new Error("No element indexed by "+e)},t.prototype.toArray=function(){return this._array.slice()},n.ArraySet=t},function(e,n,r){function t(e,n){var r=e.generatedLine,t=n.generatedLine,o=e.generatedColumn,s=n.generatedColumn;return t>r||t==r&&s>=o||i.compareByGeneratedPositionsInflated(e,n)<=0}function o(){this._array=[],this._sorted=!0,this._last={generatedLine:-1,generatedColumn:0}}var i=r(4);o.prototype.unsortedForEach=function(e,n){this._array.forEach(e,n)},o.prototype.add=function(e){t(this._last,e)?(this._last=e,this._array.push(e)):(this._sorted=!1,this._array.push(e))},o.prototype.toArray=function(){return this._sorted||(this._array.sort(i.compareByGeneratedPositionsInflated),this._sorted=!0),this._array},n.MappingList=o},function(e,n,r){function t(e,n){var r=e;return"string"==typeof e&&(r=a.parseSourceMapInput(e)),null!=r.sections?new s(r,n):new o(r,n)}function o(e,n){var r=e;"string"==typeof e&&(r=a.parseSourceMapInput(e));var t=a.getArg(r,"version"),o=a.getArg(r,"sources"),i=a.getArg(r,"names",[]),s=a.getArg(r,"sourceRoot",null),u=a.getArg(r,"sourcesContent",null),c=a.getArg(r,"mappings"),g=a.getArg(r,"file",null);if(t!=this._version)throw new Error("Unsupported version: "+t);s&&(s=a.normalize(s)),o=o.map(String).map(a.normalize).map(function(e){return s&&a.isAbsolute(s)&&a.isAbsolute(e)?a.relative(s,e):e}),this._names=l.fromArray(i.map(String),!0),this._sources=l.fromArray(o,!0),this._absoluteSources=this._sources.toArray().map(function(e){return a.computeSourceURL(s,e,n)}),this.sourceRoot=s,this.sourcesContent=u,this._mappings=c,this._sourceMapURL=n,this.file=g}function i(){this.generatedLine=0,this.generatedColumn=0,this.source=null,this.originalLine=null,this.originalColumn=null,this.name=null}function s(e,n){var r=e;"string"==typeof e&&(r=a.parseSourceMapInput(e));var o=a.getArg(r,"version"),i=a.getArg(r,"sections");if(o!=this._version)throw new Error("Unsupported version: "+o);this._sources=new l,this._names=new l;var s={line:-1,column:0};this._sections=i.map(function(e){if(e.url)throw new Error("Support for url field in sections not implemented.");var r=a.getArg(e,"offset"),o=a.getArg(r,"line"),i=a.getArg(r,"column");if(o<s.line||o===s.line&&i<s.column)throw new Error("Section offsets must be ordered and non-overlapping.");return s=r,{generatedOffset:{generatedLine:o+1,generatedColumn:i+1},consumer:new t(a.getArg(e,"map"),n)}})}var a=r(4),u=r(8),l=r(5).ArraySet,c=r(2),g=r(9).quickSort;t.fromSourceMap=function(e,n){return o.fromSourceMap(e,n)},t.prototype._version=3,t.prototype.__generatedMappings=null,Object.defineProperty(t.prototype,"_generatedMappings",{configurable:!0,enumerable:!0,get:function(){return 
this.__generatedMappings||this._parseMappings(this._mappings,this.sourceRoot),this.__generatedMappings}}),t.prototype.__originalMappings=null,Object.defineProperty(t.prototype,"_originalMappings",{configurable:!0,enumerable:!0,get:function(){return this.__originalMappings||this._parseMappings(this._mappings,this.sourceRoot),this.__originalMappings}}),t.prototype._charIsMappingSeparator=function(e,n){var r=e.charAt(n);return";"===r||","===r},t.prototype._parseMappings=function(e,n){throw new Error("Subclasses must implement _parseMappings")},t.GENERATED_ORDER=1,t.ORIGINAL_ORDER=2,t.GREATEST_LOWER_BOUND=1,t.LEAST_UPPER_BOUND=2,t.prototype.eachMapping=function(e,n,r){var o,i=n||null,s=r||t.GENERATED_ORDER;switch(s){case t.GENERATED_ORDER:o=this._generatedMappings;break;case t.ORIGINAL_ORDER:o=this._originalMappings;break;default:throw new Error("Unknown order of iteration.")}var u=this.sourceRoot;o.map(function(e){var n=null===e.source?null:this._sources.at(e.source);return n=a.computeSourceURL(u,n,this._sourceMapURL),{source:n,generatedLine:e.generatedLine,generatedColumn:e.generatedColumn,originalLine:e.originalLine,originalColumn:e.originalColumn,name:null===e.name?null:this._names.at(e.name)}},this).forEach(e,i)},t.prototype.allGeneratedPositionsFor=function(e){var n=a.getArg(e,"line"),r={source:a.getArg(e,"source"),originalLine:n,originalColumn:a.getArg(e,"column",0)};if(r.source=this._findSourceIndex(r.source),r.source<0)return[];var t=[],o=this._findMapping(r,this._originalMappings,"originalLine","originalColumn",a.compareByOriginalPositions,u.LEAST_UPPER_BOUND);if(o>=0){var i=this._originalMappings[o];if(void 0===e.column)for(var s=i.originalLine;i&&i.originalLine===s;)t.push({line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}),i=this._originalMappings[++o];else for(var l=i.originalColumn;i&&i.originalLine===n&&i.originalColumn==l;)t.push({line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}),i=this._originalMappings[++o]}return t},n.SourceMapConsumer=t,o.prototype=Object.create(t.prototype),o.prototype.consumer=t,o.prototype._findSourceIndex=function(e){var n=e;if(null!=this.sourceRoot&&(n=a.relative(this.sourceRoot,n)),this._sources.has(n))return this._sources.indexOf(n);var r;for(r=0;r<this._absoluteSources.length;++r)if(this._absoluteSources[r]==e)return r;return-1},o.fromSourceMap=function(e,n){var r=Object.create(o.prototype),t=r._names=l.fromArray(e._names.toArray(),!0),s=r._sources=l.fromArray(e._sources.toArray(),!0);r.sourceRoot=e._sourceRoot,r.sourcesContent=e._generateSourcesContent(r._sources.toArray(),r.sourceRoot),r.file=e._file,r._sourceMapURL=n,r._absoluteSources=r._sources.toArray().map(function(e){return a.computeSourceURL(r.sourceRoot,e,n)});for(var u=e._mappings.toArray().slice(),c=r.__generatedMappings=[],p=r.__originalMappings=[],h=0,f=u.length;h<f;h++){var d=u[h],m=new i;m.generatedLine=d.generatedLine,m.generatedColumn=d.generatedColumn,d.source&&(m.source=s.indexOf(d.source),m.originalLine=d.originalLine,m.originalColumn=d.originalColumn,d.name&&(m.name=t.indexOf(d.name)),p.push(m)),c.push(m)}return g(r.__originalMappings,a.compareByOriginalPositions),r},o.prototype._version=3,Object.defineProperty(o.prototype,"sources",{get:function(){return this._absoluteSources.slice()}}),o.prototype._parseMappings=function(e,n){for(var 
r,t,o,s,u,l=1,p=0,h=0,f=0,d=0,m=0,_=e.length,v=0,y={},C={},S=[],A=[];v<_;)if(";"===e.charAt(v))l++,v++,p=0;else if(","===e.charAt(v))v++;else{for(r=new i,r.generatedLine=l,s=v;s<_&&!this._charIsMappingSeparator(e,s);s++);if(t=e.slice(v,s),o=y[t])v+=t.length;else{for(o=[];v<s;)c.decode(e,v,C),u=C.value,v=C.rest,o.push(u);if(2===o.length)throw new Error("Found a source, but no line and column");if(3===o.length)throw new Error("Found a source and line, but no column");y[t]=o}r.generatedColumn=p+o[0],p=r.generatedColumn,o.length>1&&(r.source=d+o[1],d+=o[1],r.originalLine=h+o[2],h=r.originalLine,r.originalLine+=1,r.originalColumn=f+o[3],f=r.originalColumn,o.length>4&&(r.name=m+o[4],m+=o[4])),A.push(r),"number"==typeof r.originalLine&&S.push(r)}g(A,a.compareByGeneratedPositionsDeflated),this.__generatedMappings=A,g(S,a.compareByOriginalPositions),this.__originalMappings=S},o.prototype._findMapping=function(e,n,r,t,o,i){if(e[r]<=0)throw new TypeError("Line must be greater than or equal to 1, got "+e[r]);if(e[t]<0)throw new TypeError("Column must be greater than or equal to 0, got "+e[t]);return u.search(e,n,o,i)},o.prototype.computeColumnSpans=function(){for(var e=0;e<this._generatedMappings.length;++e){var n=this._generatedMappings[e];if(e+1<this._generatedMappings.length){var r=this._generatedMappings[e+1];if(n.generatedLine===r.generatedLine){n.lastGeneratedColumn=r.generatedColumn-1;continue}}n.lastGeneratedColumn=1/0}},o.prototype.originalPositionFor=function(e){var n={generatedLine:a.getArg(e,"line"),generatedColumn:a.getArg(e,"column")},r=this._findMapping(n,this._generatedMappings,"generatedLine","generatedColumn",a.compareByGeneratedPositionsDeflated,a.getArg(e,"bias",t.GREATEST_LOWER_BOUND));if(r>=0){var o=this._generatedMappings[r];if(o.generatedLine===n.generatedLine){var i=a.getArg(o,"source",null);null!==i&&(i=this._sources.at(i),i=a.computeSourceURL(this.sourceRoot,i,this._sourceMapURL));var s=a.getArg(o,"name",null);return null!==s&&(s=this._names.at(s)),{source:i,line:a.getArg(o,"originalLine",null),column:a.getArg(o,"originalColumn",null),name:s}}}return{source:null,line:null,column:null,name:null}},o.prototype.hasContentsOfAllSources=function(){return!!this.sourcesContent&&(this.sourcesContent.length>=this._sources.size()&&!this.sourcesContent.some(function(e){return null==e}))},o.prototype.sourceContentFor=function(e,n){if(!this.sourcesContent)return null;var r=this._findSourceIndex(e);if(r>=0)return this.sourcesContent[r];var t=e;null!=this.sourceRoot&&(t=a.relative(this.sourceRoot,t));var o;if(null!=this.sourceRoot&&(o=a.urlParse(this.sourceRoot))){var i=t.replace(/^file:\/\//,"");if("file"==o.scheme&&this._sources.has(i))return this.sourcesContent[this._sources.indexOf(i)];if((!o.path||"/"==o.path)&&this._sources.has("/"+t))return this.sourcesContent[this._sources.indexOf("/"+t)]}if(n)return null;throw new Error('"'+t+'" is not in the SourceMap.')},o.prototype.generatedPositionFor=function(e){var n=a.getArg(e,"source");if(n=this._findSourceIndex(n),n<0)return{line:null,column:null,lastColumn:null};var r={source:n,originalLine:a.getArg(e,"line"),originalColumn:a.getArg(e,"column")},o=this._findMapping(r,this._originalMappings,"originalLine","originalColumn",a.compareByOriginalPositions,a.getArg(e,"bias",t.GREATEST_LOWER_BOUND));if(o>=0){var 
i=this._originalMappings[o];if(i.source===r.source)return{line:a.getArg(i,"generatedLine",null),column:a.getArg(i,"generatedColumn",null),lastColumn:a.getArg(i,"lastGeneratedColumn",null)}}return{line:null,column:null,lastColumn:null}},n.BasicSourceMapConsumer=o,s.prototype=Object.create(t.prototype),s.prototype.constructor=t,s.prototype._version=3,Object.defineProperty(s.prototype,"sources",{get:function(){for(var e=[],n=0;n<this._sections.length;n++)for(var r=0;r<this._sections[n].consumer.sources.length;r++)e.push(this._sections[n].consumer.sources[r]);return e}}),s.prototype.originalPositionFor=function(e){var n={generatedLine:a.getArg(e,"line"),generatedColumn:a.getArg(e,"column")},r=u.search(n,this._sections,function(e,n){var r=e.generatedLine-n.generatedOffset.generatedLine;return r?r:e.generatedColumn-n.generatedOffset.generatedColumn}),t=this._sections[r];return t?t.consumer.originalPositionFor({line:n.generatedLine-(t.generatedOffset.generatedLine-1),column:n.generatedColumn-(t.generatedOffset.generatedLine===n.generatedLine?t.generatedOffset.generatedColumn-1:0),bias:e.bias}):{source:null,line:null,column:null,name:null}},s.prototype.hasContentsOfAllSources=function(){return this._sections.every(function(e){return e.consumer.hasContentsOfAllSources()})},s.prototype.sourceContentFor=function(e,n){for(var r=0;r<this._sections.length;r++){var t=this._sections[r],o=t.consumer.sourceContentFor(e,!0);if(o)return o}if(n)return null;throw new Error('"'+e+'" is not in the SourceMap.')},s.prototype.generatedPositionFor=function(e){for(var n=0;n<this._sections.length;n++){var r=this._sections[n];if(r.consumer._findSourceIndex(a.getArg(e,"source"))!==-1){var t=r.consumer.generatedPositionFor(e);if(t){var o={line:t.line+(r.generatedOffset.generatedLine-1),column:t.column+(r.generatedOffset.generatedLine===t.line?r.generatedOffset.generatedColumn-1:0)};return o}}}return{line:null,column:null}},s.prototype._parseMappings=function(e,n){this.__generatedMappings=[],this.__originalMappings=[];for(var r=0;r<this._sections.length;r++)for(var t=this._sections[r],o=t.consumer._generatedMappings,i=0;i<o.length;i++){var s=o[i],u=t.consumer._sources.at(s.source);u=a.computeSourceURL(t.consumer.sourceRoot,u,this._sourceMapURL),this._sources.add(u),u=this._sources.indexOf(u);var l=null;s.name&&(l=t.consumer._names.at(s.name),this._names.add(l),l=this._names.indexOf(l));var c={source:u,generatedLine:s.generatedLine+(t.generatedOffset.generatedLine-1),generatedColumn:s.generatedColumn+(t.generatedOffset.generatedLine===s.generatedLine?t.generatedOffset.generatedColumn-1:0),originalLine:s.originalLine,originalColumn:s.originalColumn,name:l};this.__generatedMappings.push(c),"number"==typeof c.originalLine&&this.__originalMappings.push(c)}g(this.__generatedMappings,a.compareByGeneratedPositionsDeflated),g(this.__originalMappings,a.compareByOriginalPositions)},n.IndexedSourceMapConsumer=s},function(e,n){function r(e,t,o,i,s,a){var u=Math.floor((t-e)/2)+e,l=s(o,i[u],!0);return 0===l?u:l>0?t-u>1?r(u,t,o,i,s,a):a==n.LEAST_UPPER_BOUND?t<i.length?t:-1:u:u-e>1?r(e,u,o,i,s,a):a==n.LEAST_UPPER_BOUND?u:e<0?-1:e}n.GREATEST_LOWER_BOUND=1,n.LEAST_UPPER_BOUND=2,n.search=function(e,t,o,i){if(0===t.length)return-1;var s=r(-1,t.length,e,t,o,i||n.GREATEST_LOWER_BOUND);if(s<0)return-1;for(;s-1>=0&&0===o(t[s],t[s-1],!0);)--s;return s}},function(e,n){function r(e,n,r){var t=e[n];e[n]=e[r],e[r]=t}function t(e,n){return Math.round(e+Math.random()*(n-e))}function o(e,n,i,s){if(i<s){var a=t(i,s),u=i-1;r(e,a,s);for(var 
l=e[s],c=i;c<s;c++)n(e[c],l)<=0&&(u+=1,r(e,u,c));r(e,u+1,c);var g=u+1;o(e,n,i,g-1),o(e,n,g+1,s)}}n.quickSort=function(e,n){o(e,n,0,e.length-1)}},function(e,n,r){function t(e,n,r,t,o){this.children=[],this.sourceContents={},this.line=null==e?null:e,this.column=null==n?null:n,this.source=null==r?null:r,this.name=null==o?null:o,this[u]=!0,null!=t&&this.add(t)}var o=r(1).SourceMapGenerator,i=r(4),s=/(\r?\n)/,a=10,u="$$$isSourceNode$$$";t.fromStringWithSourceMap=function(e,n,r){function o(e,n){if(null===e||void 0===e.source)a.add(n);else{var o=r?i.join(r,e.source):e.source;a.add(new t(e.originalLine,e.originalColumn,o,n,e.name))}}var a=new t,u=e.split(s),l=0,c=function(){function e(){return l<u.length?u[l++]:void 0}var n=e(),r=e()||"";return n+r},g=1,p=0,h=null;return n.eachMapping(function(e){if(null!==h){if(!(g<e.generatedLine)){var n=u[l]||"",r=n.substr(0,e.generatedColumn-p);return u[l]=n.substr(e.generatedColumn-p),p=e.generatedColumn,o(h,r),void(h=e)}o(h,c()),g++,p=0}for(;g<e.generatedLine;)a.add(c()),g++;if(p<e.generatedColumn){var n=u[l]||"";a.add(n.substr(0,e.generatedColumn)),u[l]=n.substr(e.generatedColumn),p=e.generatedColumn}h=e},this),l<u.length&&(h&&o(h,c()),a.add(u.splice(l).join(""))),n.sources.forEach(function(e){var t=n.sourceContentFor(e);null!=t&&(null!=r&&(e=i.join(r,e)),a.setSourceContent(e,t))}),a},t.prototype.add=function(e){if(Array.isArray(e))e.forEach(function(e){this.add(e)},this);else{if(!e[u]&&"string"!=typeof e)throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. Got "+e);e&&this.children.push(e)}return this},t.prototype.prepend=function(e){if(Array.isArray(e))for(var n=e.length-1;n>=0;n--)this.prepend(e[n]);else{if(!e[u]&&"string"!=typeof e)throw new TypeError("Expected a SourceNode, string, or an array of SourceNodes and strings. 
Got "+e);this.children.unshift(e)}return this},t.prototype.walk=function(e){for(var n,r=0,t=this.children.length;r<t;r++)n=this.children[r],n[u]?n.walk(e):""!==n&&e(n,{source:this.source,line:this.line,column:this.column,name:this.name})},t.prototype.join=function(e){var n,r,t=this.children.length;if(t>0){for(n=[],r=0;r<t-1;r++)n.push(this.children[r]),n.push(e);n.push(this.children[r]),this.children=n}return this},t.prototype.replaceRight=function(e,n){var r=this.children[this.children.length-1];return r[u]?r.replaceRight(e,n):"string"==typeof r?this.children[this.children.length-1]=r.replace(e,n):this.children.push("".replace(e,n)),this},t.prototype.setSourceContent=function(e,n){this.sourceContents[i.toSetString(e)]=n},t.prototype.walkSourceContents=function(e){for(var n=0,r=this.children.length;n<r;n++)this.children[n][u]&&this.children[n].walkSourceContents(e);for(var t=Object.keys(this.sourceContents),n=0,r=t.length;n<r;n++)e(i.fromSetString(t[n]),this.sourceContents[t[n]])},t.prototype.toString=function(){var e="";return this.walk(function(n){e+=n}),e},t.prototype.toStringWithSourceMap=function(e){var n={code:"",line:1,column:0},r=new o(e),t=!1,i=null,s=null,u=null,l=null;return this.walk(function(e,o){n.code+=e,null!==o.source&&null!==o.line&&null!==o.column?(i===o.source&&s===o.line&&u===o.column&&l===o.name||r.addMapping({source:o.source,original:{line:o.line,column:o.column},generated:{line:n.line,column:n.column},name:o.name}),i=o.source,s=o.line,u=o.column,l=o.name,t=!0):t&&(r.addMapping({generated:{line:n.line,column:n.column}}),i=null,t=!1);for(var c=0,g=e.length;c<g;c++)e.charCodeAt(c)===a?(n.line++,n.column=0,c+1===g?(i=null,t=!1):t&&r.addMapping({source:o.source,original:{line:o.line,column:o.column},generated:{line:n.line,column:n.column},name:o.name})):n.column++}),this.walkSourceContents(function(e,n){r.setSourceContent(e,n)}),{code:n.code,map:r}},n.SourceNode=t}])});
//# sourceMappingURL=source-map.min.js.map
/girder-jobs-3.1.22.tar.gz/girder-jobs-3.1.22/girder_jobs/web_client/JobStatus.js

import _ from 'underscore';
// The same job status enum as the server.
var JobStatus = {
_map: {},
text: function (status) {
var text = status;
if (status in this._map) {
text = this._map[status].text;
}
return '' + text;
},
icon: function (status) {
var icon;
if (status in this._map) {
icon = this._map[status].icon;
}
return icon;
},
color: function (status) {
return this._map[status].color;
},
textColor: function (status) {
return this._map[status].textColor;
},
isCancelable: _.constant(false),
finished: function (status) {
return this._map[status].finished;
},
/**
* Convert this status text into a value appropriate for an HTML class name.
*/
classAffix: function (status) {
return this.text(status).toLowerCase().replace(/ /g, '-');
},
/**
* Add new job statuses. The argument should be an object mapping the enum
* symbol name to an information object for that status. The info object
* must include a "value" field (its integer value), a "text" field, which
* is how the status should be rendered as text, and an "icon" field for
* what classes to apply to the icon for this status.
*/
registerStatus: function (status) {
_.each(status, function (info, name) {
this[name] = info.value;
const statusInfo = {
text: info.text,
icon: info.icon,
color: info.color,
textColor: 'white',
finished: _.has(info, 'finished') ? info.finished : false
};
if (_.has(info, 'textColor')) {
statusInfo.textColor = info.textColor;
}
this._map[info.value] = statusInfo;
}, this);
},
getAll: function () {
return _.values(this._map);
}
};
JobStatus.registerStatus({
INACTIVE: {
value: 0,
text: 'Inactive',
icon: 'icon-pause',
color: '#ccc',
textColor: '#555',
finished: false
},
QUEUED: {
value: 1,
text: 'Queued',
icon: 'icon-ellipsis',
color: '#dbc345',
finished: false
},
RUNNING: {
value: 2,
text: 'Running',
icon: 'icon-spin3 animate-spin',
color: '#6666d5',
finished: false
},
SUCCESS: {
value: 3,
text: 'Success',
icon: 'icon-ok',
color: '#53b653',
finished: true
},
ERROR: {
value: 4,
text: 'Error',
icon: 'icon-cancel',
color: '#d44',
finished: true
},
CANCELED: {
value: 5,
text: 'Canceled',
icon: 'icon-cancel',
color: '#545',
finished: true
}
});
export default JobStatus;
/farbox_bucket-0.2068.tar.gz/farbox_bucket-0.2068/farbox_bucket/server/static/lib/mark/jquery.mark.js

"use strict";
var _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; };
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _typeof = typeof Symbol === "function" && typeof Symbol.iterator === "symbol" ? function (obj) { return typeof obj; } : function (obj) { return obj && typeof Symbol === "function" && obj.constructor === Symbol && obj !== Symbol.prototype ? "symbol" : typeof obj; };
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
(function (factory, window, document) {
if (typeof define === "function" && define.amd) {
define(["jquery"], function (jQuery) {
return factory(window, document, jQuery);
});
} else if ((typeof module === "undefined" ? "undefined" : _typeof(module)) === "object" && module.exports) {
module.exports = factory(window, document, require("jquery"));
} else {
factory(window, document, jQuery);
}
})(function (window, document, $) {
var Mark = function () {
function Mark(ctx) {
_classCallCheck(this, Mark);
this.ctx = ctx;
this.ie = false;
var ua = window.navigator.userAgent;
if (ua.indexOf("MSIE") > -1 || ua.indexOf("Trident") > -1) {
this.ie = true;
}
}
_createClass(Mark, [{
key: "log",
value: function log(msg) {
var level = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : "debug";
var log = this.opt.log;
if (!this.opt.debug) {
return;
}
if ((typeof log === "undefined" ? "undefined" : _typeof(log)) === "object" && typeof log[level] === "function") {
log[level]("mark.js: " + msg);
}
}
}, {
key: "escapeStr",
value: function escapeStr(str) {
return str.replace(/[\-\[\]\/\{\}\(\)\*\+\?\.\\\^\$\|]/g, "\\$&");
}
}, {
key: "createRegExp",
value: function createRegExp(str) {
if (this.opt.wildcards !== "disabled") {
str = this.setupWildcardsRegExp(str);
}
str = this.escapeStr(str);
if (Object.keys(this.opt.synonyms).length) {
str = this.createSynonymsRegExp(str);
}
if (this.opt.ignoreJoiners) {
str = this.setupIgnoreJoinersRegExp(str);
}
if (this.opt.diacritics) {
str = this.createDiacriticsRegExp(str);
}
str = this.createMergedBlanksRegExp(str);
if (this.opt.ignoreJoiners) {
str = this.createIgnoreJoinersRegExp(str);
}
if (this.opt.wildcards !== "disabled") {
str = this.createWildcardsRegExp(str);
}
str = this.createAccuracyRegExp(str);
return str;
}
}, {
key: "createSynonymsRegExp",
value: function createSynonymsRegExp(str) {
var syn = this.opt.synonyms,
sens = this.opt.caseSensitive ? "" : "i";
for (var index in syn) {
if (syn.hasOwnProperty(index)) {
var value = syn[index],
k1 = this.opt.wildcards !== "disabled" ? this.setupWildcardsRegExp(index) : this.escapeStr(index),
k2 = this.opt.wildcards !== "disabled" ? this.setupWildcardsRegExp(value) : this.escapeStr(value);
if (k1 !== "" && k2 !== "") {
str = str.replace(new RegExp("(" + k1 + "|" + k2 + ")", "gm" + sens), "(" + k1 + "|" + k2 + ")");
}
}
}
return str;
}
}, {
key: "setupWildcardsRegExp",
value: function setupWildcardsRegExp(str) {
str = str.replace(/(?:\\)*\?/g, function (val) {
return val.charAt(0) === "\\" ? "?" : "\x01";
});
return str.replace(/(?:\\)*\*/g, function (val) {
return val.charAt(0) === "\\" ? "*" : "\x02";
});
}
}, {
key: "createWildcardsRegExp",
value: function createWildcardsRegExp(str) {
var spaces = this.opt.wildcards === "withSpaces";
return str.replace(/\u0001/g, spaces ? "[\\S\\s]?" : "\\S?").replace(/\u0002/g, spaces ? "[\\S\\s]*?" : "\\S*");
}
}, {
key: "setupIgnoreJoinersRegExp",
value: function setupIgnoreJoinersRegExp(str) {
return str.replace(/[^(|)\\]/g, function (val, indx, original) {
var nextChar = original.charAt(indx + 1);
if (/[(|)\\]/.test(nextChar) || nextChar === "") {
return val;
} else {
return val + "\0";
}
});
}
}, {
key: "createIgnoreJoinersRegExp",
value: function createIgnoreJoinersRegExp(str) {
return str.split("\0").join("[\\u00ad|\\u200b|\\u200c|\\u200d]?");
}
}, {
key: "createDiacriticsRegExp",
value: function createDiacriticsRegExp(str) {
var sens = this.opt.caseSensitive ? "" : "i",
dct = this.opt.caseSensitive ? ["aàáâãäåāąă", "AÀÁÂÃÄÅĀĄĂ", "cçćč", "CÇĆČ", "dđď", "DĐĎ", "eèéêëěēę", "EÈÉÊËĚĒĘ", "iìíîïī", "IÌÍÎÏĪ", "lł", "LŁ", "nñňń", "NÑŇŃ", "oòóôõöøō", "OÒÓÔÕÖØŌ", "rř", "RŘ", "sšśșş", "SŠŚȘŞ", "tťțţ", "TŤȚŢ", "uùúûüůū", "UÙÚÛÜŮŪ", "yÿý", "YŸÝ", "zžżź", "ZŽŻŹ"] : ["aàáâãäåāąăAÀÁÂÃÄÅĀĄĂ", "cçćčCÇĆČ", "dđďDĐĎ", "eèéêëěēęEÈÉÊËĚĒĘ", "iìíîïīIÌÍÎÏĪ", "lłLŁ", "nñňńNÑŇŃ", "oòóôõöøōOÒÓÔÕÖØŌ", "rřRŘ", "sšśșşSŠŚȘŞ", "tťțţTŤȚŢ", "uùúûüůūUÙÚÛÜŮŪ", "yÿýYŸÝ", "zžżźZŽŻŹ"];
var handled = [];
str.split("").forEach(function (ch) {
dct.every(function (dct) {
if (dct.indexOf(ch) !== -1) {
if (handled.indexOf(dct) > -1) {
return false;
}
str = str.replace(new RegExp("[" + dct + "]", "gm" + sens), "[" + dct + "]");
handled.push(dct);
}
return true;
});
});
return str;
}
}, {
key: "createMergedBlanksRegExp",
value: function createMergedBlanksRegExp(str) {
return str.replace(/[\s]+/gmi, "[\\s]+");
}
}, {
key: "createAccuracyRegExp",
value: function createAccuracyRegExp(str) {
var _this = this;
var chars = "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~\xA1\xBF";
var acc = this.opt.accuracy,
val = typeof acc === "string" ? acc : acc.value,
ls = typeof acc === "string" ? [] : acc.limiters,
lsJoin = "";
ls.forEach(function (limiter) {
lsJoin += "|" + _this.escapeStr(limiter);
});
switch (val) {
case "partially":
default:
return "()(" + str + ")";
case "complementary":
lsJoin = "\\s" + (lsJoin ? lsJoin : this.escapeStr(chars));
return "()([^" + lsJoin + "]*" + str + "[^" + lsJoin + "]*)";
case "exactly":
return "(^|\\s" + lsJoin + ")(" + str + ")(?=$|\\s" + lsJoin + ")";
}
}
}, {
key: "getSeparatedKeywords",
value: function getSeparatedKeywords(sv) {
var _this2 = this;
var stack = [];
sv.forEach(function (kw) {
if (!_this2.opt.separateWordSearch) {
if (kw.trim() && stack.indexOf(kw) === -1) {
stack.push(kw);
}
} else {
kw.split(" ").forEach(function (kwSplitted) {
if (kwSplitted.trim() && stack.indexOf(kwSplitted) === -1) {
stack.push(kwSplitted);
}
});
}
});
return {
"keywords": stack.sort(function (a, b) {
return b.length - a.length;
}),
"length": stack.length
};
}
}, {
key: "isNumeric",
value: function isNumeric(value) {
return Number(parseFloat(value)) == value;
}
}, {
key: "checkRanges",
value: function checkRanges(array) {
var _this3 = this;
if (!Array.isArray(array) || Object.prototype.toString.call(array[0]) !== "[object Object]") {
this.log("markRanges() will only accept an array of objects");
this.opt.noMatch(array);
return [];
}
var stack = [];
var last = 0;
array.sort(function (a, b) {
return a.start - b.start;
}).forEach(function (item) {
var _callNoMatchOnInvalid = _this3.callNoMatchOnInvalidRanges(item, last),
start = _callNoMatchOnInvalid.start,
end = _callNoMatchOnInvalid.end,
valid = _callNoMatchOnInvalid.valid;
if (valid) {
item.start = start;
item.length = end - start;
stack.push(item);
last = end;
}
});
return stack;
}
}, {
key: "callNoMatchOnInvalidRanges",
value: function callNoMatchOnInvalidRanges(range, last) {
var start = void 0,
end = void 0,
valid = false;
if (range && typeof range.start !== "undefined") {
start = parseInt(range.start, 10);
end = start + parseInt(range.length, 10);
if (this.isNumeric(range.start) && this.isNumeric(range.length) && end - last > 0 && end - start > 0) {
valid = true;
} else {
this.log("Ignoring invalid or overlapping range: " + ("" + JSON.stringify(range)));
this.opt.noMatch(range);
}
} else {
this.log("Ignoring invalid range: " + JSON.stringify(range));
this.opt.noMatch(range);
}
return {
start: start,
end: end,
valid: valid
};
}
}, {
key: "checkWhitespaceRanges",
value: function checkWhitespaceRanges(range, originalLength, string) {
var end = void 0,
valid = true,
max = string.length,
offset = originalLength - max,
start = parseInt(range.start, 10) - offset;
start = start > max ? max : start;
end = start + parseInt(range.length, 10);
if (end > max) {
end = max;
this.log("End range automatically set to the max value of " + max);
}
if (start < 0 || end - start < 0 || start > max || end > max) {
valid = false;
this.log("Invalid range: " + JSON.stringify(range));
this.opt.noMatch(range);
} else if (string.substring(start, end).replace(/\s+/g, "") === "") {
valid = false;
this.log("Skipping whitespace only range: " + JSON.stringify(range));
this.opt.noMatch(range);
}
return {
start: start,
end: end,
valid: valid
};
}
}, {
key: "getTextNodes",
value: function getTextNodes(cb) {
var _this4 = this;
var val = "",
nodes = [];
this.iterator.forEachNode(NodeFilter.SHOW_TEXT, function (node) {
nodes.push({
start: val.length,
end: (val += node.textContent).length,
node: node
});
}, function (node) {
if (_this4.matchesExclude(node.parentNode)) {
return NodeFilter.FILTER_REJECT;
} else {
return NodeFilter.FILTER_ACCEPT;
}
}, function () {
cb({
value: val,
nodes: nodes
});
});
}
}, {
key: "matchesExclude",
value: function matchesExclude(el) {
return DOMIterator.matches(el, this.opt.exclude.concat(["script", "style", "title", "head", "html"]));
}
}, {
key: "wrapRangeInTextNode",
value: function wrapRangeInTextNode(node, start, end) {
var hEl = !this.opt.element ? "mark" : this.opt.element,
startNode = node.splitText(start),
ret = startNode.splitText(end - start);
var repl = document.createElement(hEl);
repl.setAttribute("data-markjs", "true");
if (this.opt.className) {
repl.setAttribute("class", this.opt.className);
}
repl.textContent = startNode.textContent;
startNode.parentNode.replaceChild(repl, startNode);
return ret;
}
}, {
key: "wrapRangeInMappedTextNode",
value: function wrapRangeInMappedTextNode(dict, start, end, filterCb, eachCb) {
var _this5 = this;
dict.nodes.every(function (n, i) {
var sibl = dict.nodes[i + 1];
if (typeof sibl === "undefined" || sibl.start > start) {
if (!filterCb(n.node)) {
return false;
}
var s = start - n.start,
e = (end > n.end ? n.end : end) - n.start,
startStr = dict.value.substr(0, n.start),
endStr = dict.value.substr(e + n.start);
n.node = _this5.wrapRangeInTextNode(n.node, s, e);
dict.value = startStr + endStr;
dict.nodes.forEach(function (k, j) {
if (j >= i) {
if (dict.nodes[j].start > 0 && j !== i) {
dict.nodes[j].start -= e;
}
dict.nodes[j].end -= e;
}
});
end -= e;
eachCb(n.node.previousSibling, n.start);
if (end > n.end) {
start = n.end;
} else {
return false;
}
}
return true;
});
}
}, {
key: "wrapMatches",
value: function wrapMatches(regex, ignoreGroups, filterCb, eachCb, endCb) {
var _this6 = this;
var matchIdx = ignoreGroups === 0 ? 0 : ignoreGroups + 1;
this.getTextNodes(function (dict) {
dict.nodes.forEach(function (node) {
node = node.node;
var match = void 0;
while ((match = regex.exec(node.textContent)) !== null && match[matchIdx] !== "") {
if (!filterCb(match[matchIdx], node)) {
continue;
}
var pos = match.index;
if (matchIdx !== 0) {
for (var i = 1; i < matchIdx; i++) {
pos += match[i].length;
}
}
node = _this6.wrapRangeInTextNode(node, pos, pos + match[matchIdx].length);
eachCb(node.previousSibling);
regex.lastIndex = 0;
}
});
endCb();
});
}
}, {
key: "wrapMatchesAcrossElements",
value: function wrapMatchesAcrossElements(regex, ignoreGroups, filterCb, eachCb, endCb) {
var _this7 = this;
var matchIdx = ignoreGroups === 0 ? 0 : ignoreGroups + 1;
this.getTextNodes(function (dict) {
var match = void 0;
while ((match = regex.exec(dict.value)) !== null && match[matchIdx] !== "") {
var start = match.index;
if (matchIdx !== 0) {
for (var i = 1; i < matchIdx; i++) {
start += match[i].length;
}
}
var end = start + match[matchIdx].length;
_this7.wrapRangeInMappedTextNode(dict, start, end, function (node) {
return filterCb(match[matchIdx], node);
}, function (node, lastIndex) {
regex.lastIndex = lastIndex;
eachCb(node);
});
}
endCb();
});
}
}, {
key: "wrapRangeFromIndex",
value: function wrapRangeFromIndex(ranges, filterCb, eachCb, endCb) {
var _this8 = this;
this.getTextNodes(function (dict) {
var originalLength = dict.value.length;
ranges.forEach(function (range, counter) {
var _checkWhitespaceRange = _this8.checkWhitespaceRanges(range, originalLength, dict.value),
start = _checkWhitespaceRange.start,
end = _checkWhitespaceRange.end,
valid = _checkWhitespaceRange.valid;
if (valid) {
_this8.wrapRangeInMappedTextNode(dict, start, end, function (node) {
return filterCb(node, range, dict.value.substring(start, end), counter);
}, function (node) {
eachCb(node, range);
});
}
});
endCb();
});
}
}, {
key: "unwrapMatches",
value: function unwrapMatches(node) {
var parent = node.parentNode;
var docFrag = document.createDocumentFragment();
while (node.firstChild) {
docFrag.appendChild(node.removeChild(node.firstChild));
}
parent.replaceChild(docFrag, node);
if (!this.ie) {
parent.normalize();
} else {
this.normalizeTextNode(parent);
}
}
}, {
key: "normalizeTextNode",
value: function normalizeTextNode(node) {
if (!node) {
return;
}
if (node.nodeType === 3) {
while (node.nextSibling && node.nextSibling.nodeType === 3) {
node.nodeValue += node.nextSibling.nodeValue;
node.parentNode.removeChild(node.nextSibling);
}
} else {
this.normalizeTextNode(node.firstChild);
}
this.normalizeTextNode(node.nextSibling);
}
}, {
key: "markRegExp",
value: function markRegExp(regexp, opt) {
var _this9 = this;
this.opt = opt;
this.log("Searching with expression \"" + regexp + "\"");
var totalMatches = 0,
fn = "wrapMatches";
var eachCb = function eachCb(element) {
totalMatches++;
_this9.opt.each(element);
};
if (this.opt.acrossElements) {
fn = "wrapMatchesAcrossElements";
}
this[fn](regexp, this.opt.ignoreGroups, function (match, node) {
return _this9.opt.filter(node, match, totalMatches);
}, eachCb, function () {
if (totalMatches === 0) {
_this9.opt.noMatch(regexp);
}
_this9.opt.done(totalMatches);
});
}
}, {
key: "mark",
value: function mark(sv, opt) {
var _this10 = this;
this.opt = opt;
var totalMatches = 0,
fn = "wrapMatches";
var _getSeparatedKeywords = this.getSeparatedKeywords(typeof sv === "string" ? [sv] : sv),
kwArr = _getSeparatedKeywords.keywords,
kwArrLen = _getSeparatedKeywords.length,
sens = this.opt.caseSensitive ? "" : "i",
handler = function handler(kw) {
var regex = new RegExp(_this10.createRegExp(kw), "gm" + sens),
matches = 0;
_this10.log("Searching with expression \"" + regex + "\"");
_this10[fn](regex, 1, function (term, node) {
return _this10.opt.filter(node, kw, totalMatches, matches);
}, function (element) {
matches++;
totalMatches++;
_this10.opt.each(element);
}, function () {
if (matches === 0) {
_this10.opt.noMatch(kw);
}
if (kwArr[kwArrLen - 1] === kw) {
_this10.opt.done(totalMatches);
} else {
handler(kwArr[kwArr.indexOf(kw) + 1]);
}
});
};
if (this.opt.acrossElements) {
fn = "wrapMatchesAcrossElements";
}
if (kwArrLen === 0) {
this.opt.done(totalMatches);
} else {
handler(kwArr[0]);
}
}
}, {
key: "markRanges",
value: function markRanges(rawRanges, opt) {
var _this11 = this;
this.opt = opt;
var totalMatches = 0,
ranges = this.checkRanges(rawRanges);
if (ranges && ranges.length) {
this.log("Starting to mark with the following ranges: " + JSON.stringify(ranges));
this.wrapRangeFromIndex(ranges, function (node, range, match, counter) {
return _this11.opt.filter(node, range, match, counter);
}, function (element, range) {
totalMatches++;
_this11.opt.each(element, range);
}, function () {
_this11.opt.done(totalMatches);
});
} else {
this.opt.done(totalMatches);
}
}
}, {
key: "unmark",
value: function unmark(opt) {
var _this12 = this;
this.opt = opt;
var sel = this.opt.element ? this.opt.element : "*";
sel += "[data-markjs]";
if (this.opt.className) {
sel += "." + this.opt.className;
}
this.log("Removal selector \"" + sel + "\"");
this.iterator.forEachNode(NodeFilter.SHOW_ELEMENT, function (node) {
_this12.unwrapMatches(node);
}, function (node) {
var matchesSel = DOMIterator.matches(node, sel),
matchesExclude = _this12.matchesExclude(node);
if (!matchesSel || matchesExclude) {
return NodeFilter.FILTER_REJECT;
} else {
return NodeFilter.FILTER_ACCEPT;
}
}, this.opt.done);
}
}, {
key: "opt",
set: function set(val) {
this._opt = _extends({}, {
"element": "",
"className": "",
"exclude": [],
"iframes": false,
"iframesTimeout": 5000,
"separateWordSearch": true,
"diacritics": true,
"synonyms": {},
"accuracy": "partially",
"acrossElements": false,
"caseSensitive": false,
"ignoreJoiners": false,
"ignoreGroups": 0,
"wildcards": "disabled",
"each": function each() {},
"noMatch": function noMatch() {},
"filter": function filter() {
return true;
},
"done": function done() {},
"debug": false,
"log": window.console
}, val);
},
get: function get() {
return this._opt;
}
}, {
key: "iterator",
get: function get() {
return new DOMIterator(this.ctx, this.opt.iframes, this.opt.exclude, this.opt.iframesTimeout);
}
}]);
return Mark;
}();
var DOMIterator = function () {
function DOMIterator(ctx) {
var iframes = arguments.length > 1 && arguments[1] !== undefined ? arguments[1] : true;
var exclude = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : [];
var iframesTimeout = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : 5000;
_classCallCheck(this, DOMIterator);
this.ctx = ctx;
this.iframes = iframes;
this.exclude = exclude;
this.iframesTimeout = iframesTimeout;
}
_createClass(DOMIterator, [{
key: "getContexts",
value: function getContexts() {
var ctx = void 0,
filteredCtx = [];
if (typeof this.ctx === "undefined" || !this.ctx) {
ctx = [];
} else if (NodeList.prototype.isPrototypeOf(this.ctx)) {
ctx = Array.prototype.slice.call(this.ctx);
} else if (Array.isArray(this.ctx)) {
ctx = this.ctx;
} else if (typeof this.ctx === "string") {
ctx = Array.prototype.slice.call(document.querySelectorAll(this.ctx));
} else {
ctx = [this.ctx];
}
ctx.forEach(function (ctx) {
var isDescendant = filteredCtx.filter(function (contexts) {
return contexts.contains(ctx);
}).length > 0;
if (filteredCtx.indexOf(ctx) === -1 && !isDescendant) {
filteredCtx.push(ctx);
}
});
return filteredCtx;
}
}, {
key: "getIframeContents",
value: function getIframeContents(ifr, successFn) {
var errorFn = arguments.length > 2 && arguments[2] !== undefined ? arguments[2] : function () {};
var doc = void 0;
try {
var ifrWin = ifr.contentWindow;
doc = ifrWin.document;
if (!ifrWin || !doc) {
throw new Error("iframe inaccessible");
}
} catch (e) {
errorFn();
}
if (doc) {
successFn(doc);
}
}
}, {
key: "isIframeBlank",
value: function isIframeBlank(ifr) {
var bl = "about:blank",
src = ifr.getAttribute("src").trim(),
href = ifr.contentWindow.location.href;
return href === bl && src !== bl && src;
}
}, {
key: "observeIframeLoad",
value: function observeIframeLoad(ifr, successFn, errorFn) {
var _this13 = this;
var called = false,
tout = null;
var listener = function listener() {
if (called) {
return;
}
called = true;
clearTimeout(tout);
try {
if (!_this13.isIframeBlank(ifr)) {
ifr.removeEventListener("load", listener);
_this13.getIframeContents(ifr, successFn, errorFn);
}
} catch (e) {
errorFn();
}
};
ifr.addEventListener("load", listener);
tout = setTimeout(listener, this.iframesTimeout);
}
}, {
key: "onIframeReady",
value: function onIframeReady(ifr, successFn, errorFn) {
try {
if (ifr.contentWindow.document.readyState === "complete") {
if (this.isIframeBlank(ifr)) {
this.observeIframeLoad(ifr, successFn, errorFn);
} else {
this.getIframeContents(ifr, successFn, errorFn);
}
} else {
this.observeIframeLoad(ifr, successFn, errorFn);
}
} catch (e) {
errorFn();
}
}
}, {
key: "waitForIframes",
value: function waitForIframes(ctx, done) {
var _this14 = this;
var eachCalled = 0;
this.forEachIframe(ctx, function () {
return true;
}, function (ifr) {
eachCalled++;
_this14.waitForIframes(ifr.querySelector("html"), function () {
if (! --eachCalled) {
done();
}
});
}, function (handled) {
if (!handled) {
done();
}
});
}
}, {
key: "forEachIframe",
value: function forEachIframe(ctx, filter, each) {
var _this15 = this;
var end = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : function () {};
var ifr = ctx.querySelectorAll("iframe"),
open = ifr.length,
handled = 0;
ifr = Array.prototype.slice.call(ifr);
var checkEnd = function checkEnd() {
if (--open <= 0) {
end(handled);
}
};
if (!open) {
checkEnd();
}
ifr.forEach(function (ifr) {
if (DOMIterator.matches(ifr, _this15.exclude)) {
checkEnd();
} else {
_this15.onIframeReady(ifr, function (con) {
if (filter(ifr)) {
handled++;
each(con);
}
checkEnd();
}, checkEnd);
}
});
}
}, {
key: "createIterator",
value: function createIterator(ctx, whatToShow, filter) {
return document.createNodeIterator(ctx, whatToShow, filter, false);
}
}, {
key: "createInstanceOnIframe",
value: function createInstanceOnIframe(contents) {
return new DOMIterator(contents.querySelector("html"), this.iframes);
}
}, {
key: "compareNodeIframe",
value: function compareNodeIframe(node, prevNode, ifr) {
var compCurr = node.compareDocumentPosition(ifr),
prev = Node.DOCUMENT_POSITION_PRECEDING;
if (compCurr & prev) {
if (prevNode !== null) {
var compPrev = prevNode.compareDocumentPosition(ifr),
after = Node.DOCUMENT_POSITION_FOLLOWING;
if (compPrev & after) {
return true;
}
} else {
return true;
}
}
return false;
}
}, {
key: "getIteratorNode",
value: function getIteratorNode(itr) {
var prevNode = itr.previousNode();
var node = void 0;
if (prevNode === null) {
node = itr.nextNode();
} else {
node = itr.nextNode() && itr.nextNode();
}
return {
prevNode: prevNode,
node: node
};
}
}, {
key: "checkIframeFilter",
value: function checkIframeFilter(node, prevNode, currIfr, ifr) {
var key = false,
handled = false;
ifr.forEach(function (ifrDict, i) {
if (ifrDict.val === currIfr) {
key = i;
handled = ifrDict.handled;
}
});
if (this.compareNodeIframe(node, prevNode, currIfr)) {
if (key === false && !handled) {
ifr.push({
val: currIfr,
handled: true
});
} else if (key !== false && !handled) {
ifr[key].handled = true;
}
return true;
}
if (key === false) {
ifr.push({
val: currIfr,
handled: false
});
}
return false;
}
}, {
key: "handleOpenIframes",
value: function handleOpenIframes(ifr, whatToShow, eCb, fCb) {
var _this16 = this;
ifr.forEach(function (ifrDict) {
if (!ifrDict.handled) {
_this16.getIframeContents(ifrDict.val, function (con) {
_this16.createInstanceOnIframe(con).forEachNode(whatToShow, eCb, fCb);
});
}
});
}
}, {
key: "iterateThroughNodes",
value: function iterateThroughNodes(whatToShow, ctx, eachCb, filterCb, doneCb) {
var _this17 = this;
var itr = this.createIterator(ctx, whatToShow, filterCb);
var ifr = [],
elements = [],
node = void 0,
prevNode = void 0,
retrieveNodes = function retrieveNodes() {
var _getIteratorNode = _this17.getIteratorNode(itr);
prevNode = _getIteratorNode.prevNode;
node = _getIteratorNode.node;
return node;
};
while (retrieveNodes()) {
if (this.iframes) {
this.forEachIframe(ctx, function (currIfr) {
return _this17.checkIframeFilter(node, prevNode, currIfr, ifr);
}, function (con) {
_this17.createInstanceOnIframe(con).forEachNode(whatToShow, function (ifrNode) {
return elements.push(ifrNode);
}, filterCb);
});
}
elements.push(node);
}
elements.forEach(function (node) {
eachCb(node);
});
if (this.iframes) {
this.handleOpenIframes(ifr, whatToShow, eachCb, filterCb);
}
doneCb();
}
}, {
key: "forEachNode",
value: function forEachNode(whatToShow, each, filter) {
var _this18 = this;
var done = arguments.length > 3 && arguments[3] !== undefined ? arguments[3] : function () {};
var contexts = this.getContexts();
var open = contexts.length;
if (!open) {
done();
}
contexts.forEach(function (ctx) {
var ready = function ready() {
_this18.iterateThroughNodes(whatToShow, ctx, each, filter, function () {
if (--open <= 0) {
done();
}
});
};
if (_this18.iframes) {
_this18.waitForIframes(ctx, ready);
} else {
ready();
}
});
}
}], [{
key: "matches",
value: function matches(element, selector) {
var selectors = typeof selector === "string" ? [selector] : selector,
fn = element.matches || element.matchesSelector || element.msMatchesSelector || element.mozMatchesSelector || element.oMatchesSelector || element.webkitMatchesSelector;
if (fn) {
var match = false;
selectors.every(function (sel) {
if (fn.call(element, sel)) {
match = true;
return false;
}
return true;
});
return match;
} else {
return false;
}
}
}]);
return DOMIterator;
}();
$.fn.mark = function (sv, opt) {
new Mark(this.get()).mark(sv, opt);
return this;
};
$.fn.markRegExp = function (regexp, opt) {
new Mark(this.get()).markRegExp(regexp, opt);
return this;
};
$.fn.markRanges = function (ranges, opt) {
new Mark(this.get()).markRanges(ranges, opt);
return this;
};
$.fn.unmark = function (opt) {
new Mark(this.get()).unmark(opt);
return this;
};
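  // Usage sketch for the jQuery plugin methods defined above. Illustrative only:
  // the selector, keyword and option values are hypothetical, not taken from this file.
  //
  //   $("div.context").mark("keyword", {
  //     className: "highlight",      // hits become <mark class="highlight" data-markjs="true">
  //     separateWordSearch: true,    // "foo bar" is treated as two separate keywords
  //     done: function (total) { console.log(total + " matches"); }
  //   });
  //   $("div.context").unmark();     // remove the generated wrappers again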
return $;
}, window, document); | PypiClean |
/django-storages-redux-1.3.3.tar.gz/django-storages-redux-1.3.3/docs/_build/html/_static/websupport.js | (function($) {
$.fn.autogrow = function() {
return this.each(function() {
var textarea = this;
$.fn.autogrow.resize(textarea);
$(textarea)
.focus(function() {
textarea.interval = setInterval(function() {
$.fn.autogrow.resize(textarea);
}, 500);
})
.blur(function() {
clearInterval(textarea.interval);
});
});
};
$.fn.autogrow.resize = function(textarea) {
var lineHeight = parseInt($(textarea).css('line-height'), 10);
var lines = textarea.value.split('\n');
var columns = textarea.cols;
var lineCount = 0;
$.each(lines, function() {
lineCount += Math.ceil(this.length / columns) || 1;
});
var height = lineHeight * (lineCount + 1);
$(textarea).css('height', height);
};
})(jQuery);
(function($) {
var comp, by;
function init() {
initEvents();
initComparator();
}
function initEvents() {
$('a.comment-close').live("click", function(event) {
event.preventDefault();
hide($(this).attr('id').substring(2));
});
$('a.vote').live("click", function(event) {
event.preventDefault();
handleVote($(this));
});
$('a.reply').live("click", function(event) {
event.preventDefault();
openReply($(this).attr('id').substring(2));
});
$('a.close-reply').live("click", function(event) {
event.preventDefault();
closeReply($(this).attr('id').substring(2));
});
$('a.sort-option').live("click", function(event) {
event.preventDefault();
handleReSort($(this));
});
$('a.show-proposal').live("click", function(event) {
event.preventDefault();
showProposal($(this).attr('id').substring(2));
});
$('a.hide-proposal').live("click", function(event) {
event.preventDefault();
hideProposal($(this).attr('id').substring(2));
});
$('a.show-propose-change').live("click", function(event) {
event.preventDefault();
showProposeChange($(this).attr('id').substring(2));
});
$('a.hide-propose-change').live("click", function(event) {
event.preventDefault();
hideProposeChange($(this).attr('id').substring(2));
});
$('a.accept-comment').live("click", function(event) {
event.preventDefault();
acceptComment($(this).attr('id').substring(2));
});
$('a.delete-comment').live("click", function(event) {
event.preventDefault();
deleteComment($(this).attr('id').substring(2));
});
$('a.comment-markup').live("click", function(event) {
event.preventDefault();
toggleCommentMarkupBox($(this).attr('id').substring(2));
});
}
/**
* Set comp, which is a comparator function used for sorting and
* inserting comments into the list.
*/
function setComparator() {
// If the first three letters are "asc", sort in ascending order
// and remove the prefix.
if (by.substring(0,3) == 'asc') {
var i = by.substring(3);
comp = function(a, b) { return a[i] - b[i]; };
} else {
// Otherwise sort in descending order.
comp = function(a, b) { return b[by] - a[by]; };
}
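    // Example (values mirror the sort-option link classes in popupTemplate below):
    //   by = "rating" -> comp sorts descending on each comment's "rating" field;
    //   by = "ascage" -> the "asc" prefix is stripped and comp sorts ascending on "age".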
// Reset link styles and format the selected sort option.
$('a.sel').attr('href', '#').removeClass('sel');
$('a.by' + by).removeAttr('href').addClass('sel');
}
/**
* Create a comp function. If the user has preferences stored in
* the sortBy cookie, use those, otherwise use the default.
*/
function initComparator() {
by = 'rating'; // Default to sort by rating.
// If the sortBy cookie is set, use that instead.
if (document.cookie.length > 0) {
var start = document.cookie.indexOf('sortBy=');
if (start != -1) {
start = start + 7;
var end = document.cookie.indexOf(";", start);
        if (end == -1) {
          end = document.cookie.length;
        }
        by = unescape(document.cookie.substring(start, end));
}
}
setComparator();
}
/**
* Show a comment div.
*/
function show(id) {
$('#ao' + id).hide();
$('#ah' + id).show();
var context = $.extend({id: id}, opts);
var popup = $(renderTemplate(popupTemplate, context)).hide();
popup.find('textarea[name="proposal"]').hide();
popup.find('a.by' + by).addClass('sel');
var form = popup.find('#cf' + id);
form.submit(function(event) {
event.preventDefault();
addComment(form);
});
$('#s' + id).after(popup);
popup.slideDown('fast', function() {
getComments(id);
});
}
/**
* Hide a comment div.
*/
function hide(id) {
$('#ah' + id).hide();
$('#ao' + id).show();
var div = $('#sc' + id);
div.slideUp('fast', function() {
div.remove();
});
}
/**
* Perform an ajax request to get comments for a node
* and insert the comments into the comments tree.
*/
function getComments(id) {
$.ajax({
type: 'GET',
url: opts.getCommentsURL,
data: {node: id},
success: function(data, textStatus, request) {
var ul = $('#cl' + id);
var speed = 100;
$('#cf' + id)
.find('textarea[name="proposal"]')
.data('source', data.source);
if (data.comments.length === 0) {
ul.html('<li>No comments yet.</li>');
ul.data('empty', true);
} else {
// If there are comments, sort them and put them in the list.
var comments = sortComments(data.comments);
speed = data.comments.length * 100;
appendComments(comments, ul);
ul.data('empty', false);
}
$('#cn' + id).slideUp(speed + 200);
ul.slideDown(speed);
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem retrieving the comments.');
},
dataType: 'json'
});
}
/**
* Add a comment via ajax and insert the comment into the comment tree.
*/
function addComment(form) {
var node_id = form.find('input[name="node"]').val();
var parent_id = form.find('input[name="parent"]').val();
var text = form.find('textarea[name="comment"]').val();
var proposal = form.find('textarea[name="proposal"]').val();
if (text == '') {
showError('Please enter a comment.');
return;
}
// Disable the form that is being submitted.
form.find('textarea,input').attr('disabled', 'disabled');
// Send the comment to the server.
$.ajax({
type: "POST",
url: opts.addCommentURL,
dataType: 'json',
data: {
node: node_id,
parent: parent_id,
text: text,
proposal: proposal
},
success: function(data, textStatus, error) {
// Reset the form.
if (node_id) {
hideProposeChange(node_id);
}
form.find('textarea')
.val('')
.add(form.find('input'))
.removeAttr('disabled');
var ul = $('#cl' + (node_id || parent_id));
if (ul.data('empty')) {
$(ul).empty();
ul.data('empty', false);
}
insertComment(data.comment);
var ao = $('#ao' + node_id);
ao.find('img').attr({'src': opts.commentBrightImage});
if (node_id) {
// if this was a "root" comment, remove the commenting box
// (the user can get it back by reopening the comment popup)
$('#ca' + node_id).slideUp();
}
},
error: function(request, textStatus, error) {
form.find('textarea,input').removeAttr('disabled');
showError('Oops, there was a problem adding the comment.');
}
});
}
/**
* Recursively append comments to the main comment list and children
* lists, creating the comment tree.
*/
function appendComments(comments, ul) {
$.each(comments, function() {
var div = createCommentDiv(this);
ul.append($(document.createElement('li')).html(div));
appendComments(this.children, div.find('ul.comment-children'));
// To avoid stagnating data, don't store the comments children in data.
this.children = null;
div.data('comment', this);
});
}
/**
* After adding a new comment, it must be inserted in the correct
* location in the comment tree.
*/
function insertComment(comment) {
var div = createCommentDiv(comment);
// To avoid stagnating data, don't store the comments children in data.
comment.children = null;
div.data('comment', comment);
var ul = $('#cl' + (comment.node || comment.parent));
var siblings = getChildren(ul);
var li = $(document.createElement('li'));
li.hide();
// Determine where in the parents children list to insert this comment.
    for (var i = 0; i < siblings.length; i++) {
if (comp(comment, siblings[i]) <= 0) {
$('#cd' + siblings[i].id)
.parent()
.before(li.html(div));
li.slideDown('fast');
return;
}
}
// If we get here, this comment rates lower than all the others,
// or it is the only comment in the list.
ul.append(li.html(div));
li.slideDown('fast');
}
function acceptComment(id) {
$.ajax({
type: 'POST',
url: opts.acceptCommentURL,
data: {id: id},
success: function(data, textStatus, request) {
$('#cm' + id).fadeOut('fast');
$('#cd' + id).removeClass('moderate');
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem accepting the comment.');
}
});
}
function deleteComment(id) {
$.ajax({
type: 'POST',
url: opts.deleteCommentURL,
data: {id: id},
success: function(data, textStatus, request) {
var div = $('#cd' + id);
if (data == 'delete') {
// Moderator mode: remove the comment and all children immediately
div.slideUp('fast', function() {
div.remove();
});
return;
}
// User mode: only mark the comment as deleted
div
.find('span.user-id:first')
.text('[deleted]').end()
.find('div.comment-text:first')
.text('[deleted]').end()
.find('#cm' + id + ', #dc' + id + ', #ac' + id + ', #rc' + id +
', #sp' + id + ', #hp' + id + ', #cr' + id + ', #rl' + id)
.remove();
var comment = div.data('comment');
comment.username = '[deleted]';
comment.text = '[deleted]';
div.data('comment', comment);
},
error: function(request, textStatus, error) {
showError('Oops, there was a problem deleting the comment.');
}
});
}
function showProposal(id) {
$('#sp' + id).hide();
$('#hp' + id).show();
$('#pr' + id).slideDown('fast');
}
function hideProposal(id) {
$('#hp' + id).hide();
$('#sp' + id).show();
$('#pr' + id).slideUp('fast');
}
function showProposeChange(id) {
$('#pc' + id).hide();
$('#hc' + id).show();
var textarea = $('#pt' + id);
textarea.val(textarea.data('source'));
$.fn.autogrow.resize(textarea[0]);
textarea.slideDown('fast');
}
function hideProposeChange(id) {
$('#hc' + id).hide();
$('#pc' + id).show();
var textarea = $('#pt' + id);
textarea.val('').removeAttr('disabled');
textarea.slideUp('fast');
}
function toggleCommentMarkupBox(id) {
$('#mb' + id).toggle();
}
/** Handle when the user clicks on a sort by link. */
function handleReSort(link) {
var classes = link.attr('class').split(/\s+/);
for (var i=0; i<classes.length; i++) {
if (classes[i] != 'sort-option') {
by = classes[i].substring(2);
}
}
setComparator();
// Save/update the sortBy cookie.
var expiration = new Date();
expiration.setDate(expiration.getDate() + 365);
document.cookie= 'sortBy=' + escape(by) +
';expires=' + expiration.toUTCString();
$('ul.comment-ul').each(function(index, ul) {
var comments = getChildren($(ul), true);
comments = sortComments(comments);
appendComments(comments, $(ul).empty());
});
}
/**
* Function to process a vote when a user clicks an arrow.
*/
function handleVote(link) {
if (!opts.voting) {
showError("You'll need to login to vote.");
return;
}
var id = link.attr('id');
if (!id) {
// Didn't click on one of the voting arrows.
return;
}
// If it is an unvote, the new vote value is 0,
// Otherwise it's 1 for an upvote, or -1 for a downvote.
var value = 0;
if (id.charAt(1) != 'u') {
value = id.charAt(0) == 'u' ? 1 : -1;
}
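    // Arrow ids encode the action: first char 'u'/'d' (up/down), second char 'v'/'u'
    // (vote/unvote), the rest is the comment id - e.g. "uv12" casts an upvote on comment 12.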
// The data to be sent to the server.
var d = {
comment_id: id.substring(2),
value: value
};
// Swap the vote and unvote links.
link.hide();
$('#' + id.charAt(0) + (id.charAt(1) == 'u' ? 'v' : 'u') + d.comment_id)
.show();
// The div the comment is displayed in.
var div = $('div#cd' + d.comment_id);
var data = div.data('comment');
// If this is not an unvote, and the other vote arrow has
// already been pressed, unpress it.
if ((d.value !== 0) && (data.vote === d.value * -1)) {
$('#' + (d.value == 1 ? 'd' : 'u') + 'u' + d.comment_id).hide();
$('#' + (d.value == 1 ? 'd' : 'u') + 'v' + d.comment_id).show();
}
// Update the comments rating in the local data.
data.rating += (data.vote === 0) ? d.value : (d.value - data.vote);
data.vote = d.value;
div.data('comment', data);
// Change the rating text.
div.find('.rating:first')
.text(data.rating + ' point' + (data.rating == 1 ? '' : 's'));
// Send the vote information to the server.
$.ajax({
type: "POST",
url: opts.processVoteURL,
data: d,
error: function(request, textStatus, error) {
showError('Oops, there was a problem casting that vote.');
}
});
}
/**
* Open a reply form used to reply to an existing comment.
*/
function openReply(id) {
// Swap out the reply link for the hide link
$('#rl' + id).hide();
$('#cr' + id).show();
// Add the reply li to the children ul.
var div = $(renderTemplate(replyTemplate, {id: id})).hide();
$('#cl' + id)
.prepend(div)
// Setup the submit handler for the reply form.
.find('#rf' + id)
.submit(function(event) {
event.preventDefault();
addComment($('#rf' + id));
closeReply(id);
})
.find('input[type=button]')
.click(function() {
closeReply(id);
});
div.slideDown('fast', function() {
$('#rf' + id).find('textarea').focus();
});
}
/**
* Close the reply form opened with openReply.
*/
function closeReply(id) {
// Remove the reply div from the DOM.
$('#rd' + id).slideUp('fast', function() {
$(this).remove();
});
// Swap out the hide link for the reply link
$('#cr' + id).hide();
$('#rl' + id).show();
}
/**
* Recursively sort a tree of comments using the comp comparator.
*/
function sortComments(comments) {
comments.sort(comp);
$.each(comments, function() {
this.children = sortComments(this.children);
});
return comments;
}
/**
* Get the children comments from a ul. If recursive is true,
* recursively include childrens' children.
*/
function getChildren(ul, recursive) {
var children = [];
ul.children().children("[id^='cd']")
.each(function() {
var comment = $(this).data('comment');
if (recursive)
comment.children = getChildren($(this).find('#cl' + comment.id), true);
children.push(comment);
});
return children;
}
/** Create a div to display a comment in. */
function createCommentDiv(comment) {
if (!comment.displayed && !opts.moderator) {
return $('<div class="moderate">Thank you! Your comment will show up '
        + 'once it has been approved by a moderator.</div>');
}
// Prettify the comment rating.
comment.pretty_rating = comment.rating + ' point' +
(comment.rating == 1 ? '' : 's');
// Make a class (for displaying not yet moderated comments differently)
comment.css_class = comment.displayed ? '' : ' moderate';
// Create a div for this comment.
var context = $.extend({}, opts, comment);
var div = $(renderTemplate(commentTemplate, context));
// If the user has voted on this comment, highlight the correct arrow.
if (comment.vote) {
var direction = (comment.vote == 1) ? 'u' : 'd';
div.find('#' + direction + 'v' + comment.id).hide();
div.find('#' + direction + 'u' + comment.id).show();
}
if (opts.moderator || comment.text != '[deleted]') {
div.find('a.reply').show();
if (comment.proposal_diff)
div.find('#sp' + comment.id).show();
if (opts.moderator && !comment.displayed)
div.find('#cm' + comment.id).show();
if (opts.moderator || (opts.username == comment.username))
div.find('#dc' + comment.id).show();
}
return div;
}
/**
* A simple template renderer. Placeholders such as <%id%> are replaced
* by context['id'] with items being escaped. Placeholders such as <#id#>
* are not escaped.
*/
function renderTemplate(template, context) {
var esc = $(document.createElement('div'));
function handle(ph, escape) {
var cur = context;
$.each(ph.split('.'), function() {
cur = cur[this];
});
return escape ? esc.text(cur || "").html() : cur;
}
return template.replace(/<([%#])([\w\.]*)\1>/g, function() {
return handle(arguments[2], arguments[1] == '%' ? true : false);
});
}
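  // Example (hypothetical values):
  //   renderTemplate('<p id="cd<%id%>"><#text#></p>', {id: 7, text: '<b>hi</b>'})
  //   returns '<p id="cd7"><b>hi</b></p>' - the <%...%> value is HTML-escaped,
  //   while the <#...#> value is inserted unescaped.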
/** Flash an error message briefly. */
function showError(message) {
$(document.createElement('div')).attr({'class': 'popup-error'})
.append($(document.createElement('div'))
.attr({'class': 'error-message'}).text(message))
.appendTo('body')
.fadeIn("slow")
.delay(2000)
.fadeOut("slow");
}
/** Add a link the user uses to open the comments popup. */
$.fn.comment = function() {
return this.each(function() {
var id = $(this).attr('id').substring(1);
var count = COMMENT_METADATA[id];
var title = count + ' comment' + (count == 1 ? '' : 's');
var image = count > 0 ? opts.commentBrightImage : opts.commentImage;
var addcls = count == 0 ? ' nocomment' : '';
$(this)
.append(
$(document.createElement('a')).attr({
href: '#',
'class': 'sphinx-comment-open' + addcls,
id: 'ao' + id
})
.append($(document.createElement('img')).attr({
src: image,
alt: 'comment',
title: title
}))
.click(function(event) {
event.preventDefault();
show($(this).attr('id').substring(2));
})
)
.append(
$(document.createElement('a')).attr({
href: '#',
'class': 'sphinx-comment-close hidden',
id: 'ah' + id
})
.append($(document.createElement('img')).attr({
src: opts.closeCommentImage,
alt: 'close',
title: 'close'
}))
.click(function(event) {
event.preventDefault();
hide($(this).attr('id').substring(2));
})
);
});
};
var opts = {
processVoteURL: '/_process_vote',
addCommentURL: '/_add_comment',
getCommentsURL: '/_get_comments',
acceptCommentURL: '/_accept_comment',
deleteCommentURL: '/_delete_comment',
commentImage: '/static/_static/comment.png',
closeCommentImage: '/static/_static/comment-close.png',
loadingImage: '/static/_static/ajax-loader.gif',
commentBrightImage: '/static/_static/comment-bright.png',
upArrow: '/static/_static/up.png',
downArrow: '/static/_static/down.png',
upArrowPressed: '/static/_static/up-pressed.png',
downArrowPressed: '/static/_static/down-pressed.png',
voting: false,
moderator: false
};
if (typeof COMMENT_OPTIONS != "undefined") {
opts = jQuery.extend(opts, COMMENT_OPTIONS);
}
var popupTemplate = '\
<div class="sphinx-comments" id="sc<%id%>">\
<p class="sort-options">\
Sort by:\
<a href="#" class="sort-option byrating">best rated</a>\
<a href="#" class="sort-option byascage">newest</a>\
<a href="#" class="sort-option byage">oldest</a>\
</p>\
<div class="comment-header">Comments</div>\
<div class="comment-loading" id="cn<%id%>">\
loading comments... <img src="<%loadingImage%>" alt="" /></div>\
<ul id="cl<%id%>" class="comment-ul"></ul>\
<div id="ca<%id%>">\
<p class="add-a-comment">Add a comment\
(<a href="#" class="comment-markup" id="ab<%id%>">markup</a>):</p>\
<div class="comment-markup-box" id="mb<%id%>">\
reStructured text markup: <i>*emph*</i>, <b>**strong**</b>, \
<tt>``code``</tt>, \
code blocks: <tt>::</tt> and an indented block after blank line</div>\
<form method="post" id="cf<%id%>" class="comment-form" action="">\
<textarea name="comment" cols="80"></textarea>\
<p class="propose-button">\
<a href="#" id="pc<%id%>" class="show-propose-change">\
Propose a change ▹\
</a>\
<a href="#" id="hc<%id%>" class="hide-propose-change">\
Propose a change ▿\
</a>\
</p>\
<textarea name="proposal" id="pt<%id%>" cols="80"\
spellcheck="false"></textarea>\
<input type="submit" value="Add comment" />\
<input type="hidden" name="node" value="<%id%>" />\
<input type="hidden" name="parent" value="" />\
</form>\
</div>\
</div>';
var commentTemplate = '\
<div id="cd<%id%>" class="sphinx-comment<%css_class%>">\
<div class="vote">\
<div class="arrow">\
<a href="#" id="uv<%id%>" class="vote" title="vote up">\
<img src="<%upArrow%>" />\
</a>\
<a href="#" id="uu<%id%>" class="un vote" title="vote up">\
<img src="<%upArrowPressed%>" />\
</a>\
</div>\
<div class="arrow">\
<a href="#" id="dv<%id%>" class="vote" title="vote down">\
<img src="<%downArrow%>" id="da<%id%>" />\
</a>\
<a href="#" id="du<%id%>" class="un vote" title="vote down">\
<img src="<%downArrowPressed%>" />\
</a>\
</div>\
</div>\
<div class="comment-content">\
<p class="tagline comment">\
<span class="user-id"><%username%></span>\
<span class="rating"><%pretty_rating%></span>\
<span class="delta"><%time.delta%></span>\
</p>\
<div class="comment-text comment"><#text#></div>\
<p class="comment-opts comment">\
<a href="#" class="reply hidden" id="rl<%id%>">reply ▹</a>\
<a href="#" class="close-reply" id="cr<%id%>">reply ▿</a>\
<a href="#" id="sp<%id%>" class="show-proposal">proposal ▹</a>\
<a href="#" id="hp<%id%>" class="hide-proposal">proposal ▿</a>\
<a href="#" id="dc<%id%>" class="delete-comment hidden">delete</a>\
<span id="cm<%id%>" class="moderation hidden">\
<a href="#" id="ac<%id%>" class="accept-comment">accept</a>\
</span>\
</p>\
<pre class="proposal" id="pr<%id%>">\
<#proposal_diff#>\
</pre>\
<ul class="comment-children" id="cl<%id%>"></ul>\
</div>\
<div class="clearleft"></div>\
</div>\
</div>';
var replyTemplate = '\
<li>\
<div class="reply-div" id="rd<%id%>">\
<form id="rf<%id%>">\
<textarea name="comment" cols="80"></textarea>\
<input type="submit" value="Add reply" />\
<input type="button" value="Cancel" />\
<input type="hidden" name="parent" value="<%id%>" />\
<input type="hidden" name="node" value="" />\
</form>\
</div>\
</li>';
$(document).ready(function() {
init();
});
})(jQuery);
$(document).ready(function() {
// add comment anchors for all paragraphs that are commentable
$('.sphinx-has-comment').comment();
// highlight search words in search results
$("div.context").each(function() {
var params = $.getQueryParameters();
var terms = (params.q) ? params.q[0].split(/\s+/) : [];
var result = $(this);
$.each(terms, function() {
result.highlightText(this.toLowerCase(), 'highlighted');
});
});
// directly open comment window if requested
var anchor = document.location.hash;
if (anchor.substring(0, 9) == '#comment-') {
$('#ao' + anchor.substring(9)).click();
document.location.hash = '#s' + anchor.substring(9);
}
}); | PypiClean |
/YooMoney-0.1.0.tar.gz/YooMoney-0.1.0/yoomoney/history/history.py | from datetime import datetime
from typing import Optional
import requests
import json
from yoomoney.operation.operation import Operation
from yoomoney.exceptions import (
IllegalParamType,
IllegalParamStartRecord,
IllegalParamRecords,
IllegalParamLabel,
IllegalParamFromDate,
IllegalParamTillDate,
TechnicalError
)
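# Illustrative construction sketch. The base_url, token and the "operation-history"
# method string below are hypothetical assumptions; in the package this object is
# normally produced by the client rather than built directly:
#
#     history = History(
#         base_url="https://yoomoney.ru/api/",
#         token="YOUR_ACCESS_TOKEN",
#         method="operation-history",
#         records=3,
#     )
#     for operation in history.operations:
#         print(operation.operation_id, operation.amount)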
class History:
def __init__(self,
base_url: str = None,
token: str = None,
method: str = None,
type: str = None,
label: str = None,
from_date: Optional[datetime] = None,
till_date: Optional[datetime] = None,
start_record: str = None,
records: int = None,
details: bool = None,
):
self.__private_method = method
self.__private_base_url = base_url
self.__private_token = token
self.type = type
self.label = label
try:
if from_date is not None:
from_date = "{Y}-{m}-{d}T{H}:{M}:{S}".format(
Y=str(from_date.year),
m=str(from_date.month),
d=str(from_date.day),
H=str(from_date.hour),
M=str(from_date.minute),
S=str(from_date.second)
)
except:
raise IllegalParamFromDate()
try:
if till_date is not None:
till_date = "{Y}-{m}-{d}T{H}:{M}:{S}".format(
Y=str(till_date.year),
m=str(till_date.month),
d=str(till_date.day),
H=str(till_date.hour),
M=str(till_date.minute),
S=str(till_date.second)
)
except:
            raise IllegalParamTillDate()
self.from_date = from_date
self.till_date = till_date
self.start_record = start_record
self.records = records
self.details = details
data = self._request()
if "error" in data:
if data["error"] == "illegal_param_type":
raise IllegalParamType()
elif data["error"] == "illegal_param_start_record":
raise IllegalParamStartRecord()
elif data["error"] == "illegal_param_records":
raise IllegalParamRecords()
elif data["error"] == "illegal_param_label":
raise IllegalParamLabel()
elif data["error"] == "illegal_param_from":
raise IllegalParamFromDate()
elif data["error"] == "illegal_param_till":
raise IllegalParamTillDate()
else:
raise TechnicalError()
self.next_record = None
if "next_record" in data:
self.next_record = data["next_record"]
self.operations = list()
for operation_data in data["operations"]:
param = {}
if "operation_id" in operation_data:
param["operation_id"] = operation_data["operation_id"]
else:
param["operation_id"] = None
if "status" in operation_data:
param["status"] = operation_data["status"]
else:
param["status"] = None
if "datetime" in operation_data:
param["datetime"] = datetime.strptime(str(operation_data["datetime"]).replace("T", " ").replace("Z", ""), '%Y-%m-%d %H:%M:%S')
else:
param["datetime"] = None
if "title" in operation_data:
param["title"] = operation_data["title"]
else:
param["title"] = None
if "pattern_id" in operation_data:
param["pattern_id"] = operation_data["pattern_id"]
else:
param["pattern_id"] = None
if "direction" in operation_data:
param["direction"] = operation_data["direction"]
else:
param["direction"] = None
if "amount" in operation_data:
param["amount"] = operation_data["amount"]
else:
param["amount"] = None
if "label" in operation_data:
param["label"] = operation_data["label"]
else:
param["label"] = None
if "type" in operation_data:
param["type"] = operation_data["type"]
else:
param["type"] = None
operation = Operation(
                    operation_id=param["operation_id"],
                    status=param["status"],
                    datetime=param["datetime"],
title=param["title"],
pattern_id=param["pattern_id"],
direction=param["direction"],
amount=param["amount"],
label=param["label"],
type=param["type"],
)
self.operations.append(operation)
def _request(self):
access_token = str(self.__private_token)
url = self.__private_base_url + self.__private_method
headers = {
'Authorization': 'Bearer ' + str(access_token),
'Content-Type': 'application/x-www-form-urlencoded'
}
payload = {}
if self.type is not None:
payload["type"] = self.type
if self.label is not None:
payload["label"] = self.label
if self.from_date is not None:
payload["from"] = self.from_date
if self.till_date is not None:
payload["till"] = self.till_date
if self.start_record is not None:
payload["start_record"] = self.start_record
if self.records is not None:
payload["records"] = self.records
if self.details is not None:
payload["details"] = self.details
response = requests.request("POST", url, headers=headers, data=payload)
return response.json() | PypiClean |
/uniohomeassistant-0.1.3.tar.gz/uniohomeassistant-0.1.3/homeassistant/bootstrap.py | import asyncio
import contextlib
from datetime import datetime
import logging
import logging.handlers
import os
import sys
import threading
from time import monotonic
from typing import TYPE_CHECKING, Any, Dict, Optional, Set
import voluptuous as vol
import yarl
from homeassistant import config as conf_util, config_entries, core, loader
from homeassistant.components import http
from homeassistant.const import REQUIRED_NEXT_PYTHON_DATE, REQUIRED_NEXT_PYTHON_VER
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType
from homeassistant.setup import (
DATA_SETUP,
DATA_SETUP_STARTED,
async_set_domains_to_be_loaded,
async_setup_component,
)
from homeassistant.util.async_ import gather_with_concurrency
from homeassistant.util.logging import async_activate_log_queue_handler
from homeassistant.util.package import async_get_user_site, is_virtual_env
from homeassistant.util.yaml import clear_secret_cache
if TYPE_CHECKING:
from .runner import RuntimeConfig
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = "home-assistant.log"
# hass.data key for logging information.
DATA_LOGGING = "logging"
LOG_SLOW_STARTUP_INTERVAL = 60
STAGE_1_TIMEOUT = 120
STAGE_2_TIMEOUT = 300
WRAP_UP_TIMEOUT = 300
COOLDOWN_TIME = 60
MAX_LOAD_CONCURRENTLY = 6
DEBUGGER_INTEGRATIONS = {"debugpy", "ptvsd"}
CORE_INTEGRATIONS = ("homeassistant", "persistent_notification")
LOGGING_INTEGRATIONS = {
# Set log levels
"logger",
# Error logging
"system_log",
"sentry",
# To record data
"recorder",
}
STAGE_1_INTEGRATIONS = {
# To make sure we forward data to other instances
"mqtt_eventstream",
# To provide account link implementations
"cloud",
# Ensure supervisor is available
"hassio",
# Get the frontend up and running as soon
# as possible so problem integrations can
# be removed
"frontend",
}
async def async_setup_hass(
runtime_config: "RuntimeConfig",
) -> Optional[core.HomeAssistant]:
"""Set up Home Assistant."""
hass = core.HomeAssistant()
hass.config.config_dir = runtime_config.config_dir
async_enable_logging(
hass,
runtime_config.verbose,
runtime_config.log_rotate_days,
runtime_config.log_file,
runtime_config.log_no_color,
)
hass.config.skip_pip = runtime_config.skip_pip
if runtime_config.skip_pip:
_LOGGER.warning(
"Skipping pip installation of required modules. This may cause issues"
)
if not await conf_util.async_ensure_config_exists(hass):
_LOGGER.error("Error getting configuration path")
return None
_LOGGER.info("Config directory: %s", runtime_config.config_dir)
config_dict = None
basic_setup_success = False
safe_mode = runtime_config.safe_mode
if not safe_mode:
await hass.async_add_executor_job(conf_util.process_ha_config_upgrade, hass)
try:
config_dict = await conf_util.async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(
"Failed to parse configuration.yaml: %s. Activating safe mode",
err,
)
else:
if not is_virtual_env():
await async_mount_local_lib_path(runtime_config.config_dir)
basic_setup_success = (
await async_from_config_dict(config_dict, hass) is not None
)
finally:
clear_secret_cache()
if config_dict is None:
safe_mode = True
elif not basic_setup_success:
_LOGGER.warning("Unable to set up core integrations. Activating safe mode")
safe_mode = True
elif (
"frontend" in hass.data.get(DATA_SETUP, {})
and "frontend" not in hass.config.components
):
_LOGGER.warning("Detected that frontend did not load. Activating safe mode")
# Ask integrations to shut down. It's messy but we can't
# do a clean stop without knowing what is broken
with contextlib.suppress(asyncio.TimeoutError):
async with hass.timeout.async_timeout(10):
await hass.async_stop()
safe_mode = True
old_config = hass.config
hass = core.HomeAssistant()
hass.config.skip_pip = old_config.skip_pip
hass.config.internal_url = old_config.internal_url
hass.config.external_url = old_config.external_url
hass.config.config_dir = old_config.config_dir
if safe_mode:
_LOGGER.info("Starting in safe mode")
hass.config.safe_mode = True
http_conf = (await http.async_get_last_config(hass)) or {}
await async_from_config_dict(
{"safe_mode": {}, "http": http_conf},
hass,
)
if runtime_config.open_ui:
hass.add_job(open_hass_ui, hass)
return hass
def open_hass_ui(hass: core.HomeAssistant) -> None:
"""Open the UI."""
import webbrowser # pylint: disable=import-outside-toplevel
if hass.config.api is None or "frontend" not in hass.config.components:
_LOGGER.warning("Cannot launch the UI because frontend not loaded")
return
scheme = "https" if hass.config.api.use_ssl else "http"
url = str(
yarl.URL.build(scheme=scheme, host="127.0.0.1", port=hass.config.api.port)
)
if not webbrowser.open(url):
_LOGGER.warning(
"Unable to open the Home Assistant UI in a browser. Open it yourself at %s",
url,
)
async def async_from_config_dict(
config: ConfigType, hass: core.HomeAssistant
) -> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
Dynamically loads required components and its dependencies.
This method is a coroutine.
"""
start = monotonic()
hass.config_entries = config_entries.ConfigEntries(hass, config)
await hass.config_entries.async_initialize()
# Set up core.
_LOGGER.debug("Setting up %s", CORE_INTEGRATIONS)
if not all(
await asyncio.gather(
*(
async_setup_component(hass, domain, config)
for domain in CORE_INTEGRATIONS
)
)
):
_LOGGER.error("Home Assistant core failed to initialize. ")
return None
_LOGGER.debug("Home Assistant core initialized")
core_config = config.get(core.DOMAIN, {})
try:
await conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as config_err:
conf_util.async_log_exception(config_err, "homeassistant", core_config, hass)
return None
except HomeAssistantError:
_LOGGER.error(
"Home Assistant core failed to initialize. "
"Further initialization aborted"
)
return None
await _async_set_up_integrations(hass, config)
stop = monotonic()
_LOGGER.info("Home Assistant initialized in %.2fs", stop - start)
if REQUIRED_NEXT_PYTHON_DATE and sys.version_info[:3] < REQUIRED_NEXT_PYTHON_VER:
msg = (
"Support for the running Python version "
f"{'.'.join(str(x) for x in sys.version_info[:3])} is deprecated and will "
f"be removed in the first release after {REQUIRED_NEXT_PYTHON_DATE}. "
"Please upgrade Python to "
f"{'.'.join(str(x) for x in REQUIRED_NEXT_PYTHON_VER)} or "
"higher."
)
_LOGGER.warning(msg)
hass.components.persistent_notification.async_create(
msg, "Python version", "python_version"
)
return hass
@core.callback
def async_enable_logging(
hass: core.HomeAssistant,
verbose: bool = False,
log_rotate_days: Optional[int] = None,
log_file: Optional[str] = None,
log_no_color: bool = False,
) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
if not log_no_color:
try:
# pylint: disable=import-outside-toplevel
from colorlog import ColoredFormatter
# basicConfig must be called after importing colorlog in order to
# ensure that the handlers it sets up wraps the correct streams.
logging.basicConfig(level=logging.INFO)
colorfmt = f"%(log_color)s{fmt}%(reset)s"
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
except ImportError:
pass
# If the above initialization failed for any reason, setup the default
# formatting. If the above succeeds, this will result in a no-op.
logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO)
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
sys.excepthook = lambda *args: logging.getLogger(None).exception(
"Uncaught exception", exc_info=args # type: ignore
)
if sys.version_info[:2] >= (3, 8):
threading.excepthook = lambda args: logging.getLogger(None).exception(
"Uncaught thread exception",
exc_info=(args.exc_type, args.exc_value, args.exc_traceback),
)
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or (
not err_path_exists and os.access(err_dir, os.W_OK)
):
if log_rotate_days:
err_handler: logging.FileHandler = (
logging.handlers.TimedRotatingFileHandler(
err_log_path, when="midnight", backupCount=log_rotate_days
)
)
else:
err_handler = logging.FileHandler(err_log_path, mode="w", delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
logger = logging.getLogger("")
logger.addHandler(err_handler)
logger.setLevel(logging.INFO if verbose else logging.WARNING)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error("Unable to set up error log %s (access denied)", err_log_path)
async_activate_log_queue_handler(hass)
async def async_mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, "deps")
lib_dir = await async_get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
@core.callback
def _get_domains(hass: core.HomeAssistant, config: Dict[str, Any]) -> Set[str]:
"""Get domains of components to set up."""
# Filter out the repeating and common config section [homeassistant]
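    # (a sectioned key such as "sensor 2" reduces to the bare integration domain "sensor")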
domains = {key.split(" ")[0] for key in config if key != core.DOMAIN}
# Add config entry domains
if not hass.config.safe_mode:
domains.update(hass.config_entries.async_domains())
# Make sure the Hass.io component is loaded
if "HASSIO" in os.environ:
domains.add("hassio")
return domains
async def _async_log_pending_setups(
domains: Set[str], setup_started: Dict[str, datetime]
) -> None:
"""Periodic log of setups that are pending for longer than LOG_SLOW_STARTUP_INTERVAL."""
while True:
await asyncio.sleep(LOG_SLOW_STARTUP_INTERVAL)
remaining = [domain for domain in domains if domain in setup_started]
if remaining:
_LOGGER.warning(
"Waiting on integrations to complete setup: %s",
", ".join(remaining),
)
async def async_setup_multi_components(
hass: core.HomeAssistant,
domains: Set[str],
config: Dict[str, Any],
setup_started: Dict[str, datetime],
) -> None:
"""Set up multiple domains. Log on failure."""
futures = {
domain: hass.async_create_task(async_setup_component(hass, domain, config))
for domain in domains
}
log_task = asyncio.create_task(_async_log_pending_setups(domains, setup_started))
await asyncio.wait(futures.values())
log_task.cancel()
errors = [domain for domain in domains if futures[domain].exception()]
for domain in errors:
exception = futures[domain].exception()
assert exception is not None
_LOGGER.error(
"Error setting up integration %s - received exception",
domain,
exc_info=(type(exception), exception, exception.__traceback__),
)
async def _async_set_up_integrations(
hass: core.HomeAssistant, config: Dict[str, Any]
) -> None:
"""Set up all the integrations."""
setup_started = hass.data[DATA_SETUP_STARTED] = {}
domains_to_setup = _get_domains(hass, config)
    # Resolve all dependencies so we know all integrations
    # that will have to be loaded and start right away
integration_cache: Dict[str, loader.Integration] = {}
to_resolve = domains_to_setup
while to_resolve:
old_to_resolve = to_resolve
to_resolve = set()
integrations_to_process = [
int_or_exc
for int_or_exc in await gather_with_concurrency(
loader.MAX_LOAD_CONCURRENTLY,
*(
loader.async_get_integration(hass, domain)
for domain in old_to_resolve
),
return_exceptions=True,
)
if isinstance(int_or_exc, loader.Integration)
]
resolve_dependencies_tasks = [
itg.resolve_dependencies()
for itg in integrations_to_process
if not itg.all_dependencies_resolved
]
if resolve_dependencies_tasks:
await asyncio.gather(*resolve_dependencies_tasks)
for itg in integrations_to_process:
integration_cache[itg.domain] = itg
for dep in itg.all_dependencies:
if dep in domains_to_setup:
continue
domains_to_setup.add(dep)
to_resolve.add(dep)
_LOGGER.info("Domains to be set up: %s", domains_to_setup)
logging_domains = domains_to_setup & LOGGING_INTEGRATIONS
# Load logging as soon as possible
if logging_domains:
_LOGGER.info("Setting up logging: %s", logging_domains)
await async_setup_multi_components(hass, logging_domains, config, setup_started)
# Start up debuggers. Start these first in case they want to wait.
debuggers = domains_to_setup & DEBUGGER_INTEGRATIONS
if debuggers:
_LOGGER.debug("Setting up debuggers: %s", debuggers)
await async_setup_multi_components(hass, debuggers, config, setup_started)
    # Calculate which components to set up in which stage
stage_1_domains = set()
# Find all dependencies of any dependency of any stage 1 integration that
# we plan on loading and promote them to stage 1
deps_promotion = STAGE_1_INTEGRATIONS
while deps_promotion:
old_deps_promotion = deps_promotion
deps_promotion = set()
for domain in old_deps_promotion:
if domain not in domains_to_setup or domain in stage_1_domains:
continue
stage_1_domains.add(domain)
dep_itg = integration_cache.get(domain)
if dep_itg is None:
continue
deps_promotion.update(dep_itg.all_dependencies)
stage_2_domains = domains_to_setup - logging_domains - debuggers - stage_1_domains
# Kick off loading the registries. They don't need to be awaited.
asyncio.create_task(hass.helpers.device_registry.async_get_registry())
asyncio.create_task(hass.helpers.entity_registry.async_get_registry())
asyncio.create_task(hass.helpers.area_registry.async_get_registry())
# Start setup
if stage_1_domains:
_LOGGER.info("Setting up stage 1: %s", stage_1_domains)
try:
async with hass.timeout.async_timeout(
STAGE_1_TIMEOUT, cool_down=COOLDOWN_TIME
):
await async_setup_multi_components(
hass, stage_1_domains, config, setup_started
)
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for stage 1 - moving forward")
# Enables after dependencies
async_set_domains_to_be_loaded(hass, stage_2_domains)
if stage_2_domains:
_LOGGER.info("Setting up stage 2: %s", stage_2_domains)
try:
async with hass.timeout.async_timeout(
STAGE_2_TIMEOUT, cool_down=COOLDOWN_TIME
):
await async_setup_multi_components(
hass, stage_2_domains, config, setup_started
)
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for stage 2 - moving forward")
# Wrap up startup
_LOGGER.debug("Waiting for startup to wrap up")
try:
async with hass.timeout.async_timeout(WRAP_UP_TIMEOUT, cool_down=COOLDOWN_TIME):
await hass.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for bootstrap - moving forward") | PypiClean |
/xs_transformers-1.0.7-py3-none-any.whl/xs_transformers/hf_argparser.py |
import dataclasses
import json
import sys
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Dict, Iterable, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)
# From https://stackoverflow.com/questions/15008758/parsing-boolean-values-with-argparse
def string_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise ArgumentTypeError(
f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
)
class HfArgumentParser(ArgumentParser):
"""
This subclass of `argparse.ArgumentParser` uses type hints on dataclasses to generate arguments.
The class is designed to play well with the native argparse. In particular, you can add more (non-dataclass backed)
arguments to the parser after initialization and you'll get the output back after parsing as an additional
namespace. Optional: To create sub argument groups use the `_argument_group_name` attribute in the dataclass.
"""
dataclass_types: Iterable[DataClassType]
def __init__(
self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs
):
"""
Args:
dataclass_types:
Dataclass type, or list of dataclass types for which we will "fill" instances with the parsed args.
kwargs:
(Optional) Passed to `argparse.ArgumentParser()` in the regular way.
"""
# To make the default appear when using --help
if "formatter_class" not in kwargs:
kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
super().__init__(**kwargs)
if dataclasses.is_dataclass(dataclass_types):
dataclass_types = [dataclass_types]
self.dataclass_types = list(dataclass_types)
for dtype in self.dataclass_types:
self._add_dataclass_arguments(dtype)
@staticmethod
def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
field_name = f"--{field.name}"
kwargs = field.metadata.copy()
# field.metadata is not used at all by Data Classes,
# it is provided as a third-party extension mechanism.
if isinstance(field.type, str):
raise RuntimeError(
"Unresolved type detected, which should have been done with the help of "
"`typing.get_type_hints` method by default"
)
origin_type = getattr(field.type, "__origin__", field.type)
if origin_type is Union:
if str not in field.type.__args__ and (
len(field.type.__args__) != 2 or type(None) not in field.type.__args__
):
raise ValueError(
"Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
" the argument parser only supports one type per argument."
f" Problem encountered in field '{field.name}'."
)
if type(None) not in field.type.__args__:
# filter `str` in Union
field.type = (
field.type.__args__[0]
if field.type.__args__[1] == str
else field.type.__args__[1]
)
origin_type = getattr(field.type, "__origin__", field.type)
elif bool not in field.type.__args__:
# filter `NoneType` in Union (except for `Union[bool, NoneType]`)
field.type = (
field.type.__args__[0]
if isinstance(None, field.type.__args__[1])
else field.type.__args__[1]
)
origin_type = getattr(field.type, "__origin__", field.type)
# A variable to store kwargs for a boolean field, if needed
# so that we can init a `no_*` complement argument (see below)
bool_kwargs = {}
if isinstance(field.type, type) and issubclass(field.type, Enum):
kwargs["choices"] = [x.value for x in field.type]
kwargs["type"] = type(kwargs["choices"][0])
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
else:
kwargs["required"] = True
elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs so we can instantiate a `no_*` complement argument below.
# We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
bool_kwargs = copy(kwargs)
# Hack because type=bool in argparse does not behave as we want.
kwargs["type"] = string_to_bool
if field.type is bool or (
field.default is not None and field.default is not dataclasses.MISSING
):
                # Default value is False if no default is provided for a bool field.
default = (
False if field.default is dataclasses.MISSING else field.default
)
# This is the value that will get picked if we don't include --field_name in any way
kwargs["default"] = default
# This tells argparse we accept 0 or 1 value after --field_name
kwargs["nargs"] = "?"
# This is the value that will get picked if we do --field_name (without value)
kwargs["const"] = True
elif isclass(origin_type) and issubclass(origin_type, list):
kwargs["type"] = field.type.__args__[0]
kwargs["nargs"] = "+"
if field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
elif field.default is dataclasses.MISSING:
kwargs["required"] = True
else:
kwargs["type"] = field.type
if field.default is not dataclasses.MISSING:
kwargs["default"] = field.default
elif field.default_factory is not dataclasses.MISSING:
kwargs["default"] = field.default_factory()
else:
kwargs["required"] = True
parser.add_argument(field_name, **kwargs)
# Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
# Order is important for arguments with the same destination!
# We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
# here and we do not need those changes/additional keys.
if field.default is True and (
field.type is bool or field.type == Optional[bool]
):
bool_kwargs["default"] = False
parser.add_argument(
f"--no_{field.name}",
action="store_false",
dest=field.name,
**bool_kwargs,
)
def _add_dataclass_arguments(self, dtype: DataClassType):
if hasattr(dtype, "_argument_group_name"):
parser = self.add_argument_group(dtype._argument_group_name)
else:
parser = self
try:
type_hints: Dict[str, type] = get_type_hints(dtype)
except NameError:
raise RuntimeError(
f"Type resolution failed for f{dtype}. Try declaring the class in global scope or "
"removing line of `from __future__ import annotations` which opts in Postponed "
"Evaluation of Annotations (PEP 563)"
)
for field in dataclasses.fields(dtype):
if not field.init:
continue
field.type = type_hints[field.name]
self._parse_dataclass_field(parser, field)
def parse_args_into_dataclasses(
self,
args=None,
return_remaining_strings=False,
look_for_args_file=True,
args_filename=None,
) -> Tuple[DataClass, ...]:
"""
Parse command-line args into instances of the specified dataclass types.
This relies on argparse's `ArgumentParser.parse_known_args`. See the doc at:
docs.python.org/3.7/library/argparse.html#argparse.ArgumentParser.parse_args
Args:
args:
List of strings to parse. The default is taken from sys.argv. (same as argparse.ArgumentParser)
return_remaining_strings:
If true, also return a list of remaining argument strings.
look_for_args_file:
If true, will look for a ".args" file with the same base name as the entry point script for this
process, and will append its potential content to the command line args.
args_filename:
                If not None, will use this file instead of the ".args" file specified in the previous argument.
Returns:
Tuple consisting of:
                - the dataclass instances in the same order as they were passed to the initializer.
- if applicable, an additional namespace for more (non-dataclass backed) arguments added to the parser
after initialization.
- The potential list of remaining argument strings. (same as argparse.ArgumentParser.parse_known_args)
"""
if args_filename or (look_for_args_file and len(sys.argv)):
if args_filename:
args_file = Path(args_filename)
else:
args_file = Path(sys.argv[0]).with_suffix(".args")
if args_file.exists():
fargs = args_file.read_text().split()
args = fargs + args if args is not None else fargs + sys.argv[1:]
            # In case of duplicate arguments the last one has precedence in argparse,
            # so command-line args are appended after the args-file content and override it.
namespace, remaining_args = self.parse_known_args(args=args)
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in vars(namespace).items() if k in keys}
for k in keys:
delattr(namespace, k)
obj = dtype(**inputs)
outputs.append(obj)
if len(namespace.__dict__) > 0:
# additional namespace.
outputs.append(namespace)
if return_remaining_strings:
return (*outputs, remaining_args)
else:
if remaining_args:
raise ValueError(
f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}"
)
return (*outputs,)
def parse_dict(
self, args: Dict[str, Any], allow_extra_keys: bool = False
) -> Tuple[DataClass, ...]:
"""
Alternative helper method that does not use `argparse` at all, instead uses a dict and populating the dataclass
types.
Args:
args (`dict`):
dict containing config values
allow_extra_keys (`bool`, *optional*, defaults to `False`):
Defaults to False. If False, will raise an exception if the dict contains keys that are not parsed.
Returns:
Tuple consisting of:
- the dataclass instances in the same order as they were passed to the initializer.
"""
unused_keys = set(args.keys())
outputs = []
for dtype in self.dataclass_types:
keys = {f.name for f in dataclasses.fields(dtype) if f.init}
inputs = {k: v for k, v in args.items() if k in keys}
unused_keys.difference_update(inputs.keys())
obj = dtype(**inputs)
outputs.append(obj)
if not allow_extra_keys and unused_keys:
raise ValueError(
f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}"
)
return tuple(outputs)
def parse_json_file(
self, json_file: str, allow_extra_keys: bool = False
) -> Tuple[DataClass, ...]:
"""
Alternative helper method that does not use `argparse` at all, instead loading a json file and populating the
dataclass types.
Args:
json_file (`str` or `os.PathLike`):
File name of the json file to parse
allow_extra_keys (`bool`, *optional*, defaults to `False`):
Defaults to False. If False, will raise an exception if the json file contains keys that are not
parsed.
Returns:
Tuple consisting of:
- the dataclass instances in the same order as they were passed to the initializer.
"""
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
return tuple(outputs)
def parse_yaml_file(
self, yaml_file: str, allow_extra_keys: bool = False
) -> Tuple[DataClass, ...]:
"""
        Alternative helper method that does not use `argparse` at all, instead loading a yaml file and populating the
dataclass types.
Args:
yaml_file (`str` or `os.PathLike`):
File name of the yaml file to parse
allow_extra_keys (`bool`, *optional*, defaults to `False`):
                Defaults to False. If False, will raise an exception if the yaml file contains keys that are not
parsed.
Returns:
Tuple consisting of:
- the dataclass instances in the same order as they were passed to the initializer.
"""
outputs = self.parse_dict(
yaml.safe_load(Path(yaml_file).read_text()),
allow_extra_keys=allow_extra_keys,
)
return tuple(outputs) | PypiClean |
/idg_metadata_client-1.0.2.0-py3-none-any.whl/metadata/generated/schema/entity/data/chart.py |
from __future__ import annotations
from enum import Enum
from typing import List, Optional
from pydantic import BaseModel, Extra, Field
from ...type import basic, entityHistory, entityReference, tagLabel, usageDetails
from ..services import dashboardService
class ChartType(Enum):
Line = 'Line'
Table = 'Table'
Bar = 'Bar'
Area = 'Area'
Pie = 'Pie'
Histogram = 'Histogram'
Scatter = 'Scatter'
Text = 'Text'
BoxPlot = 'BoxPlot'
Other = 'Other'
class Chart(BaseModel):
class Config:
extra = Extra.forbid
id: basic.Uuid = Field(
..., description='Unique identifier that identifies a chart instance.'
)
name: basic.EntityName = Field(..., description='Name that identifies this Chart.')
displayName: Optional[str] = Field(
None,
description='Display Name that identifies this Chart. It could be title or label from the source services.',
)
fullyQualifiedName: Optional[basic.FullyQualifiedEntityName] = Field(
None,
description="A unique name that identifies a dashboard in the format 'ServiceName.ChartName'.",
)
description: Optional[basic.Markdown] = Field(
        None, description='Description of the chart, what it is, and how to use it.'
)
version: Optional[entityHistory.EntityVersion] = Field(
None, description='Metadata version of the entity.'
)
updatedAt: Optional[basic.Timestamp] = Field(
None,
description='Last update time corresponding to the new version of the entity in Unix epoch time milliseconds.',
)
updatedBy: Optional[str] = Field(None, description='User who made the update.')
chartType: Optional[ChartType] = None
chartUrl: Optional[str] = Field(
None, description='Chart URL suffix from its service.'
)
href: Optional[basic.Href] = Field(
None, description='Link to the resource corresponding to this entity.'
)
owner: Optional[entityReference.EntityReference] = Field(
        None, description='Owner of this chart.'
)
followers: Optional[entityReference.EntityReferenceList] = Field(
None, description='Followers of this chart.'
)
tags: Optional[List[tagLabel.TagLabel]] = Field(
None, description='Tags for this chart.'
)
service: entityReference.EntityReference = Field(
        ..., description='Link to the service where this chart is hosted.'
)
serviceType: Optional[dashboardService.DashboardServiceType] = Field(
        None, description='Service type where this chart is hosted.'
)
usageSummary: Optional[usageDetails.UsageDetails] = Field(
        None, description='Latest usage information for this chart.'
)
changeDescription: Optional[entityHistory.ChangeDescription] = Field(
None, description='Change that lead to this version of the entity.'
)
deleted: Optional[bool] = Field(
False, description='When `true` indicates the entity has been soft deleted.'
) | PypiClean |
/superset-growth-0.26.3.tar.gz/superset-growth-0.26.3/superset/static/assets/src/visualizations/horizon.js | import d3 from 'd3';
import './horizon.css';
const horizonChart = function () {
let colors = [
'#313695',
'#4575b4',
'#74add1',
'#abd9e9',
'#fee090',
'#fdae61',
'#f46d43',
'#d73027',
];
let height = 30;
const y = d3.scale.linear().range([0, height]);
let bands = colors.length >> 1; // number of bands in each direction (positive / negative)
let width = 1000;
let offsetX = 0;
let spacing = 0;
let mode = 'offset';
let axis;
let title;
let extent; // the extent is derived from the data, unless explicitly set via .extent([min, max])
let x;
let canvas;
function my(data) {
const horizon = d3.select(this);
const step = width / data.length;
horizon.append('span')
.attr('class', 'title')
.text(title);
horizon.append('span')
.attr('class', 'value');
canvas = horizon.append('canvas');
canvas
.attr('width', width)
.attr('height', height);
const context = canvas.node().getContext('2d');
context.imageSmoothingEnabled = false;
// update the y scale, based on the data extents
const ext = extent || d3.extent(data, d => d.y);
const max = Math.max(-ext[0], ext[1]);
y.domain([0, max]);
// x = d3.scaleTime().domain[];
axis = d3.svg.axis(x).ticks(5);
context.clearRect(0, 0, width, height);
// context.translate(0.5, 0.5);
// the data frame currently being shown:
const startIndex = Math.floor(Math.max(0, -(offsetX / step)));
const endIndex = Math.floor(Math.min(data.length, startIndex + (width / step)));
// skip drawing if there's no data to be drawn
if (startIndex > data.length) {
return;
}
// we are drawing positive & negative bands separately to avoid mutating canvas state
// http://www.html5rocks.com/en/tutorials/canvas/performance/
let negative = false;
// draw positive bands
let value;
let bExtents;
for (let b = 0; b < bands; b += 1) {
context.fillStyle = colors[bands + b];
// Adjust the range based on the current band index.
bExtents = (b + 1 - bands) * height;
y.range([bands * height + bExtents, bExtents]);
// only the current data frame is being drawn i.e. what's visible:
for (let i = startIndex; i < endIndex; i++) {
value = data[i].y;
if (value <= 0) { negative = true; continue; }
if (value === undefined) {
continue;
}
context.fillRect(offsetX + i * step, y(value), step + 1, y(0) - y(value));
}
}
// draw negative bands
if (negative) {
// mirror the negative bands, by flipping the canvas
if (mode === 'offset') {
context.translate(0, height);
context.scale(1, -1);
}
for (let b = 0; b < bands; b++) {
context.fillStyle = colors[bands - b - 1];
// Adjust the range based on the current band index.
bExtents = (b + 1 - bands) * height;
y.range([bands * height + bExtents, bExtents]);
// only the current data frame is being drawn i.e. what's visible:
for (let ii = startIndex; ii < endIndex; ii++) {
value = data[ii].y;
if (value >= 0) {
continue;
}
context.fillRect(offsetX + ii * step, y(-value), step + 1, y(0) - y(-value));
}
}
}
}
my.axis = function (_) {
if (!arguments.length) { return axis; }
axis = _;
return my;
};
my.title = function (_) {
if (!arguments.length) { return title; }
title = _;
return my;
};
my.canvas = function (_) {
if (!arguments.length) { return canvas; }
canvas = _;
return my;
};
// Array of colors representing the number of bands
my.colors = function (_) {
if (!arguments.length) {
return colors;
}
colors = _;
// update the number of bands
bands = colors.length >> 1;
return my;
};
my.height = function (_) {
if (!arguments.length) { return height; }
height = _;
return my;
};
my.width = function (_) {
if (!arguments.length) { return width; }
width = _;
return my;
};
my.spacing = function (_) {
if (!arguments.length) { return spacing; }
spacing = _;
return my;
};
// mirror or offset
my.mode = function (_) {
if (!arguments.length) { return mode; }
mode = _;
return my;
};
my.extent = function (_) {
if (!arguments.length) { return extent; }
extent = _;
return my;
};
my.offsetX = function (_) {
if (!arguments.length) { return offsetX; }
offsetX = _;
return my;
};
return my;
};
function horizonViz(slice, payload) {
const fd = slice.formData;
const div = d3.select(slice.selector);
div.selectAll('*').remove();
let extent;
if (fd.horizon_color_scale === 'overall') {
let allValues = [];
payload.data.forEach(function (d) {
allValues = allValues.concat(d.values);
});
extent = d3.extent(allValues, d => d.y);
} else if (fd.horizon_color_scale === 'change') {
payload.data.forEach(function (series) {
const t0y = series.values[0].y; // value at time 0
series.values = series.values.map(d =>
Object.assign({}, d, { y: d.y - t0y }),
);
});
}
div.selectAll('.horizon')
.data(payload.data)
.enter()
.append('div')
.attr('class', 'horizon')
.each(function (d, i) {
horizonChart()
.height(fd.series_height)
.width(slice.width())
.extent(extent)
.title(d.key)
.call(this, d.values, i);
});
}
module.exports = horizonViz; | PypiClean |
/adapter_transformers-3.2.1-py3-none-any.whl/transformers/models/chinese_clip/processing_chinese_clip.py | import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor(ProcessorMixin):
r"""
Constructs a Chinese-CLIP processor which wraps a Chinese-CLIP image processor and a Chinese-CLIP tokenizer into a
single processor.
[`ChineseCLIPProcessor`] offers all the functionalities of [`ChineseCLIPImageProcessor`] and [`BertTokenizerFast`].
See the [`~ChineseCLIPProcessor.__call__`] and [`~ChineseCLIPProcessor.decode`] for more information.
Args:
image_processor ([`ChineseCLIPImageProcessor`]):
The image processor is a required input.
tokenizer ([`BertTokenizerFast`]):
The tokenizer is a required input.
"""
attributes = ["image_processor", "tokenizer"]
image_processor_class = "ChineseCLIPImageProcessor"
tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
def __init__(self, image_processor=None, tokenizer=None, **kwargs):
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead.",
FutureWarning,
)
feature_extractor = kwargs.pop("feature_extractor")
image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`.")
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`.")
super().__init__(image_processor, tokenizer)
self.current_processor = self.image_processor
def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
"""
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
        and `kwargs` arguments to BertTokenizerFast's [`~BertTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        ChineseCLIPImageProcessor's [`~ChineseCLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to
        the docstring of the above two methods for more information.
Args:
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. In case of a NumPy array/PyTorch tensor, each image should be of shape (C, H, W), where C is a
number of channels, H and W are image height and width.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
- `'jax'`: Return JAX `jnp.ndarray` objects.
Returns:
[`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
"""
if text is None and images is None:
raise ValueError("You have to specify either text or images. Both cannot be none.")
if text is not None:
encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)
if images is not None:
image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
if text is not None and images is not None:
encoding["pixel_values"] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
def batch_decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
refer to the docstring of this method for more information.
"""
return self.tokenizer.batch_decode(*args, **kwargs)
def decode(self, *args, **kwargs):
"""
This method forwards all its arguments to BertTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
the docstring of this method for more information.
"""
return self.tokenizer.decode(*args, **kwargs)
@property
def model_input_names(self):
tokenizer_input_names = self.tokenizer.model_input_names
image_processor_input_names = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
@property
def feature_extractor_class(self):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
FutureWarning,
)
return self.image_processor_class | PypiClean |
/streamlit_ace-0.1.1-py3-none-any.whl/streamlit_ace/frontend/build/2a71eab9fa03631bb04242db6853c0fa.js | ace.define("ace/theme/tomorrow_night_bright",["require","exports","module","ace/lib/dom"],(function(r,o,e){o.isDark=!0,o.cssClass="ace-tomorrow-night-bright",o.cssText=".ace-tomorrow-night-bright .ace_gutter {background: #1a1a1a;color: #DEDEDE}.ace-tomorrow-night-bright .ace_print-margin {width: 1px;background: #1a1a1a}.ace-tomorrow-night-bright {background-color: #000000;color: #DEDEDE}.ace-tomorrow-night-bright .ace_cursor {color: #9F9F9F}.ace-tomorrow-night-bright .ace_marker-layer .ace_selection {background: #424242}.ace-tomorrow-night-bright.ace_multiselect .ace_selection.ace_start {box-shadow: 0 0 3px 0px #000000;}.ace-tomorrow-night-bright .ace_marker-layer .ace_step {background: rgb(102, 82, 0)}.ace-tomorrow-night-bright .ace_marker-layer .ace_bracket {margin: -1px 0 0 -1px;border: 1px solid #888888}.ace-tomorrow-night-bright .ace_marker-layer .ace_highlight {border: 1px solid rgb(110, 119, 0);border-bottom: 0;box-shadow: inset 0 -1px rgb(110, 119, 0);margin: -1px 0 0 -1px;background: rgba(255, 235, 0, 0.1)}.ace-tomorrow-night-bright .ace_marker-layer .ace_active-line {background: #2A2A2A}.ace-tomorrow-night-bright .ace_gutter-active-line {background-color: #2A2A2A}.ace-tomorrow-night-bright .ace_stack {background-color: rgb(66, 90, 44)}.ace-tomorrow-night-bright .ace_marker-layer .ace_selected-word {border: 1px solid #888888}.ace-tomorrow-night-bright .ace_invisible {color: #343434}.ace-tomorrow-night-bright .ace_keyword,.ace-tomorrow-night-bright .ace_meta,.ace-tomorrow-night-bright .ace_storage,.ace-tomorrow-night-bright .ace_storage.ace_type,.ace-tomorrow-night-bright .ace_support.ace_type {color: #C397D8}.ace-tomorrow-night-bright .ace_keyword.ace_operator {color: #70C0B1}.ace-tomorrow-night-bright .ace_constant.ace_character,.ace-tomorrow-night-bright .ace_constant.ace_language,.ace-tomorrow-night-bright .ace_constant.ace_numeric,.ace-tomorrow-night-bright .ace_keyword.ace_other.ace_unit,.ace-tomorrow-night-bright .ace_support.ace_constant,.ace-tomorrow-night-bright .ace_variable.ace_parameter {color: #E78C45}.ace-tomorrow-night-bright .ace_constant.ace_other {color: #EEEEEE}.ace-tomorrow-night-bright .ace_invalid {color: #CED2CF;background-color: #DF5F5F}.ace-tomorrow-night-bright .ace_invalid.ace_deprecated {color: #CED2CF;background-color: #B798BF}.ace-tomorrow-night-bright .ace_fold {background-color: #7AA6DA;border-color: #DEDEDE}.ace-tomorrow-night-bright .ace_entity.ace_name.ace_function,.ace-tomorrow-night-bright .ace_support.ace_function,.ace-tomorrow-night-bright .ace_variable {color: #7AA6DA}.ace-tomorrow-night-bright .ace_support.ace_class,.ace-tomorrow-night-bright .ace_support.ace_type {color: #E7C547}.ace-tomorrow-night-bright .ace_heading,.ace-tomorrow-night-bright .ace_markup.ace_heading,.ace-tomorrow-night-bright .ace_string {color: #B9CA4A}.ace-tomorrow-night-bright .ace_entity.ace_name.ace_tag,.ace-tomorrow-night-bright .ace_entity.ace_other.ace_attribute-name,.ace-tomorrow-night-bright .ace_meta.ace_tag,.ace-tomorrow-night-bright .ace_string.ace_regexp,.ace-tomorrow-night-bright .ace_variable {color: #D54E53}.ace-tomorrow-night-bright .ace_comment {color: #969896}.ace-tomorrow-night-bright .ace_c9searchresults.ace_keyword {color: #C2C280}.ace-tomorrow-night-bright .ace_indent-guide {background: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAACCAYAAACZgbYnAAAAEklEQVQImWNgYGBgYFBXV/8PAAJoAXX4kT2EAAAAAElFTkSuQmCC) right 
repeat-y}",r("../lib/dom").importCssString(o.cssText,o.cssClass,!1)})),ace.require(["ace/theme/tomorrow_night_bright"],(function(r){"object"==typeof module&&"object"==typeof exports&&module&&(module.exports=r)})); | PypiClean |
/steelscript.netprofiler-2.0.tar.gz/steelscript.netprofiler-2.0/examples/traffic_flowlist.py |
# Copyright (c) 2019 Riverbed Technology, Inc.
#
# This software is licensed under the terms and conditions of the MIT License
# accompanying the software ("License"). This software is distributed "AS IS"
# as set forth in the License.
from steelscript.netprofiler.core.app import NetProfilerApp
from steelscript.netprofiler.core.report import TrafficFlowListReport
from steelscript.netprofiler.core.filters import TimeFilter, TrafficFilter
from steelscript.common.datautils import Formatter
import optparse
class TrafficFlowListApp(NetProfilerApp):
def add_options(self, parser):
super(TrafficFlowListApp, self).add_options(parser)
group = optparse.OptionGroup(parser, "Report Parameters")
group.add_option('--columns', dest='columns',
help='Comma-separated list of column names and/or '
'ID numbers, required')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Filter Options")
group.add_option('--timefilter', dest='timefilter', default='last 1 hour',
help='Time range to analyze (defaults to "last 1 hour") '
'other valid formats are: "4/21/13 4:00 to 4/21/13 5:00" '
'or "16:00:00 to 21:00:04.546"')
group.add_option('--trafficexpr', dest='trafficexpr', default=None,
help='Traffic Expression to apply to report (default None)')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, "Output options")
group.add_option('--sort', dest='sortby', default=None,
help='Column name to sort by (defaults to None)')
group.add_option('--csv', dest='as_csv', default=False, action='store_true',
help='Return values in CSV format instead of tabular')
parser.add_option_group(group)
def validate_args(self):
""" Ensure columns are included
"""
super(TrafficFlowListApp, self).validate_args()
if not self.options.columns:
self.parser.error('Comma-separated list of columns is required.')
def print_data(self, data, header):
if self.options.as_csv:
Formatter.print_csv(data, header)
else:
Formatter.print_table(data, header)
def main(self):
self.timefilter = TimeFilter.parse_range(self.options.timefilter)
if self.options.trafficexpr:
self.trafficexpr = TrafficFilter(self.options.trafficexpr)
else:
self.trafficexpr = None
with TrafficFlowListReport(self.netprofiler) as report:
report.run(columns=self.options.columns.split(','),
sort_col=self.options.sortby,
timefilter=self.timefilter,
trafficexpr=self.trafficexpr)
data = report.get_data()
legend = [c.label for c in report.get_legend()]
self.print_data(data, legend)
if __name__ == '__main__':
TrafficFlowListApp().run() | PypiClean |
/bitmovin_api_sdk-1.171.0-py3-none-any.whl/bitmovin_api_sdk/models/live_encoding.py |
from enum import Enum
from six import string_types, iteritems
from bitmovin_api_sdk.common.poscheck import poscheck_model
import pprint
import six
class LiveEncoding(object):
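    # Illustrative round trip (all values are placeholders, not real credentials):
    #
    #     le = LiveEncoding(stream_key='myStreamKey', encoder_ip='203.0.113.10',
    #                       application='live')
    #     le.to_dict()
    #     # -> {'streamKey': 'myStreamKey', 'encoderIp': '203.0.113.10', 'application': 'live'}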
@poscheck_model
def __init__(self,
stream_key=None,
encoder_ip=None,
application=None):
# type: (string_types, string_types, string_types) -> None
self._stream_key = None
self._encoder_ip = None
self._application = None
self.discriminator = None
if stream_key is not None:
self.stream_key = stream_key
if encoder_ip is not None:
self.encoder_ip = encoder_ip
if application is not None:
self.application = application
@property
def openapi_types(self):
types = {
'stream_key': 'string_types',
'encoder_ip': 'string_types',
'application': 'string_types'
}
return types
@property
def attribute_map(self):
attributes = {
'stream_key': 'streamKey',
'encoder_ip': 'encoderIp',
'application': 'application'
}
return attributes
@property
def stream_key(self):
# type: () -> string_types
"""Gets the stream_key of this LiveEncoding.
Stream key of the live encoder (required)
:return: The stream_key of this LiveEncoding.
:rtype: string_types
"""
return self._stream_key
@stream_key.setter
def stream_key(self, stream_key):
# type: (string_types) -> None
"""Sets the stream_key of this LiveEncoding.
Stream key of the live encoder (required)
:param stream_key: The stream_key of this LiveEncoding.
:type: string_types
"""
if stream_key is not None:
if not isinstance(stream_key, string_types):
raise TypeError("Invalid type for `stream_key`, type has to be `string_types`")
self._stream_key = stream_key
@property
def encoder_ip(self):
# type: () -> string_types
"""Gets the encoder_ip of this LiveEncoding.
IP address of the live encoder (required)
:return: The encoder_ip of this LiveEncoding.
:rtype: string_types
"""
return self._encoder_ip
@encoder_ip.setter
def encoder_ip(self, encoder_ip):
# type: (string_types) -> None
"""Sets the encoder_ip of this LiveEncoding.
IP address of the live encoder (required)
:param encoder_ip: The encoder_ip of this LiveEncoding.
:type: string_types
"""
if encoder_ip is not None:
if not isinstance(encoder_ip, string_types):
raise TypeError("Invalid type for `encoder_ip`, type has to be `string_types`")
self._encoder_ip = encoder_ip
@property
def application(self):
# type: () -> string_types
"""Gets the application of this LiveEncoding.
This will indicate the application 'live'
:return: The application of this LiveEncoding.
:rtype: string_types
"""
return self._application
@application.setter
def application(self, application):
# type: (string_types) -> None
"""Sets the application of this LiveEncoding.
This will indicate the application 'live'
:param application: The application of this LiveEncoding.
:type: string_types
"""
if application is not None:
if not isinstance(application, string_types):
raise TypeError("Invalid type for `application`, type has to be `string_types`")
self._application = application
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if value is None:
continue
if isinstance(value, list):
if len(value) == 0:
continue
result[self.attribute_map.get(attr)] = [y.value if isinstance(y, Enum) else y for y in [x.to_dict() if hasattr(x, "to_dict") else x for x in value]]
elif hasattr(value, "to_dict"):
result[self.attribute_map.get(attr)] = value.to_dict()
elif isinstance(value, Enum):
result[self.attribute_map.get(attr)] = value.value
elif isinstance(value, dict):
result[self.attribute_map.get(attr)] = {k: (v.to_dict() if hasattr(v, "to_dict") else v) for (k, v) in value.items()}
else:
result[self.attribute_map.get(attr)] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, LiveEncoding):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other | PypiClean |
/pyppbox_torchreid-1.4.0.2-cp38-cp38-win_amd64.whl/pyppbox_torchreid/data/sampler.py | from __future__ import division, absolute_import
import copy
import numpy as np
import random
from collections import defaultdict
from torch.utils.data.sampler import Sampler, RandomSampler, SequentialSampler
AVAI_SAMPLERS = [
'RandomIdentitySampler', 'SequentialSampler', 'RandomSampler',
'RandomDomainSampler', 'RandomDatasetSampler'
]
class RandomIdentitySampler(Sampler):
"""Randomly samples N identities each with K instances.
Args:
data_source (list): contains tuples of (img_path(s), pid, camid, dsetid).
batch_size (int): batch size.
num_instances (int): number of instances per identity in a batch.
"""
def __init__(self, data_source, batch_size, num_instances):
if batch_size < num_instances:
raise ValueError(
'batch_size={} must be no less '
'than num_instances={}'.format(batch_size, num_instances)
)
self.data_source = data_source
self.batch_size = batch_size
self.num_instances = num_instances
self.num_pids_per_batch = self.batch_size // self.num_instances
self.index_dic = defaultdict(list)
for index, items in enumerate(data_source):
pid = items[1]
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
assert len(self.pids) >= self.num_pids_per_batch
# estimate number of examples in an epoch
# TODO: improve precision
self.length = 0
for pid in self.pids:
idxs = self.index_dic[pid]
num = len(idxs)
if num < self.num_instances:
num = self.num_instances
self.length += num - num % self.num_instances
def __iter__(self):
batch_idxs_dict = defaultdict(list)
for pid in self.pids:
idxs = copy.deepcopy(self.index_dic[pid])
if len(idxs) < self.num_instances:
idxs = np.random.choice(
idxs, size=self.num_instances, replace=True
)
random.shuffle(idxs)
batch_idxs = []
for idx in idxs:
batch_idxs.append(idx)
if len(batch_idxs) == self.num_instances:
batch_idxs_dict[pid].append(batch_idxs)
batch_idxs = []
avai_pids = copy.deepcopy(self.pids)
final_idxs = []
while len(avai_pids) >= self.num_pids_per_batch:
selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
for pid in selected_pids:
batch_idxs = batch_idxs_dict[pid].pop(0)
final_idxs.extend(batch_idxs)
if len(batch_idxs_dict[pid]) == 0:
avai_pids.remove(pid)
return iter(final_idxs)
def __len__(self):
return self.length
class RandomDomainSampler(Sampler):
"""Random domain sampler.
We consider each camera as a visual domain.
How does the sampling work:
1. Randomly sample N cameras (based on the "camid" label).
2. From each camera, randomly sample K images.
Args:
data_source (list): contains tuples of (img_path(s), pid, camid, dsetid).
batch_size (int): batch size.
n_domain (int): number of cameras to sample in a batch.
"""
def __init__(self, data_source, batch_size, n_domain):
self.data_source = data_source
# Keep track of image indices for each domain
self.domain_dict = defaultdict(list)
for i, items in enumerate(data_source):
camid = items[2]
self.domain_dict[camid].append(i)
self.domains = list(self.domain_dict.keys())
# Make sure each domain can be assigned an equal number of images
if n_domain is None or n_domain <= 0:
n_domain = len(self.domains)
assert batch_size % n_domain == 0
self.n_img_per_domain = batch_size // n_domain
self.batch_size = batch_size
self.n_domain = n_domain
self.length = len(list(self.__iter__()))
def __iter__(self):
domain_dict = copy.deepcopy(self.domain_dict)
final_idxs = []
stop_sampling = False
while not stop_sampling:
selected_domains = random.sample(self.domains, self.n_domain)
for domain in selected_domains:
idxs = domain_dict[domain]
selected_idxs = random.sample(idxs, self.n_img_per_domain)
final_idxs.extend(selected_idxs)
for idx in selected_idxs:
domain_dict[domain].remove(idx)
remaining = len(domain_dict[domain])
if remaining < self.n_img_per_domain:
stop_sampling = True
return iter(final_idxs)
def __len__(self):
return self.length
class RandomDatasetSampler(Sampler):
"""Random dataset sampler.
How does the sampling work:
1. Randomly sample N datasets (based on the "dsetid" label).
2. From each dataset, randomly sample K images.
Args:
data_source (list): contains tuples of (img_path(s), pid, camid, dsetid).
batch_size (int): batch size.
n_dataset (int): number of datasets to sample in a batch.
"""
def __init__(self, data_source, batch_size, n_dataset):
self.data_source = data_source
# Keep track of image indices for each dataset
self.dataset_dict = defaultdict(list)
for i, items in enumerate(data_source):
dsetid = items[3]
self.dataset_dict[dsetid].append(i)
self.datasets = list(self.dataset_dict.keys())
# Make sure each dataset can be assigned an equal number of images
if n_dataset is None or n_dataset <= 0:
n_dataset = len(self.datasets)
assert batch_size % n_dataset == 0
self.n_img_per_dset = batch_size // n_dataset
self.batch_size = batch_size
self.n_dataset = n_dataset
self.length = len(list(self.__iter__()))
def __iter__(self):
dataset_dict = copy.deepcopy(self.dataset_dict)
final_idxs = []
stop_sampling = False
while not stop_sampling:
selected_datasets = random.sample(self.datasets, self.n_dataset)
for dset in selected_datasets:
idxs = dataset_dict[dset]
selected_idxs = random.sample(idxs, self.n_img_per_dset)
final_idxs.extend(selected_idxs)
for idx in selected_idxs:
dataset_dict[dset].remove(idx)
remaining = len(dataset_dict[dset])
if remaining < self.n_img_per_dset:
stop_sampling = True
return iter(final_idxs)
def __len__(self):
return self.length
def build_train_sampler(
data_source,
train_sampler,
batch_size=32,
num_instances=4,
num_cams=1,
num_datasets=1,
**kwargs
):
"""Builds a training sampler.
Args:
data_source (list): contains tuples of (img_path(s), pid, camid).
train_sampler (str): sampler name (default: ``RandomSampler``).
batch_size (int, optional): batch size. Default is 32.
num_instances (int, optional): number of instances per identity in a
batch (when using ``RandomIdentitySampler``). Default is 4.
num_cams (int, optional): number of cameras to sample in a batch (when using
``RandomDomainSampler``). Default is 1.
num_datasets (int, optional): number of datasets to sample in a batch (when
using ``RandomDatasetSampler``). Default is 1.
"""
assert train_sampler in AVAI_SAMPLERS, \
'train_sampler must be one of {}, but got {}'.format(AVAI_SAMPLERS, train_sampler)
if train_sampler == 'RandomIdentitySampler':
sampler = RandomIdentitySampler(data_source, batch_size, num_instances)
elif train_sampler == 'RandomDomainSampler':
sampler = RandomDomainSampler(data_source, batch_size, num_cams)
elif train_sampler == 'RandomDatasetSampler':
sampler = RandomDatasetSampler(data_source, batch_size, num_datasets)
elif train_sampler == 'SequentialSampler':
sampler = SequentialSampler(data_source)
elif train_sampler == 'RandomSampler':
sampler = RandomSampler(data_source)
return sampler | PypiClean |
/joulescope_ui-1.0.29.tar.gz/joulescope_ui-1.0.29/joulescope_ui/view.py |
from . import pubsub_singleton, register, N_, sanitize, \
get_topic_name, get_unique_id, get_instance
from .styles.manager import style_settings
from PySide6 import QtCore, QtWidgets
import PySide6QtAds as QtAds
import logging
_log = logging.getLogger(__name__)
class DockWidget(QtAds.CDockWidget):
def __init__(self, widget: QtWidgets.QWidget):
super().__init__('') # replaced by widget name
self.setWidget(widget)
topic = get_topic_name(widget)
self._subscribe_fns = [[f'{topic}/settings/name', self._on_setting_name]]
for t, fn in self._subscribe_fns:
pubsub_singleton.subscribe(t, fn, flags=['pub', 'retain'])
self.setFeatures(
QtAds.CDockWidget.DockWidgetClosable |
QtAds.CDockWidget.DockWidgetMovable |
QtAds.CDockWidget.DockWidgetFloatable |
QtAds.CDockWidget.DockWidgetFocusable |
QtAds.CDockWidget.DockWidgetDeleteOnClose |
QtAds.CDockWidget.DockWidgetForceCloseWithArea |
0)
self.closeRequested.connect(self._on_close_request)
def _on_setting_name(self, value):
self.setWindowTitle(value)
def _on_close_request(self):
widget = self.widget()
_log.info('close %s', get_unique_id(widget))
widget.close()
pubsub_singleton.publish('registry/view/actions/!widget_close', get_topic_name(widget))
VIEW_SETTINGS = {
'active': {
'dtype': 'str',
'brief': 'The unique_id for the active view instance.',
'default': None,
'flags': ['hide'],
},
'theme': {
'dtype': 'str',
'brief': N_('The active theme.'),
'default': 'js1',
'options': [['js1', N_('Joulescope standard theme')], ['system', N_('System OS-specific theme')]],
'flags': ['hide'],
},
'color_scheme': {
'dtype': 'str',
'brief': N_('The color scheme name.'),
'default': 'dark',
'options': [['dark', N_('Dark background')], ['light', N_('Light background')]],
},
'font_scheme': {
'dtype': 'str',
'brief': N_('The font scheme name.'),
'default': 'js1',
'options': [['js1', N_('Joulescope standard font theme')]],
},
'ads_state': {
'dtype': 'str',
'brief': 'The Advanced Docking System state for restoring widget layout.',
'default': '',
'flags': ['hide'],
},
'geometry': {
'dtype': 'obj',
'brief': 'The window size for restoring the view.',
'default': None,
'flags': ['hide'],
}
}
class View:
CAPABILITIES = ['view@']
SETTINGS = {**VIEW_SETTINGS, **style_settings(N_('New View'))}
_ui = None
_dock_manager = None
_active_instance = None
def __init__(self):
pass
@property
def is_active(self):
return self == View._active_instance
@staticmethod
def on_cls_setting_active(value):
"""Change the active view."""
style_enable_topic = 'registry/style/settings/enable'
pubsub_singleton.publish(style_enable_topic, False)
view: View = View._active_instance
ui = pubsub_singleton.query('registry/ui/instance', default=None)
if view is not None:
_log.info('active view %s: teardown start', view.unique_id)
topic = get_topic_name(view.unique_id)
if ui is not None:
pubsub_singleton.publish(f'{topic}/settings/geometry', ui.saveGeometry().data())
ads_state = View._dock_manager.saveState()
ads_state = bytes(ads_state).decode('utf-8')
pubsub_singleton.publish(f'{topic}/settings/ads_state', ads_state)
            children = pubsub_singleton.query(f'{topic}/children', default=None)
            if children is not None:
                for child in children:
                    view._widget_suspend(child)
_log.info('active view %s: teardown done', view.unique_id)
View._active_instance = None
if value in ['', None]:
return
topic = get_topic_name(value)
view = get_instance(value, default=None)
if view is None:
# should never happen
_log.warning('active view %s does not exist', value)
return
_log.info('active view %s: setup start', view.unique_id)
children = pubsub_singleton.query(f'{topic}/children', default=None)
if children is not None:
for child in children:
view.on_action_widget_open(child)
View._active_instance = view
ads_state = pubsub_singleton.query(f'{topic}/settings/ads_state', default='')
if ads_state is not None and len(ads_state):
View._dock_manager.restoreState(QtCore.QByteArray(ads_state.encode('utf-8')))
geometry = pubsub_singleton.query(f'{topic}/settings/geometry', default=None)
if ui is not None and geometry is not None:
ui.restoreGeometry(geometry)
pubsub_singleton.publish(style_enable_topic, True)
view._render()
_log.info('active view %s: setup done', view.unique_id)
def on_setting_theme(self):
if self.is_active:
self._render()
def on_setting_color_scheme(self):
if self.is_active:
self._render()
def on_setting_colors(self):
if self.is_active:
self._render()
def on_action_widget_open(self, value):
"""Create a widget, possibly reusing existing settings.
:param value: One of several options:
* The class unique_id or instance
* The instance unique_id, instance or existing widget object
* A dict containing:
* value: topic, unique_id, or instance required
* args: optional positional arguments for constructor
* kwargs: optional keyword arguments for constructor
* floating: optional window float control.
True to make floating on top.
When missing, do not float.
"""
_log.debug('widget_open %s', value)
obj: QtWidgets.QWidget = None
floating = False
unique_id = None
args = []
kwargs = {}
if isinstance(value, dict):
floating = bool(value.get('floating', False))
spec = value['value']
args = value.get('args', args)
kwargs = value.get('kwargs', kwargs)
else:
spec = value
if isinstance(spec, str):
cls_unique_id = get_unique_id(spec)
if ':' in spec:
unique_id = spec
cls_unique_id = unique_id.split(':')[0]
spec = get_instance(cls_unique_id, default=None)
if isinstance(spec, type):
obj = spec(*args, **kwargs)
else:
obj = spec
pubsub_singleton.register(obj, unique_id=unique_id, parent=self)
unique_id = obj.unique_id
obj.setObjectName(unique_id)
obj.dock_widget = DockWidget(obj)
obj.dock_widget.setObjectName(f'{unique_id}__dock')
tab_widget = obj.dock_widget.tabWidget()
tab_widget.setElideMode(QtCore.Qt.TextElideMode.ElideNone)
self._dock_manager.addDockWidget(QtAds.TopDockWidgetArea, obj.dock_widget)
pubsub_singleton.publish('registry/style/actions/!render', unique_id)
if floating:
dw = obj.dock_widget
dw.setFloating()
c = dw.floatingDockContainer()
c.resize(800, 600)
if getattr(obj, 'view_skip_undo', False):
return None
else:
return [['registry/view/actions/!widget_close', unique_id],
['registry/view/actions/!widget_open', unique_id]]
def _widget_suspend(self, value, delete=None):
"""Suspend a widget.
:param value: The topic, unique_id or instance for the
widget to suspend.
:param delete: True to also delete the pubsub entries.
This prevents state restore.
:return: The unique_id for the suspended widget or None
Suspending a widget closes the Qt Widget with the associated
DockWidget, freeing all resources. However, it preserves the
pubsub entries so that it can restore state. Suspend is
normally used when switching views.
"""
_log.debug('widget_suspend(%s, %s)', value, delete)
unique_id = get_unique_id(value)
topic = get_topic_name(unique_id)
instance_topic = f'{topic}/instance'
instance: QtWidgets.QWidget = pubsub_singleton.query(instance_topic, default=None)
if instance is not None:
if delete and hasattr(instance, 'on_widget_close'):
instance.on_widget_close()
dock_widget = instance.dock_widget
try:
dock_widget.deleteLater()
self._dock_manager.removeDockWidget(dock_widget)
except Exception:
_log.exception('Delete or remove dock widget raised exception')
instance.dock_widget = None
try:
instance.close()
instance.deleteLater()
except Exception:
_log.exception('Close or delete widget raised exception')
for child in pubsub_singleton.query(f'{topic}/children', default=[]):
self._widget_suspend(child)
pubsub_singleton.unregister(topic, delete=delete)
return unique_id
def on_action_widget_close(self, value):
"""Destroy an existing widget.
:param value: The topic, unique_id or instance for the
widget to destroy.
Destroying a widget:
* Calls "on_widget_close" method, if exists.
* Closes the Qt widget and associated DockWidget.
* Deletes the associated pubsub entries
* Removes the widget from its view.
"""
_log.debug('widget_close %s', value)
skip_undo = getattr(get_instance(value), 'view_skip_undo', False)
# todo save settings and dock geometry for undo
unique_id = self._widget_suspend(value, delete=True)
if skip_undo:
return None
else:
return [['registry/view/actions/!widget_open', unique_id],
['registry/view/actions/!widget_close', unique_id]]
@staticmethod
def on_cls_action_widget_open(value):
return View._active_instance.on_action_widget_open(value)
@staticmethod
def on_cls_action_widget_close(value):
if value == '*':
topic = get_topic_name(View._active_instance)
for widget in pubsub_singleton.query(f'{topic}/children'):
View._active_instance.on_action_widget_close(widget)
return None
else:
return View._active_instance.on_action_widget_close(value)
@staticmethod
def on_cls_action_add(value):
_log.info('add %s', value)
view = View()
pubsub_singleton.register(view, unique_id=value)
unique_id = view.unique_id
if View._active_instance is None:
pubsub_singleton.publish(f'{View.topic}/settings/active', unique_id)
return [['registry/view/actions/!remove', unique_id],
['registry/view/actions/!add', unique_id]]
@staticmethod
def on_cls_action_remove(value):
_log.info('remove %s', value)
unique_id = get_unique_id(value)
if unique_id == View._active_instance:
raise ValueError('Cannot remove active view')
pubsub_singleton.unregister(value, delete=True)
return [['registry/view/actions/!add', unique_id],
['registry/view/actions/!remove', unique_id]]
@staticmethod
def on_cls_action_ui_connect(value):
"""Connect the UI to the widget"""
View._ui = value['ui']
View._dock_manager = value['dock_manager']
@staticmethod
def on_cls_action_ui_disconnect(value):
"""Disconnect the UI."""
# hack to clean up active view
view_topic = 'registry/view/settings/active'
active_view = pubsub_singleton.query(view_topic)
pubsub_singleton.publish(view_topic, None)
pubsub_singleton.process()
pubsub_singleton._topic_by_name[view_topic].value = active_view
def _render(self):
pubsub_singleton.publish('registry/style/actions/!render', None)
register(View, 'view') | PypiClean |
/model-optimizer-2020.2.12.tar.gz/model-optimizer-2020.2.12/mo/utils/logger.py | import importlib.util
import logging as log
import os
import re
import sys
# WA for abseil bug that affects logging while importing TF starting 1.14 version
# Link to original issue: https://github.com/abseil/abseil-py/issues/99
if importlib.util.find_spec('absl') is not None:
import absl.logging
log.root.removeHandler(absl.logging._absl_handler)
handler_num = 0
class LvlFormatter(log.Formatter):
format_dict = {
log.DEBUG: "[ %(asctime)s ] [ %(levelname)s ] [ %(module)s:%(lineno)d ] %(msg)s",
log.INFO: "[ %(levelname)s ] %(msg)s",
log.WARNING: "[ WARNING ] %(msg)s",
log.ERROR: "[ %(levelname)s ] %(msg)s",
log.CRITICAL: "[ %(levelname)s ] %(msg)s",
'framework_error': "[ FRAMEWORK ERROR ] %(msg)s",
'analysis_info': "[ ANALYSIS INFO ] %(msg)s"
}
def __init__(self, lvl, fmt=None):
log.Formatter.__init__(self, fmt)
self.lvl = lvl
def format(self, record: log.LogRecord):
if self.lvl == 'DEBUG':
self._style._fmt = self.format_dict[log.DEBUG]
else:
self._style._fmt = self.format_dict[record.levelno]
if 'is_warning' in record.__dict__.keys():
self._style._fmt = self.format_dict[log.WARNING]
if 'framework_error' in record.__dict__.keys():
self._style._fmt = self.format_dict['framework_error']
if 'analysis_info' in record.__dict__.keys():
self._style._fmt = self.format_dict['analysis_info']
return log.Formatter.format(self, record)
class TagFilter(log.Filter):
def __init__(self, regex: str):
self.regex = regex
def filter(self, record: log.LogRecord):
if record.__dict__['funcName'] == 'load_grammar': # for nx not to log into our logs
return False
if self.regex:
if 'tag' in record.__dict__.keys():
tag = record.__dict__['tag']
return re.findall(self.regex, tag)
else:
return False
return True # if regex wasn't set print all logs
def init_logger(lvl: str, silent: bool):
global handler_num
log_exp = os.environ.get('MO_LOG_PATTERN')
if silent:
lvl = 'ERROR'
fmt = LvlFormatter(lvl=lvl)
handler = log.StreamHandler()
handler.setFormatter(fmt)
logger = log.getLogger()
logger.setLevel(lvl)
logger.addFilter(TagFilter(regex=log_exp))
if handler_num == 0:
logger.addHandler(handler)
handler_num += 1
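# Illustrative setup (a sketch; log-level names follow the standard ``logging`` module):
#
#     init_logger('DEBUG', silent=False)
#     log.debug('emitting op', extra={'tag': 'emitter'})  # kept only if it matches MO_LOG_PATTERN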
def progress_bar(function: callable):
"""
Decorator for model conversion pipeline progress display
Works in combination with function: mo.utils.class_registration.apply_transform
"""
def wrapper(*args, **kwargs):
for arg in ['graph', 'curr_transform_num', 'num_transforms']:
msg = 'Progress bar decorator is enabled for Model Optimizer transformation applying cycle only. ' \
'Argument `{}` {}'
assert arg in kwargs, msg.format(arg, 'is missing')
assert kwargs[arg] is not None, msg.format(arg, 'should not be None')
if kwargs['graph'].graph['cmd_params'].progress:
bar_len = 20
total_replacers_count = kwargs['num_transforms']
def progress(i):
return int((i + 1) / total_replacers_count * bar_len)
def percent(i):
return (i + 1) / total_replacers_count * 100
end = '' if not kwargs['graph'].graph['cmd_params'].stream_output else '\n'
curr_i = kwargs['curr_transform_num']
print('\rProgress: [{:{}}]{:>7.2f}% done'.format('.' * progress(curr_i), bar_len, percent(curr_i)), end=end)
sys.stdout.flush()
function(*args, **kwargs)
return wrapper | PypiClean |