Dataset columns: id (string, 1-8 chars), text (string, 6-1.05M chars), dataset_id (string, 1 class).
/monk_kaggle_test-0.0.3-py3-none-any.whl/monk/pytorch/finetune/imports.py
import warnings
warnings.filterwarnings("ignore", category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)

import matplotlib.pyplot as plt
import time
import os
import copy
import sys
import psutil
import numpy as np
import GPUtil


def isnotebook():
    try:
        shell = get_ipython().__class__.__name__
        if shell == 'ZMQInteractiveShell':
            return True   # Jupyter notebook or qtconsole
        elif shell == 'TerminalInteractiveShell':
            return False  # Terminal running IPython
        else:
            return False  # Other type (?)
    except NameError:
        return False


if(isnotebook()):
    from tqdm import tqdm_notebook as tqdm
else:
    from tqdm import tqdm as tqdm

import torch
import torchvision
from tabulate import tabulate
from scipy.stats import logistic

################################################################################
from monk.system.common import read_json
from monk.system.common import write_json
from monk.system.common import parse_csv
from monk.system.common import parse_csv_updated
from monk.system.common import save
from monk.system.summary import print_summary
################################################################################

################################################################################
from monk.pytorch.datasets.params import set_input_size
from monk.pytorch.datasets.params import set_batch_size
from monk.pytorch.datasets.params import set_data_shuffle
from monk.pytorch.datasets.params import set_num_processors
from monk.pytorch.datasets.params import set_weighted_sampling
from monk.pytorch.datasets.csv_dataset import DatasetCustom
from monk.pytorch.datasets.csv_dataset import DatasetCustomMultiLabel
from monk.pytorch.datasets.paths import set_dataset_train_path
from monk.pytorch.datasets.paths import set_dataset_test_path
################################################################################

################################################################################
from monk.pytorch.transforms.transforms import transform_center_crop
from monk.pytorch.transforms.transforms import transform_color_jitter
from monk.pytorch.transforms.transforms import transform_random_affine
from monk.pytorch.transforms.transforms import transform_random_crop
from monk.pytorch.transforms.transforms import transform_random_horizontal_flip
from monk.pytorch.transforms.transforms import transform_random_perspective
from monk.pytorch.transforms.transforms import transform_random_resized_crop
from monk.pytorch.transforms.transforms import transform_grayscale
from monk.pytorch.transforms.transforms import transform_random_rotation
from monk.pytorch.transforms.transforms import transform_random_vertical_flip
from monk.pytorch.transforms.transforms import transform_resize
from monk.pytorch.transforms.transforms import transform_normalize
from monk.pytorch.transforms.return_transform import set_transform_trainval
from monk.pytorch.transforms.return_transform import set_transform_test
from monk.pytorch.transforms.retrieve_transform import retrieve_trainval_transforms
from monk.pytorch.transforms.retrieve_transform import retrieve_test_transforms
################################################################################

################################################################################
from monk.system.eda.eda import class_imbalance
from monk.system.eda.eda import corrupted_missing_images
################################################################################

################################################################################
from monk.system.graphs.bar import create_plot
from monk.system.graphs.line import create_train_test_plots_accuracy
from monk.system.graphs.line import create_train_test_plots_loss
################################################################################

################################################################################
from monk.pytorch.models.layers import layer_dropout
from monk.pytorch.models.layers import layer_linear
from monk.pytorch.models.layers import activation_elu
from monk.pytorch.models.layers import activation_hardshrink
from monk.pytorch.models.layers import activation_hardtanh
from monk.pytorch.models.layers import activation_leakyrelu
from monk.pytorch.models.layers import activation_logsigmoid
from monk.pytorch.models.layers import activation_prelu
from monk.pytorch.models.layers import activation_relu
from monk.pytorch.models.layers import activation_relu6
from monk.pytorch.models.layers import activation_rrelu
from monk.pytorch.models.layers import activation_selu
from monk.pytorch.models.layers import activation_celu
from monk.pytorch.models.layers import activation_sigmoid
from monk.pytorch.models.layers import activation_softplus
from monk.pytorch.models.layers import activation_softshrink
from monk.pytorch.models.layers import activation_softsign
from monk.pytorch.models.layers import activation_tanh
from monk.pytorch.models.layers import activation_tanhshrink
from monk.pytorch.models.layers import activation_threshold
from monk.pytorch.models.layers import activation_softmin
from monk.pytorch.models.layers import activation_softmax
from monk.pytorch.models.layers import activation_logsoftmax
from monk.pytorch.models.params import set_model_name
from monk.pytorch.models.params import set_device
from monk.pytorch.models.params import set_pretrained
from monk.pytorch.models.params import set_freeze_base_network
from monk.pytorch.models.params import set_model_path
from monk.pytorch.models.common import set_parameter_requires_grad
from monk.pytorch.models.common import model_to_device
from monk.pytorch.models.common import print_grad_stats
from monk.pytorch.models.common import get_num_layers
from monk.pytorch.models.common import freeze_layers
from monk.pytorch.models.return_model import load_model
from monk.pytorch.models.return_model import setup_model
from monk.pytorch.models.return_model import debug_create_network
from monk.pytorch.models.features import CNNVisualizer
################################################################################

################################################################################
from monk.pytorch.schedulers.schedulers import scheduler_fixed
from monk.pytorch.schedulers.schedulers import scheduler_step
from monk.pytorch.schedulers.schedulers import scheduler_multistep
from monk.pytorch.schedulers.schedulers import scheduler_exponential
from monk.pytorch.schedulers.schedulers import scheduler_plateau
from monk.pytorch.schedulers.retrieve_scheduler import retrieve_scheduler
from monk.pytorch.schedulers.return_scheduler import load_scheduler
################################################################################

################################################################################
from monk.pytorch.optimizers.optimizers import adadelta
from monk.pytorch.optimizers.optimizers import adagrad
from monk.pytorch.optimizers.optimizers import adam
from monk.pytorch.optimizers.optimizers import adamw
from monk.pytorch.optimizers.optimizers import adamax
from monk.pytorch.optimizers.optimizers import rmsprop
from monk.pytorch.optimizers.optimizers import momentum_rmsprop
from monk.pytorch.optimizers.optimizers import sgd
from monk.pytorch.optimizers.optimizers import nesterov_sgd
from monk.pytorch.optimizers.retrieve_optimizer import retrieve_optimizer
from monk.pytorch.optimizers.return_optimizer import load_optimizer
################################################################################

################################################################################
from monk.pytorch.losses.losses import l1
from monk.pytorch.losses.losses import l2
from monk.pytorch.losses.losses import softmax_crossentropy
from monk.pytorch.losses.losses import crossentropy
from monk.pytorch.losses.losses import sigmoid_binary_crossentropy
from monk.pytorch.losses.losses import binary_crossentropy
from monk.pytorch.losses.losses import kldiv
from monk.pytorch.losses.losses import poisson_nll
from monk.pytorch.losses.losses import huber
from monk.pytorch.losses.losses import hinge
from monk.pytorch.losses.losses import squared_hinge
from monk.pytorch.losses.losses import multimargin
from monk.pytorch.losses.losses import squared_multimargin
from monk.pytorch.losses.losses import multilabelmargin
from monk.pytorch.losses.losses import multilabelsoftmargin
from monk.pytorch.losses.return_loss import load_loss
from monk.pytorch.losses.retrieve_loss import retrieve_loss
################################################################################

################################################################################
from monk.pytorch.training.params import set_num_epochs
from monk.pytorch.training.params import set_display_progress_realtime
from monk.pytorch.training.params import set_display_progress
from monk.pytorch.training.params import set_save_intermediate_models
from monk.pytorch.training.params import set_save_training_logs
from monk.pytorch.training.params import set_intermediate_model_prefix
################################################################################

################################################################################
from monk.pytorch.testing.process import process_single
from monk.pytorch.testing.process import process_multi
################################################################################
PypiClean
/moai-mdk-0.1.5a16.tar.gz/moai-mdk-0.1.5a16/moai/visualization/visdom/gizmo2d.py
from moai.visualization.visdom.base import Base from moai.utils.arguments import ensure_string_list import torch import visdom import functools import typing import logging import numpy as np import cv2 import colour log = logging.getLogger(__name__) __all__ = ["Gizmo2d"] class Gizmo2d(Base): def __init__(self, images: typing.Union[str, typing.Sequence[str]], gizmos: typing.Union[str, typing.Sequence[str]], gt: typing.Union[str, typing.Sequence[str]], pred: typing.Union[str, typing.Sequence[str]], coords: typing.Union[str, typing.Sequence[str]], color_gt: typing.Union[str, typing.Sequence[str]], color_pred: typing.Union[str, typing.Sequence[str]], name: str="default", ip: str="http://localhost", port: int=8097, reverse_coords: bool=False, ): super(Gizmo2d, self).__init__(name, ip, port) self.images = ensure_string_list(images) self.gizmos = ensure_string_list(gizmos) self.gt = ensure_string_list(gt) self.pred = ensure_string_list(pred) self.color_gt = list(map(colour.web2rgb, ensure_string_list(color_gt))) self.color_pred = list(map(colour.web2rgb, ensure_string_list(color_pred))) self.coords = ensure_string_list(coords) self.reverse = reverse_coords self.gizmo_render = { 'marker_circle': functools.partial(self.__draw_markers, self.visualizer, marker=-1), 'marker_diamond': functools.partial(self.__draw_markers, self.visualizer, marker=cv2.MARKER_DIAMOND), 'marker_star': functools.partial(self.__draw_markers, self.visualizer, marker=cv2.MARKER_STAR), 'marker_cross': functools.partial(self.__draw_markers, self.visualizer, marker=cv2.MARKER_CROSS), 'marker_square': functools.partial(self.__draw_markers, self.visualizer, marker=cv2.MARKER_SQUARE), 'bbox2d': functools.partial(self.__draw_2dbox, self.visualizer), 'bbox3d': functools.partial(self.__draw_3dbox, self.visualizer), 'axes': functools.partial(self.__draw_axes, self.visualizer), #TODO: axes to only receive a scale parameter and have the axis points hardcoded here } self.xforms = { #TODO: extract these into a common module 'ndc': lambda coord, img: torch.addcmul( torch.scalar_tensor(0.5).to(coord), coord, torch.scalar_tensor(0.5).to(coord) ) * torch.Tensor([*img.shape[2:]]).to(coord).expand_as(coord), 'coord': lambda coord, img: coord, 'norm': lambda coord, img: coord * torch.Tensor([*img.shape[2:]]).to(coord).expand_as(coord), } @property def name(self) -> str: return self.env_name def __call__(self, tensors: typing.Dict[str, torch.Tensor]) -> None: for img, gzm, gt, pred, gt_c, pred_c, coord in zip( self.images, self.gizmos, self.gt, self.pred, self.color_gt, self.color_pred, self.coords ): gt_coord = tensors[gt].detach() pred_coord = tensors[pred].detach() if self.reverse: gt_coord = gt_coord.flip(-1) pred_coord = pred_coord.flip(-1) image = tensors[img].detach() self.gizmo_render[gzm]( image, self.xforms[coord](gt_coord, image), self.xforms[coord](pred_coord, image), np.uint8(np.array(list(reversed(gt_c))) * 255), np.uint8(np.array(list(reversed(pred_c))) * 255), coord, img, img, self.name ) @staticmethod def __draw_2dbox( visdom: visdom.Visdom, images: torch.Tensor, gt_coordinates: torch.Tensor, pred_coordinates: torch.Tensor, gt_color: typing.List[float], pred_color: typing.List[float], coord: str, key: str, win: str, env: str ) -> None: b, c, h, w = images.size() imgs = np.zeros(images.shape, dtype=np.uint8) gt_coords = gt_coordinates.detach().cpu() pred_coords = pred_coordinates.detach().cpu() gt_coords = gt_coords.numpy() pred_coords = pred_coords.numpy() diagonal = torch.norm(torch.Tensor([*imgs.shape[2:]]), p=2) line_size = 
int(0.005 * diagonal) #TODO: extract percentage param to config? for i in range(imgs.shape[0]): img = images[i, ...].detach().cpu().numpy().transpose(1, 2, 0) * 255.0 img = img.copy().astype(np.uint8) for coords, color in zip( [gt_coords, pred_coords], [gt_color, pred_color] ): coord_i = coords[i, ...] pt1_x , pt1_y , w , h = coord_i pt2_x= pt1_x + w # bottom right pt2_y = pt1_y + h # bottom right cv2.rectangle(img, (pt1_x, pt1_y), (pt2_x, pt2_y), color.tolist(), line_size ) imgs[i, ...] = img.transpose(2, 0, 1) del coord_i visdom.images( np.flip(imgs, axis=1), win=win, env=env, opts={ 'title': key, 'caption': key, 'jpgquality': 50, } ) @staticmethod def __draw_3dbox( visdom: visdom.Visdom, images: torch.Tensor, gt_coordinates: torch.Tensor, pred_coordinates: torch.Tensor, gt_color: typing.List[float], pred_color: typing.List[float], coord: str, key: str, win: str, env: str ) -> None: b , c , h , w = images.size() imgs = np.zeros(images.shape, dtype=np.uint8) gt_coords = gt_coordinates.cpu() pred_coords = pred_coordinates.cpu() gt_coords = torch.flip(gt_coords, dims=[-1]) pred_coords = torch.flip(pred_coords, dims=[-1]) gt_coords = gt_coords.numpy() pred_coords = pred_coords.numpy() diagonal = torch.norm(torch.Tensor([*imgs.shape[2:]]), p=2) line_size = int(0.005 * diagonal) #TODO: extract percentage param to config? for i in range(imgs.shape[0]): img = images[i, ...].detach().cpu().numpy().transpose(1, 2, 0) * 255.0 img = img.copy().astype(np.uint8) for coords, color in zip( [gt_coords, pred_coords], [gt_color, pred_color] ): coord_i = coords[i, ...] for k, key_ in enumerate(coord_i): if (k + 1) % 2 == 0: cv2.line(img, (int(point_1_x), int(point_1_y)), (int(coord_i[k][0]), int(coord_i[k][1])), color.tolist(), line_size) else: point_1_x = coord_i[k][0] point_1_y = coord_i[k][1] for k, key_ in enumerate(coord_i): if k == 0 or k == 1 or k ==4 or k == 5: point_2_x = coord_i[k+2][0] point_2_y = coord_i[k+2][1] cv2.line(img, (int(point_2_x), int(point_2_y)), (int(coord_i[k][0]), int(coord_i[k][1])), color.tolist(), line_size) for k, key_ in enumerate(coord_i): if k == 0 or k == 1 or k == 2 or k == 3: point_2_x = coord_i[k+4][0] point_2_y = coord_i[k+4][1] cv2.line(img, (int(point_2_x), int(point_2_y)), (int(coord_i[k][0]), int(coord_i[k][1])), color.tolist(), line_size) imgs[i, ...] 
= img.transpose(2, 0, 1) del coord_i visdom.images( np.flip(imgs, axis=1), win=win, env=env, opts={ 'title': key, 'caption': key, 'jpgquality': 50, } ) @staticmethod def __draw_axes( visdom: visdom.Visdom, images: torch.Tensor, gt_axes: torch.Tensor, pred_axes: torch.Tensor, gt_color: typing.List[float], pred_color: typing.List[float], coord: str, key: str, win: str, env: str ) -> None: b , c , h , w = images.size() imgs = np.zeros(images.shape, dtype=np.uint8) gt_axes = gt_axes.cpu() pred_axes = pred_axes.cpu() gt_axes = torch.flip(gt_axes, dims=[-1]) pred_axes = torch.flip(pred_axes, dims=[-1]) gt_axes = gt_axes.numpy() pred_axes = pred_axes.numpy() diagonal = torch.norm(torch.Tensor([*imgs.shape[2:]]), p=2) line_size = int(0.005 * diagonal) for i in range(imgs.shape[0]): img = images[i, ...].detach().cpu().numpy().transpose(1, 2, 0) * 255.0 img = img.copy().astype(np.uint8) for j , (coords, color) in enumerate(zip( [gt_axes, pred_axes], [gt_color, pred_color] )): coord_i = np.int32(coords[i, ...]) #draw lines if j == 1: alpha = 0.4 overlay = img.copy() cv2.arrowedLine(overlay,tuple(coord_i[0]),tuple(coord_i[1]), (0,0,255), line_size) cv2.arrowedLine(overlay,tuple(coord_i[0]),tuple(coord_i[2]), (255,0,0), line_size) cv2.arrowedLine(overlay,tuple(coord_i[0]),tuple(coord_i[3]), (0,255,0), line_size) cv2.addWeighted(overlay, alpha, img, 1 - alpha, 0, img) else: cv2.arrowedLine(img,tuple(coord_i[0]),tuple(coord_i[1]), (0,0,255), line_size) cv2.arrowedLine(img,tuple(coord_i[0]),tuple(coord_i[2]), (255,0,0), line_size) cv2.arrowedLine(img,tuple(coord_i[0]),tuple(coord_i[3]), (0,255,0), line_size) imgs[i, ...] = img.transpose(2, 0, 1) del coord_i visdom.images( np.flip(imgs, axis=1), win=win, env=env, opts={ 'title': key, 'caption': key, 'jpgquality': 50, } ) @staticmethod def __draw_markers( visdom: visdom.Visdom, images: torch.Tensor, gt_coordinates: torch.Tensor, pred_coordinates: torch.Tensor, gt_color: typing.List[float], pred_color: typing.List[float], coord: str, key: str, win: str, env: str, marker: int, ): imgs = np.zeros([images.shape[0], 3, images.shape[2], images.shape[3]], dtype=np.uint8) gt_coords = gt_coordinates.cpu() pred_coords = pred_coordinates.cpu() gt_coords = torch.flip(gt_coords, dims=[-1]) pred_coords = torch.flip(pred_coords, dims=[-1]) gt_coords = gt_coords.numpy() pred_coords = pred_coords.numpy() diagonal = torch.norm(torch.Tensor([*imgs.shape[2:]]), p=2) marker_size = int(0.02 * diagonal) #TODO: extract percentage param to config? line_size = int(0.005 * diagonal) #TODO: extract percentage param to config? for i in range(imgs.shape[0]): img = images[i, ...].cpu().numpy().transpose(1, 2, 0) * 255.0 img = img.copy().astype(np.uint8) if img.shape[2] > 1 else cv2.cvtColor(img.copy().astype(np.uint8), cv2.COLOR_GRAY2RGB) for coords, color in zip( [gt_coords, pred_coords], [gt_color, pred_color] ): coord_i = np.int32(coords[i, ...]) for k, coord in enumerate(coord_i): if marker < 0: cv2.circle(img, tuple(coord), marker_size, color.tolist(), thickness=line_size ) else: cv2.drawMarker(img, tuple(coord), color.tolist(), marker, marker_size, line_size ) imgs[i, ...] = img.transpose(2, 0, 1) visdom.images( np.flip(imgs, axis=1), win=win, env=env, opts={ 'title': key, 'caption': key, 'jpgquality': 50, } )
PypiClean
/minipip-0.1b1-py3-none-any.whl/minipip.py
import io import json import os.path import sys import shlex import shutil import subprocess import tarfile import tempfile import textwrap from typing import Union, List, Dict, Any, Optional from urllib.error import HTTPError from urllib.request import urlopen import pkg_resources import logging from pkg_resources import Requirement logger = logging.getLogger(__name__) MP_ORG_INDEX = "https://micropython.org/pi" DEFAULT_INDEX_URLS = [MP_ORG_INDEX, "https://pypi.org/pypi"] __version__ = "0.1b1" class UserError(RuntimeError): pass class NotUpipCompatible(RuntimeError): pass def install( spec: Union[List[str], str], target_dir: str, index_urls: List[str] = None, port: Optional[str] = None, ): if not index_urls: index_urls = DEFAULT_INDEX_URLS temp_dir = tempfile.mkdtemp() try: _install_to_local_temp_dir(spec, temp_dir, index_urls) if port is not None: _copy_to_micropython_over_serial(temp_dir, port, target_dir) else: _copy_to_local_target_dir(temp_dir, target_dir) finally: shutil.rmtree(temp_dir, ignore_errors=True) def _copy_to_local_target_dir(source_dir: str, target_dir: str): logger.info("Copying files to %s", os.path.abspath(target_dir)) # Copying manually in order to be able to use os.fsync # see https://learn.adafruit.com/adafruit-circuit-playground-express/creating-and-editing-code#1-use-an-editor-that-writes-out-the-file-completely-when-you-save-it for root, dirs, files in os.walk(source_dir): relative_dir = root[len(source_dir) :] full_target_dir = target_dir + relative_dir for dir_name in dirs: full_path = os.path.join(full_target_dir, dir_name) if os.path.isdir(full_path): logger.info("Directory %s already exists", os.path.join(relative_dir, dir_name)) elif os.path.isfile(full_path): raise UserError("Can't treat existing file %s as directory", full_path) else: logger.info("Creating %s", os.path.join(relative_dir, dir_name)) os.makedirs(full_path, 0o700) for file_name in files: full_source_path = os.path.join(root, file_name) full_target_path = os.path.join(full_target_dir, file_name) logger.debug("Preparing %s => %s", full_source_path, full_target_path) if os.path.isfile(full_target_path): logger.info("Overwriting %s", os.path.join(relative_dir, file_name)) elif os.path.isdir(full_target_path): raise UserError("Can't treat existing directory %s as file", full_target_path) else: logger.info("Copying %s", os.path.join(relative_dir, file_name)) with open(full_source_path, "rb") as in_fp, open(full_target_path, "wb") as out_fp: out_fp.write(in_fp.read()) out_fp.flush() os.fsync(out_fp) def _copy_to_micropython_over_serial(source_dir: str, port: str, target_dir: str): assert target_dir.startswith("/") cmd = _get_rshell_command() + ["-p", port, "rsync", source_dir, "/pyboard" + target_dir] logger.debug("Uploading with rsync: %s", shlex.join(cmd)) subprocess.check_call(cmd) def _get_rshell_command() -> Optional[List[str]]: if shutil.which("rshell"): return ["rshell"] else: return None def _install_to_local_temp_dir( spec: Union[List[str], str], temp_install_dir: str, index_urls: List[str] ) -> None: if isinstance(spec, str): specs = [spec] else: specs = spec pip_specs = _install_all_upip_compatible(specs, temp_install_dir, index_urls) if pip_specs: _install_with_pip(pip_specs, temp_install_dir, index_urls) def _install_all_upip_compatible( specs: List[str], install_dir: str, index_urls: List[str] ) -> List[str]: """Returns list of specs which must be installed with pip""" installed_specs = set() specs_to_be_processed = specs.copy() pip_specs = [] while specs_to_be_processed: spec = 
specs_to_be_processed.pop(0) if spec in installed_specs or spec in pip_specs: continue req = pkg_resources.Requirement.parse(spec) logger.info("Processing '%s'", req) meta = _fetch_metadata(req, index_urls) version = meta["info"]["version"] logger.info("Inspecting version %s", version) assets = meta["releases"][version] if len(assets) != 1 or not assets[0]["url"].endswith(".tar.gz"): logger.info( "'%s' will be installed with pip (not having single tar.gz asset).", req.project_name, ) pip_specs.append(spec) continue try: dep_specs = _install_single_upip_compatible_from_url( req.project_name, assets[0]["url"], install_dir ) installed_specs.add(spec) if dep_specs: logger.info("Dependencies of '%s': %s", spec, dep_specs) for dep_spec in dep_specs: if dep_spec not in installed_specs and dep_spec not in specs_to_be_processed: specs_to_be_processed.append(dep_spec) except NotUpipCompatible: pip_specs.append(spec) return pip_specs def _install_single_upip_compatible_from_url( project_name: str, url: str, target_dir: str ) -> List[str]: with urlopen(url) as fp: download_data = fp.read() tar = tarfile.open(fileobj=io.BytesIO(download_data), mode="r:gz") deps = [] content: Dict[str, Optional[bytes]] = {} for info in tar: if "/" in info.name: dist_name, rel_name = info.name.split("/", maxsplit=1) else: dist_name, rel_name = info.name, "" if rel_name == "setup.py": logger.debug("The archive contains setup.py. The package will be installed with pip") raise NotUpipCompatible() if ".egg-info/PKG-INFO" in rel_name: continue if ".egg-info/requires.txt" in rel_name: for line in tar.extractfile(info): line = line.strip() if line and not line.startswith(b"#"): deps.append(line.decode()) continue if ".egg-info" in rel_name: continue if info.isdir(): content[os.path.join(target_dir, rel_name)] = None elif info.isfile(): content[os.path.join(target_dir, rel_name)] = tar.extractfile(info).read() # write files only after the package is fully inspected and found to be upip compatible logger.info("Extracting '%s' from %s to %s", project_name, url, os.path.abspath(target_dir)) for path in content: data = content[path] if data is None: os.makedirs(path, exist_ok=True) else: os.makedirs(os.path.dirname(path), exist_ok=True) with open(path, "wb") as fp: fp.write(data) return deps def _install_with_pip(specs: List[str], target_dir: str, index_urls: List[str]): logger.info("Installing with pip: %s", specs) suitable_indexes = [url for url in index_urls if url != MP_ORG_INDEX] if not suitable_indexes: raise UserError("No suitable indexes for pip") args = [ "--no-input", "--no-color", "--disable-pip-version-check", "install", "--upgrade", "--target", target_dir, ] args += ["--index-url", suitable_indexes.pop(0)] while suitable_indexes: args += ["--extra-index-url", suitable_indexes.pop(0)] subprocess.check_call( [ sys.executable, "-m", "pip", ] + args + specs ) # delete files not required for MicroPython for root, dirs, files in os.walk(target_dir): for dir_name in dirs: if dir_name.endswith(".dist-info") or dir_name == "__pycache__": shutil.rmtree(os.path.join(root, dir_name)) for file_name in files: if file_name.endswith(".pyc"): os.remove(os.path.join(root, file_name)) def _fetch_metadata(req: Requirement, index_urls: List[str]) -> Dict[str, Any]: ver_specs = req.specs for i, index_url in enumerate(index_urls): try: url = "%s/%s/json" % (index_url, req.project_name) logger.info("Querying package metadata from %s", url) with urlopen(url) as fp: main_meta = json.load(fp) current_version = main_meta["info"]["version"] if not 
ver_specs: ver_specs = ["==" + current_version] ver = _resolve_version(req, main_meta) if ver is None: logger.info("Could not find suitable version from %s", index_url) continue if ver == current_version: # micropython.org only has main meta return main_meta else: url = "%s/%s/%s/json" % (index_url, req.project_name, ver) logger.debug("Querying version metadata from %s", url) with urlopen(url) as fp: logger.info("Found '%s' from %s", req, index_url) return json.load(fp) except HTTPError as e: if e.code == 404: logger.info("Could not find '%s' from %s", req.project_name, index_url) else: raise raise UserError( "Could not find '%s' from any of the indexes %s" % (req.project_name, index_urls) ) def _read_requirements(req_file: str) -> List[str]: if not os.path.isfile(req_file): raise UserError("Can't find '%s'" % req_file) result = [] with open(req_file, "r", errors="replace") as fp: for line in fp: line = line.strip() if line and not line.startswith("#"): result.append(line) return result def _resolve_version(req: Requirement, main_meta: Dict[str, Any]) -> Optional[str]: matching_versions = [] for ver in main_meta["releases"]: if ver in req and len(main_meta["releases"][ver]) > 0: matching_versions.append(ver) if not matching_versions: return None return sorted(matching_versions, key=pkg_resources.parse_version)[-1] def main(raw_args: Optional[List[str]] = None) -> int: if raw_args is None: raw_args = sys.argv[1:] import argparse description = textwrap.dedent(""" Meant for installing both upip and pip compatible distribution packages from PyPI and micropython.org/pi to a local directory, USB volume or directly to MicroPython filesystem over serial connection (requires rshell). """).strip() parser = argparse.ArgumentParser(description=description) parser.add_argument( "command", help="Currently the only supported command is 'install'", choices=["install"] ) parser.add_argument( "specs", help="Package specification, eg. 
'micropython-os' or 'micropython-os>=0.6'", nargs="+", metavar="package_spec", ) parser.add_argument( "-r", "--requirement", help="Install from the given requirements file.", nargs="*", dest="requirement_files", metavar="REQUIREMENT_FILE", default=[], ) parser.add_argument( "-p", "--port", help="Serial port of the device", nargs="?", ) parser.add_argument( "-t", "--target", help="Target directory (on device, if port is given, otherwise local)", default=".", dest="target_dir", metavar="TARGET_DIR", required=True, ) parser.add_argument( "-i", "--index-url", help="Custom index URL", ) parser.add_argument( "-v", "--verbose", help="Show more details about the process", action="store_true", ) parser.add_argument( "-q", "--quiet", help="Don't show non-error output", action="store_true", ) args = parser.parse_args(args=raw_args) all_specs = args.specs for req_file in args.requirement_files: all_specs.extend(_read_requirements(req_file)) if args.index_url: index_urls = [args.index_url] else: index_urls = DEFAULT_INDEX_URLS if args.quiet and args.verbose: print("Can't be quiet and verbose at the same time", file=sys.stderr) sys.exit(1) if args.verbose: logging_level = logging.DEBUG elif args.quiet: logging_level = logging.ERROR else: logging_level = logging.INFO logger.setLevel(logging_level) logger.propagate = True console_handler = logging.StreamHandler(sys.stdout) console_handler.setLevel(logging_level) logger.addHandler(console_handler) if args.port and not _get_rshell_command(): print( "Could not find rshell (required for uploading when serial port is given)", file=sys.stderr, ) return 1 if args.port and not args.target_dir.startswith("/"): print("If port is given then target dir must be absolute Unix-style path") return 1 try: install(all_specs, target_dir=args.target_dir, index_urls=index_urls, port=args.port) except UserError as e: print("ERROR:", e, file=sys.stderr) return 1 except subprocess.CalledProcessError: # assuming the subprocess (pip or rshell) already printed the error return 1 return 0 if __name__ == "__main__": main(sys.argv[1:])
PypiClean
/valohai_yaml-0.31.0.tar.gz/valohai_yaml-0.31.0/valohai_yaml/objs/pipelines/pipeline_parameter.py
from typing import TYPE_CHECKING, List, Optional, Union

from valohai_yaml.lint import LintResult
from valohai_yaml.objs.base import Item
from valohai_yaml.objs.utils import check_type_and_listify
from valohai_yaml.types import LintContext, SerializedDict
from valohai_yaml.utils.node_socket_utils import split_socket_str

if TYPE_CHECKING:
    from valohai_yaml.objs import Config, Pipeline


class PipelineParameter(Item):
    """Represents a parameter definition within a pipeline definition."""

    def __init__(
        self,
        *,
        name: str,
        targets: Union[List[str], str],
        value: Optional[str] = None,
        default: Optional[str] = None,
    ) -> None:
        self.name = name
        self.default = default if value is None else value
        if isinstance(targets, str):
            self.targets = [targets]
        else:
            self.targets = check_type_and_listify(targets, str)

    @classmethod
    def parse(cls, data: SerializedDict) -> 'PipelineParameter':
        data = data.copy()
        # targets can be a string or a list of strings
        if 'target' in data:
            if 'targets' in data:
                raise TypeError("Pipeline parameter cannot have both: target and targets")
            data['targets'] = [data.pop('target')]
        return super().parse(data)

    def lint(self, lint_result: LintResult, context: LintContext) -> None:
        pipeline: Pipeline = context['pipeline']
        config: Config = context['config']
        steps = config.steps
        node_map = pipeline.node_map
        for target in self.targets:
            target_node_name, socket_type, target_parameter_name = split_socket_str(target)
            if not socket_type.startswith('parameter'):
                lint_result.add_error(
                    f'Pipeline "{pipeline.name}" parameter "{self.name}" target "{target}": socket type "{socket_type}"'
                    f' is not supported.'
                )
            if target_node_name not in node_map:
                lint_result.add_error(
                    f'Pipeline "{pipeline.name}" parameter "{self.name}" target "{target}": the node '
                    f'"{target_node_name}" does not exist.'
                )
            else:
                node = node_map[target_node_name]
                from valohai_yaml.objs import ExecutionNode
                if isinstance(node, ExecutionNode):
                    step = steps[node.step]
                    if target_parameter_name not in step.parameters:
                        lint_result.add_error(
                            f'Pipeline "{pipeline.name}" parameter "{self.name}" target "{target}": the parameter '
                            f'"{target_parameter_name}" does not exist in step "{step.name}".'
                        )
PypiClean
/yafowil.widget.slider-2.0a1.tar.gz/yafowil.widget.slider-2.0a1/CHANGES.rst
Changes
=======

2.0a1 (2023-05-15)
------------------

- Add ``webresource`` support.
  [rnix]

- Extend JS by ``slider_on_array_add`` and ``register_array_subscribers``
  functions to enable usage in ``yafowil.widget.array``.
  [lenadax]

- Rewrite of Javascript slider widget. No more jQuery UI.
  [lenadax, rnix]


1.3.1 (2020-05-30)
------------------

- Fix binding scope in ``bdajax`` binder callback.
  [rnix]


1.3 (2018-07-16)
----------------

- Python 3 compatibility.
  [rnix]

- Convert doctests to unittests.
  [rnix]


1.2 (2017-03-01)
----------------

- Add dedicated CSS for ``plone5`` theme provided by ``yafowil.plone``.
  [rnix, 2016-06-28]

- Use ``yafowil.utils.entry_point`` decorator.
  [rnix, 2016-06-28]


1.1 (2015-01-23)
----------------

- Update jquery UI slider to 1.10.3 and use latest jquery ui bootstrap styles.
  [rnix, 2014-07-05]


1.0
---

- Make it work
  [rnix]
PypiClean
/tw.dojo-0.9.181.tar.gz/tw.dojo-0.9.181/tw/dojo/static/1.8.1/min/dojox/app/widgets/_ScrollableMixin.js.uncompressed.js
define("dojox/app/widgets/_ScrollableMixin", [
	"dojo/_base/array",
	"dojo/_base/declare",
	"dojo/_base/lang",
	"dojo/dom-class",
	"dojo/dom-construct",
	"./scrollable"],
	function(array, declare, lang, domClass, domConstruct, Scrollable){

	// module:
	//		dojox/mobile/_ScrollableMixin
	// summary:
	//		Mixin for widgets to have a touch scrolling capability.

	var cls = declare("dojox.app.widgets._ScrollableMixin", null, {
		// summary:
		//		Mixin for widgets to have a touch scrolling capability.
		// description:
		//		Actual implementation is in scrollable.js.
		//		scrollable.js is not a dojo class, but just a collection
		//		of functions. This module makes scrollable.js a dojo class.

		// scrollableParams: Object
		//		Parameters for dojox/mobile/scrollable.init().
		scrollableParams: null,

		// allowNestedScrolls: Boolean
		//		e.g. Allow ScrollableView in a SwapView.
		allowNestedScrolls: true,

		constructor: function(){
			this.scrollableParams = {};
		},

		destroy: function(){
			this.cleanup();
			this.inherited(arguments);
		},

		startup: function(){
			if(this._started){ return; }
			var params = this.scrollableParams;
			this.init(params);
			this.inherited(arguments);
			this.reparent();
		},

		// build scrollable container domNode. This method from dojox/mobile/ScrollableView
		buildRendering: function(){
			this.inherited(arguments);
			domClass.add(this.domNode, "mblScrollableView");
			this.domNode.style.overflow = "hidden";
			this.domNode.style.top = "0px";
			this.containerNode = domConstruct.create("div",
				{className:"mblScrollableViewContainer"}, this.domNode);
			this.containerNode.style.position = "absolute";
			this.containerNode.style.top = "0px"; // view bar is relative
			if(this.scrollDir === "v"){
				this.containerNode.style.width = "100%";
			}
		},

		// This method from dojox/mobile/ScrollableView
		reparent: function(){
			// summary:
			//		Moves all the children to containerNode.
			var i, idx, len, c;
			for(i = 0, idx = 0, len = this.domNode.childNodes.length; i < len; i++){
				c = this.domNode.childNodes[idx];
				// search for view-specific header or footer
				if(c === this.containerNode){
					idx++;
					continue;
				}
				this.containerNode.appendChild(this.domNode.removeChild(c));
			}
		},

		// This method from dojox/mobile/ScrollableView
		resize: function(){
			// summary:
			//		Calls resize() of each child widget.
			this.inherited(arguments); // scrollable#resize() will be called
			array.forEach(this.getChildren(), function(child){
				if(child.resize){ child.resize(); }
			});
		}
	});

	lang.extend(cls, new Scrollable());
	return cls;
});
PypiClean
/pulumi_azure_native-2.5.1a1693590910.tar.gz/pulumi_azure_native-2.5.1a1693590910/pulumi_azure_native/kusto/v20221229/iot_hub_data_connection.py
import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from ... import _utilities from ._enums import * __all__ = ['IotHubDataConnectionArgs', 'IotHubDataConnection'] @pulumi.input_type class IotHubDataConnectionArgs: def __init__(__self__, *, cluster_name: pulumi.Input[str], consumer_group: pulumi.Input[str], database_name: pulumi.Input[str], iot_hub_resource_id: pulumi.Input[str], kind: pulumi.Input[str], resource_group_name: pulumi.Input[str], shared_access_policy_name: pulumi.Input[str], data_connection_name: Optional[pulumi.Input[str]] = None, data_format: Optional[pulumi.Input[Union[str, 'IotHubDataFormat']]] = None, database_routing: Optional[pulumi.Input[Union[str, 'DatabaseRouting']]] = None, event_system_properties: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, location: Optional[pulumi.Input[str]] = None, mapping_rule_name: Optional[pulumi.Input[str]] = None, retrieval_start_date: Optional[pulumi.Input[str]] = None, table_name: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a IotHubDataConnection resource. :param pulumi.Input[str] cluster_name: The name of the Kusto cluster. :param pulumi.Input[str] consumer_group: The iot hub consumer group. :param pulumi.Input[str] database_name: The name of the database in the Kusto cluster. :param pulumi.Input[str] iot_hub_resource_id: The resource ID of the Iot hub to be used to create a data connection. :param pulumi.Input[str] kind: Kind of the endpoint for the data connection Expected value is 'IotHub'. :param pulumi.Input[str] resource_group_name: The name of the resource group containing the Kusto cluster. :param pulumi.Input[str] shared_access_policy_name: The name of the share access policy :param pulumi.Input[str] data_connection_name: The name of the data connection. :param pulumi.Input[Union[str, 'IotHubDataFormat']] data_format: The data format of the message. Optionally the data format can be added to each message. :param pulumi.Input[Union[str, 'DatabaseRouting']] database_routing: Indication for database routing information from the data connection, by default only database routing information is allowed :param pulumi.Input[Sequence[pulumi.Input[str]]] event_system_properties: System properties of the iot hub :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] mapping_rule_name: The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message. :param pulumi.Input[str] retrieval_start_date: When defined, the data connection retrieves existing Event hub events created since the Retrieval start date. It can only retrieve events retained by the Event hub, based on its retention period. :param pulumi.Input[str] table_name: The table where the data should be ingested. Optionally the table information can be added to each message. 
""" pulumi.set(__self__, "cluster_name", cluster_name) pulumi.set(__self__, "consumer_group", consumer_group) pulumi.set(__self__, "database_name", database_name) pulumi.set(__self__, "iot_hub_resource_id", iot_hub_resource_id) pulumi.set(__self__, "kind", 'IotHub') pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "shared_access_policy_name", shared_access_policy_name) if data_connection_name is not None: pulumi.set(__self__, "data_connection_name", data_connection_name) if data_format is not None: pulumi.set(__self__, "data_format", data_format) if database_routing is None: database_routing = 'Single' if database_routing is not None: pulumi.set(__self__, "database_routing", database_routing) if event_system_properties is not None: pulumi.set(__self__, "event_system_properties", event_system_properties) if location is not None: pulumi.set(__self__, "location", location) if mapping_rule_name is not None: pulumi.set(__self__, "mapping_rule_name", mapping_rule_name) if retrieval_start_date is not None: pulumi.set(__self__, "retrieval_start_date", retrieval_start_date) if table_name is not None: pulumi.set(__self__, "table_name", table_name) @property @pulumi.getter(name="clusterName") def cluster_name(self) -> pulumi.Input[str]: """ The name of the Kusto cluster. """ return pulumi.get(self, "cluster_name") @cluster_name.setter def cluster_name(self, value: pulumi.Input[str]): pulumi.set(self, "cluster_name", value) @property @pulumi.getter(name="consumerGroup") def consumer_group(self) -> pulumi.Input[str]: """ The iot hub consumer group. """ return pulumi.get(self, "consumer_group") @consumer_group.setter def consumer_group(self, value: pulumi.Input[str]): pulumi.set(self, "consumer_group", value) @property @pulumi.getter(name="databaseName") def database_name(self) -> pulumi.Input[str]: """ The name of the database in the Kusto cluster. """ return pulumi.get(self, "database_name") @database_name.setter def database_name(self, value: pulumi.Input[str]): pulumi.set(self, "database_name", value) @property @pulumi.getter(name="iotHubResourceId") def iot_hub_resource_id(self) -> pulumi.Input[str]: """ The resource ID of the Iot hub to be used to create a data connection. """ return pulumi.get(self, "iot_hub_resource_id") @iot_hub_resource_id.setter def iot_hub_resource_id(self, value: pulumi.Input[str]): pulumi.set(self, "iot_hub_resource_id", value) @property @pulumi.getter def kind(self) -> pulumi.Input[str]: """ Kind of the endpoint for the data connection Expected value is 'IotHub'. """ return pulumi.get(self, "kind") @kind.setter def kind(self, value: pulumi.Input[str]): pulumi.set(self, "kind", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group containing the Kusto cluster. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="sharedAccessPolicyName") def shared_access_policy_name(self) -> pulumi.Input[str]: """ The name of the share access policy """ return pulumi.get(self, "shared_access_policy_name") @shared_access_policy_name.setter def shared_access_policy_name(self, value: pulumi.Input[str]): pulumi.set(self, "shared_access_policy_name", value) @property @pulumi.getter(name="dataConnectionName") def data_connection_name(self) -> Optional[pulumi.Input[str]]: """ The name of the data connection. 
""" return pulumi.get(self, "data_connection_name") @data_connection_name.setter def data_connection_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "data_connection_name", value) @property @pulumi.getter(name="dataFormat") def data_format(self) -> Optional[pulumi.Input[Union[str, 'IotHubDataFormat']]]: """ The data format of the message. Optionally the data format can be added to each message. """ return pulumi.get(self, "data_format") @data_format.setter def data_format(self, value: Optional[pulumi.Input[Union[str, 'IotHubDataFormat']]]): pulumi.set(self, "data_format", value) @property @pulumi.getter(name="databaseRouting") def database_routing(self) -> Optional[pulumi.Input[Union[str, 'DatabaseRouting']]]: """ Indication for database routing information from the data connection, by default only database routing information is allowed """ return pulumi.get(self, "database_routing") @database_routing.setter def database_routing(self, value: Optional[pulumi.Input[Union[str, 'DatabaseRouting']]]): pulumi.set(self, "database_routing", value) @property @pulumi.getter(name="eventSystemProperties") def event_system_properties(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ System properties of the iot hub """ return pulumi.get(self, "event_system_properties") @event_system_properties.setter def event_system_properties(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "event_system_properties", value) @property @pulumi.getter def location(self) -> Optional[pulumi.Input[str]]: """ Resource location. """ return pulumi.get(self, "location") @location.setter def location(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "location", value) @property @pulumi.getter(name="mappingRuleName") def mapping_rule_name(self) -> Optional[pulumi.Input[str]]: """ The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message. """ return pulumi.get(self, "mapping_rule_name") @mapping_rule_name.setter def mapping_rule_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "mapping_rule_name", value) @property @pulumi.getter(name="retrievalStartDate") def retrieval_start_date(self) -> Optional[pulumi.Input[str]]: """ When defined, the data connection retrieves existing Event hub events created since the Retrieval start date. It can only retrieve events retained by the Event hub, based on its retention period. """ return pulumi.get(self, "retrieval_start_date") @retrieval_start_date.setter def retrieval_start_date(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "retrieval_start_date", value) @property @pulumi.getter(name="tableName") def table_name(self) -> Optional[pulumi.Input[str]]: """ The table where the data should be ingested. Optionally the table information can be added to each message. 
""" return pulumi.get(self, "table_name") @table_name.setter def table_name(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "table_name", value) class IotHubDataConnection(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, cluster_name: Optional[pulumi.Input[str]] = None, consumer_group: Optional[pulumi.Input[str]] = None, data_connection_name: Optional[pulumi.Input[str]] = None, data_format: Optional[pulumi.Input[Union[str, 'IotHubDataFormat']]] = None, database_name: Optional[pulumi.Input[str]] = None, database_routing: Optional[pulumi.Input[Union[str, 'DatabaseRouting']]] = None, event_system_properties: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, iot_hub_resource_id: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, mapping_rule_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, retrieval_start_date: Optional[pulumi.Input[str]] = None, shared_access_policy_name: Optional[pulumi.Input[str]] = None, table_name: Optional[pulumi.Input[str]] = None, __props__=None): """ Class representing an iot hub data connection. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] cluster_name: The name of the Kusto cluster. :param pulumi.Input[str] consumer_group: The iot hub consumer group. :param pulumi.Input[str] data_connection_name: The name of the data connection. :param pulumi.Input[Union[str, 'IotHubDataFormat']] data_format: The data format of the message. Optionally the data format can be added to each message. :param pulumi.Input[str] database_name: The name of the database in the Kusto cluster. :param pulumi.Input[Union[str, 'DatabaseRouting']] database_routing: Indication for database routing information from the data connection, by default only database routing information is allowed :param pulumi.Input[Sequence[pulumi.Input[str]]] event_system_properties: System properties of the iot hub :param pulumi.Input[str] iot_hub_resource_id: The resource ID of the Iot hub to be used to create a data connection. :param pulumi.Input[str] kind: Kind of the endpoint for the data connection Expected value is 'IotHub'. :param pulumi.Input[str] location: Resource location. :param pulumi.Input[str] mapping_rule_name: The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message. :param pulumi.Input[str] resource_group_name: The name of the resource group containing the Kusto cluster. :param pulumi.Input[str] retrieval_start_date: When defined, the data connection retrieves existing Event hub events created since the Retrieval start date. It can only retrieve events retained by the Event hub, based on its retention period. :param pulumi.Input[str] shared_access_policy_name: The name of the share access policy :param pulumi.Input[str] table_name: The table where the data should be ingested. Optionally the table information can be added to each message. """ ... @overload def __init__(__self__, resource_name: str, args: IotHubDataConnectionArgs, opts: Optional[pulumi.ResourceOptions] = None): """ Class representing an iot hub data connection. :param str resource_name: The name of the resource. :param IotHubDataConnectionArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. 
""" ... def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(IotHubDataConnectionArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, cluster_name: Optional[pulumi.Input[str]] = None, consumer_group: Optional[pulumi.Input[str]] = None, data_connection_name: Optional[pulumi.Input[str]] = None, data_format: Optional[pulumi.Input[Union[str, 'IotHubDataFormat']]] = None, database_name: Optional[pulumi.Input[str]] = None, database_routing: Optional[pulumi.Input[Union[str, 'DatabaseRouting']]] = None, event_system_properties: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, iot_hub_resource_id: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[str]] = None, location: Optional[pulumi.Input[str]] = None, mapping_rule_name: Optional[pulumi.Input[str]] = None, resource_group_name: Optional[pulumi.Input[str]] = None, retrieval_start_date: Optional[pulumi.Input[str]] = None, shared_access_policy_name: Optional[pulumi.Input[str]] = None, table_name: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = IotHubDataConnectionArgs.__new__(IotHubDataConnectionArgs) if cluster_name is None and not opts.urn: raise TypeError("Missing required property 'cluster_name'") __props__.__dict__["cluster_name"] = cluster_name if consumer_group is None and not opts.urn: raise TypeError("Missing required property 'consumer_group'") __props__.__dict__["consumer_group"] = consumer_group __props__.__dict__["data_connection_name"] = data_connection_name __props__.__dict__["data_format"] = data_format if database_name is None and not opts.urn: raise TypeError("Missing required property 'database_name'") __props__.__dict__["database_name"] = database_name if database_routing is None: database_routing = 'Single' __props__.__dict__["database_routing"] = database_routing __props__.__dict__["event_system_properties"] = event_system_properties if iot_hub_resource_id is None and not opts.urn: raise TypeError("Missing required property 'iot_hub_resource_id'") __props__.__dict__["iot_hub_resource_id"] = iot_hub_resource_id if kind is None and not opts.urn: raise TypeError("Missing required property 'kind'") __props__.__dict__["kind"] = 'IotHub' __props__.__dict__["location"] = location __props__.__dict__["mapping_rule_name"] = mapping_rule_name if resource_group_name is None and not opts.urn: raise TypeError("Missing required property 'resource_group_name'") __props__.__dict__["resource_group_name"] = resource_group_name __props__.__dict__["retrieval_start_date"] = retrieval_start_date if shared_access_policy_name is None and not opts.urn: raise TypeError("Missing required property 'shared_access_policy_name'") __props__.__dict__["shared_access_policy_name"] = shared_access_policy_name __props__.__dict__["table_name"] = table_name __props__.__dict__["name"] = None __props__.__dict__["provisioning_state"] = None 
__props__.__dict__["type"] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-native:kusto:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20190121:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20190515:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20190907:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20191109:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20200215:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20200614:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20200918:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20210101:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20210827:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20220201:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20220707:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20221111:IotHubDataConnection"), pulumi.Alias(type_="azure-native:kusto/v20230502:IotHubDataConnection")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(IotHubDataConnection, __self__).__init__( 'azure-native:kusto/v20221229:IotHubDataConnection', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'IotHubDataConnection': """ Get an existing IotHubDataConnection resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = IotHubDataConnectionArgs.__new__(IotHubDataConnectionArgs) __props__.__dict__["consumer_group"] = None __props__.__dict__["data_format"] = None __props__.__dict__["database_routing"] = None __props__.__dict__["event_system_properties"] = None __props__.__dict__["iot_hub_resource_id"] = None __props__.__dict__["kind"] = None __props__.__dict__["location"] = None __props__.__dict__["mapping_rule_name"] = None __props__.__dict__["name"] = None __props__.__dict__["provisioning_state"] = None __props__.__dict__["retrieval_start_date"] = None __props__.__dict__["shared_access_policy_name"] = None __props__.__dict__["table_name"] = None __props__.__dict__["type"] = None return IotHubDataConnection(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter(name="consumerGroup") def consumer_group(self) -> pulumi.Output[str]: """ The iot hub consumer group. """ return pulumi.get(self, "consumer_group") @property @pulumi.getter(name="dataFormat") def data_format(self) -> pulumi.Output[Optional[str]]: """ The data format of the message. Optionally the data format can be added to each message. 
""" return pulumi.get(self, "data_format") @property @pulumi.getter(name="databaseRouting") def database_routing(self) -> pulumi.Output[Optional[str]]: """ Indication for database routing information from the data connection, by default only database routing information is allowed """ return pulumi.get(self, "database_routing") @property @pulumi.getter(name="eventSystemProperties") def event_system_properties(self) -> pulumi.Output[Optional[Sequence[str]]]: """ System properties of the iot hub """ return pulumi.get(self, "event_system_properties") @property @pulumi.getter(name="iotHubResourceId") def iot_hub_resource_id(self) -> pulumi.Output[str]: """ The resource ID of the Iot hub to be used to create a data connection. """ return pulumi.get(self, "iot_hub_resource_id") @property @pulumi.getter def kind(self) -> pulumi.Output[str]: """ Kind of the endpoint for the data connection Expected value is 'IotHub'. """ return pulumi.get(self, "kind") @property @pulumi.getter def location(self) -> pulumi.Output[Optional[str]]: """ Resource location. """ return pulumi.get(self, "location") @property @pulumi.getter(name="mappingRuleName") def mapping_rule_name(self) -> pulumi.Output[Optional[str]]: """ The mapping rule to be used to ingest the data. Optionally the mapping information can be added to each message. """ return pulumi.get(self, "mapping_rule_name") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ The name of the resource """ return pulumi.get(self, "name") @property @pulumi.getter(name="provisioningState") def provisioning_state(self) -> pulumi.Output[str]: """ The provisioned state of the resource. """ return pulumi.get(self, "provisioning_state") @property @pulumi.getter(name="retrievalStartDate") def retrieval_start_date(self) -> pulumi.Output[Optional[str]]: """ When defined, the data connection retrieves existing Event hub events created since the Retrieval start date. It can only retrieve events retained by the Event hub, based on its retention period. """ return pulumi.get(self, "retrieval_start_date") @property @pulumi.getter(name="sharedAccessPolicyName") def shared_access_policy_name(self) -> pulumi.Output[str]: """ The name of the share access policy """ return pulumi.get(self, "shared_access_policy_name") @property @pulumi.getter(name="tableName") def table_name(self) -> pulumi.Output[Optional[str]]: """ The table where the data should be ingested. Optionally the table information can be added to each message. """ return pulumi.get(self, "table_name") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts" """ return pulumi.get(self, "type")
PypiClean
/django-dojo-0.0.1.tar.gz/django-dojo-0.0.1/dojo/static/dojo/dojo/cldr/nls/ja/buddhist.js
define( //begin v1.x content { "eraNames": [ "仏暦" ], "dateFormat-full": "GGGGy年M月d日EEEE", "dateFormat-long": "GGGGy年M月d日", "dateFormat-medium": "Gy/MM/dd", "dateFormat-short": "Gy/MM/dd", "dateFormatItem-d": "d日", "dateFormatItem-Ed": "d日(E)", "dateFormatItem-GGGGyMd": "GGGGy年M月d日", "dateFormatItem-Gy": "Gy年", "dateFormatItem-h": "aK時", "dateFormatItem-H": "H時", "dateFormatItem-hm": "aK:mm", "dateFormatItem-Hm": "H:mm", "dateFormatItem-hms": "aK:mm:ss", "dateFormatItem-Hms": "H:mm:ss", "dateFormatItem-M": "M月", "dateFormatItem-Md": "M/d", "dateFormatItem-MEd": "M/d(E)", "dateFormatItem-MMdd": "MM/dd", "dateFormatItem-MMM": "M月", "dateFormatItem-MMMd": "M月d日", "dateFormatItem-MMMEd": "M月d日(E)", "dateFormatItem-ms": "mm:ss", "dateFormatItem-y": "GGGGy年", "dateFormatItem-yyyy": "GGGGy年", "dateFormatItem-yyyyMd": "Gy/M/d", "dateFormatItem-yyyyMEd": "Gy/M/d(E)", "dateFormatItem-yyyyMM": "Gy/MM", "dateFormatItem-yyyyMMM": "GGGGy年M月", "dateFormatItem-yyyyMMMd": "GGGGy年M月d日", "dateFormatItem-yyyyMMMEd": "GGGGy年M月d日(E)", "dateFormatItem-yyyyQ": "Gy/Q", "days-format-abbr": [ "日", "月", "火", "水", "木", "金", "土" ], "days-format-wide": [ "日曜日", "月曜日", "火曜日", "水曜日", "木曜日", "金曜日", "土曜日" ], "days-standAlone-narrow": [ "日", "月", "火", "水", "木", "金", "土" ], "quarters-format-wide": [ "第1四半期", "第2四半期", "第3四半期", "第4四半期" ], "dayPeriods-format-wide-am": "午前", "dayPeriods-format-wide-pm": "午後", "dateFormatItem-yM": "y/M", "dateFormatItem-yMd": "y/M/d", "dateFormatItem-yMEd": "y/M/d(E)", "dateFormatItem-yMMM": "y年M月", "dateFormatItem-yMMMd": "y年M月d日", "dateFormatItem-yMMMEd": "y年M月d日(E)", "dateFormatItem-yQ": "y/Q", "dateFormatItem-yQQQ": "yQQQ", "timeFormat-full": "H時mm分ss秒 zzzz", "timeFormat-long": "H:mm:ss z", "timeFormat-medium": "H:mm:ss", "timeFormat-short": "H:mm", "months-format-wide": [ "1月", "2月", "3月", "4月", "5月", "6月", "7月", "8月", "9月", "10月", "11月", "12月" ] } //end v1.x content );
PypiClean
/_encoding/_protobuf/converter/_e2e_call.py
__all__ = ["WireToProto", "ProtoToWire"] from iscp._encoding.codegen.e2e_call_pb2 import DownstreamCall as DownstreamCallPB from iscp._encoding.codegen.e2e_call_pb2 import UpstreamCall as UpstreamCallPB from iscp._encoding.codegen.e2e_call_pb2 import UpstreamCallAck as UpstreamCallAckPB from iscp._encoding.codegen.extensions.e2e_call_pb2 import ( DownstreamCallExtensionFields as DownstreamCallExtensionFieldsPB, ) from iscp._encoding.codegen.extensions.e2e_call_pb2 import ( UpstreamCallAckExtensionFields as UpstreamCallAckExtensionFieldsPB, ) from iscp._encoding.codegen.extensions.e2e_call_pb2 import ( UpstreamCallExtensionFields as UpstreamCallExtensionFieldsPB, ) from iscp._message import ( DownstreamCallExtensionFields, UpstreamCall, UpstreamCallAck, UpstreamCallAckExtensionFields, UpstreamCallExtensionFields, ) from iscp._message._e2e import ( DownstreamCall, ) from . import _result_code class WireToProto(object): @classmethod def upstream_call_extension_fields(cls, arg: UpstreamCallExtensionFields) -> UpstreamCallExtensionFieldsPB: return UpstreamCallExtensionFieldsPB() @classmethod def upstream_call_ack_extension_fields(cls, arg: UpstreamCallAckExtensionFields) -> UpstreamCallAckExtensionFieldsPB: return UpstreamCallAckExtensionFieldsPB() @classmethod def downstream_call_extension_fields(cls, arg: DownstreamCallExtensionFields) -> DownstreamCallExtensionFieldsPB: return DownstreamCallExtensionFieldsPB() @classmethod def upstream_call(cls, arg: UpstreamCall) -> UpstreamCallPB: res = UpstreamCallPB() res.call_id = arg.call_id res.request_call_id = arg.request_call_id res.destination_node_id = arg.destination_node_id res.name = arg.name res.type = arg.type res.payload = arg.payload res.extension_fields.CopyFrom(cls.upstream_call_extension_fields(arg.extension_fields)) return res @classmethod def upstream_call_ack(cls, arg: UpstreamCallAck) -> UpstreamCallAckPB: res = UpstreamCallAckPB() res.call_id = arg.call_id res.result_code = _result_code.WireToProto.result_code(arg.result_code) res.result_string = arg.result_string res.extension_fields.CopyFrom(cls.upstream_call_ack_extension_fields(arg.extension_fields)) return res @classmethod def downstream_call(cls, arg: DownstreamCall) -> DownstreamCallPB: res = DownstreamCallPB() res.call_id = arg.call_id res.request_call_id = arg.request_call_id res.source_node_id = arg.source_node_id res.name = arg.name res.type = arg.type res.payload = arg.payload res.extension_fields.CopyFrom(cls.downstream_call_extension_fields(arg.extension_fields)) return res class ProtoToWire(object): @classmethod def upstream_call_extension_fields(cls, _: UpstreamCallExtensionFieldsPB) -> UpstreamCallExtensionFields: return UpstreamCallExtensionFields() @classmethod def upstream_call_ack_extension_fields(cls, _: UpstreamCallAckExtensionFieldsPB) -> UpstreamCallAckExtensionFields: return UpstreamCallAckExtensionFields() @classmethod def downstream_call_extension_fields(cls, _: DownstreamCallExtensionFieldsPB) -> DownstreamCallExtensionFields: return DownstreamCallExtensionFields() @classmethod def upstream_call(cls, arg: UpstreamCallPB) -> UpstreamCall: return UpstreamCall( call_id=arg.call_id, request_call_id=arg.request_call_id, destination_node_id=arg.destination_node_id, name=arg.name, type=arg.type, payload=arg.payload, extension_fields=cls.upstream_call_extension_fields(arg.extension_fields), ) @classmethod def upstream_call_ack(cls, arg: UpstreamCallAckPB) -> UpstreamCallAck: return UpstreamCallAck( call_id=arg.call_id, 
result_code=_result_code.ProtoToWire.result_code(arg.result_code), result_string=arg.result_string, extension_fields=cls.upstream_call_ack_extension_fields(arg.extension_fields), ) @classmethod def downstream_call(cls, arg: DownstreamCallPB) -> DownstreamCall: return DownstreamCall( call_id=arg.call_id, request_call_id=arg.request_call_id, source_node_id=arg.source_node_id, name=arg.name, type=arg.type, payload=arg.payload, extension_fields=cls.downstream_call_extension_fields(arg.extension_fields), )
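# A minimal round-trip sketch using the converters above; the field values are arbitrary
# examples, and the guard keeps the snippet out of normal import paths.
if __name__ == "__main__":
    call = UpstreamCall(
        call_id="call-1",
        request_call_id="req-1",
        destination_node_id="node-b",
        name="greeting",
        type="text/plain",
        payload=b"hello",
        extension_fields=UpstreamCallExtensionFields(),
    )
    pb = WireToProto.upstream_call(call)       # wire object -> protobuf message
    restored = ProtoToWire.upstream_call(pb)   # protobuf message -> wire object
    assert restored.payload == call.payload
    assert restored.destination_node_id == call.destination_node_id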
PypiClean
/siphon-0.9.tar.gz/siphon-0.9/examples/Radar_Server_Level_3.py
from datetime import datetime import matplotlib.pyplot as plt import numpy as np from siphon.cdmr import Dataset from siphon.radarserver import get_radarserver_datasets, RadarServer ########################################### # First, point to the top-level thredds radar server accessor to find what datasets are # available. ds = get_radarserver_datasets('http://thredds.ucar.edu/thredds/') print(list(ds)) ########################################### # Now create an instance of RadarServer to point to the appropriate # radar server access URL. This is pulled from the catalog reference url. url = ds['NEXRAD Level III Radar from IDD'].follow().catalog_url rs = RadarServer(url) ########################################### # Look at the variables available in this dataset print(rs.variables) ########################################### # Create a new query object to help request the data. Using the chaining # methods, ask for data from radar FTG (Denver) for now for the product # N0Q, which is reflectivity data for the lowest tilt. We see that when the query # is represented as a string, it shows the encoded URL. query = rs.query() query.stations('FTG').time(datetime.utcnow()).variables('N0Q') ########################################### # We can use the RadarServer instance to check our query, to make # sure we have required parameters and that we have chosen valid # station(s) and variable(s) rs.validate_query(query) ########################################### # Make the request, which returns an instance of TDSCatalog. This # handles parsing the catalog catalog = rs.get_catalog(query) ########################################### # We can look at the datasets on the catalog to see what data we found by the query. We # find one NIDS file in the return. print(catalog.datasets) ########################################### # We can pull that dataset out of the dictionary and look at the available access URLs. # We see URLs for OPeNDAP, CDMRemote, and HTTPServer (direct download). ds = list(catalog.datasets.values())[0] print(ds.access_urls) ########################################### # We'll use the CDMRemote reader in Siphon and pass it the appropriate access URL. data = Dataset(ds.access_urls['CdmRemote']) ########################################### # The CDMRemote reader provides an interface that is almost identical to the usual python # NetCDF interface. We pull out the variables we need for azimuth and range, as well as # the data itself. rng = data.variables['gate'][:] / 1000. az = data.variables['azimuth'][:] ref = data.variables['BaseReflectivityDR'][:] ########################################### # Then convert the polar coordinates to Cartesian x = rng * np.sin(np.deg2rad(az))[:, None] y = rng * np.cos(np.deg2rad(az))[:, None] ref = np.ma.array(ref, mask=np.isnan(ref)) ########################################### # Finally, we plot them up using matplotlib. fig, ax = plt.subplots(1, 1, figsize=(9, 8)) ax.pcolormesh(x, y, ref) ax.set_aspect('equal', 'datalim') ax.set_xlim(-460, 460) ax.set_ylim(-460, 460)
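###########################################
# When running this script as a standalone program (outside a notebook or gallery build),
# the figure also needs to be displayed or written out explicitly; either line below
# works, and the output filename is just an example.
plt.show()
# fig.savefig('radar_level3_n0q.png', dpi=150)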
PypiClean
/jupyter_enterprise_gateway-3.2.2.tar.gz/jupyter_enterprise_gateway-3.2.2/docs/source/operators/config-file.md
# Configuration file options

Placing configuration options into the configuration file `jupyter_enterprise_gateway_config.py` is recommended because this will enable the use of the [_dynamic configurables_](config-dynamic.md/#dynamic-configurables) functionality.

To generate a template configuration file, run the following:

```bash
jupyter enterprisegateway --generate-config
```

This command will produce a `jupyter_enterprise_gateway_config.py` file, typically located in the invoking user's `$HOME/.jupyter` directory. The file contains Python code, including comments, for each available configuration option. The option itself is also commented out. To enable an option, set its value and uncomment the code.

```{Note}
Some options may appear duplicated. For example, the `remote_hosts` trait appears on both `c.EnterpriseGatewayConfigMixin` and `c.EnterpriseGatewayApp`. This is due to how configurable traits appear in the class hierarchy. Since `EnterpriseGatewayApp` derives from `EnterpriseGatewayConfigMixin` and both are configurable classes, the output contains duplicated values. If both values are set, the value _closest_ to the derived class will be used (in this case, `EnterpriseGatewayApp`).
```

Here's an example entry. Note that its default value, when defined, is also displayed, along with the corresponding environment variable name:

```python
## Bracketed comma-separated list of hosts on which DistributedProcessProxy
#  kernels will be launched e.g., ['host1','host2'].
#  (EG_REMOTE_HOSTS env var - non-bracketed, just comma-separated)
#  Default: ['localhost']
# c.EnterpriseGatewayConfigMixin.remote_hosts = ['localhost']
```
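Once enabled, an entry is simply the same assignment uncommented and set to the desired value. For example, a hypothetical two-host setup (the host names below are placeholders) would look like this:

```python
c.EnterpriseGatewayConfigMixin.remote_hosts = ['node1.example.com', 'node2.example.com']
```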
PypiClean
/autosys-1.5.1.tar.gz/autosys-1.5.1/AutoSys/dev/profile/system/sys.py
""" as_system.py - system utilities and tools for efficient python3 """ # copyright (c) 2019 Michael Treanor # https://www.github.com/skeptycal # https://www.twitter.com/skeptycal SET_DEBUG: bool = False if True: import os import pathlib import sys import time from pathlib import WindowsPath, PosixPath, PurePath, Path from typing import Any, Dict, FrozenSet, List, Sequence, Tuple def _get_env_path(): """ Return system path """ return os.getenv("PATH") def timeit(method): def timed(*args, **kw): s0 = sys.getsizeof(method) t0 = time.time() result = method(*args, **kw) dt = time.time() - t0 s1 = sys.getsizeof(method) print(method.__name__) fn = var_name(kw.get("func")) # print(func_name) # if 'log_time' in kw: # name = kw.get('log_name', method.__name__.upper()) # kw['log_time'][name] = int((dt) * 1000) # else: print(f"{fn:25.25} - {dt*1000:>6.6} \t{'ms':>3.2}") return result return timed class PPath(pathlib.Path): """ Base Path Object """ def __init__(self, pattern: pathlib.PurePath): self.pattern = pattern if SET_DEBUG: print(self.as_uri()) super().__init__() def subs(self, recursive=False): """ Yield an iterator of subdirectories. """ if recursive: func = self.rglob(self.pattern + "/") else: func = self.glob(self.pattern + "/") for _ in func: # if x.is_dir(): yield _ def exists(self): return self.exists() def is_dir(self): return self.is_dir() def next_line(self): try: with self.open() as f: return f.readline() except OSError as e: return e def get_name(self) -> str: return self.parts[0] def str(self): return str(self) def ls(self, args): for x in self.iterdir(): print(x) def _get_builtins() -> Sequence[str]: return sys.builtin_module_names def _get_basename(filename: str) -> str: """ get only basename from full path """ return Path(filename).resolve().parts()[-1] # pathlib version # return os.path.basename(filename) # os.path version def py_path() -> List[str]: """ Return list of current python path elements. """ try: return os.environ["PYTHONPATH"].split(os.pathsep) except KeyError: return [] def py3up() -> bool: """ Return True if 'Python >= 3' else False If you want to detect pre-Python 3 and don't want to import anything, you can (ab)use list comprehension scoping changes """ # https://stackoverflow.com/questions/1093322/how-do-i-check-what-version-of-python-is-running-my-script/35294211 # https://stackoverflow.com/a/52825819/9878098 return (lambda x: [x for x in [False]] and None or x)(True) def pyver() -> str: """ Returns string with python version number in major.minor.micro format. (e.g. 3.7.3 or 2.7.12) """ return ".".join(str(i) for i in __import__("sys").version_info[:3]) def py_shell() -> str: """ Returns string containing current python shell name. (e.g. ipython notebook, pypy, jupyter notebook, ipython, cpython) """ shell: str = "unknown" PY_ENV = os.environ PY_BASE = os.path.basename(PY_ENV["_"]) if "JPY_PARENT_PID" in PY_ENV: shell = "ipython notebook" elif "pypy" in PY_ENV: shell = "pypy" elif "jupyter-notebook" in PY_BASE: shell = "jupyter notebook" elif "ipython" in PY_BASE: shell = "ipython" else: try: import platform shell = platform.python_implementation() except ImportError: pass # print("pyshell() output: ", shell.strip()) return shell.strip() def _pprint_globals(): """ Pretty Print all global variables. Designed for debugging purposes. 
""" print() print("Globals: ") print("*" * 40) width = max(len(i) for i in globals()) print(f"(key width: {width})") for s in globals(): print("{:<15.15} : {:<64.64}".format(s, str(globals().get(s)))) print() def iterable(obj): return hasattr(obj, "__iter__") or hasattr(obj, "__getitem__") # def njoin(l: List[str]) -> str: # return '\n'.join(l) @timeit # testing efficiency of version 1 def time_iter_check1(n: int = 10000, var: object = None): if not var: var = "*" * 2000 for _ in range(n): result = iterable(var) return result @timeit # testing efficiency of version 2 def time_iter_check2(n: int = 10000, var: object = None): if not var: var = "*" * 2000 for _ in range(n): try: result = iter(var) result = True except TypeError: result = False def time_iter_check(n: int = 10000, var: object = None): time_iter_check1(n, var) time_iter_check2(n, var) def njoin(s, delimeter: str = ",") -> str: """ Return a string of lines (with LF) from a delimited string or iterable object. """ if isinstance(s, str): return "\n".join(s.split(delimeter)) try: _ = iter(s) return "\n".join(_) except TypeError: pass def _pprint_code_tests(tests: List[Exception]): pass def _get_functions(): pass def _pprint_dict_table( data: Dict[Any, Any] = globals(), title: str = "", borders: str = "text" ): """ Pretty Print dictionary in table format. parameters: data: Dict[Any, Any] - dictionary containing table data title: str - Displayed Title of Data Table (Title Case Used) borders: str - border format (single, double, graphic, text*) """ if title == "": title = data.__repr__() print(data.__repr__()) print(data.__str__()) maxwidth: int = 75 # 80 - 1 for each outer border and 3 for center padding width: int = max(len(i) for i in data) + 1 width2: int = maxwidth - width if SET_DEBUG: print() print("Title: ", title.title()) print("*" * len(title)) print("width: ", width) print("width2: ", width2) fmt = ( "{:<" + str(width) + "." + str(width) + "} : " + "{:<" + str(width2) + "." + str(width2) + "}" ) print("*" * 80) for s in globals(): print(fmt.format(s, str(globals().get(s)))) print("*" * 80) def _execute_test_code(tests: List[str]) -> List[Exception]: result: List[Exception] = [] for test in tests: try: # run test code print("=> ", test) exec(test) except Exception as e: result.append(e) return result def _run_tests(tests: List[str] = []) -> str: if tests == []: tests: List[str] = [ "_pprint_globals()", 'print("basename: ", _get_basename(__file__))', "print(1/0)", ] results = _execute_test_code(tests) for e in results: print("e.__class__.__name__", e.__class__.__name__) # log.exception(e) print("e: ", e) print("e.args: ", e.args) print("type(e): ", type(e)) if __name__ == "__main__": # log = logging.getLogger() print(_run_tests()) print() print("... Tests Complete.") _pprint_dict_table print(njoin(_get_builtins())) print() print(njoin(_get_env_path(), ":")) print(iterable("this")) print(iterable(5343)) print(iterable([1, 2, 3, 4])) print(iterable(["a", "b"])) s1 = set(x for x in "this") print(iterable(s1)) print(iterable(pathlib.Path())) print(time_iter_check(10000000))
PypiClean
/bk_plugin_runtime-2.0.2-py3-none-any.whl/bk_plugin_runtime/static/remote/v3/assets/js/jquery-migrate-1.2.1.min.js
jQuery.migrateMute===void 0&&(jQuery.migrateMute=!0),function(e,t,n){function r(n){var r=t.console;i[n]||(i[n]=!0,e.migrateWarnings.push(n),r&&r.warn&&!e.migrateMute&&(r.warn("JQMIGRATE: "+n),e.migrateTrace&&r.trace&&r.trace()))}function a(t,a,i,o){if(Object.defineProperty)try{return Object.defineProperty(t,a,{configurable:!0,enumerable:!0,get:function(){return r(o),i},set:function(e){r(o),i=e}}),n}catch(s){}e._definePropertyBroken=!0,t[a]=i}var i={};e.migrateWarnings=[],!e.migrateMute&&t.console&&t.console.log&&t.console.log("JQMIGRATE: Logging is active"),e.migrateTrace===n&&(e.migrateTrace=!0),e.migrateReset=function(){i={},e.migrateWarnings.length=0},"BackCompat"===document.compatMode&&r("jQuery is not compatible with Quirks Mode");var o=e("<input/>",{size:1}).attr("size")&&e.attrFn,s=e.attr,u=e.attrHooks.value&&e.attrHooks.value.get||function(){return null},c=e.attrHooks.value&&e.attrHooks.value.set||function(){return n},l=/^(?:input|button)$/i,d=/^[238]$/,p=/^(?:autofocus|autoplay|async|checked|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped|selected)$/i,f=/^(?:checked|selected)$/i;a(e,"attrFn",o||{},"jQuery.attrFn is deprecated"),e.attr=function(t,a,i,u){var c=a.toLowerCase(),g=t&&t.nodeType;return u&&(4>s.length&&r("jQuery.fn.attr( props, pass ) is deprecated"),t&&!d.test(g)&&(o?a in o:e.isFunction(e.fn[a])))?e(t)[a](i):("type"===a&&i!==n&&l.test(t.nodeName)&&t.parentNode&&r("Can't change the 'type' of an input or button in IE 6/7/8"),!e.attrHooks[c]&&p.test(c)&&(e.attrHooks[c]={get:function(t,r){var a,i=e.prop(t,r);return i===!0||"boolean"!=typeof i&&(a=t.getAttributeNode(r))&&a.nodeValue!==!1?r.toLowerCase():n},set:function(t,n,r){var a;return n===!1?e.removeAttr(t,r):(a=e.propFix[r]||r,a in t&&(t[a]=!0),t.setAttribute(r,r.toLowerCase())),r}},f.test(c)&&r("jQuery.fn.attr('"+c+"') may use property instead of attribute")),s.call(e,t,a,i))},e.attrHooks.value={get:function(e,t){var n=(e.nodeName||"").toLowerCase();return"button"===n?u.apply(this,arguments):("input"!==n&&"option"!==n&&r("jQuery.fn.attr('value') no longer gets properties"),t in e?e.value:null)},set:function(e,t){var a=(e.nodeName||"").toLowerCase();return"button"===a?c.apply(this,arguments):("input"!==a&&"option"!==a&&r("jQuery.fn.attr('value', val) no longer sets properties"),e.value=t,n)}};var g,h,v=e.fn.init,m=e.parseJSON,y=/^([^<]*)(<[\w\W]+>)([^>]*)$/;e.fn.init=function(t,n,a){var i;return t&&"string"==typeof t&&!e.isPlainObject(n)&&(i=y.exec(e.trim(t)))&&i[0]&&("<"!==t.charAt(0)&&r("$(html) HTML strings must start with '<' character"),i[3]&&r("$(html) HTML text after last tag is ignored"),"#"===i[0].charAt(0)&&(r("HTML string cannot start with a '#' character"),e.error("JQMIGRATE: Invalid selector string (XSS)")),n&&n.context&&(n=n.context),e.parseHTML)?v.call(this,e.parseHTML(i[2],n,!0),n,a):v.apply(this,arguments)},e.fn.init.prototype=e.fn,e.parseJSON=function(e){return e||null===e?m.apply(this,arguments):(r("jQuery.parseJSON requires a valid JSON string"),null)},e.uaMatch=function(e){e=e.toLowerCase();var t=/(chrome)[ \/]([\w.]+)/.exec(e)||/(webkit)[ \/]([\w.]+)/.exec(e)||/(opera)(?:.*version|)[ \/]([\w.]+)/.exec(e)||/(msie) ([\w.]+)/.exec(e)||0>e.indexOf("compatible")&&/(mozilla)(?:.*? 
rv:([\w.]+)|)/.exec(e)||[];return{browser:t[1]||"",version:t[2]||"0"}},e.browser||(g=e.uaMatch(navigator.userAgent),h={},g.browser&&(h[g.browser]=!0,h.version=g.version),h.chrome?h.webkit=!0:h.webkit&&(h.safari=!0),e.browser=h),a(e,"browser",e.browser,"jQuery.browser is deprecated"),e.sub=function(){function t(e,n){return new t.fn.init(e,n)}e.extend(!0,t,this),t.superclass=this,t.fn=t.prototype=this(),t.fn.constructor=t,t.sub=this.sub,t.fn.init=function(r,a){return a&&a instanceof e&&!(a instanceof t)&&(a=t(a)),e.fn.init.call(this,r,a,n)},t.fn.init.prototype=t.fn;var n=t(document);return r("jQuery.sub() is deprecated"),t},e.ajaxSetup({converters:{"text json":e.parseJSON}});var b=e.fn.data;e.fn.data=function(t){var a,i,o=this[0];return!o||"events"!==t||1!==arguments.length||(a=e.data(o,t),i=e._data(o,t),a!==n&&a!==i||i===n)?b.apply(this,arguments):(r("Use of jQuery.fn.data('events') is deprecated"),i)};var j=/\/(java|ecma)script/i,w=e.fn.andSelf||e.fn.addBack;e.fn.andSelf=function(){return r("jQuery.fn.andSelf() replaced by jQuery.fn.addBack()"),w.apply(this,arguments)},e.clean||(e.clean=function(t,a,i,o){a=a||document,a=!a.nodeType&&a[0]||a,a=a.ownerDocument||a,r("jQuery.clean() is deprecated");var s,u,c,l,d=[];if(e.merge(d,e.buildFragment(t,a).childNodes),i)for(c=function(e){return!e.type||j.test(e.type)?o?o.push(e.parentNode?e.parentNode.removeChild(e):e):i.appendChild(e):n},s=0;null!=(u=d[s]);s++)e.nodeName(u,"script")&&c(u)||(i.appendChild(u),u.getElementsByTagName!==n&&(l=e.grep(e.merge([],u.getElementsByTagName("script")),c),d.splice.apply(d,[s+1,0].concat(l)),s+=l.length));return d});var Q=e.event.add,x=e.event.remove,k=e.event.trigger,N=e.fn.toggle,T=e.fn.live,M=e.fn.die,S="ajaxStart|ajaxStop|ajaxSend|ajaxComplete|ajaxError|ajaxSuccess",C=RegExp("\\b(?:"+S+")\\b"),H=/(?:^|\s)hover(\.\S+|)\b/,A=function(t){return"string"!=typeof t||e.event.special.hover?t:(H.test(t)&&r("'hover' pseudo-event is deprecated, use 'mouseenter mouseleave'"),t&&t.replace(H,"mouseenter$1 mouseleave$1"))};e.event.props&&"attrChange"!==e.event.props[0]&&e.event.props.unshift("attrChange","attrName","relatedNode","srcElement"),e.event.dispatch&&a(e.event,"handle",e.event.dispatch,"jQuery.event.handle is undocumented and deprecated"),e.event.add=function(e,t,n,a,i){e!==document&&C.test(t)&&r("AJAX events should be attached to document: "+t),Q.call(this,e,A(t||""),n,a,i)},e.event.remove=function(e,t,n,r,a){x.call(this,e,A(t)||"",n,r,a)},e.fn.error=function(){var e=Array.prototype.slice.call(arguments,0);return r("jQuery.fn.error() is deprecated"),e.splice(0,0,"error"),arguments.length?this.bind.apply(this,e):(this.triggerHandler.apply(this,e),this)},e.fn.toggle=function(t,n){if(!e.isFunction(t)||!e.isFunction(n))return N.apply(this,arguments);r("jQuery.fn.toggle(handler, handler...) 
is deprecated");var a=arguments,i=t.guid||e.guid++,o=0,s=function(n){var r=(e._data(this,"lastToggle"+t.guid)||0)%o;return e._data(this,"lastToggle"+t.guid,r+1),n.preventDefault(),a[r].apply(this,arguments)||!1};for(s.guid=i;a.length>o;)a[o++].guid=i;return this.click(s)},e.fn.live=function(t,n,a){return r("jQuery.fn.live() is deprecated"),T?T.apply(this,arguments):(e(this.context).on(t,this.selector,n,a),this)},e.fn.die=function(t,n){return r("jQuery.fn.die() is deprecated"),M?M.apply(this,arguments):(e(this.context).off(t,this.selector||"**",n),this)},e.event.trigger=function(e,t,n,a){return n||C.test(e)||r("Global events are undocumented and deprecated"),k.call(this,e,t,n||document,a)},e.each(S.split("|"),function(t,n){e.event.special[n]={setup:function(){var t=this;return t!==document&&(e.event.add(document,n+"."+e.guid,function(){e.event.trigger(n,null,t,!0)}),e._data(this,n,e.guid++)),!1},teardown:function(){return this!==document&&e.event.remove(document,n+"."+e._data(this,n)),!1}}})}(jQuery,window);
PypiClean
/infoblox-netmri-3.8.0.0.tar.gz/infoblox-netmri-3.8.0.0/infoblox_netmri/api/broker/v3_5_0/system_backup_broker.py
from ..broker import Broker class SystemBackupBroker(Broker): controller = "system_backup" def create_archive(self, **kwargs): """Creates backup of current system database. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include_date: Defines whether include date in file name or not. :type include_date: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` False :param init: Defines whether to initially create the archive. :type init: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` False :param async_ind: When false, backup creating will be run synchronously, and the API call will block until it is complete. When true, backup creating id will be returned to use for subsequent calls :type async_ind: Boolean **Outputs** """ return self.api_request(self._get_method_fullname("create_archive"), kwargs) def create_archive_status(self, **kwargs): """Backup database status. **Inputs** **Outputs** """ return self.api_request(self._get_method_fullname("create_archive_status"), kwargs) def ssh_authentication_test(self, **kwargs): """Test SSH authentication. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param host: Host name or IP address of the system where archive will be copied. :type host: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param port: Number of open SSH port on the system where archive will be delivered. Default value is 22 (used if no port number specified). :type port: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param user_name: Name of the existing user on the system where archive will be copied. :type user_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` :param password: User password on the system where archive will be copied. :type password: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` False :param use_ssh_keys: Specifies whether to use SSH keys. :type use_ssh_keys: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param directory: Remote host directory where archive will be stored. :type directory: String **Outputs** """ return self.api_request(self._get_method_fullname("ssh_authentication_test"), kwargs) def move_archive_to_remote_host(self, **kwargs): """Moves database archive to remote host via SSH. Note that archive will be removed from NetMRI. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param host: Host name or IP address of the system where archive will be copied. Required if init is set to true. :type host: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param port: Number of open SSH port on the system where archive will be delivered. Default value is 22 (used if no port number specified). :type port: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param user_name: Name of the existing user on the system where archive will be copied. Required if init is set to true. 
:type user_name: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` :param password: User password on the system where archive will be copied. Required if init is set to true. :type password: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` False :param use_ssh_keys: Specifies whether to use SSH keys. :type use_ssh_keys: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param directory: Specifies directory where archive will be stored on remote host. Default is user home directory. :type directory: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param init: Set to true to initialize moving archive :type init: Boolean **Outputs** """ return self.api_request(self._get_method_fullname("move_archive_to_remote_host"), kwargs) def download_archive(self, **kwargs): """Download database archive. **Inputs** **Outputs** """ return self.api_mixed_request(self._get_method_fullname("download_archive"), kwargs) def download_archive_md5_sum(self, **kwargs): """Download database archive md5 checksum. **Inputs** **Outputs** """ return self.api_mixed_request(self._get_method_fullname("download_archive_md5_sum"), kwargs) def remove_archive(self, **kwargs): """Database archive is stored in temporary directory on NetMRI. It's removed on schedule but you may choose to force remove it. **Inputs** **Outputs** """ return self.api_request(self._get_method_fullname("remove_archive"), kwargs) def schedule_archiving(self, **kwargs): """Schedule NetMRI database archiving. Archive will be stored on up to 2 systems supporting SCP. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param enable: Specifies whether scheduled archiving should be enabled or not. If parameter is not specified then scheduled archiving is set disabled. :type enable: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param host_1: Host name or IP address of the system where archive will be copied. :type host_1: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param port_1: Number of open SSH port on the system where archive will be delivered. Default value is 22 (used if no port number specified). :type port_1: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param user_name_1: Name of the existing user on the system where archive will be copied. :type user_name_1: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` :param password_1: User password on the system where archive will be copied. :type password_1: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` False :param use_ssh_keys_1: Specifies whether to use SSH keys. :type use_ssh_keys_1: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param directory_1: Specifies directory where archive will be stored on remote host. Default is user home directory. :type directory_1: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param host_2: Host name or IP address of the system where archive will be copied. 
:type host_2: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param port_2: Number of open SSH port on the system where archive will be delivered. Default value is 22 (used if no port number specified). :type port_2: Integer | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param user_name_2: Name of the existing user on the system where archive will be copied. :type user_name_2: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` :param password_2: User password on the system where archive will be copied. :type password_2: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` False :param use_ssh_keys_2: Specifies whether to use SSH keys. :type use_ssh_keys_2: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param directory_2: Specifies directory where archive will be stored on remote host. Default is user home directory. :type directory_2: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include_date_1: Specifies whether to put current date into archive file name or not while saving on remote host 1. :type include_date_1: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param include_date_2: Specifies whether to put current date into archive file name or not while saving on remote host 2. :type include_date_2: Boolean | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param schedule_cron: Cron schedule string. :type schedule_cron: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param schedule_json: NetMRI internal parameters generated by 'cronscheduler' form transmitted in json format for setting cron schedule string. :type schedule_json: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` False :param force_save: If true, changes will be saved even if credentials test failed :type force_save: Boolean **Outputs** """ return self.api_request(self._get_method_fullname("schedule_archiving"), kwargs) def upload_archive(self, **kwargs): """Upload database archive to NetMRI. **Inputs** | ``api version min:`` None | ``api version max:`` None | ``required:`` True | ``default:`` None :param archive: NetMRI database archive file. :type archive: String | ``api version min:`` None | ``api version max:`` None | ``required:`` False | ``default:`` None :param md5: NetMRI database archive MD5 checksum file. :type md5: String **Outputs** """ return self.api_request(self._get_method_fullname("upload_archive"), kwargs) def restore_database(self, **kwargs): """Restores database from the archive which should have been uploaded to NetMRI. **Inputs** **Outputs** """ return self.api_request(self._get_method_fullname("restore_database"), kwargs)
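# A minimal usage sketch; the NetMRI host, credentials and the get_broker() lookup are
# assumptions about the surrounding client library rather than part of this module, and
# the guard keeps the snippet out of normal import paths.
if __name__ == "__main__":
    from infoblox_netmri.client import InfobloxNetMRI

    client = InfobloxNetMRI(
        host="netmri.example.com",
        username="admin",
        password="secret",
        api_version="3.5.0",
    )
    backup = client.get_broker("SystemBackup")
    backup.create_archive(include_date=True, init=True, async_ind=True)
    print(backup.create_archive_status())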
PypiClean
/adafruit-circuitpython-max9744-1.2.15.tar.gz/adafruit-circuitpython-max9744-1.2.15/adafruit_max9744.py
try: # Imports only used for typing. # First check if the the typing module exists, to avoid loading # other typing-only modules when running under circuitpython. import typing # pylint: disable=unused-import import busio except ImportError: pass from micropython import const __version__ = "1.2.15" __repo__ = "https://github.com/adafruit/Adafruit_CircuitPython_MAX9744.git" # Internal constants: _MAX9744_DEFAULT_ADDRESS = const(0b01001011) _MAX9744_COMMAND_VOLUME = const(0b00000000) _MAX9744_COMMAND_FILTERLESS = const(0b01000000) _MAX9744_COMMAND_CLASSIC_PWM = const(0b01000001) _MAX9744_COMMAND_VOLUME_UP = const(0b11000100) _MAX9744_COMMAND_VOLUME_DOWN = const(0b11000101) class MAX9744: """MAX9744 20 watt class D amplifier. :param i2c: The I2C bus for the device. :param address: (Optional) The address of the device if it has been overridden from the default with the AD1, AD2 pins. """ # Global buffer for writing data. This saves memory use and prevents # heap fragmentation. However this is not thread-safe or re-entrant by # design! _BUFFER = bytearray(1) def __init__(self, i2c: busio.I2C, *, address: int = _MAX9744_DEFAULT_ADDRESS): # This device doesn't use registers and instead just accepts a single # command string over I2C. As a result we don't use bus device or # other abstractions and just talk raw I2C protocol. self._i2c = i2c self._address = address def _write(self, val: int) -> None: # Perform a write to update the amplifier state. try: # Make sure bus is locked before write. while not self._i2c.try_lock(): pass # Build bytes to send to device with updated value. self._BUFFER[0] = val & 0xFF self._i2c.writeto(self._address, self._BUFFER) finally: # Ensure bus is always unlocked. self._i2c.unlock() def _set_volume(self, volume: int) -> None: # Set the volume to the specified level (0-63). assert 0 <= volume <= 63 self._write(_MAX9744_COMMAND_VOLUME | (volume & 0x3F)) # pylint: disable=line-too-long volume = property( None, _set_volume, None, "Set the volume of the amplifier. Specify a value from 0-63 where 0 is muted/off and 63 is maximum volume.", ) # pylint: enable=line-too-long def volume_up(self) -> None: """Increase the volume by one level.""" self._write(_MAX9744_COMMAND_VOLUME_UP) def volume_down(self) -> None: """Decrease the volume by one level.""" self._write(_MAX9744_COMMAND_VOLUME_DOWN)
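# A short usage sketch, assuming a CircuitPython board (or CPython with Blinka) exposing
# I2C on the default SCL/SDA pins; the volume values are arbitrary examples.
if __name__ == "__main__":
    import board
    import busio

    i2c = busio.I2C(board.SCL, board.SDA)
    amp = MAX9744(i2c)
    amp.volume = 31   # mid-scale volume; valid range is 0 (mute) to 63 (maximum)
    amp.volume_up()   # nudge the level one step higher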
PypiClean
/mxnet_cu112-2.0.0b1-py3-none-manylinux2014_x86_64.whl/mxnet/_ctypes/symbol.py
# coding: utf-8 # pylint: disable=invalid-name, protected-access, too-many-arguments, global-statement """Symbolic configuration API.""" import ctypes from ..base import _LIB from ..base import c_str_array, c_handle_array, c_str, mx_uint from ..base import SymbolHandle from ..base import check_call # The symbol class to be used (Cython or Ctypes) _symbol_cls = None _np_symbol_cls = None class SymbolBase(object): """Symbol is symbolic graph.""" __slots__ = ["handle", "_alive"] # pylint: disable=no-member def __init__(self, handle): """Initialize the function with handle Parameters ---------- handle : SymbolHandle the handle to the underlying C++ Symbol """ self.handle = handle self._alive = True def __del__(self): check_call(_LIB.NNSymbolFree(self.handle)) self._alive = False def _compose(self, *args, **kwargs): """Compose symbol on inputs. This call mutates the current symbol. Parameters ---------- args: provide positional arguments kwargs: provide keyword arguments Returns ------- the resulting symbol """ name = kwargs.pop('name', None) if name: name = c_str(name) if len(args) != 0 and len(kwargs) != 0: raise TypeError('compose only accept input Symbols \ either as positional or keyword arguments, not both') for arg in args: if not isinstance(arg, SymbolBase): raise TypeError('Compose expect `Symbol` as arguments') for val in kwargs.values(): if not isinstance(val, SymbolBase): raise TypeError('Compose expect `Symbol` as arguments') num_args = len(args) + len(kwargs) if len(kwargs) != 0: keys = c_str_array(kwargs.keys()) args = c_handle_array(kwargs.values()) else: keys = None args = c_handle_array(kwargs.values()) check_call(_LIB.NNSymbolCompose( self.handle, name, num_args, keys, args)) def _set_attr(self, **kwargs): """Set the attribute of the symbol. Parameters ---------- **kwargs The attributes to set """ keys = c_str_array(kwargs.keys()) vals = c_str_array([str(s) for s in kwargs.values()]) num_args = mx_uint(len(kwargs)) check_call(_LIB.MXSymbolSetAttrs( self.handle, num_args, keys, vals)) def _set_handle(self, handle): """Set handle.""" self.handle = handle def __reduce__(self): return (_symbol_cls, (None,), self.__getstate__()) def _set_symbol_class(cls): """Set the symbolic class to be cls""" global _symbol_cls _symbol_cls = cls def _set_np_symbol_class(cls): """Set the numpy-compatible symbolic class to be cls""" global _np_symbol_cls _np_symbol_cls = cls def _symbol_creator(handle, args, kwargs, keys, vals, name, is_np_op, output_is_list=False): sym_handle = SymbolHandle() check_call(_LIB.MXSymbolCreateAtomicSymbol( ctypes.c_void_p(handle), mx_uint(len(keys)), c_str_array(keys), c_str_array([str(v) for v in vals]), ctypes.byref(sym_handle))) if args and kwargs: raise TypeError( 'Operators with variable length input can only accept input' 'Symbols either as positional or keyword arguments, not both') create_symbol_fn = _np_symbol_cls if is_np_op else _symbol_cls s = create_symbol_fn(sym_handle) if args: s._compose(*args, name=name) elif kwargs: s._compose(name=name, **kwargs) else: s._compose(name=name) if is_np_op: # Determine whether the symbol is a list. if s.num_outputs > 1: return list(s) elif output_is_list: return [s] return s
PypiClean
/superpower_gui-0.3.0.tar.gz/superpower_gui-0.3.0/superpower_gui/views/mod.py
__all__ = ['VariableLanding', 'LEVELS', 'INIT_LEVEL', 'CONT', 'CATG', 'CONT_CATG_OPTS', 'FLOAT_TEXT_STEP', 'DEFAULT_SD_VALUE', 'PROP_MIN', 'PROP_MAX', 'LEVEL_MIN', 'LEVEL_MAX', 'Interactions', 'Equation', 'ModerationLandingView', 'DEPENDENT_VARIABLE', 'PREDICTORS', 'POWER_COVARIATES', 'POWER_CUSTOM_EFFECTS', 'STD', 'UNSTD', 'TOTAL_COV_OPTS', 'PRED_MIN', 'COV_MIN', 'PRED_MAX', 'COV_MAX', 'BIG_BETA', 'LITTLE_BETA', 'CONT', 'CATG'] # Cell import itertools import ipywidgets as ipyw import functools import traitlets # Cell from .mod_plot import ModerationPlot from ..widget_containers import VBox, Box, HBox, GridBox from ..widgets import HTML, Text, Dropdown, FloatText, BoundedFloatText, Label, HTMLMath, Checkbox, Output, Button # Cell LEVELS = 'Levels:' INIT_LEVEL = 'Lvl' CONT = 'Continuous' CATG = 'Categorical' CONT_CATG_OPTS = [CONT, CATG] FLOAT_TEXT_STEP = 0.01 DEFAULT_SD_VALUE = 1 PROP_MIN = 0 PROP_MAX = 1 LEVEL_MIN = 2 LEVEL_MAX = 6 class VariableLanding: def __init__(self, observe_method, model=None): self.observe_method = observe_method self.name = Text(layout=ipyw.Layout(width='100px'), continuous_update=False) self.type = Dropdown(layout=ipyw.Layout(width='100px'), options=CONT_CATG_OPTS) #self.sd = FloatText(value=self.DEFAULT_SD_VALUE, step=self.FLOAT_TEXT_STEP) #self.slope = FloatText(step=self.FLOAT_TEXT_STEP) #self.cur_value = None # Levels - if categorical self.count = Dropdown(description=LEVELS, layout=ipyw.Layout(width='97px'), style={'description_width': '50px'}, options=[i for i in range(LEVEL_MIN, LEVEL_MAX+1)]) self.levels = [] # Names for each level #self.props = [] # Proportions for each level #self.slopes = [] # Slopes for each level #self.cur_values = [] # Current values for each level self.levels = [Text(layout=ipyw.Layout(width='100px')) for i in range(LEVEL_MAX)] for i, level in enumerate(self.levels): level.observe(functools.partial(self.observeLevels, i), 'value') ''' for i in range(LEVEL_MAX): self.levels.append(Text(layout=ipyw.Layout(width='100px'), value=model.name + INIT_LEVEL + str(i+1))) self.props.append(BoundedFloatText(min=self.PROP_MIN, max=self.PROP_MAX, step=self.FLOAT_TEXT_STEP)) self.slopes.append(FloatText(step=self.FLOAT_TEXT_STEP)) self.cur_values.append(None) traitlets.link((self.model, 'name'), (self.name, 'value')) traitlets.link((self.model, 'count'), (self.count, 'value')) traitlets.link((self.model, 'type'), (self.type, 'value')) self.count.observe(observe_method, 'value') self.name.observe(observe_method, 'value') self.type.observe(observe_method, 'value') ''' self.count.observe(self.observeCount, 'value') self.name.observe(self.observeName, 'value') self.type.observe(self.observeType, 'value') if model: self.handshake(model) def handshake(self, model): self.model = model self.setLanding() def setLanding(self): self.name.value = self.model.name self.type.value = self.model.type self.count.value = self.model.count for i in range(len(self.levels)): self.levels[i].value = self.model.name + INIT_LEVEL + str(i+1) def observeCount(self, change): self.model.count = change['new'] self.observe_method(change) def observeName(self, change): self.model.name = change['new'] self.observe_method(change) def observeType(self, change): self.model.type = change['new'] self.observe_method(change) def observeLevels(self, i, change): self.model.levels[i] = change['new'] # Cell class Interactions: """Mange list of interactions for calculation and user selection (include/exclude)""" FLOAT_TEXT_STEP = 0.01 def __init__(self, label, grid, callback, model): 
self.model = model self.label = label # widget, title of ckbox area self.grid = grid # ckbox output widget self.callback = callback # UI refresh method to be called after ckbox change self.ckboxes = {} # Dict of ckboxes indexed by term ("*X1*X2") def reset(self): self.ckboxes = {} self.model.reset() def add(self, term, var_list): desc = self.model.add(term, var_list) ckbox = Checkbox(value=True, indent=False, description=desc, layout=ipyw.Layout(width='max-content', margin='0 0 0 0')) ckbox.observe(functools.partial(self.on_checkbox, term), 'value') self.ckboxes[term] = ckbox def render(self, show_ckboxes): if self.ckboxes and show_ckboxes: widgets = [] for ckbox in self.ckboxes.values(): widgets.append(ckbox) self.grid.children = widgets self.label.show() else: self.grid.children = [] self.label.hide() def on_checkbox(self, term, change): """Higher order terms depend on the inclusion of lower order terms)""" changed = change['owner'] order_level = changed.description.count('*') for ckbox in self.ckboxes.values(): ckbox.unobserve(None, 'value') # Temporarily disable ckbox's callback if changed.value and ckbox.description.count('*') < order_level: ckbox.value = True # Force inclusion of lower order term self.model.included[term] = True elif not changed.value and ckbox.description.count('*') > order_level: ckbox.value = False # Force exclusion of higher order term self.model.included[term] = False ckbox.observe(self.on_checkbox, 'value') # Re-enable ckbox's callback self.callback(ckbox_activity=True) # Cell class Equation: """Display equation""" EMPTY = u'' SPACE = u'\u2004' EQUAL = u'=' PLUS = u'+' EPSILON = u'𝜀' def __init__(self,left,right,max=106): # max=112, 150 self.left = left # Left side output widget self.right = right # Right side output widget self.max = max # Maximum line length self.line = None # Output buffer, string self.slope_index = None # Next slope subscript, integer self.betaStr = None # Beta character, string self.sub = str.maketrans("Bb0123456789", u"𝛽𝑏₀₁₂₃₄₅₆₇₈₉") self.ital = str.maketrans("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz", u"𝐴𝐵𝐶𝐷𝐸𝐹𝐺𝐻𝐼𝐽𝐾𝐿𝑀𝑁𝑂𝑃𝑄𝑅𝑆𝑇𝑈𝑉𝑊𝑋𝑌𝑍𝑎𝑏𝑐𝑑𝑒𝑓𝑔𝘩𝑖𝑗𝑘𝑙𝑚𝑛𝑜𝑝𝑞𝑟𝘴𝑡𝑢𝑣𝑤𝑥𝑦𝑧") def set_left(self, dep_var): """Left side uses LaTeX to enable hat stretch over variable""" self.left.children = (HTMLMath(value='$\hat{' + dep_var + '} = $'),) def start_right(self, big_beta): """Right side uses Unicode characters for speed""" self.betaStr= 'B' if big_beta else 'b' # will be translated to small beta self.slope_index = 0 # for building equation self.right.clear_output() self.line = self.next_slope() def next_slope(self): slope = self.betaStr+ str(self.slope_index) self.slope_index += 1 return slope.translate(self.sub) def add(self,term): additional = self.SPACE + self.PLUS + self.SPACE + self.next_slope() + term.replace('*','').translate(self.ital) if len(self.line + additional) > self.max: self.flush_right() self.line += additional # buffer output def flush_right(self):# ,force=False): """Flush output buffer, start new line""" if not self.line == self.EMPTY: if len(self.line) > 0 and self.line[0] == self.SPACE: self.line = self.line[1:] with self.right: print(self.line) self.line = self.EMPTY def end_right(self): self.flush_right() self.line = self.SPACE + self.PLUS + self.SPACE + self.EPSILON self.flush_right() # Cell #INIT_DEPVAR = 'Y' #INIT_PRED = 'X' #INIT_COV = 'Cov' DEPENDENT_VARIABLE = 'Dependent variable:' PREDICTORS = 'Predictors:' POWER_COVARIATES = 'Power covariates?' 
POWER_CUSTOM_EFFECTS = 'Power custom effects:' STD = 'Standardized' UNSTD = 'Unstandardized' TOTAL_COV_OPTS = [('No covariates' , 0),('One covariat' , 1), ('Two covariates' , 2),('Three covariates' , 3), ('Four covariates' , 4),('Five covariates' , 5), ('Six covariates' , 6),('Seven covariates' , 7), ('Eight covariates' , 8),('Nine covariates' , 9), ('Ten covariates' ,10),('Eleven covariates' ,11), ('Twelve covariates' ,12),('Thirteen covariates' ,13), ('Fourteeen covariates',14),('Fifteen covariates' ,15), ('Sixteen covariates' ,16),('Seventeen covariates',17), ('Eighteen covariates' ,18),('Nineteen covariates' ,19), ('Twenty covariates' ,20)] PRED_MIN = 2 COV_MIN = 0 PRED_MAX = 5 COV_MAX = 20 BIG_BETA = u'𝛽' LITTLE_BETA = u'𝑏' CONT = 'Continuous' CATG = 'Categorical' class ModerationLandingView(Box): """Set up and run UI for moderation landing view""" def __init__(self, **kwargs): super().__init__(**kwargs) labelStyle = {'description_width': '135px', 'text-align': 'left'} # 120px # Create one off widgets self.eq_left = Box() # TODO Fixed eqation height (current) or allow height to vary with size of eq? self.eq_right = Output(layout=ipyw.Layout(width='100%', max_height='200px', height='150px', overflow_y='auto', margin='0 0 0 5px')) self.standard = Dropdown(layout=ipyw.Layout(width='max-content', margin='0 0 20px 0'), options=[STD, UNSTD]) self.dep_var = Text(description=DEPENDENT_VARIABLE, style=labelStyle, #value=self.INIT_DEPVAR, layout=ipyw.Layout(width='max-content'), continuous_update=False) # 240px self.num_pred = Dropdown(description=PREDICTORS, layout=ipyw.Layout(width='max-content'), style=labelStyle, options=[i for i in range(2, PRED_MAX + 1)], value = None) self.total_cov = Dropdown(layout=ipyw.Layout(width='max-content'),options=TOTAL_COV_OPTS, value = None) self.pow_cov = Dropdown(description=POWER_COVARIATES, layout=ipyw.Layout(width='max-content' ),#visibility = 'hidden'), style=labelStyle, options=[0], value = None) self.eq = Equation(self.eq_left,self.eq_right) # Mgr for dynamic output of eqation # Create display widgets and storage for interactions self.pow_cust_eff_label = Label(layout=ipyw.Layout(margin='10px 0 0 0')) self.custom_inter = GridBox(layout=ipyw.Layout(grid_template_columns="repeat(4, 25%)")) # Create output areas for variables self.pred_out = Box() self.cov_out = Box() # Lay out widgets self.layout = ipyw.Layout(width='100%') eq_box = HBox([self.eq_left,self.eq_right], layout=ipyw.Layout(width='85%', margin='20px 0 20px 50px', padding='10px 10px 10px 10px', border='solid 1px LightGrey')) left_box = VBox([self.dep_var, self.num_pred, self.pred_out], layout=ipyw.Layout(width='50%')) right_box = VBox([self.standard, self.total_cov, self.pow_cov, self.cov_out], layout=ipyw.Layout(width='50%')) self.children = (VBox([eq_box,HBox([left_box, right_box]), self.pow_cust_eff_label, self.custom_inter], layout = ipyw.Layout(width='100%')),)
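# Cell
# An illustrative sketch only: it assumes a Jupyter/ipywidgets environment and that the
# wrapped Box/Output containers behave like their ipywidgets counterparts. Equation
# builds the displayed right-hand side incrementally, one slope term per call to add().
if __name__ == "__main__":
    eq = Equation(Box(), Output())
    eq.set_left('Y')
    eq.start_right(big_beta=True)
    for term in ('*X1', '*X2', '*X1*X2'):
        eq.add(term)
    eq.end_right()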
PypiClean
/neuron_morphology-1.1.7-py3-none-any.whl/neuron_morphology/feature_extractor/feature_extraction_run.py
from typing import ( AbstractSet, Set, Collection, Optional, Dict, Type, FrozenSet, List) import logging import warnings from neuron_morphology.feature_extractor.data import Data from neuron_morphology.feature_extractor.marked_feature import MarkedFeature from neuron_morphology.feature_extractor.mark import Mark class FeatureExtractionRun: def __init__(self, data): """ Represents a single run of feature extraction on a single dataset. Parameters ---------- data : the dataset from which to extract features """ self.data: Data = data self.selected_marks: Set[Type[Mark]] = set() self.selected_features: List[MarkedFeature] = [] self.results: Optional[Dict] = None def select_marks( self, marks: Collection[Type[Mark]], required_marks: AbstractSet[Type[Mark]] = frozenset() ): """ Choose marks for this run by validating a set of candidates against the data. Parameters ---------- marks : candidate marks to be validated required_marks : if provided, raise an exception if any of these marks do not validate successfully Returns ------- self : This FeatureExtractionRun, with selected_marks updated """ for mark in marks: if mark.validate(self.data): self.selected_marks.add(mark) else: logging.info(f"skipping mark (validation failed): {mark.__class__.__name__}") missing_required = required_marks - self.selected_marks if missing_required: raise ValueError(f"required marks: {missing_required} failed validation!") logging.info(f"selected marks: {self.select_marks}") return self def select_features( self, features: Collection[MarkedFeature], only_marks: Optional[AbstractSet[Type[Mark]]] = None, ): """ Choose features to calculated for this run on the basis of selected marks. Parameters ---------- features : Candidates features for selection only_marks : if provided, reject features not marked with marks in this set Returns ------- self : This FeatureExtractionRun, with selected_features updated """ if only_marks is None: only_marks = set() for feature in features: extra_marks = feature.marks - self.selected_marks if extra_marks: logging.info( f"skipping feature: {feature.name}. " f"Found extra marks: {[mark.__name__ for mark in extra_marks]}") elif only_marks - feature.marks: logging.info(f"skipping feature: {feature.name} (no marks from {only_marks})") else: self.selected_features.append(feature) logging.info(f"selected features: {[feature.name for feature in self.selected_features]}") return self def extract(self): """ For each selected feature, carry out calculation on this run's dataset. Returns ------- self : This FeatureExtractionRun, with results updated """ self.results = {} for feature in self.selected_features: try: self.results[feature.name] = feature(self.data) except: logging.warning(f"feature extraction failed for {feature.name}") raise return self def serialize(self): """ Return a dictionary describing this run """ return { "results": self.results, "selected_marks": [mark.__name__ for mark in self.selected_marks], "selected_features": [ feature.name for feature in self.selected_features] }
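# A minimal pipeline sketch, expressed as a helper whose arguments (a Data object, the
# candidate marks, and the candidate marked features) are assumed to be supplied by the
# caller; it simply mirrors the select -> extract -> serialize flow documented above.
def _example_pipeline(data, candidate_marks, candidate_features):
    run = (
        FeatureExtractionRun(data)
        .select_marks(candidate_marks)
        .select_features(candidate_features)
        .extract()
    )
    return run.serialize()["results"]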
PypiClean
/pulumiverse_harbor-3.10.0.tar.gz/pulumiverse_harbor-3.10.0/pulumiverse_harbor/retention_policy.py
import copy import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from . import _utilities from . import outputs from ._inputs import * __all__ = ['RetentionPolicyArgs', 'RetentionPolicy'] @pulumi.input_type class RetentionPolicyArgs: def __init__(__self__, *, rules: pulumi.Input[Sequence[pulumi.Input['RetentionPolicyRuleArgs']]], scope: pulumi.Input[str], schedule: Optional[pulumi.Input[str]] = None): """ The set of arguments for constructing a RetentionPolicy resource. :param pulumi.Input[Sequence[pulumi.Input['RetentionPolicyRuleArgs']]] rules: Al collection of rule blocks as documented below. :param pulumi.Input[str] scope: The project id of which you would like to apply this policy. :param pulumi.Input[str] schedule: The schedule of when you would like the policy to run. This can be `Hourly`, `Daily`, `Weekly` or can be a custom cron string. """ pulumi.set(__self__, "rules", rules) pulumi.set(__self__, "scope", scope) if schedule is not None: pulumi.set(__self__, "schedule", schedule) @property @pulumi.getter def rules(self) -> pulumi.Input[Sequence[pulumi.Input['RetentionPolicyRuleArgs']]]: """ Al collection of rule blocks as documented below. """ return pulumi.get(self, "rules") @rules.setter def rules(self, value: pulumi.Input[Sequence[pulumi.Input['RetentionPolicyRuleArgs']]]): pulumi.set(self, "rules", value) @property @pulumi.getter def scope(self) -> pulumi.Input[str]: """ The project id of which you would like to apply this policy. """ return pulumi.get(self, "scope") @scope.setter def scope(self, value: pulumi.Input[str]): pulumi.set(self, "scope", value) @property @pulumi.getter def schedule(self) -> Optional[pulumi.Input[str]]: """ The schedule of when you would like the policy to run. This can be `Hourly`, `Daily`, `Weekly` or can be a custom cron string. """ return pulumi.get(self, "schedule") @schedule.setter def schedule(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "schedule", value) @pulumi.input_type class _RetentionPolicyState: def __init__(__self__, *, rules: Optional[pulumi.Input[Sequence[pulumi.Input['RetentionPolicyRuleArgs']]]] = None, schedule: Optional[pulumi.Input[str]] = None, scope: Optional[pulumi.Input[str]] = None): """ Input properties used for looking up and filtering RetentionPolicy resources. :param pulumi.Input[Sequence[pulumi.Input['RetentionPolicyRuleArgs']]] rules: Al collection of rule blocks as documented below. :param pulumi.Input[str] schedule: The schedule of when you would like the policy to run. This can be `Hourly`, `Daily`, `Weekly` or can be a custom cron string. :param pulumi.Input[str] scope: The project id of which you would like to apply this policy. """ if rules is not None: pulumi.set(__self__, "rules", rules) if schedule is not None: pulumi.set(__self__, "schedule", schedule) if scope is not None: pulumi.set(__self__, "scope", scope) @property @pulumi.getter def rules(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['RetentionPolicyRuleArgs']]]]: """ Al collection of rule blocks as documented below. """ return pulumi.get(self, "rules") @rules.setter def rules(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['RetentionPolicyRuleArgs']]]]): pulumi.set(self, "rules", value) @property @pulumi.getter def schedule(self) -> Optional[pulumi.Input[str]]: """ The schedule of when you would like the policy to run. This can be `Hourly`, `Daily`, `Weekly` or can be a custom cron string. 
""" return pulumi.get(self, "schedule") @schedule.setter def schedule(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "schedule", value) @property @pulumi.getter def scope(self) -> Optional[pulumi.Input[str]]: """ The project id of which you would like to apply this policy. """ return pulumi.get(self, "scope") @scope.setter def scope(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "scope", value) class RetentionPolicy(pulumi.CustomResource): @overload def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RetentionPolicyRuleArgs']]]]] = None, schedule: Optional[pulumi.Input[str]] = None, scope: Optional[pulumi.Input[str]] = None, __props__=None): """ ## Example Usage ## Import Harbor retention policy can be imported using the `retention_policy id` eg, `<break><break>```sh<break> $ pulumi import harbor:index/retentionPolicy:RetentionPolicy main /retentions/10 <break>```<break><break>` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RetentionPolicyRuleArgs']]]] rules: Al collection of rule blocks as documented below. :param pulumi.Input[str] schedule: The schedule of when you would like the policy to run. This can be `Hourly`, `Daily`, `Weekly` or can be a custom cron string. :param pulumi.Input[str] scope: The project id of which you would like to apply this policy. """ ... @overload def __init__(__self__, resource_name: str, args: RetentionPolicyArgs, opts: Optional[pulumi.ResourceOptions] = None): """ ## Example Usage ## Import Harbor retention policy can be imported using the `retention_policy id` eg, `<break><break>```sh<break> $ pulumi import harbor:index/retentionPolicy:RetentionPolicy main /retentions/10 <break>```<break><break>` :param str resource_name: The name of the resource. :param RetentionPolicyArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """ ... 
def __init__(__self__, resource_name: str, *args, **kwargs): resource_args, opts = _utilities.get_resource_args_opts(RetentionPolicyArgs, pulumi.ResourceOptions, *args, **kwargs) if resource_args is not None: __self__._internal_init(resource_name, opts, **resource_args.__dict__) else: __self__._internal_init(resource_name, *args, **kwargs) def _internal_init(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RetentionPolicyRuleArgs']]]]] = None, schedule: Optional[pulumi.Input[str]] = None, scope: Optional[pulumi.Input[str]] = None, __props__=None): opts = pulumi.ResourceOptions.merge(_utilities.get_resource_opts_defaults(), opts) if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = RetentionPolicyArgs.__new__(RetentionPolicyArgs) if rules is None and not opts.urn: raise TypeError("Missing required property 'rules'") __props__.__dict__["rules"] = rules __props__.__dict__["schedule"] = schedule if scope is None and not opts.urn: raise TypeError("Missing required property 'scope'") __props__.__dict__["scope"] = scope super(RetentionPolicy, __self__).__init__( 'harbor:index/retentionPolicy:RetentionPolicy', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None, rules: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RetentionPolicyRuleArgs']]]]] = None, schedule: Optional[pulumi.Input[str]] = None, scope: Optional[pulumi.Input[str]] = None) -> 'RetentionPolicy': """ Get an existing RetentionPolicy resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['RetentionPolicyRuleArgs']]]] rules: Al collection of rule blocks as documented below. :param pulumi.Input[str] schedule: The schedule of when you would like the policy to run. This can be `Hourly`, `Daily`, `Weekly` or can be a custom cron string. :param pulumi.Input[str] scope: The project id of which you would like to apply this policy. """ opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = _RetentionPolicyState.__new__(_RetentionPolicyState) __props__.__dict__["rules"] = rules __props__.__dict__["schedule"] = schedule __props__.__dict__["scope"] = scope return RetentionPolicy(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def rules(self) -> pulumi.Output[Sequence['outputs.RetentionPolicyRule']]: """ Al collection of rule blocks as documented below. """ return pulumi.get(self, "rules") @property @pulumi.getter def schedule(self) -> pulumi.Output[Optional[str]]: """ The schedule of when you would like the policy to run. This can be `Hourly`, `Daily`, `Weekly` or can be a custom cron string. """ return pulumi.get(self, "schedule") @property @pulumi.getter def scope(self) -> pulumi.Output[str]: """ The project id of which you would like to apply this policy. """ return pulumi.get(self, "scope")
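# --- Hedged usage sketch (not part of the original module) ---
# Declaring this resource from a Pulumi program. The `rules`, `scope` and
# `schedule` argument names come from RetentionPolicyArgs above; the field
# names inside RetentionPolicyRuleArgs are NOT visible in this file and are
# assumptions based on the upstream Harbor provider schema, as is the
# top-level package re-export -- verify against the provider documentation.
import pulumi
import pulumiverse_harbor as harbor   # assumed top-level re-export

policy = harbor.RetentionPolicy(
    "main",
    scope="/projects/1",              # id of the project the policy applies to
    schedule="Daily",                 # Hourly/Daily/Weekly or a cron string
    rules=[
        harbor.RetentionPolicyRuleArgs(
            # assumed rule fields:
            n_days_since_last_push=30,
            repo_matching="**",
            tag_matching="**",
        ),
    ],
)

pulumi.export("retention_policy_id", policy.id)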
PypiClean
/KyberPC-0.0.2.tar.gz/KyberPC-0.0.2/Kyber_Polar_Codes/Kyber.py
from sage.all import parent, ZZ, vector, PolynomialRing, GF from sage.all import log, ceil, randint, set_random_seed, random_vector, matrix, floor import numpy as np import random # Kyber parameters n = 256 q = 3329 k = 3 eta1 = 2 eta2 = 2 du = 10 dv = 4 Fq = GF(q) Rq = PolynomialRing(Fq, "x") R, x = PolynomialRing(ZZ, "x").objgen() fx = R([1]+[0]*(n-1)+[1]) def B(eta): return sum(np.random.randint(0,2,eta) - np.random.randint(0,2,eta)) def string_to_bits(s): bits_message = [] for letter in s: for i in range(6, -1, -1): bits_message.append(ord(letter)>>i & 1) return bits_message def bits_to_string(b): s = [] if len(b)==0: return "" sub = b[:7] i=0 while not (sub == [0]*7) and i < len(b)//7: s.append(chr(int(''.join(str(item) for item in sub), 2))) i+=1 sub = b[7*i:7*i+7] return "".join(s) def map_q_to_01(polynomial): new_coeffs = [(coeff + q//4) % q for coeff in polynomial] return [int(new_coeff)*(2/q) - 0.5 for new_coeff in new_coeffs ] def add_noise_v(v, P): noisy_v = [] for vv in v: noise = R([2**(int(random.uniform(0,3))) if random.uniform(0,1) < P else 0 for _ in range(0, n)]) noisy_v.append(vv + noise) return noisy_v def add_noise_u(u, P): noisy_u = [] for uu in u: noise = vector(R, k, [R([ 2**(int(random.uniform(0,9))) if random.uniform(0,1) < P else 0 for _ in range(0, n)]) for _ in range(k)]) noisy_u.append(uu + noise) return noisy_u def compress(poly, d): return R([round(int(coeff) * (2**d)/(q-1)) for coeff in poly]) def compress_u(polys, d): for i in range(k): polys[i] = compress(polys[i], d) return polys def decompress(poly_list, d): return [R([round(int(coeff) * (q-1) /(2**d)) for coeff in poly]) for poly in poly_list] def decompress_u(poly_list, d): for i in range(len(poly_list)): for j in range(k): poly_list[i][j] = decompress([poly_list[i][j]], d)[0] return poly_list def decompress_list(lst, d): return [round(int(coeff) * (q-1) /(2**d)) for coeff in lst] def generate_keys(): A = matrix(Rq, k, k, [Rq.random_element(degree=n-1) for _ in range(k*k)]) e = vector(R, k, [R([(B(eta1)) for _ in range(n)]) for _ in range(k)]) s = vector(R, k, [R([(B(eta1)) for _ in range(n)]) for _ in range(k)]) t = ( A*s + e) % fx return (A, t), s # (pk), sk def encrypt(message, pk): A, t = pk if type(message) == str: message = string_to_bits(message) message = list(message) message = [message[i:i+256] for i in range(0, len(message), 256)] u, v = [], [] r = vector(R, k, [R([(B(eta1)) for _ in range(n)]) for _ in range(k)]) e_1 = vector(R, k, [R([(B(eta2)) for _ in range(n)]) for _ in range(k)]) e_2 = R([(B(eta2)) for _ in range(n)]) for submessage in message: v.append(compress( (r*t + e_2 + R(decompress_list(submessage, 1))) % fx, dv)) u.append(compress_u((r*A + e_1) % fx, du)) return u, v def decrypt(u, v, sk, for_pc = False): message = [] v = decompress(v, dv) u = decompress_u(u, du) for i in range(0, len(v)): decrypted_message = (v[i] - sk*u[i]) % fx if for_pc: message.append(map_q_to_01(decrypted_message)) else: message.append([int(coef > q//4 and coef < 3*q//4) for coef in decrypted_message]) if for_pc: return np.array([bit for submessage in message for bit in submessage]) else: all_messages = [bit for submessage in message for bit in submessage] return "".join([bits_to_string(bits) for bits in [all_messages[i:i+7] for i in range(0, len(all_messages), 7)] ])
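# --- Hedged usage sketch (not part of the original module) ---
# Round-trips a short message through the toy Kyber implementation above,
# using only the functions defined in this file. Requires a SageMath
# environment, since the module imports from sage.all.
pk, sk = generate_keys()                 # pk == (A, t), sk == s
u, v = encrypt("hello world", pk)
recovered = decrypt(u, v, sk)
print(recovered)                         # "hello world" (with overwhelming probability)

# With for_pc=True, decrypt() instead returns a numpy array of soft values,
# intended as input for the polar-code decoder in this package.
soft_values = decrypt(u, v, sk, for_pc=True)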
PypiClean
/cubicweb-narval-4.2.3.tar.gz/cubicweb-narval-4.2.3/logformat.py
__docformat__ = "restructuredtext en" _ = unicode import logging import re from logilab.mtconverter import xml_escape from cubicweb.utils import make_uid REVERSE_SEVERITIES = { logging.DEBUG: _('DEBUG'), logging.INFO: _('INFO'), logging.WARNING: _('WARNING'), logging.ERROR: _('ERROR'), logging.FATAL: _('FATAL') } # Properly formatted log are expected to use the following form: # # {severity(int)}\t{filename}\t{line number}\t{msg text with potential new line}<br/> # # Messages that did not follow this convention are called "baldy formatted" and # are displayed only at a debug level. However, we split the log stream on # '<br/>' marker contained in "properly formated" log. As we can expect a # message which didn't start properly (with the severity\tfilename\tlineno # header) to not finish properly, it's likely that the bad ouput contained in # the log stream does *not* end with a `<br\>` and that the `<br\>` used for # splitting belongs to properly formated message. We can the try to recover any # properly formatted messages embedded withing a wrongly formatted block. # # GOOD_MSG_STRICT_RE match the start of a perflectly formatted message. Any part # of the stream matching it is considered a message without further processing. # GOOD_MSG_START_RE match the start of propertly formatted message anywhere in # block of text. (but we assume any propertly formatted message if preceded by a # newline). We search for a match of GOOD_MSG_START_RE in the badly formatted # block. If any match is found we split the block in two. GOOD_MSG_STRICT_RE = re.compile(r'^[0-9]+\t.*\t.*\t') GOOD_MSG_START_RE = re.compile(r'^[0-9]+\t.*\t.*\t', re.MULTILINE) def log_to_html(req, domid, data, w, defaultlevel='Info', severities=REVERSE_SEVERITIES): """format logs data to an html table log are encoded in the following format for each record: encodedmsg = u'%s\t%s\t%s\t%s<br/>' % (severity, path, line, xml_escape(msg)) """ req.add_css(('cubes.narval.css', 'cubicweb.tablesorter.css')) _selector(req, domid, w, defaultlevel, severities) _table_header(req, domid, w) _main_table(req, domid, data, w, defaultlevel, severities) def _selector(req, domid, w, defaultlevel, severities): req.add_js(('cubes.narval.js', 'jquery.tablesorter.js')) if defaultlevel != 'Debug': req.add_onload('$("select.log_filter").val("%s").change();' % req.form.get('log_level', defaultlevel)) w(u'<form>') w(u'<label>%s</label>' % req._(u'Message Threshold')) w(u'<select class="log_filter" onchange="filter_log(\'%s\', this.options[this.selectedIndex].value)">' % domid) for level in [level.capitalize() for key, level in sorted(severities.items())]: w('<option value="%s">%s</option>' % (level, req._(level))) w(u'</select>') w(u'</form>') def _table_header(req, domid, w): w(u'<table class="listing table table-condensed" id="%s">' % domid) w(u'<tr><th>%s</th><th>%s</th><th>%s</th><th>%s</th></tr>' % ( req._('severity'), req._('path or command'), req._('line'), req._('message'))) def _main_table(req, domid, data, w, defaultlevel, severities): all_msg = [] #try to cure invalid msg: for msg in data.split('<br/>'): msg = msg.strip() if GOOD_MSG_STRICT_RE.search(msg) is not None: all_msg.append(msg) else: match = GOOD_MSG_START_RE.search(msg) if match is not None: # We found some real message inside, let's save it pos = match.start() junk = xml_escape(msg[:pos]) all_msg.append(junk) all_msg.append(msg[pos:]) else: # hopeless junk all_msg.append(xml_escape(msg)) for msg_idx, record in enumerate(all_msg): record = record.strip() if not record: continue try: 
srecord = record.split('\t', 3) if len(srecord) < 4: # some fields are missing, let's fill with '' srec = srecord srecord = [''] * 4 srecord[:len(srec)] = srec severity, path, line, msg = srecord severityname = severities[int(severity)] except (KeyError, ValueError): req.warning('badly formated log %s' % record) path = line = u'' severity = logging.DEBUG severityname = severities[int(severity)] msg = record log_msg_id = 'log_msg_%i' % msg_idx w(u'<tr class="log%s" id="%s">' % (severityname.capitalize(), log_msg_id)) w(u'<td class="logSeverity" cubicweb:sortvalue="%s">' % severity) data = { 'severity': req._(severities[int(severity)]), 'title': _('permalink to this message'), 'msg_id': log_msg_id, } w(u'''<a class="internallink" href="javascript:;" title="%(title)s" ''' u'''onclick="document.location.hash='%(msg_id)s';">&#182;</a>''' u'''&#160;%(severity)s''' % data) w(u'</td>') w(u'<td class="logPath">%s</td>' % (path or u'&#160;')) w(u'<td class="logLine">%s</td>' % (line or u'&#160;')) w(u'<td class="logMsg">') SNIP_OVER = 7 lines = msg.splitlines() if len(lines) <= SNIP_OVER: w(u'<div class="rawtext">%s</div>' % msg) else: # The make_uid argument have not specific meaning here. div_snip_id = make_uid(u'log_snip_') div_full_id = make_uid(u'log_full_') divs_id = (div_snip_id, div_full_id) snip = u'\n'.join((lines[0], lines[1], u' ...', u' %i more lines [double click to expand]' % (len(lines)-4), u' ...', lines[-2], lines[-1])) divs = ( (div_snip_id, snip, u'expand', "class='collapsed'"), (div_full_id, u'<pre>' + msg + u'</pre>', u'collapse', "class='hidden'") ) for div_id, content, button, h_class in divs: text = _(button) js = u"toggleVisibility('%s'); toggleVisibility('%s');" % divs_id w(u'<div id="%s" %s>' % (div_id, h_class)) w(u'<div class="raw_test" ondblclick="javascript: %s" ' 'title="%s" style="display: block;">' % (js, text)) w(u'%s' % content) w(u'</div>') w(u'</div>') w(u'</td>') w(u'</tr>\n') w(u'</table>')
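# --- Hedged usage sketch (not part of the original module) ---
# Building a log record in the format that log_to_html() documents above:
# severity \t path \t line \t xml-escaped message, terminated by "<br/>".
# The record below parses as a "properly formatted" message (it matches
# GOOD_MSG_STRICT_RE); anything else is downgraded to DEBUG by _main_table().
import logging
from logilab.mtconverter import xml_escape

def format_record(severity, path, line, msg):
    return u'%s\t%s\t%s\t%s<br/>' % (severity, path, line, xml_escape(msg))

data = format_record(logging.WARNING, u'checker.py', 42, u'disk <almost> full')
# Concatenate such records and hand them to log_to_html(req, domid, data, w)
# from a CubicWeb view, where `req` is the request and `w` the write callable.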
PypiClean
/compare_my_stocks-1.0.5-py3-none-any.whl/compare_my_stocks/common/common.py
import logging import dataclasses from collections import namedtuple from datetime import datetime,date from functools import wraps import time import numpy as np import pytz import psutil from common.simpleexceptioncontext import simple_exception_handling, SimpleExceptionContext, print_formatted_traceback def timeit(func): @wraps(func) def timeit_wrapper(*args, **kwargs): start_time = time.perf_counter() result = func(*args, **kwargs) end_time = time.perf_counter() total_time = end_time - start_time logging.debug(f'Function {func.__name__} Took {total_time:.4f} seconds') return result return timeit_wrapper def checkIfProcessRunning(processName): ''' Check if there is any running process that contains the given name processName. ''' #Iterate over the all the running process for proc in psutil.process_iter(): try: # Check if process name contains the given name string. if processName.lower() in proc.name().lower(): return True except (psutil.NoSuchProcess, psutil.AccessDenied, psutil.ZombieProcess): pass return False def localize_it (x): if x is None: return None return (pytz.UTC.localize(x, True) if not x.tzinfo else x) def unlocalize_it(date): d=localize_it(date) return d.replace(tzinfo=None) from django.core.serializers.json import DjangoJSONEncoder def to_yaml(enum_class): def to_yaml_inner(cls,enum_value): return cls.represent_scalar(f'{enum_class.__name__}',u'{.name}'.format(enum_value),style=' ' ) def from_yaml(loader, node): #x=loader.construct_scalar(node) return enum_class[node.value] def add_stuff(yaml): # add the representer to the yaml serializer #yaml.add_representer(enum_class, to_yaml_inner) yaml.add_constructor(f'{enum_class.__name__}',from_yaml,Loader=yaml.CLoader) # add the to_yaml method to the enum class enum_class.to_yaml = to_yaml_inner #lambda self: yaml.dump({self.name: self}, default_flow_style=False) enum_class.from_yaml = from_yaml enum_class.add_stuff = add_stuff return enum_class def index_of(val, in_list): try: return in_list.index(val) except ValueError: return -1 from enum import Flag, auto, Enum @to_yaml class CombineStrategy(int,Flag): PREFERSTOCKS=auto() PREFERIB=auto() class Types(int,Flag): ABS = 0 PRICE=auto() VALUE=auto() PROFIT = auto() TOTPROFIT = auto() #Though I wanted it to be X | Y RELPROFIT = auto() PERATIO = auto() PRICESELLS=auto() THEORTICAL_PROFIT=auto() RELTOMAX=auto() RELTOMIN=auto() RELTOSTART=auto() RELTOEND=auto() PRECENTAGE=auto() DIFF=auto() COMPARE=auto() PRECDIFF = PRECENTAGE | DIFF @to_yaml class VerifySave(int,Enum): DONT=0, Ask=1, ForceSave=2 @to_yaml class UseCache(int,Enum): DONT=0 USEIFAVALIABLE=1 FORCEUSE=2 class LimitType(int,Flag): RANGE=0 MIN=auto() MAX=auto() class UniteType(int,Flag): NONE=0 SUM=auto() AVG=auto() MIN=auto() MAX=auto() ADDTOTAL=auto() ADDPROT=auto() ADDTOTALS= ADDPROT | ADDTOTAL #did this trick to keep ADDTOTAL @to_yaml class InputSourceType(Flag): Cache=0 IB=auto() InvestPy=auto() def neverthrow(f,*args,default=None,**kwargs): try: return f(*args,**kwargs) except: return default def addAttrs(attr_names): def deco(cls): for attr_name in attr_names: def getAttr(self, attr_name=attr_name): return getattr(self, "_" + attr_name) def setAttr(self, value, attr_name=attr_name): setattr(self, "_" + attr_name, value) prop = property(getAttr, setAttr) setattr(cls, attr_name, prop) #setattr(cls, "_" + attr_name, None) # Default value for that attribute return cls return deco EPS=0.0001 def get_first_where_all_are_good(arr,remove_zeros=False,last=0): try: arr[np.abs(arr) < EPS] = 0 except: logging.debug(('err EPS')) 
pass ind = np.isnan(arr) if remove_zeros: ind = np.bitwise_or(ind ,arr ==0) getnan = np.any(ind, axis=0) ls = list(getnan) if last: ls.reverse() ind=index_of(False,ls) return ( ind * (-1 if last else 1)) if ind!=-1 else -1 class NoDataException(Exception): pass class MySignal: def __init__(self,typ): from PySide6 import QtCore from PySide6.QtCore import Signal Emitter = type('Emitter', (QtCore.QObject,), {'signal': Signal(typ)}) self.emitter = Emitter() def emit(self,*args,**kw): self.emitter.signal.emit(*args,**kw) def connect(self, slot): self.emitter.signal.connect(slot) class SafeSignal: def __init__(self,signal,cond): self._signal=signal self._cond=cond def emit(self,*args,**kw): if self._cond(): self._signal.emit(*args,**kw) def connect(self, slot): self._signal.connect(slot) Serialized=namedtuple('Serialized', ['origdata','beforedata','afterdata','act','parameters','Groups']) dictfilt = lambda x, y: dict([(i, x[i]) for i in x if i in set(y)]) dictnfilt = lambda x, y: dict([(i, x[i]) for i in x if not(i in set(y))]) lmap= lambda x,y: list(map(x,y)) smap = lambda x,y: set(map(x,y)) # def ifnn(t,v,els=None): # if t is not None: # return v # else: # return els #Write a function that gets two dates and converts the first to timezone aware if the other one is timezone aware def tzawareness(d1,d2): if d2.tzinfo is not None: return localize_it(d1) else: return unlocalize_it(d1) def ifnotnan(t, v, els=lambda x: None): if t is not None: return v(t) else: return els(t) def selfifnn(t,el): if t is not None: return t else: return el def ifnn(t, v, els=lambda: None): if t is not None: return v() else: return els() import dateutil.parser def conv_date(dat,premissive=True): if premissive and dat is None: dat=datetime.now() if type(dat)==str: return dateutil.parser.parse(dat) elif type(dat)==datetime: return dat elif 'Timestamp' in str(type(dat)): return dat.to_pydatetime() elif type(dat)==date: return datetime.fromordinal(dat.toordinal()) else: raise AttributeError("no attr") class EnhancedJSONEncoder(DjangoJSONEncoder): def default(self, o): if dataclasses.is_dataclass(o): return dataclasses.asdict(o) try: return super().default(o) except TypeError: if hasattr(o,"dic"): #SimpleSymbol return o.dic logging.debug((f"{o,type(o)} is not json.. ")) return o.__dict__ def need_add_process(config): return config.Input.INPUTSOURCE== InputSourceType.IB def log_conv(*tup): return '\t'.join([str(x) for x in tup ]) # class TmpHook: # EXCEPTIONHOOK=MySignal(Exception) # MyHook=None # @classmethod # def GetExceptionHook(cls): # if cls.MyHook is None: # cls.MyHook=TmpHook() # return cls.MyHook.EXCEPTIONHOOK @to_yaml class TransactionSourceType(Flag): Cache=0 IB=auto() MyStock=auto() Both= IB | MyStock
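# --- Hedged usage sketch (not part of the original module) ---
# Exercising a few of the small helpers defined above in isolation.
d = {'a': 1, 'b': 2, 'c': 3}
print(dictfilt(d, ['a', 'c']))        # {'a': 1, 'c': 3} - keep only these keys
print(dictnfilt(d, ['a', 'c']))       # {'b': 2}         - drop these keys
print(index_of('x', ['y', 'z']))      # -1 instead of raising ValueError

print(conv_date("2023-01-31"))        # parsed with dateutil into a datetime
print(conv_date(None))                # permissive mode falls back to now()

@timeit
def slow_add(a, b):
    return a + b

slow_add(1, 2)                        # elapsed time is logged at DEBUG level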
PypiClean
/leonardo-horizon-2016.8.3.tar.gz/leonardo-horizon-2016.8.3/doc/source/ref/local_conf.rst
========== local.conf ========== Configuring DevStack for Horizon ================================ Place the following content into `devstack/local.conf` to start the services that Horizon supports in DevStack when `stack.sh` is run. :: [[local|localrc]] ADMIN_PASSWORD=secretadmin MYSQL_PASSWORD=secretadmin RABBIT_PASSWORD=secretadmin SERVICE_PASSWORD=secretadmin SERVICE_TOKEN=a682f596-76f3-11e3-b3b2-e716f9080d50 # Recloning will insure that your stack is up to date. The downside # is overhead on restarts and potentially losing a stable environment. # If set to yes, will reclone all repos every time stack.sh is run. # The default is no. #RECLONE=yes # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly without # Internet access. ``stack.sh`` must have been previously run with Internet # access to install prerequisites and fetch repositories. # OFFLINE=True # Note: there are several network setting changes that may be # required to get networking properly configured in your environment. # This file is just using the defaults set up by devstack. # For a more detailed treatment of devstack network configuration # options, please see: http://devstack.org/guides/single-machine.html ### SERVICES # Enable Swift (Object Store) without replication enable_service s-proxy s-object s-container s-account SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 SWIFT_REPLICAS=1 SWIFT_DATA_DIR=$DEST/data/swift # Enable Neutron (Networking) # to use nova net rather than neutron, comment out the following group disable_service n-net enable_plugin neutron https://git.openstack.org/openstack/neutron enable_service q-svc enable_service q-agt enable_service q-dhcp enable_service q-l3 enable_service q-meta enable_service q-metering enable_service q-qos # end group # Enable VPN plugin for neutron enable_plugin neutron-vpnaas https://git.openstack.org/openstack/neutron-vpnaas # Enable Firewall plugin for neutron enable_plugin neutron-fwaas https://git.openstack.org/openstack/neutron-fwaas # Enable Load Balancer plugin for neutron enable_plugin neutron-lbaas https://git.openstack.org/openstack/neutron-lbaas # Enable Ceilometer (Metering) enable_service ceilometer-acompute ceilometer-acentral ceilometer-anotification ceilometer-collector ceilometer-api ### PLUGINS # Enable Sahara (Data Processing) enable_plugin sahara git://git.openstack.org/openstack/sahara # Enable Trove (Database) enable_plugin trove git://git.openstack.org/openstack/trove [[post-config|$GLANCE_API_CONF]] [DEFAULT] default_store=file
PypiClean
/msgraph_beta_sdk-1.0.0a9-py3-none-any.whl/msgraph/generated/drives/item/items/item/workbook/functions/im_cot/im_cot_request_builder.py
from __future__ import annotations from dataclasses import dataclass from kiota_abstractions.get_path_parameters import get_path_parameters from kiota_abstractions.method import Method from kiota_abstractions.request_adapter import RequestAdapter from kiota_abstractions.request_information import RequestInformation from kiota_abstractions.request_option import RequestOption from kiota_abstractions.response_handler import ResponseHandler from kiota_abstractions.serialization import Parsable, ParsableFactory from typing import Any, Callable, Dict, List, Optional, TYPE_CHECKING, Union if TYPE_CHECKING: from . import im_cot_post_request_body from ........models import workbook_function_result from ........models.o_data_errors import o_data_error class ImCotRequestBuilder(): """ Provides operations to call the imCot method. """ def __init__(self,request_adapter: RequestAdapter, path_parameters: Optional[Union[Dict[str, Any], str]] = None) -> None: """ Instantiates a new ImCotRequestBuilder and sets the default values. Args: pathParameters: The raw url or the Url template parameters for the request. requestAdapter: The request adapter to use to execute the requests. """ if path_parameters is None: raise Exception("path_parameters cannot be undefined") if request_adapter is None: raise Exception("request_adapter cannot be undefined") # Url template to use to build the URL for the current request builder self.url_template: str = "{+baseurl}/drives/{drive%2Did}/items/{driveItem%2Did}/workbook/functions/imCot" url_tpl_params = get_path_parameters(path_parameters) self.path_parameters = url_tpl_params self.request_adapter = request_adapter async def post(self,body: Optional[im_cot_post_request_body.ImCotPostRequestBody] = None, request_configuration: Optional[ImCotRequestBuilderPostRequestConfiguration] = None) -> Optional[workbook_function_result.WorkbookFunctionResult]: """ Invoke action imCot Args: body: The request body requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options. Returns: Optional[workbook_function_result.WorkbookFunctionResult] """ if body is None: raise Exception("body cannot be undefined") request_info = self.to_post_request_information( body, request_configuration ) from ........models.o_data_errors import o_data_error error_mapping: Dict[str, ParsableFactory] = { "4XX": o_data_error.ODataError, "5XX": o_data_error.ODataError, } if not self.request_adapter: raise Exception("Http core is null") from ........models import workbook_function_result return await self.request_adapter.send_async(request_info, workbook_function_result.WorkbookFunctionResult, error_mapping) def to_post_request_information(self,body: Optional[im_cot_post_request_body.ImCotPostRequestBody] = None, request_configuration: Optional[ImCotRequestBuilderPostRequestConfiguration] = None) -> RequestInformation: """ Invoke action imCot Args: body: The request body requestConfiguration: Configuration for the request such as headers, query parameters, and middleware options. 
Returns: RequestInformation """ if body is None: raise Exception("body cannot be undefined") request_info = RequestInformation() request_info.url_template = self.url_template request_info.path_parameters = self.path_parameters request_info.http_method = Method.POST request_info.headers["Accept"] = ["application/json"] if request_configuration: request_info.add_request_headers(request_configuration.headers) request_info.add_request_options(request_configuration.options) request_info.set_content_from_parsable(self.request_adapter, "application/json", body) return request_info @dataclass class ImCotRequestBuilderPostRequestConfiguration(): """ Configuration for the request such as headers, query parameters, and middleware options. """ # Request headers headers: Optional[Dict[str, Union[str, List[str]]]] = None # Request options options: Optional[List[RequestOption]] = None
PypiClean
/metanetx-post-0.5.1.tar.gz/metanetx-post-0.5.1/README.rst
============================= MetaNetX Post-Processing ============================= .. image:: https://img.shields.io/pypi/v/metanetx-post.svg :target: https://pypi.org/project/metanetx-post/ :alt: Current PyPI Version .. image:: https://img.shields.io/pypi/pyversions/metanetx-post.svg :target: https://pypi.org/project/metanetx-post/ :alt: Supported Python Versions .. image:: https://img.shields.io/pypi/l/metanetx-post.svg :target: https://www.apache.org/licenses/LICENSE-2.0 :alt: Apache Software License Version 2.0 .. image:: https://img.shields.io/badge/Contributor%20Covenant-v1.4%20adopted-ff69b4.svg :target: https://github.com/Midnighter/metanetx-post/blob/master/.github/CODE_OF_CONDUCT.md :alt: Code of Conduct .. image:: https://img.shields.io/travis/Midnighter/metanetx-post/master.svg?label=Travis%20CI :target: https://travis-ci.org/Midnighter/metanetx-post :alt: Travis CI .. image:: https://ci.appveyor.com/api/projects/status/github/Midnighter/metanetx-post?branch=master&svg=true :target: https://ci.appveyor.com/project/Midnighter/metanetx-post :alt: AppVeyor .. image:: https://codecov.io/gh/Midnighter/metanetx-post/branch/master/graph/badge.svg :target: https://codecov.io/gh/Midnighter/metanetx-post :alt: Codecov .. image:: https://img.shields.io/badge/code%20style-black-000000.svg :target: https://github.com/ambv/black :alt: Black .. image:: https://readthedocs.org/projects/metanetx-post/badge/?version=latest :target: https://metanetx-post.readthedocs.io/en/latest/?badge=latest :alt: Documentation Status .. summary-start Enrich the information coming from MetaNetX from additional sources. Usage ===== Please use the command line program help to explore use-cases. .. code-block:: console mnx-post --help Install ======= It's as simple as: .. code-block:: console pip install metanetx-post Copyright ========= * Copyright © 2020, Moritz E. Beber. * Free software distributed under the `Apache Software License 2.0 <https://www.apache.org/licenses/LICENSE-2.0>`_. .. summary-end
PypiClean
/tuf-2.0.0.tar.gz/tuf-2.0.0/docs/_posts/2022-05-04-ngclient-design.md
---
title: "What's new in Python-TUF ngclient?"
author: Jussi Kukkonen
---

We recently released a new TUF client implementation, `ngclient`, in Python-TUF. This post explains why we ended up doing that when a client already existed.

# Simpler implementation, "correct" abstractions

The legacy code had a few problems that could be summarized as non-optimal abstractions: significant effort had been put into code re-use, but not enough attention had been paid to ensuring that the expectations and promises of that shared code were the same in all cases of re-use. This, combined with Python's type ambiguity, the use of dictionaries as "blob"-like data structures, and extensive use of global state, meant touching the shared functions was a gamble: there was no way to be sure something wouldn't break.

During the redesign, we really concentrated on finding abstractions that fit the processes we wanted to implement. It may be worth mentioning that in some cases this meant abstractions that have no equivalent in the TUF specification: some of the issues in the legacy implementation look like the result of mapping the TUF specification's [_Detailed client workflow_](https://theupdateframework.github.io/specification/latest/#detailed-client-workflow) directly into code.

Here are the core abstractions we ended up with (number of lines of code in parentheses to provide a bit of context, alongside links to sources and docs):

* `Metadata` (900 SLOC, [docs](https://theupdateframework.readthedocs.io/en/latest/api/tuf.api.html)) handles everything related to individual pieces of TUF metadata: deserialization, signing, and verifying.
* `TrustedMetadataSet` (170 SLOC) is a collection of local, trusted metadata. It defines rules for how new metadata can be added into the set and ensures that metadata in it is always consistent and valid. As an example, if `TrustedMetadataSet` contains targets metadata, the set guarantees that the targets metadata is signed by trusted keys and is part of a currently valid TUF snapshot.
* `Updater` (250 SLOC, [docs](https://theupdateframework.readthedocs.io/en/latest/api/tuf.ngclient.updater.html)) makes decisions on what metadata should be loaded into `TrustedMetadataSet`, both from the local cache and from a remote repository. While `TrustedMetadataSet` always raises an exception if a piece of metadata is not valid, `Updater` considers the context and handles some failures as part of the process and some as actual errors. `Updater` also handles persisting validated metadata and targets onto local storage and provides the user-facing API.
* `FetcherInterface` (100 SLOC, [docs](https://theupdateframework.readthedocs.io/en/latest/api/tuf.ngclient.fetcher.html)) is the abstract file downloader. By default, a Requests-based implementation is used, but clients can use custom fetchers to tweak how downloads are done.

No design is perfect, but so far we're quite happy with the above split. It has dramatically simplified the implementation: the code is subjectively easier to understand, and it also has significantly lower code branching counts for the same operations.

# PyPI client requirements

A year ago we added TUF support into pip as a prototype: this revealed some design issues that made the integration more difficult than it needed to be. As the potential pip integration is a goal for Python-TUF, we wanted to smooth those rough edges. The main addition here was the `FetcherInterface`: it allows pip to keep doing all of the HTTP tweaks it has collected over the years.
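To make the fetcher hook concrete, here is a rough sketch of what plugging in a custom downloader can look like. Treat it as illustrative rather than authoritative: it assumes the abstract method subclasses override is `_fetch(url)` returning an iterator of byte chunks, and that `Updater` accepts the fetcher through its constructor; the `FetcherInterface` docs linked above are the reference for the exact signatures.

```python
import urllib.request
from typing import Iterator

from tuf.ngclient import FetcherInterface, Updater

class UrllibFetcher(FetcherInterface):
    """Example fetcher using urllib instead of the default requests backend."""

    def _fetch(self, url: str) -> Iterator[bytes]:  # assumed abstract hook
        with urllib.request.urlopen(url) as response:
            while True:
                chunk = response.read(64 * 1024)
                if not chunk:
                    break
                yield chunk

updater = Updater(
    metadata_dir="/tmp/tuf-metadata",
    metadata_base_url="https://example.org/metadata/",
    fetcher=UrllibFetcher(),
)
```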
There were a bunch of smaller API tweaks as well: as an example, legacy Python-TUF had not anticipated downloading target files from a different host than the one it downloads metadata from. This is the design that PyPI uses with pypi.org and files.pythonhosted.org.

# Better API

Since we knew we had to break the API with the legacy implementation anyway, we also fixed multiple paper cuts in the API:

* Actual data structures are now exposed instead of dictionary "blobs"
* Configuration was removed or made non-global
* Exceptions are defined in a way that is useful to client applications

# Plain old software engineering

In addition to the big-ticket items, the rewrite allowed loads of improvements in project engineering practices. Some highlights:

* Type annotations are now used extensively
* Coding style is now consistent (and is now a common Python style)
* There is a healthy culture of review in the project: the bar for accepting changes is where it should be for a security project
* Testing has so many improvements they probably need a blog post of their own

These are not `ngclient` features as such, but we expect they will show in the quality of products built with it.
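To close, here is a sketch of the basic download flow with the resulting `ngclient` API. The paths and URLs are placeholders, and the snippet assumes the metadata directory already contains an initial trusted `root.json`; the `Updater` documentation linked above is the authoritative reference for these calls.

```python
from tuf.ngclient import Updater

updater = Updater(
    metadata_dir="/var/lib/myapp/tuf-metadata",  # must contain a trusted root.json
    metadata_base_url="https://example.org/metadata/",
    target_dir="/var/lib/myapp/targets",
    target_base_url="https://example.org/targets/",
)

updater.refresh()                                 # update root, timestamp, snapshot, targets

info = updater.get_targetinfo("myapp-1.2.3.tar.gz")
if info is None:
    raise RuntimeError("target is not listed in the trusted metadata")

path = updater.find_cached_target(info)           # reuse a previously verified download
if path is None:
    path = updater.download_target(info)          # download and verify length/hashes
print("verified target at", path)
```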
PypiClean
/zenodo-accessrequests-1.0.0a3.tar.gz/zenodo-accessrequests-1.0.0a3/docs/changes.rst
.. This file is part of Zenodo. Copyright (C) 2015 CERN. Zenodo is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Zenodo is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Zenodo; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. In applying this license, CERN does not waive the privileges and immunities granted to it by virtue of its status as an Intergovernmental Organization or submit itself to any jurisdiction. .. include:: ../CHANGES.rst
PypiClean
/pulumi_azure_nextgen-0.6.2a1613157620.tar.gz/pulumi_azure_nextgen-0.6.2a1613157620/pulumi_azure_nextgen/blueprint/artifact.py
import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union from .. import _utilities, _tables from ._enums import * __all__ = ['Artifact'] class Artifact(pulumi.CustomResource): def __init__(__self__, resource_name: str, opts: Optional[pulumi.ResourceOptions] = None, artifact_name: Optional[pulumi.Input[str]] = None, blueprint_name: Optional[pulumi.Input[str]] = None, kind: Optional[pulumi.Input[Union[str, 'ArtifactKind']]] = None, resource_scope: Optional[pulumi.Input[str]] = None, __props__=None, __name__=None, __opts__=None): """ Represents a blueprint artifact. API Version: 2018-11-01-preview. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] artifact_name: Name of the blueprint artifact. :param pulumi.Input[str] blueprint_name: Name of the blueprint definition. :param pulumi.Input[Union[str, 'ArtifactKind']] kind: Specifies the kind of blueprint artifact. :param pulumi.Input[str] resource_scope: The scope of the resource. Valid scopes are: management group (format: '/providers/Microsoft.Management/managementGroups/{managementGroup}'), subscription (format: '/subscriptions/{subscriptionId}'). """ if __name__ is not None: warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning) resource_name = __name__ if __opts__ is not None: warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning) opts = __opts__ if opts is None: opts = pulumi.ResourceOptions() if not isinstance(opts, pulumi.ResourceOptions): raise TypeError('Expected resource options to be a ResourceOptions instance') if opts.version is None: opts.version = _utilities.get_version() if opts.id is None: if __props__ is not None: raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource') __props__ = dict() if artifact_name is None and not opts.urn: raise TypeError("Missing required property 'artifact_name'") __props__['artifact_name'] = artifact_name if blueprint_name is None and not opts.urn: raise TypeError("Missing required property 'blueprint_name'") __props__['blueprint_name'] = blueprint_name if kind is None and not opts.urn: raise TypeError("Missing required property 'kind'") __props__['kind'] = kind if resource_scope is None and not opts.urn: raise TypeError("Missing required property 'resource_scope'") __props__['resource_scope'] = resource_scope __props__['name'] = None __props__['type'] = None alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:blueprint/v20181101preview:Artifact")]) opts = pulumi.ResourceOptions.merge(opts, alias_opts) super(Artifact, __self__).__init__( 'azure-nextgen:blueprint:Artifact', resource_name, __props__, opts) @staticmethod def get(resource_name: str, id: pulumi.Input[str], opts: Optional[pulumi.ResourceOptions] = None) -> 'Artifact': """ Get an existing Artifact resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. 
""" opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id)) __props__ = dict() return Artifact(resource_name, opts=opts, __props__=__props__) @property @pulumi.getter def kind(self) -> pulumi.Output[str]: """ Specifies the kind of blueprint artifact. """ return pulumi.get(self, "kind") @property @pulumi.getter def name(self) -> pulumi.Output[str]: """ Name of this resource. """ return pulumi.get(self, "name") @property @pulumi.getter def type(self) -> pulumi.Output[str]: """ Type of this resource. """ return pulumi.get(self, "type") def translate_output_property(self, prop): return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop def translate_input_property(self, prop): return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
PypiClean
/symautomata-0.0.12.tar.gz/symautomata-0.0.12/README.md
# symautomata

Automata classes

A Python framework for working with automata. The framework contains a Python implementation and C bindings using pywrapfst (optional).

This framework is part of the [lightbulb-framework](https://github.com/lightbulb-framework/lightbulb-framework).

## Changelog

0.0.12: Added support for the latest Flex version in the flex2fst file.

## Contributors

* George Argyros
* Ioannis Stais
* Suman Jana
* Angelos D. Keromytis
* Aggelos Kiayias

## References

* *G. Argyros, I. Stais, S. Jana, A. D. Keromytis, and A. Kiayias. 2016. SFADiff: Automated Evasion Attacks and Fingerprinting Using Black-box Differential Automata Learning. In Proceedings of the 2016 ACM SIGSAC Conference on Computer and Communications Security (CCS '16). ACM, New York, NY, USA, 1690-1701. doi: 10.1145/2976749.2978383*
* *G. Argyros, I. Stais, A. Kiayias and A. D. Keromytis, "Back in Black: Towards Formal, Black Box Analysis of Sanitizers and Filters," 2016 IEEE Symposium on Security and Privacy (SP), San Jose, CA, 2016, pp. 91-109. doi: 10.1109/SP.2016.14*

## Acknowledgements

This research was partly supported by ERC project CODAMODA, #259152.

## License

MIT License as described in the LICENSE file.
PypiClean
/django-m2m-fix-1.0.0.tar.gz/django-m2m-fix-1.0.0/django/contrib/gis/management/commands/ogrinspect.py
import argparse from django.contrib.gis import gdal from django.core.management.base import BaseCommand, CommandError from django.utils.inspect import get_func_args class LayerOptionAction(argparse.Action): """ Custom argparse action for the `ogrinspect` `layer_key` keyword option which may be an integer or a string. """ def __call__(self, parser, namespace, value, option_string=None): try: setattr(namespace, self.dest, int(value)) except ValueError: setattr(namespace, self.dest, value) class ListOptionAction(argparse.Action): """ Custom argparse action for `ogrinspect` keywords that require a string list. If the string is 'True'/'true' then the option value will be a boolean instead. """ def __call__(self, parser, namespace, value, option_string=None): if value.lower() == 'true': setattr(namespace, self.dest, True) else: setattr(namespace, self.dest, value.split(',')) class Command(BaseCommand): help = ( 'Inspects the given OGR-compatible data source (e.g., a shapefile) and outputs\n' 'a GeoDjango model with the given model name. For example:\n' ' ./manage.py ogrinspect zipcode.shp Zipcode' ) requires_system_checks = False def add_arguments(self, parser): parser.add_argument('data_source', help='Path to the data source.') parser.add_argument('model_name', help='Name of the model to create.') parser.add_argument( '--blank', action=ListOptionAction, default=False, help='Use a comma separated list of OGR field names to add ' 'the `blank=True` option to the field definition. Set to `true` ' 'to apply to all applicable fields.', ) parser.add_argument( '--decimal', action=ListOptionAction, default=False, help='Use a comma separated list of OGR float fields to ' 'generate `DecimalField` instead of the default ' '`FloatField`. Set to `true` to apply to all OGR float fields.', ) parser.add_argument( '--geom-name', default='geom', help='Specifies the model name for the Geometry Field (defaults to `geom`)' ) parser.add_argument( '--layer', dest='layer_key', action=LayerOptionAction, default=0, help='The key for specifying which layer in the OGR data ' 'source to use. Defaults to 0 (the first layer). May be ' 'an integer or a string identifier for the layer.', ) parser.add_argument( '--multi-geom', action='store_true', help='Treat the geometry in the data source as a geometry collection.', ) parser.add_argument( '--name-field', help='Specifies a field name to return for the __str__() method.', ) parser.add_argument( '--no-imports', action='store_false', dest='imports', help='Do not include `from django.contrib.gis.db import models` statement.', ) parser.add_argument( '--null', action=ListOptionAction, default=False, help='Use a comma separated list of OGR field names to add ' 'the `null=True` option to the field definition. Set to `true` ' 'to apply to all applicable fields.', ) parser.add_argument( '--srid', help='The SRID to use for the Geometry Field. If it can be ' 'determined, the SRID of the data source is used.', ) parser.add_argument( '--mapping', action='store_true', help='Generate mapping dictionary for use with `LayerMapping`.', ) def handle(self, *args, **options): data_source, model_name = options.pop('data_source'), options.pop('model_name') # Getting the OGR DataSource from the string parameter. try: ds = gdal.DataSource(data_source) except gdal.GDALException as msg: raise CommandError(msg) # Returning the output of ogrinspect with the given arguments # and options. 
from django.contrib.gis.utils.ogrinspect import _ogrinspect, mapping # Filter options to params accepted by `_ogrinspect` ogr_options = {k: v for k, v in options.items() if k in get_func_args(_ogrinspect) and v is not None} output = [s for s in _ogrinspect(ds, model_name, **ogr_options)] if options['mapping']: # Constructing the keyword arguments for `mapping`, and # calling it on the data source. kwargs = { 'geom_name': options['geom_name'], 'layer_key': options['layer_key'], 'multi_geom': options['multi_geom'], } mapping_dict = mapping(ds, **kwargs) # This extra legwork is so that the dictionary definition comes # out in the same order as the fields in the model definition. rev_mapping = {v: k for k, v in mapping_dict.items()} output.extend(['', '', '# Auto-generated `LayerMapping` dictionary for %s model' % model_name, '%s_mapping = {' % model_name.lower()]) output.extend(" '%s': '%s'," % ( rev_mapping[ogr_fld], ogr_fld) for ogr_fld in ds[options['layer_key']].fields ) output.extend([" '%s': '%s'," % (options['geom_name'], mapping_dict[options['geom_name']]), '}']) return '\n'.join(output) + '\n'
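# --- Hedged usage sketch (not part of the original command module) ---
# The command is normally run from the shell, as in the help text above:
#
#     ./manage.py ogrinspect zipcode.shp Zipcode --srid=4326 --mapping --multi-geom
#
# It can also be invoked programmatically with Django's call_command(); the
# keyword names below follow the "dest" values declared in add_arguments().
# A configured Django project, GDAL, and a real data source are required.
from django.core.management import call_command

call_command(
    "ogrinspect",
    "zipcode.shp",        # data_source
    "Zipcode",            # model_name
    srid="4326",
    mapping=True,         # also emit the LayerMapping dictionary
    geom_name="geom",
)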
PypiClean
/p4a-django-1.9.1.tar.gz/p4a-django-1.9.1/django/db/migrations/optimizer.py
from __future__ import unicode_literals from django.db.migrations import ( AddField, AlterField, AlterIndexTogether, AlterModelTable, AlterOrderWithRespectTo, AlterUniqueTogether, CreateModel, DeleteModel, RemoveField, RenameField, RenameModel, ) from django.utils import six class MigrationOptimizer(object): """ Powers the optimization process, where you provide a list of Operations and you are returned a list of equal or shorter length - operations are merged into one if possible. For example, a CreateModel and an AddField can be optimized into a new CreateModel, and CreateModel and DeleteModel can be optimized into nothing. """ def __init__(self): self.model_level_operations = ( CreateModel, AlterModelTable, AlterUniqueTogether, AlterIndexTogether, AlterOrderWithRespectTo, ) self.field_level_operations = ( AddField, AlterField, ) self.reduce_methods = { # (model operation, model operation) (CreateModel, DeleteModel): self.reduce_create_model_delete_model, (CreateModel, RenameModel): self.reduce_create_model_rename_model, (RenameModel, RenameModel): self.reduce_rename_model_rename_model, (AlterIndexTogether, AlterIndexTogether): self.reduce_alter_model_alter_model, (AlterModelTable, AlterModelTable): self.reduce_alter_model_alter_model, (AlterOrderWithRespectTo, AlterOrderWithRespectTo): self.reduce_alter_model_alter_model, (AlterUniqueTogether, AlterUniqueTogether): self.reduce_alter_model_alter_model, (AlterIndexTogether, DeleteModel): self.reduce_alter_model_delete_model, (AlterModelTable, DeleteModel): self.reduce_alter_model_delete_model, (AlterOrderWithRespectTo, DeleteModel): self.reduce_alter_model_delete_model, (AlterUniqueTogether, DeleteModel): self.reduce_alter_model_delete_model, # (model operation, field operation) (CreateModel, AddField): self.reduce_create_model_add_field, (CreateModel, AlterField): self.reduce_create_model_alter_field, (CreateModel, RemoveField): self.reduce_create_model_remove_field, (CreateModel, RenameField): self.reduce_create_model_rename_field, (AlterIndexTogether, AddField): self.reduce_alter_model_addalterremove_field, (AlterIndexTogether, AlterField): self.reduce_alter_model_addalterremove_field, (AlterIndexTogether, RemoveField): self.reduce_alter_model_addalterremove_field, (AlterOrderWithRespectTo, AddField): self.reduce_alter_model_addalterremove_field, (AlterOrderWithRespectTo, AlterField): self.reduce_alter_model_addalterremove_field, (AlterOrderWithRespectTo, RemoveField): self.reduce_alter_model_addalterremove_field, (AlterUniqueTogether, AddField): self.reduce_alter_model_addalterremove_field, (AlterUniqueTogether, AlterField): self.reduce_alter_model_addalterremove_field, (AlterUniqueTogether, RemoveField): self.reduce_alter_model_addalterremove_field, (AlterIndexTogether, RenameField): self.reduce_alter_model_rename_field, (AlterOrderWithRespectTo, RenameField): self.reduce_alter_model_rename_field, (AlterUniqueTogether, RenameField): self.reduce_alter_model_rename_field, # (field operation, field operation) (AddField, AlterField): self.reduce_add_field_alter_field, (AddField, RemoveField): self.reduce_add_field_remove_field, (AddField, RenameField): self.reduce_add_field_rename_field, (AlterField, RemoveField): self.reduce_alter_field_remove_field, (AlterField, RenameField): self.reduce_alter_field_rename_field, (RenameField, RenameField): self.reduce_rename_field_rename_field, } def optimize(self, operations, app_label=None): """ Main optimization entry point. 
Pass in a list of Operation instances, get out a new list of Operation instances. Unfortunately, due to the scope of the optimization (two combinable operations might be separated by several hundred others), this can't be done as a peephole optimization with checks/output implemented on the Operations themselves; instead, the optimizer looks at each individual operation and scans forwards in the list to see if there are any matches, stopping at boundaries - operations which can't be optimized over (RunSQL, operations on the same field/model, etc.) The inner loop is run until the starting list is the same as the result list, and then the result is returned. This means that operation optimization must be stable and always return an equal or shorter list. The app_label argument is optional, but if you pass it you'll get more efficient optimization. """ # Internal tracking variable for test assertions about # of loops self._iterations = 0 while True: result = self.optimize_inner(operations, app_label) self._iterations += 1 if result == operations: return result operations = result def optimize_inner(self, operations, app_label=None): """ Inner optimization loop. """ new_operations = [] for i, operation in enumerate(operations): # Compare it to each operation after it for j, other in enumerate(operations[i + 1:]): result = self.reduce(operation, other, operations[i + 1:i + j + 1]) if result is not None: # Optimize! Add result, then remaining others, then return new_operations.extend(result) new_operations.extend(operations[i + 1:i + 1 + j]) new_operations.extend(operations[i + j + 2:]) return new_operations if not self.can_optimize_through(operation, other, app_label): new_operations.append(operation) break else: new_operations.append(operation) return new_operations # REDUCTION def reduce(self, operation, other, in_between=None): """ Either returns a list of zero, one or two operations, or None, meaning this pair cannot be optimized. """ method = self.reduce_methods.get((type(operation), type(other))) if method: return method(operation, other, in_between or []) return None def model_to_key(self, model): """ Takes either a model class or a "appname.ModelName" string and returns (appname, modelname) """ if isinstance(model, six.string_types): return model.split(".", 1) else: return ( model._meta.app_label, model._meta.object_name, ) # REDUCE METHODS: (MODEL OPERATION, MODEL OPERATION) def reduce_create_model_delete_model(self, operation, other, in_between): """ Folds a CreateModel and a DeleteModel into nothing. """ if (operation.name_lower == other.name_lower and not operation.options.get("proxy", False)): return [] def reduce_create_model_rename_model(self, operation, other, in_between): """ Folds a model rename into its create """ if operation.name_lower == other.old_name_lower: return [ CreateModel( other.new_name, fields=operation.fields, options=operation.options, bases=operation.bases, managers=operation.managers, ) ] def reduce_rename_model_rename_model(self, operation, other, in_between): """ Folds a model rename into another one """ if operation.new_name_lower == other.old_name_lower: return [ RenameModel( operation.old_name, other.new_name, ) ] def reduce_alter_model_alter_model(self, operation, other, in_between): """ Folds two AlterModelTable, AlterFooTogether, or AlterOrderWithRespectTo operations into the latter. 
""" if operation.name_lower == other.name_lower: return [other] def reduce_alter_model_delete_model(self, operation, other, in_between): """ Folds an AlterModelSomething and a DeleteModel into just delete. """ if operation.name_lower == other.name_lower: return [other] # REDUCE METHODS: (MODEL OPERATION, FIELD OPERATION) def reduce_create_model_add_field(self, operation, other, in_between): if operation.name_lower == other.model_name_lower: # Don't allow optimizations of FKs through models they reference if hasattr(other.field, "remote_field") and other.field.remote_field: for between in in_between: # Check that it doesn't point to the model app_label, object_name = self.model_to_key(other.field.remote_field.model) if between.references_model(object_name, app_label): return None # Check that it's not through the model if getattr(other.field.remote_field, "through", None): app_label, object_name = self.model_to_key(other.field.remote_field.through) if between.references_model(object_name, app_label): return None # OK, that's fine return [ CreateModel( operation.name, fields=operation.fields + [(other.name, other.field)], options=operation.options, bases=operation.bases, managers=operation.managers, ) ] def reduce_create_model_alter_field(self, operation, other, in_between): if operation.name_lower == other.model_name_lower: return [ CreateModel( operation.name, fields=[ (n, other.field if n == other.name else v) for n, v in operation.fields ], options=operation.options, bases=operation.bases, managers=operation.managers, ) ] def reduce_create_model_remove_field(self, operation, other, in_between): if operation.name_lower == other.model_name_lower: return [ CreateModel( operation.name, fields=[ (n, v) for n, v in operation.fields if n.lower() != other.name_lower ], options=operation.options, bases=operation.bases, managers=operation.managers, ) ] def reduce_create_model_rename_field(self, operation, other, in_between): if operation.name_lower == other.model_name_lower: return [ CreateModel( operation.name, fields=[ (other.new_name if n == other.old_name else n, v) for n, v in operation.fields ], options=operation.options, bases=operation.bases, managers=operation.managers, ) ] def reduce_alter_model_addalterremove_field(self, operation, other, in_between): if (operation.name_lower == other.model_name_lower and not operation.references_field(other.model_name, other.name)): return [other, operation] def reduce_alter_model_rename_field(self, operation, other, in_between): if (operation.name_lower == other.model_name_lower and not operation.references_field(other.model_name, other.old_name)): return [other, operation] # REDUCE METHODS: (FIELD OPERATION, FIELD OPERATION) def reduce_add_field_alter_field(self, operation, other, in_between): if (operation.model_name_lower == other.model_name_lower and operation.name_lower == other.name_lower): return [ AddField( model_name=operation.model_name, name=operation.name, field=other.field, ) ] def reduce_add_field_remove_field(self, operation, other, in_between): if (operation.model_name_lower == other.model_name_lower and operation.name_lower == other.name_lower): return [] def reduce_add_field_rename_field(self, operation, other, in_between): if (operation.model_name_lower == other.model_name_lower and operation.name_lower == other.old_name_lower): return [ AddField( model_name=operation.model_name, name=other.new_name, field=operation.field, ) ] def reduce_alter_field_remove_field(self, operation, other, in_between): if (operation.model_name_lower == 
other.model_name_lower and operation.name_lower == other.name_lower): return [other] def reduce_alter_field_rename_field(self, operation, other, in_between): if (operation.model_name_lower == other.model_name_lower and operation.name_lower == other.old_name_lower): return [ other, AlterField( model_name=operation.model_name, name=other.new_name, field=operation.field, ), ] def reduce_rename_field_rename_field(self, operation, other, in_between): if (operation.model_name_lower == other.model_name_lower and operation.new_name_lower == other.old_name_lower): return [ RenameField( operation.model_name, operation.old_name, other.new_name, ), ] # THROUGH CHECKS def can_optimize_through(self, operation, other, app_label=None): """ Returns True if it's possible to optimize 'operation' with something the other side of 'other'. This is possible if, for example, they affect different models. """ # If it's a model level operation, let it through if there's # nothing that looks like a reference to us in 'other'. if isinstance(operation, self.model_level_operations): if not other.references_model(operation.name, app_label): return True # If it's field level, only let it through things that don't reference # the field (which includes not referencing the model) if isinstance(operation, self.field_level_operations): if not other.references_field(operation.model_name, operation.name, app_label): return True return False
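# --- Hedged usage sketch (not part of the original module) ---
# How the optimizer collapses operation pairs: a CreateModel followed by an
# AddField on the same model folds into one CreateModel, and a CreateModel
# followed by a DeleteModel cancels out entirely.
from django.db import models
from django.db.migrations import AddField, CreateModel, DeleteModel
from django.db.migrations.optimizer import MigrationOptimizer

optimizer = MigrationOptimizer()

create = CreateModel("Book", fields=[("id", models.AutoField(primary_key=True))])
add = AddField("Book", "title", models.CharField(max_length=100))

print(optimizer.optimize([create, add], app_label="library"))
# -> one CreateModel that already includes the "title" field

print(optimizer.optimize([create, DeleteModel("Book")], app_label="library"))
# -> [] (the pair cancels out)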
PypiClean
/retro_data_structures-0.23.0-py3-none-any.whl/retro_data_structures/properties/dkc_returns/objects/TimeAttackEOLDisplay.py
import dataclasses
import struct
import typing

from retro_data_structures.game_check import Game
from retro_data_structures.properties.base_property import BaseObjectType
from retro_data_structures.properties.dkc_returns.archetypes.EditorProperties import EditorProperties


@dataclasses.dataclass()
class TimeAttackEOLDisplay(BaseObjectType):
    editor_properties: EditorProperties = dataclasses.field(default_factory=EditorProperties)

    @classmethod
    def game(cls) -> Game:
        return Game.DKC_RETURNS

    def get_name(self) -> typing.Optional[str]:
        return self.editor_properties.name

    def set_name(self, name: str) -> None:
        self.editor_properties.name = name

    @classmethod
    def object_type(cls) -> str:
        return 'TEOL'

    @classmethod
    def from_stream(cls, data: typing.BinaryIO, size: typing.Optional[int] = None, default_override: typing.Optional[dict] = None):
        struct_id, size, property_count = struct.unpack(">LHH", data.read(8))
        assert struct_id == 0xFFFFFFFF
        root_size_start = data.tell() - 2

        present_fields = default_override or {}
        for _ in range(property_count):
            property_id, property_size = struct.unpack(">LH", data.read(6))
            start = data.tell()
            try:
                property_name, decoder = _property_decoder[property_id]
                present_fields[property_name] = decoder(data, property_size)
            except KeyError:
                raise RuntimeError(f"Unknown property: 0x{property_id:08x}")
            assert data.tell() - start == property_size
        assert data.tell() - root_size_start == size

        return cls(**present_fields)

    def to_stream(self, data: typing.BinaryIO, default_override: typing.Optional[dict] = None):
        default_override = default_override or {}
        data.write(b'\xff\xff\xff\xff')  # struct object id
        root_size_offset = data.tell()
        data.write(b'\x00\x00')  # placeholder for root struct size
        data.write(b'\x00\x01')  # 1 properties

        data.write(b'%ZE\x80')  # 0x255a4580
        before = data.tell()
        data.write(b'\x00\x00')  # size placeholder
        self.editor_properties.to_stream(data)
        after = data.tell()
        data.seek(before)
        data.write(struct.pack(">H", after - before - 2))
        data.seek(after)

        struct_end_offset = data.tell()
        data.seek(root_size_offset)
        data.write(struct.pack(">H", struct_end_offset - root_size_offset - 2))
        data.seek(struct_end_offset)

    @classmethod
    def from_json(cls, data: dict):
        return cls(
            editor_properties=EditorProperties.from_json(data['editor_properties']),
        )

    def to_json(self) -> dict:
        return {
            'editor_properties': self.editor_properties.to_json(),
        }


def _decode_editor_properties(data: typing.BinaryIO, property_size: int):
    return EditorProperties.from_stream(data, property_size)


_property_decoder: typing.Dict[int, typing.Tuple[str, typing.Callable[[typing.BinaryIO, int], typing.Any]]] = {
    0x255a4580: ('editor_properties', _decode_editor_properties),
}
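# ---------------------------------------------------------------------------
# Hypothetical round trip (not part of the original module): serialize a
# default-constructed TimeAttackEOLDisplay with to_stream() and parse the
# produced bytes back with from_stream(). Assumes the default
# EditorProperties instance round-trips cleanly.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import io

    obj = TimeAttackEOLDisplay()
    buffer = io.BytesIO()
    obj.to_stream(buffer)        # writes the 0xFFFFFFFF root struct shown above
    buffer.seek(0)

    parsed = TimeAttackEOLDisplay.from_stream(buffer)
    print(parsed.object_type(), parsed.to_json())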
PypiClean
/monk_pytorch_cuda100_test-0.0.1.tar.gz/monk_pytorch_cuda100_test-0.0.1/monk/tf_keras_1/finetune/level_11_optimizers_main.py
from monk.tf_keras_1.finetune.imports import * from monk.system.imports import * from monk.tf_keras_1.finetune.level_10_schedulers_main import prototype_schedulers class prototype_optimizers(prototype_schedulers): ''' Main class for all optimizers in expert mode Args: verbose (int): Set verbosity levels 0 - Print Nothing 1 - Print desired details ''' @accepts("self", verbose=int, post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def __init__(self, verbose=1): super().__init__(verbose=verbose); ############################################################################################################################################### @warning_checks(None, ["lt", 1], momentum=["lt", 1.5], weight_decay=["lt", 0.01], momentum_dampening_rate=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], momentum=["gte", 0], weight_decay=["gte", 0], momentum_dampening_rate=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], momentum=[int, float], weight_decay=[int, float], momentum_dampening_rate=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_sgd(self, learning_rate, momentum=0, weight_decay=0, momentum_dampening_rate=0, clipnorm=0.0, clipvalue=0.0): ''' Select stochastic gradient descent optimizer Args: learning_rate (float): Initial base learning rate momentum (float): Momentum value for driving the weights towards minima weight_decay (float): Value for regularizing weights post every update momentum_dampening_rate (float): Reduction rate for momentum clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = sgd(self.system_dict, learning_rate, momentum=momentum, weight_decay=weight_decay, momentum_dampening_rate=momentum_dampening_rate, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: momentum_dampening_rate is active only for pytorch in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], momentum=["lt", 1.5], weight_decay=["lt", 0.01], momentum_dampening_rate=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], momentum=["gte", 0], weight_decay=["gte", 0], momentum_dampening_rate=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], momentum=[int, float], weight_decay=[int, float], momentum_dampening_rate=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_nesterov_sgd(self, learning_rate, momentum=0, weight_decay=0, momentum_dampening_rate=0, clipnorm=0.0, clipvalue=0.0): ''' Select 
stochastic gradient descent optimizer with nesterov acceleration Args: learning_rate (float): Initial base learning rate momentum (float): Momentum value for driving the weights towards minima weight_decay (float): Value for regularizing weights post every update momentum_dampening_rate (float): Reduction rate for momentum clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = nesterov_sgd(self.system_dict, learning_rate, momentum=momentum, weight_decay=weight_decay, momentum_dampening_rate=momentum_dampening_rate, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: momentum_dampening_rate is active only for pytorch in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], decay_rate=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], clipnorm=None, clipvalue=None, post_trace=None) @error_checks(None, ["gt", 0], decay_rate=["gt", 0], epsilon=["gte", 0], weight_decay=["gte", 0], clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], decay_rate=[int, float], epsilon=[int, float], weight_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_rmsprop(self, learning_rate, decay_rate=0.99, epsilon=1e-08, weight_decay=0, clipnorm=0.0, clipvalue=0.0): ''' Select root mean score prop optimizer Args: learning_rate (float): Initial base learning rate decay_rate (float): A decay factor of moving average over past squared gradient. 
epsilon (float): A value to avoid division by zero weight_decay (float): Value for regularizing weights post every update clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = rmsprop(self.system_dict , learning_rate, decay_rate=decay_rate, epsilon=epsilon, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt, 1"], beta1=["lt", 1], beta2=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], amsgrad=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], beta1=["gte", 0], beta2=["gte", 0], epssilon=["gte", 0], weight_decay=["gte", 0], amsgrad=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], beta1=[int, float], beta2=[int, float], epsilon=[int, float], weight_decay=[int, float], amsgrad=bool, clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_adam(self, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, amsgrad=False, clipnorm=0.0, clipvalue=0.0): ''' Select ADAM optimizer Args: learning_rate (float): Initial base learning rate beta1 (float): Exponential decay rate for first momentum estimates beta2 (float): Exponential decay rate for first second estimates weight_decay (float): Value for regularizing weights post every update amsgrad (bool): If True, AMSGrad variant of this algorithm is used epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = adam(self.system_dict, learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, amsgrad=amsgrad, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: amsgrad is active only for keras and pytorch in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt, 1"], 
beta1=["lt", 1], beta2=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], amsgrad=None, momentum_decay=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], beta1=["gte", 0], beta2=["gte", 0], epssilon=["gte", 0], weight_decay=["gte", 0], amsgrad=None, momentum_decay=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], beta1=[int, float], beta2=[int, float], epsilon=[int, float], weight_decay=[int, float], amsgrad=bool, momentum_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_nesterov_adam(self, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, amsgrad=False, momentum_decay=0.004, clipnorm=0.0, clipvalue=0.0): ''' Select ADAM optimizer with nesterov momentum acceleration Args: learning_rate (float): Initial base learning rate beta1 (float): Exponential decay rate for first momentum estimates beta2 (float): Exponential decay rate for first second estimates weight_decay (float): Value for regularizing weights post every update amsgrad (bool): If True, AMSGrad variant of this algorithm is used epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = nesterov_adam(self.system_dict, learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, amsgrad=amsgrad, momentum_decay=momentum_decay, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("OptimizerWarning: nesterov adam is active only for keras and gluon in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: amsgrad is inactive in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], beta1=["lt", 1], beta2=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], beta1=["gte", 0], beta2=["gte", 0], epsilon=["gte", 0], weight_decay=["gte", 0], clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], beta1=[int, float], beta2=[int, float], epsilon=[int, float], weight_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_adamax(self, learning_rate, beta1=0.9, beta2=0.999, epsilon=1e-08, weight_decay=0, clipnorm=0.0, clipvalue=0.0): ''' Select Adamax optimizer Args: learning_rate (float): Initial base learning rate beta1 (float): Exponential decay rate for first momentum estimates beta2 (float): Exponential decay rate for first second estimates weight_decay (float): Value for regularizing weights post every update epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue 
(float): Value for clipping Returns: None ''' self.system_dict = adamax(self.system_dict, learning_rate, beta1=beta1, beta2=beta2, epsilon=epsilon, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], rho=["lt", 1], epsilon=["lt", 0.001], weight_decay=["lt", 0.01], clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], rho=["gt", 0], epsilon=["gte", 0], weight_decay=["gte", 0], clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], rho=[int, float], epsilon=[int, float], weight_decay=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_adadelta(self, learning_rate, rho=0.9, epsilon=1e-06, weight_decay=0, clipnorm=0.0, clipvalue=0.0): ''' Select Adadelta optimizer Args: learning_rate (float): Initial base learning rate rho (float): Exponential decay rate for momentum estimates weight_decay (float): Value for regularizing weights post every update epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = adadelta(self.system_dict, learning_rate, rho=rho, epsilon=epsilon, weight_decay=weight_decay, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ############################################################################################################################################### ############################################################################################################################################### @warning_checks(None, ["lt", 1], learning_rate_decay=None, weight_decay=["lt", 0.01], epsilon=None, clipnorm=None, clipvalue=None, post_trace=False) @error_checks(None, ["gt", 0], learning_rate_decay=None, weight_decay=["gte", 0], epsilon=None, clipnorm=None, clipvalue=None, post_trace=False) @accepts("self", [int, float], learning_rate_decay=[int, float], weight_decay=[int, float], epsilon=[int, float], clipnorm=[int, float], clipvalue=[int, float], post_trace=False) #@TraceFunction(trace_args=True, trace_rv=True) def optimizer_adagrad(self, learning_rate, learning_rate_decay=0, weight_decay=0, epsilon=1e-08, clipnorm=0.0, 
clipvalue=0.0): ''' Select Adagrad optimizer Args: learning_rate (float): Initial base learning rate learning_rate_decay (float): Learning rate decay factor weight_decay (float): Value for regularizing weights post every update epsilon (float): A value to avoid division by zero clipnorm (float): Gradient clipping factor clipvalue (float): Value for clipping Returns: None ''' self.system_dict = adagrad(self.system_dict, learning_rate, learning_rate_decay=learning_rate_decay, weight_decay=weight_decay, epsilon=epsilon, clipnorm=clipnorm, clipvalue=clipvalue); self.custom_print("Optimizer"); self.custom_print(" Name: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["name"])); self.custom_print(" Learning rate: {}".format(self.system_dict["hyper-parameters"]["learning_rate"])); self.custom_print(" Params: {}".format(self.system_dict["hyper-parameters"]["optimizer"]["params"])); self.custom_print(""); ConstraintWarning("ArgumentWarning: clipnorm and clipvalue are active only for keras in current version of Monk"); self.custom_print(""); ConstraintWarning("ArgumentWarning: learning_rate_decay is active only for pytorch in current version of Monk"); self.custom_print(""); ###############################################################################################################################################
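###############################################################################
# Hedged usage sketch (not part of the original module): these optimizer
# selectors are normally called on an experiment created through Monk's
# prototype workflow. The assumption here is that the inherited constructors
# populate a default system_dict; in a real run the experiment would first be
# set up via the higher-level Prototype() call before choosing an optimizer.
###############################################################################
if __name__ == "__main__":
    gtf = prototype_optimizers(verbose=1)
    gtf.optimizer_sgd(0.01, momentum=0.9, weight_decay=0.0001)
    gtf.optimizer_adam(0.001, beta1=0.9, beta2=0.999)  # overrides the SGD choice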
PypiClean
/UncertaintyWrapper-0.4.2a0.tar.gz/UncertaintyWrapper-0.4.2a0/README.rst
.. image:: https://travis-ci.org/SunPower/UncertaintyWrapper.svg?branch=master
   :target: https://travis-ci.org/SunPower/UncertaintyWrapper

UncertaintyWrapper
==================

Use the ``@unc_wrapper`` decorator to wrap any Python callable to append the
covariance and Jacobian matrices to the return values. See documentation and
tests for usage and examples.

Installation
------------

Use ``pip install UncertaintyWrapper`` to install from
`PyPI <https://pypi.python.org/pypi/UncertaintyWrapper>`_ or download a source
distribution, extract and use ``python setup.py install``.

Requirements
------------

* `NumPy <http://www.numpy.org/>`_

Optional Requirements
~~~~~~~~~~~~~~~~~~~~~

* `Nose <https://nose.readthedocs.org/en/latest/index.html>`_ for testing.
* `Sphinx <http://www.sphinx-doc.org/en/stable/>`_ to build documentation.
* `NREL SOLPOS <http://rredc.nrel.gov/solar/codesandalgorithms/solpos/>`_ for testing.
* `AlgoPy <https://pythonhosted.org/algopy/>`_ for testing.

Usage
-----

Example::

    from uncertainty_wrapper import unc_wrapper
    import numpy as np

    @unc_wrapper
    def f(x):
        return np.exp(x)

    x, cov = np.array([[1.0]]), np.array([[0.1]])
    f(x, __covariance__=cov)

Returns::

    (array([[ 2.71828183]]),      # exp(1.0)
     array([[[ 0.73890561]]]),    # (delta-f)^2 = (df/dx)^2 * (delta-x)^2
     array([[[ 2.71828183]]]))    # df/dx = exp(x)

History
-------

Releases are named after
`geological eons, periods and epochs <https://en.wikipedia.org/wiki/Geologic_time_scale>`_.

`v0.4.1 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.4.1>`_ `Paleozoic Era <https://en.wikipedia.org/wiki/Paleozoic>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Jagged arrays of covariance keys work now.
* Simplify.

`v0.4 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.4>`_ `Phanerozoic Era <https://en.wikipedia.org/wiki/Phanerozoic>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Fixes #5, ``ValueError`` if covariance keys have multiple observations.
* Fix covariance cross terms not scaled correctly.

`v0.3.3 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.3.3>`_ `Neoproterozoic Era <https://en.wikipedia.org/wiki/Neoproterozoic>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Fixes #4, ``ValueError`` if just one observation.

`v0.3.2 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.3.2>`_ `Mesoproterozoic Era <https://en.wikipedia.org/wiki/Mesoproterozoic>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Fixes #2, don't need to tile scalar x for multiple observations.
* Fixes #3, use sparse matrices for dot product instead of dense.
* Uses pvlib example instead of proprietary solar_utils.

`v0.3.1 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.3.1>`_ `Paleoproterozoic Era <https://en.wikipedia.org/wiki/Paleoproterozoic>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Fixes #1, works with Pint's ``@ureg.wraps()``.
* Use indices for positional arguments; don't use ``inspect.argspec`` since it
  is not guaranteed to be the same for wrapped or decorated functions.
* Test Jacobian estimate for IV with `AlgoPy <https://pythonhosted.org/algopy/>`_.
* Show Jacobian errors plot in getting started docs.

`v0.3 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.3>`_ `Proterozoic Eon <https://en.wikipedia.org/wiki/Proterozoic>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* New ``unc_wrapper_args()`` allows selection of the independent variables that
  the partial derivatives are with respect to, and also grouping those
  arguments together so that in the original function they can stay unpacked.
* Return values are grouped correctly so that they can remain unpacked in the
  original function. These allow Uncertainty Wrapper to be used with
  `Pint's wrapper <http://pint.readthedocs.org/en/latest/wrapping.html>`_.
* Covariance is now specified as a dimensionless fraction of the square of the
  arguments.
* More complex tests: IV curve and solar position (requires
  `NREL's solpos <http://rredc.nrel.gov/solar/codesandalgorithms/solpos/>`_).

`v0.2.1 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.2>`_ `Eoarchean Era <https://en.wikipedia.org/wiki/Eoarchean>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Update documentation.

`v0.2 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.2>`_ `Archean Eon <https://en.wikipedia.org/wiki/Archean>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Fix nargs and nf order mixup in Jacobian.
* Add more complex test.
* Fix tile cov by nobs.
* Move partial derivative to subfunction.
* Try threading, but same speed, and would only work with NumPy anyway.

`v0.1 <https://github.com/SunPower/UncertaintyWrapper/releases/tag/v0.1>`_ `Hadean Eon <https://en.wikipedia.org/wiki/Hadean>`_
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* Adds covariance to output.
* Allows ``__covariance__`` to be passed as input.
* Estimates the Jacobian using the central finite difference method.
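The numbers in the Usage example above can be checked by hand, since for a
single input the propagated variance is just the squared sensitivity times the
input variance. The snippet below is not part of the package; it only verifies
that arithmetic::

    import numpy as np

    x, var_x = 1.0, 0.1
    dfdx = np.exp(x)            # Jacobian of f(x) = exp(x), 2.71828183...
    print(dfdx ** 2 * var_x)    # 0.73890561..., matching the covariance above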
PypiClean
/wasp-general-0.0.3.3.tar.gz/wasp-general-0.0.3.3/wasp_general/network/clients/base.py
# TODO: document the code # TODO: write tests for the code # noinspection PyUnresolvedReferences from wasp_general.version import __author__, __version__, __credits__, __license__, __copyright__, __email__ # noinspection PyUnresolvedReferences from wasp_general.version import __status__ from enum import Enum from abc import abstractmethod, abstractclassmethod from wasp_general.network.clients.proto import WNetworkClientCapabilityProto, WNetworkClientProto from wasp_general.verify import verify_type from wasp_general.uri import WURI class WCommonNetworkClientCapability(Enum): current_dir = 'current_dir' change_dir = 'change_dir' list_dir = 'list_dir' make_dir = 'make_dir' upload_file = 'upload_file' remove_file = 'remove_file' # noinspection PyAbstractClass class WBasicNetworkClientCapability(WNetworkClientCapabilityProto): def __init__(self, network_agent): WNetworkClientCapabilityProto.__init__(self) self.__network_agent = network_agent def network_agent(self): return self.__network_agent def capability_id(self): return self.common_capability().value @abstractclassmethod def common_capability(cls): raise NotImplementedError('This method is abstract') @classmethod def create_capability(cls, network_agent): return cls(network_agent) # noinspection PyAbstractClass class WBasicNetworkClientProto(WNetworkClientProto): def __init__(self, uri): self.__uri = uri self.__capabilities = \ {x.common_capability(): x.create_capability(self) for x in self.agent_capabilities()} self.__is_closed = False def uri(self): return self.__uri def capabilities(self): return self.__capabilities.values() def request(self, capability_id, *args, **kwargs): if self.is_closed() is True: raise RuntimeError('Operation requested on this closed client') if self.is_capable(capability_id) is True: common_capability = WCommonNetworkClientCapability(capability_id) for capability in self.capabilities(): if capability.common_capability() == common_capability: return capability.request(*args, **kwargs) raise RuntimeError('Unable to execute unsupported capability: %s' % str(capability_id)) def is_closed(self): return self.__is_closed def close(self): if self.__is_closed is False: self._close() self.__is_closed = True @classmethod def is_capable(cls, capability_id): common_capability = WCommonNetworkClientCapability(capability_id) for agent_cap in cls.agent_capabilities(): if agent_cap.common_capability() == common_capability: return True return False @classmethod @verify_type(uri=WURI) def create_handler(cls, uri): return cls(uri) @abstractmethod def _close(self): raise NotImplementedError('This method is abstract') @abstractclassmethod def agent_capabilities(cls): raise NotImplementedError('This method is abstract') # noinspection PyAbstractClass class WBasicNetworkClientChangeDirCapability(WBasicNetworkClientCapability): @classmethod def common_capability(cls): return WCommonNetworkClientCapability.change_dir # noinspection PyAbstractClass class WBasicNetworkClientListDirCapability(WBasicNetworkClientCapability): @classmethod def common_capability(cls): return WCommonNetworkClientCapability.list_dir # noinspection PyAbstractClass class WBasicNetworkClientMakeDirCapability(WBasicNetworkClientCapability): @classmethod def common_capability(cls): return WCommonNetworkClientCapability.make_dir # noinspection PyAbstractClass class WBasicNetworkClientCurrentDirCapability(WBasicNetworkClientCapability): @classmethod def common_capability(cls): return WCommonNetworkClientCapability.current_dir # noinspection PyAbstractClass class 
WBasicNetworkClientUploadFileCapability(WBasicNetworkClientCapability): @classmethod def common_capability(cls): return WCommonNetworkClientCapability.upload_file # noinspection PyAbstractClass class WBasicNetworkClientRemoveFileCapability(WBasicNetworkClientCapability): @classmethod def common_capability(cls): return WCommonNetworkClientCapability.remove_file
PypiClean
/PyCIM-15.15.0.tar.gz/PyCIM-15.15.0/CIM15/CDPSM/Connectivity/IEC61970/Core/VoltageLevel.py
from CIM15.CDPSM.Connectivity.IEC61970.Core.EquipmentContainer import EquipmentContainer class VoltageLevel(EquipmentContainer): """A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these. """ def __init__(self, lowVoltageLimit=0.0, highVoltageLimit=0.0, Bays=None, BaseVoltage=None, Substation=None, *args, **kw_args): """Initialises a new 'VoltageLevel' instance. @param lowVoltageLimit: The bus bar's low voltage limit @param highVoltageLimit: The bus bar's high voltage limit @param Bays: The association is used in the naming hierarchy. @param BaseVoltage: The base voltage used for all equipment within the VoltageLevel. @param Substation: The association is used in the naming hierarchy. """ #: The bus bar's low voltage limit self.lowVoltageLimit = lowVoltageLimit #: The bus bar's high voltage limit self.highVoltageLimit = highVoltageLimit self._Bays = [] self.Bays = [] if Bays is None else Bays self._BaseVoltage = None self.BaseVoltage = BaseVoltage self._Substation = None self.Substation = Substation super(VoltageLevel, self).__init__(*args, **kw_args) _attrs = ["lowVoltageLimit", "highVoltageLimit"] _attr_types = {"lowVoltageLimit": float, "highVoltageLimit": float} _defaults = {"lowVoltageLimit": 0.0, "highVoltageLimit": 0.0} _enums = {} _refs = ["Bays", "BaseVoltage", "Substation"] _many_refs = ["Bays"] def getBays(self): """The association is used in the naming hierarchy. """ return self._Bays def setBays(self, value): for x in self._Bays: x.VoltageLevel = None for y in value: y._VoltageLevel = self self._Bays = value Bays = property(getBays, setBays) def addBays(self, *Bays): for obj in Bays: obj.VoltageLevel = self def removeBays(self, *Bays): for obj in Bays: obj.VoltageLevel = None def getBaseVoltage(self): """The base voltage used for all equipment within the VoltageLevel. """ return self._BaseVoltage def setBaseVoltage(self, value): if self._BaseVoltage is not None: filtered = [x for x in self.BaseVoltage.VoltageLevel if x != self] self._BaseVoltage._VoltageLevel = filtered self._BaseVoltage = value if self._BaseVoltage is not None: if self not in self._BaseVoltage._VoltageLevel: self._BaseVoltage._VoltageLevel.append(self) BaseVoltage = property(getBaseVoltage, setBaseVoltage) def getSubstation(self): """The association is used in the naming hierarchy. """ return self._Substation def setSubstation(self, value): if self._Substation is not None: filtered = [x for x in self.Substation.VoltageLevels if x != self] self._Substation._VoltageLevels = filtered self._Substation = value if self._Substation is not None: if self not in self._Substation._VoltageLevels: self._Substation._VoltageLevels.append(self) Substation = property(getSubstation, setSubstation)
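# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): construct a
# VoltageLevel with its two plain attributes and inspect the CIM metadata
# lists defined above. Wiring up Bays, BaseVoltage and Substation objects is
# omitted here, since those classes live in sibling modules.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    vl = VoltageLevel(lowVoltageLimit=10.2e3, highVoltageLimit=11.8e3)
    print(vl.lowVoltageLimit, vl.highVoltageLimit)       # 10200.0 11800.0
    print(VoltageLevel._refs, VoltageLevel._many_refs)   # reference attributes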
PypiClean
/neural_search-0.6.3.tar.gz/neural_search-0.6.3/jina/parser.py
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved." __license__ = "Apache-2.0" import argparse def add_arg_group(parser, title): return parser.add_argument_group(title) def set_base_parser(): from . import __version__ from .helper import colored, get_full_version, format_full_version_info # create the top-level parser urls = { 'Jina 101': ('🐣', 'https://101.jina.ai'), 'Docs': ('📚', 'https://docs.jina.ai'), 'Examples': ('🚀‍', 'https://learn.jina.ai'), 'Dashboard': ('📊', 'https://dashboard.jina.ai'), 'Code': ('🧑‍💻', 'https://opensource.jina.ai'), 'Hiring!': ('🙌', '[email protected]') } url_str = '\n'.join(f'{v[0]} {k:10.10}\t{colored(v[1], "cyan", attrs=["underline"])}' for k, v in urls.items()) parser = argparse.ArgumentParser( epilog=f'Jina (v{colored(__version__, "green")}) is the cloud-native neural search solution ' 'powered by AI and deep learning technology.\n' 'It provides a universal solution for large-scale index and query ' 'of media contents.\n' f'{url_str}', formatter_class=_chf, description='Jina Command Line Interface' ) parser.add_argument('-v', '--version', action='version', version=__version__, help='show Jina version') parser.add_argument('-vf', '--version-full', action='version', version=format_full_version_info(*get_full_version()), help='show Jina and all dependencies versions') return parser def set_logger_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument('--groupby-regex', type=str, default=r'(.*@\d+)\[', help='the regular expression for grouping logs') parser.add_argument('--refresh-time', type=int, default=5, help='refresh time interval in seconds, set to -1 to persist all grouped logs') return parser def set_hub_base_parser(parser=None): import os if not parser: parser = set_base_parser() parser.add_argument('--username', type=str, help='the registry username', default=os.environ.get('JINAHUB_USERNAME', '')) # _gp = parser.add_mutually_exclusive_group() # _gp.add_argument('--password-stdin', type=argparse.FileType('r'), # default=(sys.stdin if sys.stdin.isatty() else None), # help='take the password from stdin') parser.add_argument('--password', type=str, help='the plaintext password', default=os.environ.get('JINAHUB_PASSWORD', '')) parser.add_argument('--registry', type=str, default='https://index.docker.io/v1/', help='the URL to the registry, e.g. https://index.docker.io/v1/') return parser def set_hub_new_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument('--output-dir', type=str, default='.', help='where to output the generated project dir into.') parser.add_argument('--template', type=str, default='https://github.com/jina-ai/cookiecutter-jina-hub.git', help='cookiecutter template directory containing a project template directory, or a URL to a git repository. Only used when "--type template"') parser.add_argument('--type', type=str, default='pod', choices=['pod', 'app', 'template'], help='create a template for executor hub pod or app using cookiecutter.') parser.add_argument('--overwrite', action='store_true', default=False, help='overwrite the contents of output directory if it exists') return parser def set_hub_build_parser(parser=None): if not parser: parser = set_base_parser() set_hub_base_parser(parser) parser.add_argument('path', type=str, help='path to the directory containing ' 'Dockerfile, manifest.yml, README.md ' 'zero or more yaml config, ' 'zero or more Python file. 
' 'All files in this directory will be shipped into a Docker image') parser.add_argument('--pull', action='store_true', default=False, help='downloads any updates to the FROM image in Dockerfiles') parser.add_argument('--push', action='store_true', default=False, help='push the built image to the registry') parser.add_argument('--dry-run', action='store_true', default=False, help='only check path and validility, no real building') parser.add_argument('--prune-images', action='store_true', default=False, help='prune unused images after building, this often saves disk space') parser.add_argument('--raise-error', action='store_true', default=False, help='raise any error and exit with code 1') parser.add_argument('--test-uses', action='store_true', default=False, help='after the build, test the image in "uses" with Flow API') parser.add_argument('--host-info', action='store_true', default=False, help='store the host information during bookkeeping') parser.add_argument('--daemon', action='store_true', default=False, help='run the test Pea/Pod as a daemon process, see "jina pea --help" for details') return parser def set_hub_pushpull_parser(parser=None): if not parser: parser = set_base_parser() set_hub_base_parser(parser) parser.add_argument('name', type=str, help='the name of the image.') return parser def set_hw_parser(parser=None): if not parser: parser = set_base_parser() from .helper import get_random_identity from pkg_resources import resource_filename gp = add_arg_group(parser, 'general arguments') gp.add_argument('--workdir', type=str, default=get_random_identity(), help='the workdir for hello-world demo, ' 'all data, indices, shards and outputs will be saved there') gp.add_argument('--logserver', action='store_true', default=False, help='start a log server for the dashboard') gp.add_argument('--logserver-config', type=str, default=resource_filename('jina', '/'.join(('resources', 'logserver.default.yml'))), help='the yaml config of the log server') gp.add_argument('--download-proxy', type=str, help='specify the proxy when downloading sample data') gp = add_arg_group(parser, 'scalability arguments') gp.add_argument('--shards', type=int, default=2, help='number of shards when index and query') gp.add_argument('--parallel', type=int, default=2, help='number of parallel when index and query') gp = add_arg_group(parser, 'index arguments') gp.add_argument('--index-uses', type=str, default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.index.yml'))), help='the yaml path of the index flow') gp.add_argument('--index-data-url', type=str, default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz', help='the url of index data (should be in idx3-ubyte.gz format)') gp.add_argument('--index-batch-size', type=int, default=1024, help='the batch size in indexing') gp = add_arg_group(parser, 'query arguments') gp.add_argument('--query-uses', type=str, default=resource_filename('jina', '/'.join(('resources', 'helloworld.flow.query.yml'))), help='the yaml path of the query flow') gp.add_argument('--query-data-url', type=str, default='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz', help='the url of query data (should be in idx3-ubyte.gz format)') gp.add_argument('--query-batch-size', type=int, default=32, help='the batch size in searching') gp.add_argument('--num-query', type=int, default=128, help='number of queries to visualize') gp.add_argument('--top-k', type=int, default=50, help='top-k results to retrieve and 
visualize') return parser def set_flow_parser(parser=None): if not parser: parser = set_base_parser() from .enums import FlowOutputType, FlowOptimizeLevel gp = add_arg_group(parser, 'flow arguments') gp.add_argument('--uses', type=str, help='a yaml file represents a flow') from pkg_resources import resource_filename gp.add_argument('--logserver', action='store_true', default=False, help='start a log server for the dashboard') gp.add_argument('--logserver-config', type=str, default=resource_filename('jina', '/'.join(('resources', 'logserver.default.yml'))), help='the yaml config of the log server') gp.add_argument('--optimize-level', type=FlowOptimizeLevel.from_string, default=FlowOptimizeLevel.NONE, help='removing redundant routers from the flow. Note, this may change the gateway zmq socket to BIND \ and hence not allow multiple clients connected to the gateway at the same time.') gp.add_argument('--output-type', type=FlowOutputType.from_string, choices=list(FlowOutputType), default=FlowOutputType.SHELL_PROC, help='type of the output') gp.add_argument('--output-path', type=argparse.FileType('w', encoding='utf8'), help='output path of the flow') return parser def set_pea_parser(parser=None): from .enums import SocketType, PeaRoleType, OnErrorSkip from .helper import random_port, get_random_identity from . import __default_host__ import os show_all = 'JINA_FULL_CLI' in os.environ if not parser: parser = set_base_parser() gp0 = add_arg_group(parser, 'pea basic arguments') gp0.add_argument('--name', type=str, help='the name of this pea, used to identify the pod and its logs.') gp0.add_argument('--identity', type=str, default=get_random_identity(), help='the identity of the sockets, default a random string') gp0.add_argument('--uses', type=str, default='_pass', help='the config of the executor, it could be ' '> a YAML file path, ' '> a supported executor\'s class name, ' '> one of "_clear", "_route", "_pass", "_logforward", "_merge" ' '> the content of YAML config (must starts with "!")' '> a docker image') # pod(no use) -> pea gp0.add_argument('--py-modules', type=str, nargs='*', help='the customized python modules need to be imported before loading the' ' executor') gp1 = add_arg_group(parser, 'pea container arguments') gp1.add_argument('--uses-internal', type=str, default='BaseExecutor', help='The executor config that is passed to the docker image if a docker image is used in uses. ' 'It cannot be another docker image ') gp1.add_argument('--entrypoint', type=str, help='the entrypoint command overrides the ENTRYPOINT in docker image. ' 'when not set then the docker image ENTRYPOINT takes effective.') gp1.add_argument('--pull-latest', action='store_true', default=False, help='pull the latest image before running') gp1.add_argument('--volumes', type=str, nargs='*', help='the path on the host to be mounted inside the container. ' 'they will be mounted to the root path, i.e. /user/test/my-workspace will be mounted to ' '/my-workspace inside the container. 
all volumes are mounted with read-write mode.') gp2 = add_arg_group(parser, 'pea network arguments') gp2.add_argument('--port-in', type=int, default=random_port(), help='port for input data, default a random port between [49152, 65535]') gp2.add_argument('--port-out', type=int, default=random_port(), help='port for output data, default a random port between [49152, 65535]') gp2.add_argument('--host-in', type=str, default=__default_host__, help=f'host address for input, by default it is {__default_host__}') gp2.add_argument('--host-out', type=str, default=__default_host__, help=f'host address for output, by default it is {__default_host__}') gp2.add_argument('--socket-in', type=SocketType.from_string, choices=list(SocketType), default=SocketType.PULL_BIND, help='socket type for input port') gp2.add_argument('--socket-out', type=SocketType.from_string, choices=list(SocketType), default=SocketType.PUSH_BIND, help='socket type for output port') gp2.add_argument('--port-ctrl', type=int, default=os.environ.get('JINA_CONTROL_PORT', random_port()), help='port for controlling the pod, default a random port between [49152, 65535]') gp2.add_argument('--ctrl-with-ipc', action='store_true', default=False, help='use ipc protocol for control socket') gp2.add_argument('--timeout', type=int, default=-1, help='timeout (ms) of all requests, -1 for waiting forever') gp2.add_argument('--timeout-ctrl', type=int, default=5000, help='timeout (ms) of the control request, -1 for waiting forever') gp2.add_argument('--timeout-ready', type=int, default=10000, help='timeout (ms) of a pea is ready for request, -1 for waiting forever') gp3 = add_arg_group(parser, 'pea IO arguments') gp3.add_argument('--dump-interval', type=int, default=240, help='serialize the model in the pod every n seconds if model changes. ' '-1 means --read-only. ') gp3.add_argument('--exit-no-dump', action='store_true', default=False, help='do not serialize the model when the pod exits') gp3.add_argument('--read-only', action='store_true', default=False, help='do not allow the pod to modify the model, ' 'dump_interval will be ignored') gp3.add_argument('--separated-workspace', action='store_true', default=False, help='the data and config files are separated for each pea in this pod, ' 'only effective when BasePod\'s `parallel` > 1') gp3.add_argument('--replica-id', type=int, default=-1, help='the id of the storage of this replica, only effective when `separated_workspace=True`') gp5 = add_arg_group(parser, 'pea messaging arguments') gp5.add_argument('--check-version', action='store_true', default=False, help='comparing the jina and proto version of incoming message with local setup, ' 'mismatch raise an exception') gp5.add_argument('--compress-hwm', type=int, default=-1, help='the high watermark that triggers the message compression. ' 'message bigger than this HWM (in bytes) will be compressed by lz4 algorithm.' 'set this to -1 to disable this feature.') gp5.add_argument('--compress-lwm', type=float, default=1., help='the low watermark that enables the sending of a compressed message. ' 'compression rate (after_size/before_size) lower than this LWM will be considered as successeful ' 'compression, and will be sent. 
Otherwise, it will send the original message without compression') gp5.add_argument('--num-part', type=int, default=0, **(dict( help='the number of replicated message sent to the next Pod, 0 and 1 means single part' if show_all else argparse.SUPPRESS))) gp5.add_argument('--role', type=PeaRoleType.from_string, choices=list(PeaRoleType), help='the role of this pea in a pod') gp5.add_argument('--skip-on-error', type=OnErrorSkip.from_string, choices=list(OnErrorSkip), default=OnErrorSkip.NONE, help='skip strategy on error message. ') gp6 = add_arg_group(parser, 'pea EXPERIMENTAL arguments') gp6.add_argument('--memory-hwm', type=int, default=-1, help='memory high watermark of this pod in Gigabytes, pod will restart when this is reached. ' '-1 means no restriction') gp6.add_argument('--runtime', type=str, choices=['thread', 'process'], default='process', help='the parallel runtime of the pod') gp6.add_argument('--max-idle-time', type=int, default=60, help='label this pea as inactive when it does not ' 'process any request after certain time (in second)') gp6.add_argument('--daemon', action='store_true', default=False, help='when a process exits, it attempts to terminate all of its daemonic child processes. ' 'setting it to true basically tell the context manager do not wait on this Pea') gp7 = add_arg_group(parser, 'logging arguments') gp7.add_argument('--log-sse', action='store_true', default=False, help='turn on server-side event logging') gp7.add_argument('--log-remote', action='store_true', default=False, help='turn on remote logging') gp7.add_argument('--log-profile', action='store_true', default=False, help='turn on the profiling logger') gp7.add_argument('--log-with-own-name', action='store_true', default=False, help='turn on to let each logger outputs in its own name (i.e. parent class name as the context), ' 'by default it is off so all logs from the same pod will have the same prefix. 
' 'turn on to help debugging, turn off to have more clear logs and better grouping in dashboard') _set_grpc_parser(parser) return parser def set_pod_parser(parser=None): from .enums import PollingType, SchedulerType if not parser: parser = set_base_parser() set_pea_parser(parser) gp4 = add_arg_group(parser, 'pod replica arguments') gp4.add_argument('--parallel', '--shards', type=int, default=1, help='number of parallel peas in the pod running at the same time, ' '`port_in` and `port_out` will be set to random, ' 'and routers will be added automatically when necessary') gp4.add_argument('--polling', type=PollingType.from_string, choices=list(PollingType), default=PollingType.ANY, help='ANY: only one (whoever is idle) replica polls the message; ' 'ALL: all workers poll the message (like a broadcast)') gp4.add_argument('--scheduling', type=SchedulerType.from_string, choices=list(SchedulerType), default=SchedulerType.LOAD_BALANCE, help='the strategy of scheduling workload among peas') gp4.add_argument('--uses-before', type=str, help='the executor used before sending to all parallels, ' 'accepted type follows "--uses"') gp4.add_argument('--uses-after', type=str, help='the executor used after receiving from all parallels, ' 'accepted type follows "--uses"') gp4.add_argument('--shutdown-idle', action='store_true', default=False, help='shutdown this pod when all peas are idle') # disable the pod level logserver for now # gp5 = add_arg_group(parser, 'pod log-server arguments') # # from pkg_resources import resource_filename # gp5.add_argument('--logserver', action='store_true', default=False, # help='start a log server for the dashboard') # gp5.add_argument('--logserver-config', type=str, # default=resource_filename('jina', # '/'.join(('resources', 'logserver.default.yml'))), # help='the yaml config of the log server') return parser def set_ping_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument('host', type=str, help='host address of the target pod/pea, e.g. 0.0.0.0') parser.add_argument('port', type=int, help='the control port of the target pod/pea') parser.add_argument('--timeout', type=int, default=3000, help='timeout (ms) of one check, -1 for waiting forever') parser.add_argument('--retries', type=int, default=3, help='max number of tried health checks before exit 1') parser.add_argument('--print-response', action='store_true', default=False, help='print the response when received') return parser def set_check_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument('--summary-exec', type=str, help='the markdown file path for all executors summary') parser.add_argument('--summary-driver', type=str, help='the markdown file path for all drivers summary') return parser def set_export_api_parser(parser=None): if not parser: parser = set_base_parser() parser.add_argument('--yaml-path', type=str, nargs='*', help='the YAML file path for storing the exported API') parser.add_argument('--json-path', type=str, nargs='*', help='the JSON file path for storing the exported API') return parser def _set_grpc_parser(parser=None): if not parser: parser = set_base_parser() from .helper import random_port from . 
import __default_host__ gp1 = add_arg_group(parser, 'grpc and remote arguments') gp1.add_argument('--host', type=str, default=__default_host__, help=f'host address of the pea/gateway, by default it is {__default_host__}.') gp1.add_argument('--port-expose', '--port-grpc', type=int, default=random_port(), help='host port of the gateway, "port-grpc" alias will be removed in future versions') gp1.add_argument('--max-message-size', type=int, default=-1, help='maximum send and receive size for grpc server in bytes, -1 means unlimited') gp1.add_argument('--proxy', action='store_true', default=False, help='respect the http_proxy and https_proxy environment variables. ' 'otherwise, it will unset these proxy variables before start. ' 'gRPC seems to prefer --no-proxy') return parser # def set_grpc_service_parser(parser=None): # if not parser: # parser = set_base_parser() # set_pod_parser(parser) # _set_grpc_parser(parser) # # parser.add_argument('--pb2-path', # type=str, # required=True, # help='the path of the python file protocol buffer compiler') # parser.add_argument('--pb2-grpc-path', # type=str, # required=True, # help='the path of the python file generated by the gRPC Python protocol compiler plugin') # parser.add_argument('--stub-name', # type=str, # required=True, # help='the name of the gRPC Stub') # parser.add_argument('--api-name', # type=str, # required=True, # help='the api name for calling the stub') # return parser def set_gateway_parser(parser=None): from .enums import SocketType if not parser: parser = set_base_parser() set_pea_parser(parser) gp1 = add_arg_group(parser, 'gateway arguments') gp1.set_defaults(name='gateway', socket_in=SocketType.PULL_CONNECT, # otherwise there can be only one client at a time socket_out=SocketType.PUSH_CONNECT, ctrl_with_ipc=True, # otherwise ctrl port would be conflicted read_only=True) gp1.add_argument('--prefetch', type=int, default=50, help='the number of pre-fetched requests from the client') gp1.add_argument('--prefetch-on-recv', type=int, default=1, help='the number of additional requests to fetch on every receive') gp1.add_argument('--allow-spawn', action='store_true', default=False, help='accept the spawn requests sent from other remote Jina') gp1.add_argument('--rest-api', action='store_true', default=False, help='use REST-API as the interface instead of gRPC with port number ' 'set to the value of "port-expose"') # gp1.add_argument('--to-datauri', action='store_true', default=False, # help='always represent the result document with data URI, instead of using buffer/blob/text') return parser def set_client_cli_parser(parser=None): if not parser: parser = set_base_parser() from .enums import ClientMode _set_grpc_parser(parser) gp1 = add_arg_group(parser, 'client-specific arguments') gp1.add_argument('--batch-size', type=int, default=100, help='the number of documents in each request') gp1.add_argument('--mode', choices=list(ClientMode), type=ClientMode.from_string, # required=True, help='the mode of the client and the server') gp1.add_argument('--top-k', type=int, default=10, help='top_k results returned in the search mode') gp1.add_argument('--mime-type', type=str, help='MIME type of the input, useful when input-type is set to BUFFER') gp1.add_argument('--callback-on-body', action='store_true', default=False, help='callback function works directly on the request body') gp1.add_argument('--first-request-id', type=int, default=0, help='the starting number of request id, the consequent request_id will increment by one') 
gp1.add_argument('--timeout-ready', type=int, default=10000, help='timeout (ms) of a pea is ready for request, -1 for waiting forever') gp1.add_argument('--filter-by', type=str, nargs='*', help='field names to search on') gp1.add_argument('--skip-dry-run', action='store_true', default=False, help='skip dry run (connectivity test) before sending every request') return parser def get_main_parser(): # create the top-level parser parser = set_base_parser() import os show_all = 'JINA_FULL_CLI' in os.environ sp = parser.add_subparsers(dest='cli', description='use "%(prog)-8s [sub-command] --help" ' 'to get detailed information about each sub-command', required=True) set_hw_parser(sp.add_parser('hello-world', help='👋 Hello World! Hello Jina!', description='Start the hello-world demo, a simple end2end image index and search demo ' 'without any extra dependencies.', formatter_class=_chf)) # cli set_pod_parser(sp.add_parser('pod', help='start a pod', description='Start a Jina pod', formatter_class=_chf)) set_flow_parser(sp.add_parser('flow', description='Start a Jina flow that consists of multiple pods', help='start a flow from a YAML file', formatter_class=_chf)) set_gateway_parser(sp.add_parser('gateway', description='Start a Jina gateway that receives client remote requests via gRPC', help='start a gateway', formatter_class=_chf)) set_ping_parser( sp.add_parser('ping', help='ping a pod and check the network connectivity', description='Ping a remote pod and check the network connectivity', formatter_class=_chf)) set_check_parser( sp.add_parser('check', help='check the import status all executors and drivers', description='Check the import status all executors and drivers', formatter_class=_chf)) pp = sp.add_parser('hub', help='build, push, pull Jina Hub images', description='Build, push, pull Jina Hub images', formatter_class=_chf) spp = pp.add_subparsers(dest='hub', description='use "%(prog)-8s [sub-command] --help" ' 'to get detailed information about each sub-command', required=True) set_hub_new_parser( spp.add_parser('new', aliases=['init', 'create'], help='create a new Hub executor or app using cookiecutter', description='Create a new Hub executor or app using cookiecutter', formatter_class=_chf)) set_hub_build_parser( spp.add_parser('build', help='build a directory into Jina hub image', description='Build a directory into Jina hub image', formatter_class=_chf)) set_hub_pushpull_parser( spp.add_parser('push', help='push an image to the Jina hub registry', description='Push an image to the Jina hub registry', formatter_class=_chf)) set_hub_pushpull_parser( spp.add_parser('pull', help='pull an image from the Jina hub registry to local', description='Pull an image to the Jina hub registry to local', formatter_class=_chf)) set_pea_parser(sp.add_parser('pea', description='Start a Jina pea. ' 'You should rarely use this directly unless you ' 'are doing low-level orchestration', formatter_class=_chf, **(dict(help='start a pea')) if show_all else {})) set_logger_parser(sp.add_parser('log', description='Receive piped log output and beautify the log. 
' 'Depreciated, use Jina Dashboard instead', formatter_class=_chf, **(dict(help='beautify the log')) if show_all else {})) set_client_cli_parser( sp.add_parser('client', description='Start a Python client that connects to a remote Jina gateway', formatter_class=_chf, **(dict(help='start a client')) if show_all else {})) set_export_api_parser(sp.add_parser('export-api', description='Export Jina API to JSON/YAML file for 3rd party applications', formatter_class=_chf, **(dict(help='export Jina API to file')) if show_all else {})) return parser class _ColoredHelpFormatter(argparse.ArgumentDefaultsHelpFormatter): class _Section(object): def __init__(self, formatter, parent, heading=None): self.formatter = formatter self.parent = parent self.heading = heading self.items = [] def format_help(self): # format the indented section if self.parent is not None: self.formatter._indent() join = self.formatter._join_parts item_help = join([func(*args) for func, args in self.items]) if self.parent is not None: self.formatter._dedent() # return nothing if the section was empty if not item_help: return '' # add the heading if the section was non-empty if self.heading is not argparse.SUPPRESS and self.heading is not None: from .helper import colored current_indent = self.formatter._current_indent captial_heading = ' '.join(v[0].upper() + v[1:] for v in self.heading.split(' ')) heading = '⚙️ %*s%s\n' % ( current_indent, '', colored(captial_heading, 'cyan', attrs=['underline', 'bold', 'reverse'])) else: heading = '' # join the section-initial newline, the heading and the help return join(['\n', heading, item_help, '\n']) def start_section(self, heading): self._indent() section = self._Section(self, self._current_section, heading) self._add_item(section.format_help, []) self._current_section = section def _get_help_string(self, action): help = action.help if '%(default)' not in action.help: if action.default is not argparse.SUPPRESS: from .helper import colored defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE] if isinstance(action, argparse._StoreTrueAction): help += colored(' (default: %s)' % ( 'enabled' if action.default else 'disabled, use "--%s" to enable it' % action.dest), attrs=['dark']) elif action.choices: choices_str = '{%s}' % ', '.join([str(c) for c in action.choices]) help += colored(' (choose from: ' + choices_str + '; default: %(default)s)', attrs=['dark']) elif action.option_strings or action.nargs in defaulting_nargs: help += colored(' (type: %(type)s; default: %(default)s)', attrs=['dark']) return help def _get_default_metavar_for_optional(self, action): return '' # def _get_default_metavar_for_positional(self, action): # return '' def _expand_help(self, action): params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is argparse.SUPPRESS: del params[name] for name in list(params): if hasattr(params[name], '__name__'): params[name] = params[name].__name__ return self._get_help_string(action) % params def _metavar_formatter(self, action, default_metavar): if action.metavar is not None: result = action.metavar elif action.choices is not None: if len(action.choices) > 4: choice_strs = ', '.join([str(c) for c in action.choices][:4]) result = '{%s ... 
%d more choices}' % (choice_strs, len(action.choices) - 4) else: choice_strs = ', '.join([str(c) for c in action.choices]) result = '{%s}' % choice_strs else: result = default_metavar def format(tuple_size): if isinstance(result, tuple): return result else: return (result,) * tuple_size return format def _fill_text(self, text, width, indent): return ''.join(indent + line for line in text.splitlines(keepends=True)) _chf = _ColoredHelpFormatter
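# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of driving the assembled CLI parser programmatically.
# It assumes every option pulled in by the 'client' sub-command has a
# default, which holds for set_client_cli_parser() and _set_grpc_parser()
# defined above.
if __name__ == '__main__':
    parser = get_main_parser()
    args = parser.parse_args(['client', '--top-k', '5', '--batch-size', '32'])
    print(args.cli)         # 'client' -- the sub-command name, stored under dest='cli'
    print(args.top_k)       # 5
    print(args.batch_size)  # 32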
PypiClean
/yuan-tool-2.57.tar.gz/yuan-tool-2.57/yuantool/database/mysql_obj.py
import pymysql from dbutils.pooled_db import PooledDB, SharedDBConnection import logging logger = logging.getLogger(__name__) class Mysql(object): """ 注意:insert,update,delete操作时需要commit """ def __init__(self, db, host, port, user, pwd): self.db = pymysql.connect(host=host, port=port, user=str(user), password=str(pwd)) self.cursor = self.db.cursor() self.current_db = db def __del__(self): self.cursor.close() self.db.close() def execute_sql(self, sql): try: self.db.ping(reconnect=True) self.db.select_db(self.current_db) # sql = sql.lower() self.cursor.execute(sql) method = sql.split()[0].lower() if method.startswith(('insert', 'update', 'delete')): self.db.commit() return True elif method.startswith('select'): return list(self.cursor.fetchall()) else: self.db.commit() logger.error('未知sql类型'.format(sql)) return False except Exception as e: logger.error('sql执行出错\nSQL: {} ,错误原因 {}'.format(sql, e), exc_info=True) return False def insert(self, tb, dt): """ :param tb: 目标插入表 :param dt: 字典型 :return: """ ls = [(k, dt[k]) for k in dt if dt[k] is not None] sql = 'insert %s (' % tb + ','.join(i[0] for i in ls) + \ ') values (' + ','.join('%r' % i[1] for i in ls) + ');' self.execute_sql(sql) def select(self, tb, *columns, **factor): """ :param tb: 目标插入表 :param columns:select内容,空为全部 :param factor: where内容 example: self.select('arm_entry_services', asset_ip='192.168.90.26') """ where = '' columns = '*' if columns == () or '' else ','.join(columns) if len(factor) > 0: where = 'where 1=1 ' for column in factor: if factor[column] == '': continue elif factor[column].startswith('like'): conditional = 'and {} {}'.format(column, factor[column]) else: conditional = 'and {}={}'.format(column, "%r" % factor[column]) if column == 'limit': where += f'{column} {factor[column]}' else: where += conditional + ' ' sql = f'select {columns} from {tb} {where}' return self.execute_sql(sql) def update(self, tb, target_dic, set_dic): ts = [(k, target_dic[k]) for k in target_dic if target_dic[k] is not None] ss = [(k, set_dic[k]) for k in set_dic if set_dic[k] is not None] sql = 'UPDATE {} SET {} WHERE {}'.format(tb, ','.join([i[0] + '=' + '%r' % i[1] for i in ss]), ' AND '.join([i[0] + '=' + '%r' % i[1] for i in ts])) self.execute_sql(sql) class MysqlPool(object): """ mysql连接池,用于处理多线程 """ def __init__(self, db, host, port, user, pwd, **kwargs): pool_config = { 'creator': pymysql, # 使用链接数据库的模块 'maxconnections': 6, # 连接池允许的最大连接数,0和None表示不限制连接数 'mincached': 2, # 初始化时,链接池中至少创建的空闲的链接,0表示不创建 'maxcached': 5, # 链接池中最多闲置的链接,0和None不限制 'maxshared': 3, # 链接池中最多共享的链接数量,0和None表示全部共享。PS: 无用,因为pymysql和MySQLdb等模块的 threadsafety都为1,所有值无论设置为多少,_maxcached永远为0,所以永远是所有链接都共享。 'blocking': True, # 连接池中如果没有可用连接后,是否阻塞等待。True,等待;False,不等待然后报错 'maxusage': None, # 一个链接最多被重复使用的次数,None表示无限制 'setsession': [], # 开始会话前执行的命令列表。如:["set datestyle to ...", "set time zone ..."] 'ping': 1, # ping MySQL服务端,检查是否服务可用。# 如:0 = None = never, 1 = default = whenever it is requested, 2 = when a cursor is created, 4 = when a query is executed, 7 = always 'host': host, 'port': port, 'user': user, 'password': pwd, 'database': db, 'charset': 'utf8' } pool_config.update(**kwargs) self.pool = PooledDB(**pool_config) def __new__(cls, *args, **kw): ''' 启用单例模式 :param args: :param kw: :return: ''' if not hasattr(cls, '_instance'): cls._instance = object.__new__(cls) return cls._instance def __del__(self): try: self.pool.close() except Exception: pass def execute_sql(self, sql, **args): try: sql = sql.lower() conn, cursor = self.connect() cursor.execute(sql, args) method = 
sql.split()[0].lower() if method.startswith(('insert', 'update', 'delete')): conn.commit() self.connect_close(conn, cursor) return True elif method.startswith('select'): record_list = cursor.fetchall() self.connect_close(conn, cursor) return record_list else: conn.commit() # logger.error('未知sql类型'.format(sql)) return True except Exception as e: logger.error('sql执行出错\nSQL: {} ,错误原因 {}'.format(sql, e), exc_info=True) return False def connect(self): ''' 启动连接 :return: ''' conn = self.pool.connection() cursor = conn.cursor(cursor=pymysql.cursors.DictCursor) return conn, cursor def connect_close(self, conn, cursor): ''' 关闭连接 :param conn: :param cursor: :return: ''' cursor.close() conn.close() def select_all(self, sql, args): ''' 批量查询 :param sql: :param args: :return: ''' conn, cursor = self.connect() cursor.execute(sql, args) record_list = cursor.fetchall() self.connect_close(conn, cursor) return record_list def select_one(self, sql, args): ''' 查询单条数据 :param sql: :param args: :return: ''' conn, cursor = self.connect() cursor.execute(sql, args) result = cursor.fetchone() self.connect_close(conn, cursor) return result def execute(self, sql, args): """ 执行insert/delete/update操作 :param sql: :param args: :return: """ conn, cursor = self.connect() row = cursor.execute(sql, args) conn.commit() self.connect_close(conn, cursor) return row class TaskMysql(MysqlPool, Mysql): def __init__(self, db, host, port, user, pwd, **kwargs): super().__init__(db=str(db), host=host, port=port, user=str(user), pwd=str(pwd), **kwargs)
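# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of how the two classes above might be used; the connection
# parameters and the table/column names are placeholders and assume a
# reachable MySQL server with a matching schema.
if __name__ == '__main__':
    # Simple single-connection client: builds and runs SQL strings itself.
    db = Mysql(db='demo', host='127.0.0.1', port=3306, user='root', pwd='secret')
    db.insert('users', {'name': 'alice', 'age': 30})
    print(db.select('users', 'name', 'age', name='alice'))

    # Pooled client for multi-threaded code: parameterised queries are passed
    # straight to pymysql, so values are escaped by the driver.
    pool = MysqlPool(db='demo', host='127.0.0.1', port=3306, user='root', pwd='secret')
    print(pool.select_all('SELECT name, age FROM users WHERE age > %s', (18,)))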
PypiClean
/slixmppfix-1.4.3.tar.gz/slixmppfix-1.4.3/slixmpp/util/misc_ops.py
import builtins
import sys
import hashlib


def unicode(text):
    if not isinstance(text, str):
        return text.decode('utf-8')
    else:
        return text


def bytes(text):
    """
    Convert Unicode text to UTF-8 encoded bytes.

    Since Python 2.6+ and Python 3+ have similar but incompatible
    signatures, this function unifies the two to keep code sane.

    :param text: Unicode text to convert to bytes
    :rtype: bytes (Python3), str (Python2.6+)
    """
    if text is None:
        return b''

    if isinstance(text, builtins.bytes):
        # We already have bytes, so do nothing
        return text
    if isinstance(text, list):
        # Convert a list of integers to bytes
        return builtins.bytes(text)
    else:
        # Convert UTF-8 text to bytes
        return builtins.bytes(text, encoding='utf-8')


def quote(text):
    """
    Enclose in quotes and escape internal slashes and double quotes.

    :param text: A Unicode or byte string.
    """
    text = bytes(text)
    return b'"' + text.replace(b'\\', b'\\\\').replace(b'"', b'\\"') + b'"'


def num_to_bytes(num):
    """
    Convert an integer into a four byte sequence.

    :param integer num: An integer to convert to its byte representation.
    """
    # Build the sequence directly from the four octets; going through chr()
    # and a UTF-8 encode would mangle values above 127.
    return builtins.bytes([
        0xFF & (num >> 24),
        0xFF & (num >> 16),
        0xFF & (num >> 8),
        0xFF & (num >> 0),
    ])


def bytes_to_num(bval):
    """
    Convert a four byte sequence to an integer.

    :param bytes bval: A four byte sequence to turn into an integer.
    """
    # Indexing a bytes object already yields integers in Python 3,
    # so no ord() call is needed.
    num = 0
    num += bval[0] << 24
    num += bval[1] << 16
    num += bval[2] << 8
    num += bval[3]
    return num


def XOR(x, y):
    """
    Return the results of an XOR operation on two equal length byte strings.

    :param bytes x: A byte string
    :param bytes y: A byte string
    :rtype: bytes
    """
    # This operation is faster with a list comprehension than with a
    # generator, as of 2016 on python 3.5.
    return builtins.bytes([a ^ b for a, b in zip(x, y)])


def hash(name):
    """
    Return a hash function implementing the given algorithm.

    :param name: The name of the hashing algorithm to use.
    :type name: string

    :rtype: function
    """
    name = name.lower()
    if name.startswith('sha-'):
        name = 'sha' + name[4:]
    if name in dir(hashlib):
        return getattr(hashlib, name)
    return None


def hashes():
    """
    Return a list of available hashing algorithms.

    :rtype: list of strings
    """
    t = []
    if 'md5' in dir(hashlib):
        t = ['MD5']
    if 'md2' in dir(hashlib):
        t += ['MD2']
    hashes = ['SHA-' + h[3:] for h in dir(hashlib) if h.startswith('sha')]
    return t + hashes


def setdefaultencoding(encoding):
    """
    Set the current default string encoding used by the Unicode
    implementation.

    Actually calls sys.setdefaultencoding under the hood - see the docs
    for that for more details. This method exists only as a way to find
    and call it even after it has been 'deleted' when the site module is
    executed.

    :param string encoding: An encoding name, compatible with
                            sys.setdefaultencoding
    """
    func = getattr(sys, 'setdefaultencoding', None)
    if func is None:
        import gc
        import types
        for obj in gc.get_objects():
            if (isinstance(obj, types.BuiltinFunctionType)
                    and obj.__name__ == 'setdefaultencoding'):
                func = obj
                break
        if func is None:
            raise RuntimeError("Could not find setdefaultencoding")
        sys.setdefaultencoding = func
    return func(encoding)
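# --- Illustrative checks (not part of the original module) ---
# A few quick sanity checks for the helpers above; they rely only on the
# standard library and on the functions defined in this file.
if __name__ == '__main__':
    assert quote(b'a"b') == b'"a\\"b"'
    assert XOR(b'\x0f\x0f', b'\xf0\x0f') == b'\xff\x00'
    assert bytes_to_num(num_to_bytes(0x01020304)) == 0x01020304
    sha256 = hash('SHA-256')
    print(sha256(b'abc').hexdigest())
    print(hashes())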
PypiClean
/ConceptNet-5.7.0.tar.gz/ConceptNet-5.7.0/conceptnet5/readers/wiktionary.py
import os import pathlib import sqlite3 from collections import Counter from conceptnet5.edges import make_edge from conceptnet5.formats.json_stream import read_json_stream from conceptnet5.formats.msgpack_stream import MsgpackStreamWriter from conceptnet5.languages import ALL_LANGUAGES, valid_language from conceptnet5.nodes import standardized_concept_uri from conceptnet5.uri import Licenses, uri_prefix PARSER_RULE = '/s/process/wikiparsec/2' def prepare_db(inputs, dbfile): """ Build a SQLite database that extracts some information from our parsed versions of Wiktionary. This is information that is needed by later reader steps, such as which words are known in which languages, and which words are forms of other words. """ # If the database already exists, delete it first try: os.unlink(dbfile) except FileNotFoundError: pass db = sqlite3.connect(dbfile) make_tables(db) try: for filename in inputs: filepath = pathlib.Path(filename) file_language = filepath.name.split('.')[0] for item in read_json_stream(filename): if 'rel' in item: tfrom = item['from'] tto = item['to'] # For all non-definition relations, record the fact that # the given entry name exists in the given language. We'll # use these to disambiguate definitions later. if item['rel'] != 'definition': if 'language' in tfrom and valid_language(tfrom['language']): add_title( db, file_language, tfrom['language'], tfrom['text'] ) if 'language' in tto and valid_language(tto['language']): add_title(db, file_language, tto['language'], tto['text']) # Record word forms so we can build a lemmatizer from them. if item['rel'].startswith('form/'): form_name = item['rel'][5:] # Look for the part of speech, first in the 'from' term, # then in the 'to' term. pos = tfrom.get('pos', tto.get('pos', '?')) # Use only Etymology 1 entries for learning word forms. 
if (tfrom.get('etym') or '1') == '1': language = tfrom.get('language', tto.get('language')) if ( valid_language(language) and tfrom['text'] != tto['text'] ): add_form( db, file_language, language, tfrom['text'], pos, tto['text'], form_name, ) db.commit() finally: db.close() def make_tables(db): db.execute( "CREATE TABLE titles " "(id integer primary key, site_language text, language text, " "title text)" ) db.execute( "CREATE UNIQUE INDEX titles_uniq ON titles " "(site_language, language, title)" ) db.execute("CREATE INDEX titles_search ON titles (language, title)") db.execute( "CREATE TABLE forms " "(id integer primary key, site_language text, language text, " "word text, pos text, root text, form text)" ) db.execute("CREATE INDEX forms_search ON forms (language, word)") def add_title(db, file_language, language, title): db.execute( "INSERT OR IGNORE INTO titles (site_language, language, title) " "VALUES (?, ?, ?)", (file_language, language, title.lower()), ) def add_form(db, file_language, language, word, pos, root, form): db.execute( "INSERT INTO forms (site_language, language, word, pos, root, form) " "VALUES (?, ?, ?, ?, ?, ?)", ( file_language, language, word.lower(), pos.lower(), root.lower(), form.lower(), ), ) WIKT_RELATIONS = { "link": ("/r/RelatedTo", False), "related": ("/r/RelatedTo", False), "synonym": ("/r/Synonym", False), "antonym": ("/r/Antonym", False), "distinct": ("/r/DistinctFrom", False), "hypernym": ("/r/IsA", False), "holonym": ("/r/PartOf", False), "troponym": ("/r/MannerOf", True), "context": ("/r/HasContext", False), "derived": ("/r/DerivedFrom", True), "derived/etym": ("/r/EtymologicallyDerivedFrom", True), "related/etym": ("/r/EtymologicallyRelatedTo", False), "form": ("/r/FormOf", False), "variant": ("/r/FormOf", True), "diminutive": ("/r/FormOf", True), "augmentative": ("/r/FormOf", True), "coordinate": ("/r/SimilarTo", False), "quasi-synonym": ("/r/SimilarTo", False), "translation": ("/r/Synonym", False), "definition": (None, False), } def transform_relation(rel): if rel.startswith('form/'): return "/r/FormOf", False else: return WIKT_RELATIONS[rel] def transform_term(data_language, termdata, assumed_languages, db, use_etyms=True): text = termdata['text'] # Sometimes - is used to fill a slot in a Wiktionary template where the # term would usually be. It typically means "don't show this part", with # the implication "the term in question is obvious from context". # # Context is hard, so let's just cope with a hyphen as the term by # discarding it. if text == '-': return None language = termdata.get('language') if language is None: language = disambiguate_language(text, assumed_languages, db) if not valid_language(language): return None # Remove unnecessary subtags from the Wiktionary language if '-' in language and language not in ALL_LANGUAGES: language = language.split('-')[0] if 'pos' not in termdata: return standardized_concept_uri(language, text) else: pos = termdata['pos'] etym_sense = None if use_etyms: etym_sense = etym_label(data_language, termdata) if etym_sense is not None: return standardized_concept_uri(language, text, pos, 'wikt', etym_sense) else: return standardized_concept_uri(language, text, pos) def etym_label(language, term): if 'etym' not in term or not term['etym']: return None return "{}_{}".format(language, term['etym']) def disambiguate_language(text, assumed_languages, db): """ Some Wiktionary links simply point to a term without specifying what language it's in. In that case, we have to guess. 
The possible languages are: - The language of the Wiktionary it's in - The language of the other term in the assertion We accept one of the possible languages if we have seen the term defined in that language in Wiktionary. Ideally, this leaves us with one possibility. But if we're left with 2 or 0, we default to the language of the other term. """ if len(assumed_languages) == 1: return assumed_languages[0] ok_languages = [] for language in assumed_languages: c = db.cursor() c.execute( 'SELECT * from titles where language=? and title=? limit 1', (language, text), ) if c.fetchone(): ok_languages.append(language) if len(ok_languages) == 0: return None else: return ok_languages[0] def segmented_stream(input_file): """ Read a JSON stream delimited by 'heading' entries, marking where the parser started parsing a new page. We distinguish these entries by the fact that they contain a 'title' key. Yield tuples of (heading, [items]), where [items] are the stream items that appear under the given heading. """ heading = None items = [] for item in read_json_stream(input_file): if 'title' in item: if heading is not None: yield heading, items heading = item items.clear() else: items.append(item) if heading is not None: yield heading, items def read_wiktionary(input_file, db_file, output_file): """ Convert a stream of parsed Wiktionary data into ConceptNet edges. A `db_file` containing all known words in all languages must have already been prepared from the same data. """ db = sqlite3.connect(db_file) out = MsgpackStreamWriter(output_file) for heading, items in segmented_stream(input_file): language = heading['language'] title = heading['title'] dataset = '/d/wiktionary/{}'.format(language) url_title = heading['title'].replace(' ', '_') web_url = 'http://{}.wiktionary.org/wiki/{}'.format(language, url_title) web_source = '/s/resource/wiktionary/{}'.format(language) source = {'contributor': web_source, 'process': PARSER_RULE} # Scan through the 'from' items, such as the start nodes of # translations, looking for distinct etymologies. If we get more than # one etymology for a language, we need to distinguish them as # different senses in that language. 
all_etyms = { (item['from']['language'], etym_label(language, item['from'])) for item in items if 'language' in item['from'] and item['from']['text'] == title and etym_label(language, item['from']) is not None } word_languages = {wlang for (wlang, _) in all_etyms} for wlang in sorted(word_languages): if valid_language(wlang): cpage = standardized_concept_uri(wlang, title) ld_edge = make_edge( '/r/ExternalURL', cpage, web_url, dataset=dataset, weight=0.25, sources=[source], license=Licenses.cc_sharealike, ) out.write(ld_edge) etym_to_translation_sense = {} language_etym_counts = Counter(lang for (lang, etym) in all_etyms) polysemous_languages = { lang for lang in language_etym_counts if language_etym_counts[lang] > 1 } for item in items: tfrom = item['from'] tto = item['to'] assumed_languages = [language] lang1 = tfrom.get('language') lang2 = tto.get('language') if lang1 and (lang1 not in assumed_languages) and valid_language(lang1): assumed_languages.append(lang1) if lang2 and (lang2 not in assumed_languages) and valid_language(lang2): assumed_languages.append(lang2) cfrom = transform_term( language, tfrom, assumed_languages, db, use_etyms=(lang1 in polysemous_languages), ) cpage = cfrom cto = transform_term( language, tto, assumed_languages, db, use_etyms=(lang2 in polysemous_languages), ) if cfrom is None or cto is None: continue if uri_prefix(cfrom, 3) == uri_prefix(cto, 3): continue rel, switch = transform_relation(item['rel']) if rel is None: continue if switch: cfrom, cto = cto, cfrom # When translations are separated by sense, use only the first # sense we see for each etymology. That will have the most # representative translations. if item['rel'] == 'translation': etym_key = (tfrom['language'], etym_label(language, tfrom)) sense = tfrom.get('sense', '') if etym_key in etym_to_translation_sense: if etym_to_translation_sense[etym_key] != sense: continue else: etym_to_translation_sense[etym_key] = sense weight = 1. if rel == '/r/EtymologicallyRelatedTo': weight = 0.25 edge = make_edge( rel, cfrom, cto, dataset=dataset, weight=weight, sources=[source], surfaceStart=tfrom['text'], surfaceEnd=tto['text'], license=Licenses.cc_sharealike, ) out.write(edge) out.close()
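# --- Illustrative checks (not part of the original module) ---
# The relation table and etymology labelling above are pure functions, so a
# couple of hedged examples can show what they produce.
if __name__ == '__main__':
    assert transform_relation('hypernym') == ('/r/IsA', False)
    assert transform_relation('derived') == ('/r/DerivedFrom', True)  # True means the edge is switched
    assert transform_relation('form/plural') == ('/r/FormOf', False)
    assert etym_label('en', {'etym': '2'}) == 'en_2'
    assert etym_label('en', {'etym': ''}) is None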
PypiClean
/graphql-from-struct-1.0.3.tar.gz/graphql-from-struct-1.0.3/README.rst
|License| |Release| |Docs| |Code Coverage| |Build Status Travis CI| |Blog| GraphQL-From-Struct =================== A simple one-method library makes a `GraphQL <https://graphql.org/>`__ query from Python data structures. Table of Contents ----------------- 1. `Installation`_ 2. `Usage`_ 3. `Exceptions`_ 4. `Parameters`_ 5. `Reserved keywords`_ 6. `Examples`_ - `Fields`_ - `Arguments`_ - `Default arguments`_ - `Aliases`_ - `Fragments`_ - `Using variables inside fragments`_ - `Operation name`_ - `Variables`_ - `Default variables`_ - `Directives`_ - `Mutations`_ - `Inline Fragments`_ - `Meta fields`_ Installation ------------- :: pip install graphql_from_struct Usage ------ :: # 1. Import GqlFromStruct class from graphql_from_struct import GqlFromStruct # 2. Make a query struct = {'hero':{'@fields':['name']}} # 3. Generate GraphQL gql = GqlFromStruct.from_struct(struct) # Or use OOP-style: foo = GqlFromStruct(struct) gql = foo.query() print (gql) You should see such result: :: query{ hero{ name } } Exceptions ---------- The module raises ``GqlFromStructException`` in case of empty or wrong data structure input. Parameters ---------- ``GqlFromStruct()`` constructor and ``.from_struct()`` method take 3 arguments: a **struct** (default None), a **minimize** (optional, default False) flag and a **force_quotes** (optional, default 0) setting. Code: :: foo = GqlFromStruct({'hero':{'@fields':['name']}}, True) # or foo = GqlFromStruct(struct = {'hero':{'@fields':['name']}}, minimize = True) gql = foo.query() # or gql = GqlFromStruct.from_struct({'hero':{'@fields':['name']}}, True) print (gql) gives you: :: query{hero{name}} By default the GraphQL-From-Struct sets quotes for any string with spaces. You can change it with the **force_quotes** flag. It enforces quoting parameters and arguments with 1 value, disables any quotes with -1 or enables only arguments quoting with 2: :: gql = GqlFromStruct.from_struct({'hero':{'@fields':['name']}}, True, 1) print (gql) gives you: :: "query"{"hero"{"name"}} Or :: gql = GqlFromStruct.from_struct({'he ro':{'@fields':['name']}}, True, -1) print (gql) gives you: :: query{he ro{name}} Or :: gql = GqlFromStruct.from_struct('human':{'@fields':['name', 'height'], '@args':{'id':['foo', 'bar']}}, True, 2) print (gql) gives you: :: query{human(id:["foo", "bar"]){name height}} Reserved keywords ------------------ Words ``@alias, @args, @fields, @fragments, @fragment_name, @directives, @include, @mutations, @operation_name, @queries, @query, @skip, @variables`` are reserved and used for query constructing. Examples --------- Examples are shown in the same order as in the `GraphQL <https://graphql.org/learn/queries/>`__ documentation. 
Fields ~~~~~~ Use ``@fields`` keyword: :: struct = {'hero':{'@fields':['name']}} print (GqlFromStruct.from_struct(struct)) Output: :: query{ hero{ name } } You can use arbitrary field nesting: :: struct = {'hero':{'@fields':['name', {'friends':{'@fields':['name']}}]}} print (GqlFromStruct.from_struct(struct)) Output: :: query{ hero{ name friends{ name } } } Arguments ~~~~~~~~~~ Use ``@args`` keyword: :: struct = {'human':{'@fields':['name', 'height'], '@args':{'id':'"1000"'}}} print (GqlFromStruct.from_struct(struct)) Output: :: query{ human( id : "1000" ){ name height } } or: :: struct = { 'human': { '@fields': ['name', { 'height': { '@args': { 'unit': 'FOOT' } } }], '@args': { 'id': "1000" } } } print (GqlFromStruct.from_struct(struct)) Output: :: query{ human( id : 1000 ){ name height( unit : FOOT ) } } Note: GraphQL-From-Struct puts double quotes by default only for values with spaces. Like that: :: query = {'human':{'@fields':['name', 'height'], '@args':{'id':'1000 meters'}}} Output: :: query{ human( id : "1000 meters" ){ name height } } Single words or numerical values are output in the form in which you passed them. :: query = {'human':{'@fields':['name', 'height'], '@args':{'id':1000}}} query{ human( id : 1000 ){ name height } } Default arguments ^^^^^^^^^^^^^^^^^^ You can set default values of arguments: :: struct = {'human':{'@fields':['name', 'height'], '@args':{'$first': {'Int':'3'}}} print (GqlFromStruct.from_struct(struct)) Output: :: query{ human( $first : Int = 3 ){ name height } } Aliases ~~~~~~~~ Use ``@alias`` keyword: :: struct = [{ 'hero': { '@alias': 'empireHero', '@args': { 'episode': "EMPIRE" }, '@fields': ['name'] } }, { 'hero': { '@alias': 'jediHero', '@args': { 'episode': "JEDI" }, '@fields': ['name'] } }] print (GqlFromStruct.from_struct(struct)) Output: :: query{ empireHero : hero( episode : EMPIRE ){ name } jediHero : hero( episode : JEDI ){ name } } Fragments ~~~~~~~~~~ Use ``@fragments`` and ``@fragment_name`` keywords for fragments setting up. Use ``@query`` and ``@queries`` for join some queries into one. 
:: struct = { "@queries": [{ '@query': [{ 'hero': { '@alias': 'leftComparison', '@args': { 'episode': "EMPIRE" }, '@fields': ['...comparisonFields'] } }, { 'hero': { '@alias': 'rightComparison', '@args': { 'episode': "JEDI" }, '@fields': ['...comparisonFields'] } } ] }], "@fragments": [{ 'Character': { '@fragment_name': 'comparisonFields', '@fields': ['name', 'appearsIn', { 'friends': { '@fields': ['name'] } }] } }] } print (GqlFromStruct.from_struct(struct)) Output: :: query{ leftComparison : hero( episode : EMPIRE ){ ...comparisonFields } rightComparison : hero( episode : JEDI ){ ...comparisonFields } } fragment comparisonFields on Character{ name appearsIn friends{ name } } Using variables inside fragments ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ :: struct = { "@queries": [{ '@args': { '$first': { 'Int': '3' } }, '@operation_name': 'HeroComparison', '@query': [{ 'hero': { '@alias': 'leftComparison', '@args': { 'episode': "EMPIRE" }, '@fields': ['...comparisonFields'] } }, { 'hero': { '@alias': 'rightComparison', '@args': { 'episode': "JEDI" }, '@fields': ['...comparisonFields'] } } ] }], "@fragments": [{ 'Character': { '@fragment_name': 'comparisonFields', '@fields': ['name', { 'friendsConnection': { '@args': { 'first': '$first' }, '@fields': ['totalCount', { 'edges': { '@fields': [{ 'node': { '@fields': ['name'] } }] } }] } }] } }] } print (GqlFromStruct.from_struct(struct)) Output: :: query HeroComparison ( $first : Int = 3 ){ leftComparison : hero( episode : EMPIRE ){ ...comparisonFields } rightComparison : hero( episode : JEDI ){ ...comparisonFields } } fragment comparisonFields on Character{ name friendsConnection( first : $first ){ totalCount edges{ node{ name } } } } Operation name ~~~~~~~~~~~~~~~ Use ``@operation_name`` keyword: :: struct = { '@queries': [{ '@operation_name': 'HeroNameAndFriends', '@query': { 'hero': { '@fields': ['name', { 'friends': { '@fields': ['name'] } }] } } }] } print (GqlFromStruct.from_struct(struct)) Output: :: query HeroNameAndFriends{ hero{ name friends{ name } } } Variables ~~~~~~~~~~ Use ``@variables`` block at the same high level nesting as ``@queries``: :: struct = { '@queries': [{ '@operation_name': 'HeroNameAndFriends', '@query': { 'hero': { '@fields': ['name', { 'friends': { '@fields': ['name'] } }] } } }], '@variables': { "episode": "JEDI" } } print (GqlFromStruct.from_struct(struct)) Output: :: query HeroNameAndFriends{ hero{ name friends{ name } } } { "episode": "JEDI" } Default variables ^^^^^^^^^^^^^^^^^^ Use ``@fields`` keyword: :: struct = { '@queries': [{ '@operation_name': 'HeroNameAndFriends', '@args': { '$episode': { 'Episode': 'JEDI' } }, '@query': { 'hero': { '@fields': ['name', { 'friends': { '@fields': ['name'] } }] } } }], '@variables': { "episode": "JEDI" } } print (GqlFromStruct.from_struct(struct)) Output: :: query HeroNameAndFriends ( $episode : Episode = JEDI ){ hero{ name friends{ name } } } { "episode": "JEDI" } Directives ~~~~~~~~~~~ Use ``@directives`` keyword and ``@skip`` or ``@include`` as directives: :: struct = { '@queries': [{ '@operation_name': 'Hero', '@args': { '$episode': 'Episode', '$withFriends': 'Boolean!' }, '@query': { 'hero': { '@args': { 'episode': '$episode' }, '@fields': ['name', { 'friends': { '@fields': ['name'], '@directives': { '@include': '$withFriends' } } }] } } }], '@variables': { "episode": "JEDI" } } print (GqlFromStruct.from_struct(struct)) Output: :: query Hero ( $episode : Episode, $withFriends : Boolean! 
){ hero( episode : $episode ){ name friends @include (if : $withFriends){ name } } } { "episode": "JEDI" } Mutations ~~~~~~~~~~ Use ``@mutations`` keyword: :: struct = { '@mutations': [{ '@operation_name': 'CreateReviewForEpisode', '@args': { '$episode': 'Episode!', '$review': 'ReviewInput!' }, '@query': { 'createReview': { '@args': { 'episode': '$ep', 'review': '$review' }, '@fields': ['stars', 'commentary'] } } }], '@variables': { "episode": "JEDI", "review": { "stars": 5, "commentary": "This is a great movie!" } } } print (GqlFromStruct.from_struct(struct)) Output: :: mutation CreateReviewForEpisode ( $episode : Episode!, $review : ReviewInput! ){ createReview( episode : $ep, review : $review ){ stars commentary } } { "episode": "JEDI", "review": { "stars": 5, "commentary": "This is a great movie!" } } Inline Fragments ~~~~~~~~~~~~~~~~~ Nothing special needed. :: struct = { "@queries": [{ '@args': { '$ep': 'Episode!' }, '@operation_name': 'HeroForEpisode', '@query': [{ 'hero': { '@args': { 'episode': '$ep' }, '@fields': ['name', { '... on Droid': { '@fields': ['primaryFunction'] } }, { '... on Human': { '@fields': ['height'] } } ] } }] }] } print (GqlFromStruct.from_struct(struct)) Output: :: query HeroForEpisode ( $ep : Episode! ){ hero( episode : $ep ){ name ... on Droid{ primaryFunction } ... on Human{ height } } } Meta fields ~~~~~~~~~~~~ Use meta field as usual field: :: struct = { 'search': { '@args': { 'text': 'an' }, '@fields': ['__typename', { '... on Human': { '@fields': ['name'] } }, { '... on Droid': { '@fields': ['name'] } }, { '... on Starship': { '@fields': ['name'] } } ] } } print (GqlFromStruct.from_struct(struct)) Output: :: query{ search( text : an ){ __typename ... on Human{ name } ... on Droid{ name } ... on Starship{ name } } } .. |Release| image:: https://img.shields.io/github/v/release/artamonoviv/graphql-from-struct.svg :target: https://github.com/artamonoviv/graphql-from-struct/releases .. |Code Coverage| image:: https://codecov.io/gh/artamonoviv/graphql-from-struct/branch/master/graph/badge.svg :target: https://codecov.io/gh/artamonoviv/graphql-from-struct .. |Build Status Travis CI| image:: https://travis-ci.org/artamonoviv/graphql-from-struct.svg?branch=master :target: https://travis-ci.org/artamonoviv/graphql-from-struct .. |Blog| image:: https://img.shields.io/badge/site-my%20blog-yellow.svg :target: https://artamonoviv.ru .. |License| image:: https://img.shields.io/badge/License-MIT-yellow.svg :target: https://opensource.org/licenses/MIT .. |Docs| image:: https://readthedocs.org/projects/graphql-from-struct/badge/?version=latest&style=flat :target: https://graphql-from-struct.readthedocs.io/en/latest/
PypiClean
/django_crispy_forms-2.0a1-py3-none-any.whl/crispy_forms/helper.py
import re from django.urls import NoReverseMatch, reverse from django.utils.safestring import mark_safe from crispy_forms.exceptions import FormHelpersException from crispy_forms.layout import Layout from crispy_forms.layout_slice import LayoutSlice from crispy_forms.utils import TEMPLATE_PACK, flatatt, list_difference, render_field class DynamicLayoutHandler: def _check_layout(self): if self.layout is None: raise FormHelpersException("You need to set a layout in your FormHelper") def _check_layout_and_form(self): self._check_layout() if self.form is None: raise FormHelpersException("You need to pass a form instance to your FormHelper") def all(self): """ Returns all layout objects of first level of depth """ self._check_layout() return LayoutSlice(self.layout, slice(0, len(self.layout.fields), 1)) def filter(self, *LayoutClasses, max_level=0, greedy=False): """ Returns a LayoutSlice pointing to layout objects of type `LayoutClass` """ self._check_layout() filtered_layout_objects = self.layout.get_layout_objects(LayoutClasses, max_level=max_level, greedy=greedy) return LayoutSlice(self.layout, filtered_layout_objects) def filter_by_widget(self, widget_type): """ Returns a LayoutSlice pointing to fields with widgets of `widget_type` """ self._check_layout_and_form() layout_field_names = self.layout.get_field_names() # Let's filter all fields with widgets like widget_type filtered_fields = [] for pointer in layout_field_names: if isinstance(self.form.fields[pointer.name].widget, widget_type): filtered_fields.append(pointer) return LayoutSlice(self.layout, filtered_fields) def exclude_by_widget(self, widget_type): """ Returns a LayoutSlice pointing to fields with widgets NOT matching `widget_type` """ self._check_layout_and_form() layout_field_names = self.layout.get_field_names() # Let's exclude all fields with widgets like widget_type filtered_fields = [] for pointer in layout_field_names: if not isinstance(self.form.fields[pointer.name].widget, widget_type): filtered_fields.append(pointer) return LayoutSlice(self.layout, filtered_fields) def __getitem__(self, key): """ Return a LayoutSlice that makes changes affect the current instance of the layout and not a copy. """ # when key is a string containing the field name if isinstance(key, str): # Django templates access FormHelper attributes using dictionary [] operator # This could be a helper['form_id'] access, not looking for a field if hasattr(self, key): return getattr(self, key) self._check_layout() layout_field_names = self.layout.get_field_names() filtered_field = [] for pointer in layout_field_names: # There can be an empty pointer if pointer.name == key: filtered_field.append(pointer) return LayoutSlice(self.layout, filtered_field) return LayoutSlice(self.layout, key) def __setitem__(self, key, value): self.layout[key] = value def __delitem__(self, key): del self.layout.fields[key] def __len__(self): if self.layout is not None: return len(self.layout.fields) else: return 0 class FormHelper(DynamicLayoutHandler): """ This class controls the form rendering behavior of the form passed to the `{% crispy %}` tag. For doing so you will need to set its attributes and pass the corresponding helper object to the tag:: {% crispy form form.helper %} Let's see what attributes you can set and what form behaviors they apply to: **form_method**: Specifies form method attribute. You can set it to 'POST' or 'GET'. 
Defaults to 'POST' **form_action**: Applied to the form action attribute: - Can be a named url in your URLconf that can be executed via the `{% url %}` template tag. \ Example: 'show_my_profile'. In your URLconf you could have something like:: path('show/profile/', 'show_my_profile_view', name = 'show_my_profile') - It can simply point to a URL '/whatever/blabla/'. **form_id**: Generates a form id for dom identification. If no id provided then no id attribute is created on the form. **form_class**: String containing separated CSS classes to be applied to form class attribute. **form_group_wrapper_class**: String containing separated CSS classes to be applied to each row of inputs. **form_tag**: It specifies if <form></form> tags should be rendered when using a Layout. If set to False it renders the form without the <form></form> tags. Defaults to True. **form_error_title**: If a form has `non_field_errors` to display, they are rendered in a div. You can set title's div with this attribute. Example: "Oooops!" or "Form Errors" **formset_error_title**: If a formset has `non_form_errors` to display, they are rendered in a div. You can set title's div with this attribute. **include_media**: Whether to automatically include form media. Set to False if you want to manually include form media outside the form. Defaults to True. Public Methods: **add_input(input)**: You can add input buttons using this method. Inputs added using this method will be rendered at the end of the form/formset. **add_layout(layout)**: You can add a `Layout` object to `FormHelper`. The Layout specifies in a simple, clean and DRY way how the form fields should be rendered. You can wrap fields, order them, customize pretty much anything in the form. Best way to add a helper to a form is adding a property named helper to the form that returns customized `FormHelper` object:: from crispy_forms.helper import FormHelper from crispy_forms.layout import Submit class MyForm(forms.Form): title = forms.CharField(_("Title")) @property def helper(self): helper = FormHelper() helper.form_id = 'this-form-rocks' helper.form_class = 'search' helper.add_input(Submit('save', 'save')) [...] 
return helper You can use it in a template doing:: {% load crispy_forms_tags %} {% crispy form %} """ _form_method = "post" _form_action = "" form = None form_id = "" form_class = "" form_group_wrapper_class = "" layout = None form_tag = True form_error_title = "" formset_error_title = "" form_show_errors = True render_unmentioned_fields = False render_hidden_fields = False render_required_fields = False _help_text_inline = False _error_text_inline = True form_show_labels = True template = None field_template = None disable_csrf = False use_custom_control = True label_class = "" field_class = "" include_media = True def __init__(self, form=None): self.attrs = {} self.inputs = [] if form is not None: self.form = form self.layout = self.build_default_layout(form) def build_default_layout(self, form): return Layout(*form.fields.keys()) @property def form_method(self): return self._form_method @form_method.setter def form_method(self, method): if method.lower() not in ("get", "post"): raise FormHelpersException( "Only GET and POST are valid in the \ form_method helper attribute" ) self._form_method = method.lower() @property def form_action(self): try: return reverse(self._form_action) except NoReverseMatch: return self._form_action @form_action.setter def form_action(self, action): self._form_action = action @property def help_text_inline(self): return self._help_text_inline @help_text_inline.setter def help_text_inline(self, flag): self._help_text_inline = flag self._error_text_inline = not flag @property def error_text_inline(self): return self._error_text_inline @error_text_inline.setter def error_text_inline(self, flag): self._error_text_inline = flag self._help_text_inline = not flag def add_input(self, input_object): self.inputs.append(input_object) def add_layout(self, layout): self.layout = layout def render_layout(self, form, context, template_pack=TEMPLATE_PACK): """ Returns safe html of the rendering of the layout """ form.rendered_fields = set() form.crispy_field_template = self.field_template # This renders the specified Layout strictly html = self.layout.render(form, context, template_pack=template_pack) # Rendering some extra fields if specified if self.render_unmentioned_fields or self.render_hidden_fields or self.render_required_fields: fields = tuple(form.fields.keys()) left_fields_to_render = list_difference(fields, form.rendered_fields) for field in left_fields_to_render: if ( self.render_unmentioned_fields or (self.render_hidden_fields and form.fields[field].widget.is_hidden) or (self.render_required_fields and form.fields[field].widget.is_required) ): html += render_field(field, form, context, template_pack=template_pack) return mark_safe(html) def get_attributes(self, template_pack=TEMPLATE_PACK): """ Used by crispy_forms_tags to get helper attributes """ attrs = self.attrs.copy() if self.attrs else {} if self.form_action: attrs["action"] = self.form_action.strip() if self.form_id: attrs["id"] = self.form_id.strip() if self.form_class: attrs["class"] = self.form_class.strip() if self.form_group_wrapper_class: attrs["form_group_wrapper_class"] = self.form_group_wrapper_class items = { "attrs": attrs, "disable_csrf": self.disable_csrf, "error_text_inline": self.error_text_inline, "field_class": self.field_class, "field_template": self.field_template or "%s/field.html" % template_pack, "flat_attrs": flatatt(attrs), "form_error_title": self.form_error_title.strip(), "form_method": self.form_method.strip(), "form_show_errors": self.form_show_errors, "form_show_labels": 
self.form_show_labels, "form_tag": self.form_tag, "formset_error_title": self.formset_error_title.strip(), "help_text_inline": self.help_text_inline, "include_media": self.include_media, "label_class": self.label_class, "use_custom_control": self.use_custom_control, } if template_pack == "bootstrap4": if "form-horizontal" in self.form_class.split(): bootstrap_size_match = re.findall(r"col(-(xl|lg|md|sm))?-(\d+)", self.label_class) if bootstrap_size_match: offset_pattern = "offset%s-%s" items["bootstrap_checkbox_offsets"] = [ offset_pattern % (m[0], m[-1]) for m in bootstrap_size_match ] else: bootstrap_size_match = re.findall(r"col-(lg|md|sm|xs)-(\d+)", self.label_class) if bootstrap_size_match: offset_pattern = "col-%s-offset-%s" items["bootstrap_checkbox_offsets"] = [offset_pattern % m for m in bootstrap_size_match] if self.inputs: items["inputs"] = self.inputs for attribute_name, value in self.__dict__.items(): if ( attribute_name not in items and attribute_name not in ["layout", "inputs"] and not attribute_name.startswith("_") ): items[attribute_name] = value return items
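# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of the dynamic layout API defined above. It assumes Django
# is installed and that configuring empty settings is enough to build a
# standalone form; the form and its fields are made up for the example.
if __name__ == '__main__':
    import django
    from django.conf import settings

    settings.configure()
    django.setup()

    from django import forms

    class MyForm(forms.Form):
        title = forms.CharField()
        body = forms.CharField(widget=forms.Textarea)

    helper = FormHelper(MyForm())      # build_default_layout() lists the form's fields
    helper.form_id = 'this-form-rocks'
    print(len(helper))                 # 2 -- one layout object per field
    print(helper['form_id'])           # attribute access also works through __getitem__
    print(helper.filter_by_widget(forms.Textarea))  # LayoutSlice over the 'body' field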
PypiClean
/pyro-api-0.1.2.tar.gz/pyro-api-0.1.2/pyroapi/dispatch.py
import importlib from contextlib import contextmanager DEFAULT_RNG_SEED = 1 _ALIASES = {} class GenericModule(object): """ Wrapper for a module that can be dynamically routed to a custom backend. """ current_backend = {} _modules = {} def __init__(self, name, default_backend): assert isinstance(name, str) assert isinstance(default_backend, str) self._name = name GenericModule.current_backend[name] = default_backend def __getattribute__(self, name): module_name = super(GenericModule, self).__getattribute__('_name') backend = GenericModule.current_backend[module_name] try: module = GenericModule._modules[backend] except KeyError: module = importlib.import_module(backend) GenericModule._modules[backend] = module if name.startswith('__'): return getattr(module, name) # allow magic attributes to return AttributeError try: return getattr(module, name) except AttributeError: raise NotImplementedError('This Pyro backend does not implement {}.{}' .format(module_name, name)) @contextmanager def pyro_backend(*aliases, **new_backends): """ Context manager to set a custom backend for Pyro models. Backends can be specified either by name (for standard backends or backends registered through :func:`register_backend` ) or by providing kwargs mapping module name to backend module name. Standard backends include: pyro, minipyro, funsor, and numpy. """ if aliases: assert len(aliases) == 1 assert not new_backends new_backends = _ALIASES[aliases[0]] old_backends = {} for name, new_backend in new_backends.items(): old_backends[name] = GenericModule.current_backend[name] GenericModule.current_backend[name] = new_backend try: with handlers.seed(rng_seed=DEFAULT_RNG_SEED): yield finally: for name, old_backend in old_backends.items(): GenericModule.current_backend[name] = old_backend def register_backend(alias, new_backends): """ Register a new backend alias. For example:: register_backend("minipyro", { "infer": "pyro.contrib.minipyro", "optim": "pyro.contrib.minipyro", "pyro": "pyro.contrib.minipyro", }) :param str alias: The name of the new backend. :param dict new_backends: A dict mapping standard module name (str) to new module name (str). This needs to include only nonstandard backends (e.g. if your backend uses torch ops, you need not override ``ops``) """ assert isinstance(new_backends, dict) assert all(isinstance(key, str) for key in new_backends.keys()) assert all(isinstance(value, str) for value in new_backends.values()) _ALIASES[alias] = new_backends.copy() # These modules can be overridden. pyro = GenericModule('pyro', 'pyro') distributions = GenericModule('distributions', 'pyro.distributions') handlers = GenericModule('handlers', 'pyro.poutine') infer = GenericModule('infer', 'pyro.infer') optim = GenericModule('optim', 'pyro.optim') ops = GenericModule('ops', 'torch') # These are standard backends. 
register_backend('pyro', {
    'distributions': 'pyro.distributions',
    'handlers': 'pyro.poutine',
    'infer': 'pyro.infer',
    'ops': 'torch',
    'optim': 'pyro.optim',
    'pyro': 'pyro',
})
register_backend('minipyro', {
    'distributions': 'pyro.distributions',
    'handlers': 'pyro.poutine',
    'infer': 'pyro.contrib.minipyro',
    'ops': 'torch',
    'optim': 'pyro.contrib.minipyro',
    'pyro': 'pyro.contrib.minipyro',
})
register_backend('funsor', {
    'distributions': 'funsor.torch.distributions',
    'handlers': 'funsor.minipyro',
    'infer': 'funsor.minipyro',
    'ops': 'funsor.compat.ops',
    'optim': 'funsor.minipyro',
    'pyro': 'funsor.minipyro',
})
register_backend('numpy', {
    'distributions': 'numpyro.compat.distributions',
    'handlers': 'numpyro.compat.handlers',
    'infer': 'numpyro.compat.infer',
    'ops': 'numpyro.compat.ops',
    'optim': 'numpyro.compat.optim',
    'pyro': 'numpyro.compat.pyro',
})
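# --- Illustrative usage (not part of the original module) ---
# A hedged sketch of switching backends with the context manager above. It
# assumes the `pyro` package is installed, since both the 'pyro' and
# 'minipyro' backends (and their handlers module) import from it.
if __name__ == '__main__':
    with pyro_backend('minipyro'):
        # `pyro` and `distributions` now resolve to pyro.contrib.minipyro
        # and pyro.distributions respectively.
        x = pyro.sample('x', distributions.Normal(0., 1.))
        print(float(x))

    # Projects can also register their own backend alias; the module paths
    # below are placeholders and are only imported on first use.
    register_backend('my_backend', {
        'infer': 'my_pkg.infer',
        'optim': 'my_pkg.optim',
        'pyro': 'my_pkg.core',
    })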
PypiClean
/cdktf_cdktf_provider_mongodbatlas-5.0.0-py3-none-any.whl/cdktf_cdktf_provider_mongodbatlas/data_mongodbatlas_federated_settings_org_configs/__init__.py
import abc import builtins import datetime import enum import typing import jsii import publication import typing_extensions from typeguard import check_type from .._jsii import * import cdktf as _cdktf_9a9027ec import constructs as _constructs_77d1e7e8 class DataMongodbatlasFederatedSettingsOrgConfigs( _cdktf_9a9027ec.TerraformDataSource, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigs", ): '''Represents a {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs mongodbatlas_federated_settings_org_configs}.''' def __init__( self, scope: _constructs_77d1e7e8.Construct, id_: builtins.str, *, federation_settings_id: builtins.str, id: typing.Optional[builtins.str] = None, items_per_page: typing.Optional[jsii.Number] = None, page_num: typing.Optional[jsii.Number] = None, connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None, count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None, depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None, for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None, lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None, provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None, provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None, ) -> None: '''Create a new {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs mongodbatlas_federated_settings_org_configs} Data Source. :param scope: The scope in which to define this construct. :param id_: The scoped construct ID. Must be unique amongst siblings in the same scope :param federation_settings_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#federation_settings_id DataMongodbatlasFederatedSettingsOrgConfigs#federation_settings_id}. :param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#id DataMongodbatlasFederatedSettingsOrgConfigs#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable. :param items_per_page: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#items_per_page DataMongodbatlasFederatedSettingsOrgConfigs#items_per_page}. 
:param page_num: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#page_num DataMongodbatlasFederatedSettingsOrgConfigs#page_num}. :param connection: :param count: :param depends_on: :param for_each: :param lifecycle: :param provider: :param provisioners: ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__41fa2142762635403a549d372cddb76fb35434b33384f009b7c13059defb4d2e) check_type(argname="argument scope", value=scope, expected_type=type_hints["scope"]) check_type(argname="argument id_", value=id_, expected_type=type_hints["id_"]) config = DataMongodbatlasFederatedSettingsOrgConfigsConfig( federation_settings_id=federation_settings_id, id=id, items_per_page=items_per_page, page_num=page_num, connection=connection, count=count, depends_on=depends_on, for_each=for_each, lifecycle=lifecycle, provider=provider, provisioners=provisioners, ) jsii.create(self.__class__, self, [scope, id_, config]) @jsii.member(jsii_name="resetId") def reset_id(self) -> None: return typing.cast(None, jsii.invoke(self, "resetId", [])) @jsii.member(jsii_name="resetItemsPerPage") def reset_items_per_page(self) -> None: return typing.cast(None, jsii.invoke(self, "resetItemsPerPage", [])) @jsii.member(jsii_name="resetPageNum") def reset_page_num(self) -> None: return typing.cast(None, jsii.invoke(self, "resetPageNum", [])) @jsii.member(jsii_name="synthesizeAttributes") def _synthesize_attributes(self) -> typing.Mapping[builtins.str, typing.Any]: return typing.cast(typing.Mapping[builtins.str, typing.Any], jsii.invoke(self, "synthesizeAttributes", [])) @jsii.python.classproperty @jsii.member(jsii_name="tfResourceType") def TF_RESOURCE_TYPE(cls) -> builtins.str: return typing.cast(builtins.str, jsii.sget(cls, "tfResourceType")) @builtins.property @jsii.member(jsii_name="results") def results(self) -> "DataMongodbatlasFederatedSettingsOrgConfigsResultsList": return typing.cast("DataMongodbatlasFederatedSettingsOrgConfigsResultsList", jsii.get(self, "results")) @builtins.property @jsii.member(jsii_name="federationSettingsIdInput") def federation_settings_id_input(self) -> typing.Optional[builtins.str]: return typing.cast(typing.Optional[builtins.str], jsii.get(self, "federationSettingsIdInput")) @builtins.property @jsii.member(jsii_name="idInput") def id_input(self) -> typing.Optional[builtins.str]: return typing.cast(typing.Optional[builtins.str], jsii.get(self, "idInput")) @builtins.property @jsii.member(jsii_name="itemsPerPageInput") def items_per_page_input(self) -> typing.Optional[jsii.Number]: return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "itemsPerPageInput")) @builtins.property @jsii.member(jsii_name="pageNumInput") def page_num_input(self) -> typing.Optional[jsii.Number]: return typing.cast(typing.Optional[jsii.Number], jsii.get(self, "pageNumInput")) @builtins.property @jsii.member(jsii_name="federationSettingsId") def federation_settings_id(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "federationSettingsId")) @federation_settings_id.setter def federation_settings_id(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__c1898d55daa2df2622d1e2dbb38092aa575851625db59853fa03035e019f770f) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "federationSettingsId", value) @builtins.property @jsii.member(jsii_name="id") def id(self) -> builtins.str: return 
typing.cast(builtins.str, jsii.get(self, "id")) @id.setter def id(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__de14ffd75e95c616fc9cc6e5d507531aa73606b455d7bcaca5a7a602a0b58bf5) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "id", value) @builtins.property @jsii.member(jsii_name="itemsPerPage") def items_per_page(self) -> jsii.Number: return typing.cast(jsii.Number, jsii.get(self, "itemsPerPage")) @items_per_page.setter def items_per_page(self, value: jsii.Number) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__b72c3a023b993fc8f5f1c01fa8e6a6cfd3afecb01447a79b5dcdb0f48628e887) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "itemsPerPage", value) @builtins.property @jsii.member(jsii_name="pageNum") def page_num(self) -> jsii.Number: return typing.cast(jsii.Number, jsii.get(self, "pageNum")) @page_num.setter def page_num(self, value: jsii.Number) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__1b86b6cc1ccc3bc1b3c98e240910d7389d460ea6fc4a510cab2b93398745be81) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "pageNum", value) @jsii.data_type( jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsConfig", jsii_struct_bases=[_cdktf_9a9027ec.TerraformMetaArguments], name_mapping={ "connection": "connection", "count": "count", "depends_on": "dependsOn", "for_each": "forEach", "lifecycle": "lifecycle", "provider": "provider", "provisioners": "provisioners", "federation_settings_id": "federationSettingsId", "id": "id", "items_per_page": "itemsPerPage", "page_num": "pageNum", }, ) class DataMongodbatlasFederatedSettingsOrgConfigsConfig( _cdktf_9a9027ec.TerraformMetaArguments, ): def __init__( self, *, connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None, count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None, depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None, for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None, lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None, provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None, provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None, federation_settings_id: builtins.str, id: typing.Optional[builtins.str] = None, items_per_page: typing.Optional[jsii.Number] = None, page_num: typing.Optional[jsii.Number] = None, ) -> None: ''' :param connection: :param count: :param depends_on: :param for_each: :param lifecycle: :param provider: :param provisioners: :param federation_settings_id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#federation_settings_id 
DataMongodbatlasFederatedSettingsOrgConfigs#federation_settings_id}. :param id: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#id DataMongodbatlasFederatedSettingsOrgConfigs#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable. :param items_per_page: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#items_per_page DataMongodbatlasFederatedSettingsOrgConfigs#items_per_page}. :param page_num: Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#page_num DataMongodbatlasFederatedSettingsOrgConfigs#page_num}. ''' if isinstance(lifecycle, dict): lifecycle = _cdktf_9a9027ec.TerraformResourceLifecycle(**lifecycle) if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__3fc6eb1198f57e2b477d5be3ecf09e09be7015c64225b8e517f9fcc239a41ab1) check_type(argname="argument connection", value=connection, expected_type=type_hints["connection"]) check_type(argname="argument count", value=count, expected_type=type_hints["count"]) check_type(argname="argument depends_on", value=depends_on, expected_type=type_hints["depends_on"]) check_type(argname="argument for_each", value=for_each, expected_type=type_hints["for_each"]) check_type(argname="argument lifecycle", value=lifecycle, expected_type=type_hints["lifecycle"]) check_type(argname="argument provider", value=provider, expected_type=type_hints["provider"]) check_type(argname="argument provisioners", value=provisioners, expected_type=type_hints["provisioners"]) check_type(argname="argument federation_settings_id", value=federation_settings_id, expected_type=type_hints["federation_settings_id"]) check_type(argname="argument id", value=id, expected_type=type_hints["id"]) check_type(argname="argument items_per_page", value=items_per_page, expected_type=type_hints["items_per_page"]) check_type(argname="argument page_num", value=page_num, expected_type=type_hints["page_num"]) self._values: typing.Dict[builtins.str, typing.Any] = { "federation_settings_id": federation_settings_id, } if connection is not None: self._values["connection"] = connection if count is not None: self._values["count"] = count if depends_on is not None: self._values["depends_on"] = depends_on if for_each is not None: self._values["for_each"] = for_each if lifecycle is not None: self._values["lifecycle"] = lifecycle if provider is not None: self._values["provider"] = provider if provisioners is not None: self._values["provisioners"] = provisioners if id is not None: self._values["id"] = id if items_per_page is not None: self._values["items_per_page"] = items_per_page if page_num is not None: self._values["page_num"] = page_num @builtins.property def connection( self, ) -> typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]]: ''' :stability: experimental ''' result = self._values.get("connection") return typing.cast(typing.Optional[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, _cdktf_9a9027ec.WinrmProvisionerConnection]], result) @builtins.property def count( self, ) 
-> typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]]: ''' :stability: experimental ''' result = self._values.get("count") return typing.cast(typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]], result) @builtins.property def depends_on( self, ) -> typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]]: ''' :stability: experimental ''' result = self._values.get("depends_on") return typing.cast(typing.Optional[typing.List[_cdktf_9a9027ec.ITerraformDependable]], result) @builtins.property def for_each(self) -> typing.Optional[_cdktf_9a9027ec.ITerraformIterator]: ''' :stability: experimental ''' result = self._values.get("for_each") return typing.cast(typing.Optional[_cdktf_9a9027ec.ITerraformIterator], result) @builtins.property def lifecycle(self) -> typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle]: ''' :stability: experimental ''' result = self._values.get("lifecycle") return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformResourceLifecycle], result) @builtins.property def provider(self) -> typing.Optional[_cdktf_9a9027ec.TerraformProvider]: ''' :stability: experimental ''' result = self._values.get("provider") return typing.cast(typing.Optional[_cdktf_9a9027ec.TerraformProvider], result) @builtins.property def provisioners( self, ) -> typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]]: ''' :stability: experimental ''' result = self._values.get("provisioners") return typing.cast(typing.Optional[typing.List[typing.Union[_cdktf_9a9027ec.FileProvisioner, _cdktf_9a9027ec.LocalExecProvisioner, _cdktf_9a9027ec.RemoteExecProvisioner]]], result) @builtins.property def federation_settings_id(self) -> builtins.str: '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#federation_settings_id DataMongodbatlasFederatedSettingsOrgConfigs#federation_settings_id}.''' result = self._values.get("federation_settings_id") assert result is not None, "Required property 'federation_settings_id' is missing" return typing.cast(builtins.str, result) @builtins.property def id(self) -> typing.Optional[builtins.str]: '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#id DataMongodbatlasFederatedSettingsOrgConfigs#id}. Please be aware that the id field is automatically added to all resources in Terraform providers using a Terraform provider SDK version below 2. If you experience problems setting this value it might not be settable. Please take a look at the provider documentation to ensure it should be settable. 
''' result = self._values.get("id") return typing.cast(typing.Optional[builtins.str], result) @builtins.property def items_per_page(self) -> typing.Optional[jsii.Number]: '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#items_per_page DataMongodbatlasFederatedSettingsOrgConfigs#items_per_page}.''' result = self._values.get("items_per_page") return typing.cast(typing.Optional[jsii.Number], result) @builtins.property def page_num(self) -> typing.Optional[jsii.Number]: '''Docs at Terraform Registry: {@link https://registry.terraform.io/providers/mongodb/mongodbatlas/1.11.0/docs/data-sources/federated_settings_org_configs#page_num DataMongodbatlasFederatedSettingsOrgConfigs#page_num}.''' result = self._values.get("page_num") return typing.cast(typing.Optional[jsii.Number], result) def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "DataMongodbatlasFederatedSettingsOrgConfigsConfig(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) @jsii.data_type( jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResults", jsii_struct_bases=[], name_mapping={}, ) class DataMongodbatlasFederatedSettingsOrgConfigsResults: def __init__(self) -> None: self._values: typing.Dict[builtins.str, typing.Any] = {} def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "DataMongodbatlasFederatedSettingsOrgConfigsResults(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) class DataMongodbatlasFederatedSettingsOrgConfigsResultsList( _cdktf_9a9027ec.ComplexList, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsList", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, wraps_set: builtins.bool, ) -> None: ''' :param terraform_resource: The parent resource. :param terraform_attribute: The attribute on the parent resource this class is referencing. :param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index). ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__a539493ed49665514a6452a9414163563c4c98710ecc938f691e0d48b0d136c6) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set]) @jsii.member(jsii_name="get") def get( self, index: jsii.Number, ) -> "DataMongodbatlasFederatedSettingsOrgConfigsResultsOutputReference": ''' :param index: the index of the item to return. 
''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__1582280f8b7fbeda19b9e6bb03ec459aba191849beb86e22ea40e83eabfb2758) check_type(argname="argument index", value=index, expected_type=type_hints["index"]) return typing.cast("DataMongodbatlasFederatedSettingsOrgConfigsResultsOutputReference", jsii.invoke(self, "get", [index])) @builtins.property @jsii.member(jsii_name="terraformAttribute") def _terraform_attribute(self) -> builtins.str: '''The attribute on the parent resource this class is referencing.''' return typing.cast(builtins.str, jsii.get(self, "terraformAttribute")) @_terraform_attribute.setter def _terraform_attribute(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__858f88846ddfe44b704a088fc1b2a89cf78937d10032f5703ef747647a857e40) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "terraformAttribute", value) @builtins.property @jsii.member(jsii_name="terraformResource") def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent: '''The parent resource.''' return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource")) @_terraform_resource.setter def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__e137195153518adec7e83a237c15836b0b79a42322b2e7d8555d34e61a5a7ca4) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "terraformResource", value) @builtins.property @jsii.member(jsii_name="wrapsSet") def _wraps_set(self) -> builtins.bool: '''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).''' return typing.cast(builtins.bool, jsii.get(self, "wrapsSet")) @_wraps_set.setter def _wraps_set(self, value: builtins.bool) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__4dd4c882c3f0fc3327d5638cfbbeef2e9a94a46fa9aec048737a46c278444015) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "wrapsSet", value) class DataMongodbatlasFederatedSettingsOrgConfigsResultsOutputReference( _cdktf_9a9027ec.ComplexObject, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsOutputReference", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, complex_object_index: jsii.Number, complex_object_is_from_set: builtins.bool, ) -> None: ''' :param terraform_resource: The parent resource. :param terraform_attribute: The attribute on the parent resource this class is referencing. :param complex_object_index: the index of this item in the list. :param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index). 
''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__41e3e54eba397b7b296e34a1196c0902ab4478bb088c1b5e3988f257895b08ad) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"]) check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set]) @builtins.property @jsii.member(jsii_name="domainAllowList") def domain_allow_list(self) -> typing.List[builtins.str]: return typing.cast(typing.List[builtins.str], jsii.get(self, "domainAllowList")) @builtins.property @jsii.member(jsii_name="domainRestrictionEnabled") def domain_restriction_enabled(self) -> _cdktf_9a9027ec.IResolvable: return typing.cast(_cdktf_9a9027ec.IResolvable, jsii.get(self, "domainRestrictionEnabled")) @builtins.property @jsii.member(jsii_name="identityProviderId") def identity_provider_id(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "identityProviderId")) @builtins.property @jsii.member(jsii_name="orgId") def org_id(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "orgId")) @builtins.property @jsii.member(jsii_name="postAuthRoleGrants") def post_auth_role_grants(self) -> typing.List[builtins.str]: return typing.cast(typing.List[builtins.str], jsii.get(self, "postAuthRoleGrants")) @builtins.property @jsii.member(jsii_name="roleMappings") def role_mappings( self, ) -> "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsList": return typing.cast("DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsList", jsii.get(self, "roleMappings")) @builtins.property @jsii.member(jsii_name="userConflicts") def user_conflicts( self, ) -> "DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsList": return typing.cast("DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsList", jsii.get(self, "userConflicts")) @builtins.property @jsii.member(jsii_name="internalValue") def internal_value( self, ) -> typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResults]: return typing.cast(typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResults], jsii.get(self, "internalValue")) @internal_value.setter def internal_value( self, value: typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResults], ) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__28f081d22641c03f849b3d8344bd83f9f816fad8c3c81f8b5a5430b474a68df1) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "internalValue", value) @jsii.data_type( jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappings", jsii_struct_bases=[], name_mapping={}, ) class DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappings: def __init__(self) -> None: self._values: typing.Dict[builtins.str, typing.Any] = {} def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == 
self) def __repr__(self) -> str: return "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappings(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) class DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsList( _cdktf_9a9027ec.ComplexList, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsList", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, wraps_set: builtins.bool, ) -> None: ''' :param terraform_resource: The parent resource. :param terraform_attribute: The attribute on the parent resource this class is referencing. :param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index). ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__beb4c7b4d4d21e20d5a47b5e23487540c84c5ec5b19990ab1f24db4ca1dcf01e) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set]) @jsii.member(jsii_name="get") def get( self, index: jsii.Number, ) -> "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsOutputReference": ''' :param index: the index of the item to return. ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__2f184a3d86f6b4eb3569125524a195547df904074b35516613c73f32294ec397) check_type(argname="argument index", value=index, expected_type=type_hints["index"]) return typing.cast("DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsOutputReference", jsii.invoke(self, "get", [index])) @builtins.property @jsii.member(jsii_name="terraformAttribute") def _terraform_attribute(self) -> builtins.str: '''The attribute on the parent resource this class is referencing.''' return typing.cast(builtins.str, jsii.get(self, "terraformAttribute")) @_terraform_attribute.setter def _terraform_attribute(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__2681fcc4dda5d91c774774f220b271ee2b3b7003dceceb20a9a97d9b63f70ebd) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "terraformAttribute", value) @builtins.property @jsii.member(jsii_name="terraformResource") def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent: '''The parent resource.''' return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource")) @_terraform_resource.setter def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__5684daf77a5251ef3cb44747f735a03b222adfe403c08b771110b52010808dcb) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "terraformResource", value) @builtins.property @jsii.member(jsii_name="wrapsSet") def _wraps_set(self) -> builtins.bool: '''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).''' return typing.cast(builtins.bool, jsii.get(self, "wrapsSet")) @_wraps_set.setter def _wraps_set(self, value: 
builtins.bool) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__c47af14a9ee596177ff148a1fb810ee50442ad376aa50b757abc6fd4277331a6) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "wrapsSet", value) class DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsOutputReference( _cdktf_9a9027ec.ComplexObject, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsOutputReference", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, complex_object_index: jsii.Number, complex_object_is_from_set: builtins.bool, ) -> None: ''' :param terraform_resource: The parent resource. :param terraform_attribute: The attribute on the parent resource this class is referencing. :param complex_object_index: the index of this item in the list. :param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index). ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__ffa673c5f3f597fe22a11b87c5f7de3a8d0eecd8451fbd94b738543dcfe2fb3c) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"]) check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set]) @builtins.property @jsii.member(jsii_name="externalGroupName") def external_group_name(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "externalGroupName")) @builtins.property @jsii.member(jsii_name="id") def id(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "id")) @builtins.property @jsii.member(jsii_name="roleAssignments") def role_assignments( self, ) -> "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsList": return typing.cast("DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsList", jsii.get(self, "roleAssignments")) @builtins.property @jsii.member(jsii_name="internalValue") def internal_value( self, ) -> typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappings]: return typing.cast(typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappings], jsii.get(self, "internalValue")) @internal_value.setter def internal_value( self, value: typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappings], ) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__1ca526e6dfde7c84fc15596130af02fff507520e8858163cfa0d5e52f45dda99) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "internalValue", value) @jsii.data_type( jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignments", jsii_struct_bases=[], name_mapping={}, ) class 
DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignments: def __init__(self) -> None: self._values: typing.Dict[builtins.str, typing.Any] = {} def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignments(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) class DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsList( _cdktf_9a9027ec.ComplexList, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsList", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, wraps_set: builtins.bool, ) -> None: ''' :param terraform_resource: The parent resource. :param terraform_attribute: The attribute on the parent resource this class is referencing. :param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index). ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__61c5badcd49ef9610f4f966bee6fe3cbd0f6a21aeba6ac8dd1f62958b22cc037) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set]) @jsii.member(jsii_name="get") def get( self, index: jsii.Number, ) -> "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsOutputReference": ''' :param index: the index of the item to return. 
''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__0adbdf0ccadf81be06817f1754b01ca52d5e190b469fc543c21d8fce46932f0c) check_type(argname="argument index", value=index, expected_type=type_hints["index"]) return typing.cast("DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsOutputReference", jsii.invoke(self, "get", [index])) @builtins.property @jsii.member(jsii_name="terraformAttribute") def _terraform_attribute(self) -> builtins.str: '''The attribute on the parent resource this class is referencing.''' return typing.cast(builtins.str, jsii.get(self, "terraformAttribute")) @_terraform_attribute.setter def _terraform_attribute(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__bf012923023227beea8455b87734ba13518fc7fc2603908b535c7fb8028e5ab6) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "terraformAttribute", value) @builtins.property @jsii.member(jsii_name="terraformResource") def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent: '''The parent resource.''' return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource")) @_terraform_resource.setter def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__f697945603f3692bfeb1f2300c61689407f7972d2433c0843e96276e7f14493e) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "terraformResource", value) @builtins.property @jsii.member(jsii_name="wrapsSet") def _wraps_set(self) -> builtins.bool: '''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).''' return typing.cast(builtins.bool, jsii.get(self, "wrapsSet")) @_wraps_set.setter def _wraps_set(self, value: builtins.bool) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__cc76aa768e02d5bceff95cb2a404b52d9c4c63068218c9166fdfea1b9411156d) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "wrapsSet", value) class DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsOutputReference( _cdktf_9a9027ec.ComplexObject, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsOutputReference", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, complex_object_index: jsii.Number, complex_object_is_from_set: builtins.bool, ) -> None: ''' :param terraform_resource: The parent resource. :param terraform_attribute: The attribute on the parent resource this class is referencing. :param complex_object_index: the index of this item in the list. :param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index). 
''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__99897f7ed7f3aa5bddfc794bbbb62c4d8b8d972f28725f92ee4dc2ff694c2b5a) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"]) check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set]) @builtins.property @jsii.member(jsii_name="groupId") def group_id(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "groupId")) @builtins.property @jsii.member(jsii_name="orgId") def org_id(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "orgId")) @builtins.property @jsii.member(jsii_name="role") def role(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "role")) @builtins.property @jsii.member(jsii_name="internalValue") def internal_value( self, ) -> typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignments]: return typing.cast(typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignments], jsii.get(self, "internalValue")) @internal_value.setter def internal_value( self, value: typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignments], ) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__6d851664d76445303b34ee0f82e885cd4adbed59ee8cb3a2fae74cc3c87b6e2e) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "internalValue", value) @jsii.data_type( jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflicts", jsii_struct_bases=[], name_mapping={}, ) class DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflicts: def __init__(self) -> None: self._values: typing.Dict[builtins.str, typing.Any] = {} def __eq__(self, rhs: typing.Any) -> builtins.bool: return isinstance(rhs, self.__class__) and rhs._values == self._values def __ne__(self, rhs: typing.Any) -> builtins.bool: return not (rhs == self) def __repr__(self) -> str: return "DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflicts(%s)" % ", ".join( k + "=" + repr(v) for k, v in self._values.items() ) class DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsList( _cdktf_9a9027ec.ComplexList, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsList", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, wraps_set: builtins.bool, ) -> None: ''' :param terraform_resource: The parent resource. :param terraform_attribute: The attribute on the parent resource this class is referencing. :param wraps_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index). 
''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__cd31073a6a6074a59887169e7a903b7ab40261defadd6503f4c1c69d9dc5dc45) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) check_type(argname="argument wraps_set", value=wraps_set, expected_type=type_hints["wraps_set"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, wraps_set]) @jsii.member(jsii_name="get") def get( self, index: jsii.Number, ) -> "DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsOutputReference": ''' :param index: the index of the item to return. ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__82970c2deb30bb9d3c8cbfb69ba2f1fdff36182ae28c41b59de7f9e99336645e) check_type(argname="argument index", value=index, expected_type=type_hints["index"]) return typing.cast("DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsOutputReference", jsii.invoke(self, "get", [index])) @builtins.property @jsii.member(jsii_name="terraformAttribute") def _terraform_attribute(self) -> builtins.str: '''The attribute on the parent resource this class is referencing.''' return typing.cast(builtins.str, jsii.get(self, "terraformAttribute")) @_terraform_attribute.setter def _terraform_attribute(self, value: builtins.str) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__5e484e7e9369b2117948fbed122c74f55d5c4e10f5cb45d748307f9d23a4c29f) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "terraformAttribute", value) @builtins.property @jsii.member(jsii_name="terraformResource") def _terraform_resource(self) -> _cdktf_9a9027ec.IInterpolatingParent: '''The parent resource.''' return typing.cast(_cdktf_9a9027ec.IInterpolatingParent, jsii.get(self, "terraformResource")) @_terraform_resource.setter def _terraform_resource(self, value: _cdktf_9a9027ec.IInterpolatingParent) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__17011a552de88803e5934a56f5aad8135b029721757069701472a07258d23b6d) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "terraformResource", value) @builtins.property @jsii.member(jsii_name="wrapsSet") def _wraps_set(self) -> builtins.bool: '''whether the list is wrapping a set (will add tolist() to be able to access an item via an index).''' return typing.cast(builtins.bool, jsii.get(self, "wrapsSet")) @_wraps_set.setter def _wraps_set(self, value: builtins.bool) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__6bcd6641f32ad34fb70a47e2b3bacc525269adfe710ea6e21fb2d51e080c15d3) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "wrapsSet", value) class DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsOutputReference( _cdktf_9a9027ec.ComplexObject, metaclass=jsii.JSIIMeta, jsii_type="@cdktf/provider-mongodbatlas.dataMongodbatlasFederatedSettingsOrgConfigs.DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsOutputReference", ): def __init__( self, terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, complex_object_index: jsii.Number, complex_object_is_from_set: builtins.bool, ) -> None: ''' :param terraform_resource: The parent resource. 
:param terraform_attribute: The attribute on the parent resource this class is referencing. :param complex_object_index: the index of this item in the list. :param complex_object_is_from_set: whether the list is wrapping a set (will add tolist() to be able to access an item via an index). ''' if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__4d8e1faf0bd5fa114cdd072da1133d2b79037b9241a20ae0cebf55f5c476489f) check_type(argname="argument terraform_resource", value=terraform_resource, expected_type=type_hints["terraform_resource"]) check_type(argname="argument terraform_attribute", value=terraform_attribute, expected_type=type_hints["terraform_attribute"]) check_type(argname="argument complex_object_index", value=complex_object_index, expected_type=type_hints["complex_object_index"]) check_type(argname="argument complex_object_is_from_set", value=complex_object_is_from_set, expected_type=type_hints["complex_object_is_from_set"]) jsii.create(self.__class__, self, [terraform_resource, terraform_attribute, complex_object_index, complex_object_is_from_set]) @builtins.property @jsii.member(jsii_name="emailAddress") def email_address(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "emailAddress")) @builtins.property @jsii.member(jsii_name="federationSettingsId") def federation_settings_id(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "federationSettingsId")) @builtins.property @jsii.member(jsii_name="firstName") def first_name(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "firstName")) @builtins.property @jsii.member(jsii_name="lastName") def last_name(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "lastName")) @builtins.property @jsii.member(jsii_name="userId") def user_id(self) -> builtins.str: return typing.cast(builtins.str, jsii.get(self, "userId")) @builtins.property @jsii.member(jsii_name="internalValue") def internal_value( self, ) -> typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflicts]: return typing.cast(typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflicts], jsii.get(self, "internalValue")) @internal_value.setter def internal_value( self, value: typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflicts], ) -> None: if __debug__: type_hints = typing.get_type_hints(_typecheckingstub__b6e284da5561df0326ba76701efd24b6ff50dcfc3a568691c0c284dd0cbaf90e) check_type(argname="argument value", value=value, expected_type=type_hints["value"]) jsii.set(self, "internalValue", value) __all__ = [ "DataMongodbatlasFederatedSettingsOrgConfigs", "DataMongodbatlasFederatedSettingsOrgConfigsConfig", "DataMongodbatlasFederatedSettingsOrgConfigsResults", "DataMongodbatlasFederatedSettingsOrgConfigsResultsList", "DataMongodbatlasFederatedSettingsOrgConfigsResultsOutputReference", "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappings", "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsList", "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsOutputReference", "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignments", "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsList", "DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignmentsOutputReference", "DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflicts", "DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsList", 
"DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflictsOutputReference", ] publication.publish() def _typecheckingstub__41fa2142762635403a549d372cddb76fb35434b33384f009b7c13059defb4d2e( scope: _constructs_77d1e7e8.Construct, id_: builtins.str, *, federation_settings_id: builtins.str, id: typing.Optional[builtins.str] = None, items_per_page: typing.Optional[jsii.Number] = None, page_num: typing.Optional[jsii.Number] = None, connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None, count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None, depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None, for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None, lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None, provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None, provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None, ) -> None: """Type checking stubs""" pass def _typecheckingstub__c1898d55daa2df2622d1e2dbb38092aa575851625db59853fa03035e019f770f( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__de14ffd75e95c616fc9cc6e5d507531aa73606b455d7bcaca5a7a602a0b58bf5( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__b72c3a023b993fc8f5f1c01fa8e6a6cfd3afecb01447a79b5dcdb0f48628e887( value: jsii.Number, ) -> None: """Type checking stubs""" pass def _typecheckingstub__1b86b6cc1ccc3bc1b3c98e240910d7389d460ea6fc4a510cab2b93398745be81( value: jsii.Number, ) -> None: """Type checking stubs""" pass def _typecheckingstub__3fc6eb1198f57e2b477d5be3ecf09e09be7015c64225b8e517f9fcc239a41ab1( *, connection: typing.Optional[typing.Union[typing.Union[_cdktf_9a9027ec.SSHProvisionerConnection, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.WinrmProvisionerConnection, typing.Dict[builtins.str, typing.Any]]]] = None, count: typing.Optional[typing.Union[jsii.Number, _cdktf_9a9027ec.TerraformCount]] = None, depends_on: typing.Optional[typing.Sequence[_cdktf_9a9027ec.ITerraformDependable]] = None, for_each: typing.Optional[_cdktf_9a9027ec.ITerraformIterator] = None, lifecycle: typing.Optional[typing.Union[_cdktf_9a9027ec.TerraformResourceLifecycle, typing.Dict[builtins.str, typing.Any]]] = None, provider: typing.Optional[_cdktf_9a9027ec.TerraformProvider] = None, provisioners: typing.Optional[typing.Sequence[typing.Union[typing.Union[_cdktf_9a9027ec.FileProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.LocalExecProvisioner, typing.Dict[builtins.str, typing.Any]], typing.Union[_cdktf_9a9027ec.RemoteExecProvisioner, typing.Dict[builtins.str, typing.Any]]]]] = None, federation_settings_id: builtins.str, id: typing.Optional[builtins.str] = None, items_per_page: typing.Optional[jsii.Number] = None, page_num: typing.Optional[jsii.Number] = None, ) -> None: """Type checking stubs""" pass def _typecheckingstub__a539493ed49665514a6452a9414163563c4c98710ecc938f691e0d48b0d136c6( 
terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, wraps_set: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__1582280f8b7fbeda19b9e6bb03ec459aba191849beb86e22ea40e83eabfb2758( index: jsii.Number, ) -> None: """Type checking stubs""" pass def _typecheckingstub__858f88846ddfe44b704a088fc1b2a89cf78937d10032f5703ef747647a857e40( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__e137195153518adec7e83a237c15836b0b79a42322b2e7d8555d34e61a5a7ca4( value: _cdktf_9a9027ec.IInterpolatingParent, ) -> None: """Type checking stubs""" pass def _typecheckingstub__4dd4c882c3f0fc3327d5638cfbbeef2e9a94a46fa9aec048737a46c278444015( value: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__41e3e54eba397b7b296e34a1196c0902ab4478bb088c1b5e3988f257895b08ad( terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, complex_object_index: jsii.Number, complex_object_is_from_set: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__28f081d22641c03f849b3d8344bd83f9f816fad8c3c81f8b5a5430b474a68df1( value: typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResults], ) -> None: """Type checking stubs""" pass def _typecheckingstub__beb4c7b4d4d21e20d5a47b5e23487540c84c5ec5b19990ab1f24db4ca1dcf01e( terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, wraps_set: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__2f184a3d86f6b4eb3569125524a195547df904074b35516613c73f32294ec397( index: jsii.Number, ) -> None: """Type checking stubs""" pass def _typecheckingstub__2681fcc4dda5d91c774774f220b271ee2b3b7003dceceb20a9a97d9b63f70ebd( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__5684daf77a5251ef3cb44747f735a03b222adfe403c08b771110b52010808dcb( value: _cdktf_9a9027ec.IInterpolatingParent, ) -> None: """Type checking stubs""" pass def _typecheckingstub__c47af14a9ee596177ff148a1fb810ee50442ad376aa50b757abc6fd4277331a6( value: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__ffa673c5f3f597fe22a11b87c5f7de3a8d0eecd8451fbd94b738543dcfe2fb3c( terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, complex_object_index: jsii.Number, complex_object_is_from_set: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__1ca526e6dfde7c84fc15596130af02fff507520e8858163cfa0d5e52f45dda99( value: typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappings], ) -> None: """Type checking stubs""" pass def _typecheckingstub__61c5badcd49ef9610f4f966bee6fe3cbd0f6a21aeba6ac8dd1f62958b22cc037( terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, wraps_set: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__0adbdf0ccadf81be06817f1754b01ca52d5e190b469fc543c21d8fce46932f0c( index: jsii.Number, ) -> None: """Type checking stubs""" pass def _typecheckingstub__bf012923023227beea8455b87734ba13518fc7fc2603908b535c7fb8028e5ab6( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__f697945603f3692bfeb1f2300c61689407f7972d2433c0843e96276e7f14493e( value: _cdktf_9a9027ec.IInterpolatingParent, ) -> None: """Type checking stubs""" pass def _typecheckingstub__cc76aa768e02d5bceff95cb2a404b52d9c4c63068218c9166fdfea1b9411156d( value: builtins.bool, ) -> 
None: """Type checking stubs""" pass def _typecheckingstub__99897f7ed7f3aa5bddfc794bbbb62c4d8b8d972f28725f92ee4dc2ff694c2b5a( terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, complex_object_index: jsii.Number, complex_object_is_from_set: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__6d851664d76445303b34ee0f82e885cd4adbed59ee8cb3a2fae74cc3c87b6e2e( value: typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsRoleMappingsRoleAssignments], ) -> None: """Type checking stubs""" pass def _typecheckingstub__cd31073a6a6074a59887169e7a903b7ab40261defadd6503f4c1c69d9dc5dc45( terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, wraps_set: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__82970c2deb30bb9d3c8cbfb69ba2f1fdff36182ae28c41b59de7f9e99336645e( index: jsii.Number, ) -> None: """Type checking stubs""" pass def _typecheckingstub__5e484e7e9369b2117948fbed122c74f55d5c4e10f5cb45d748307f9d23a4c29f( value: builtins.str, ) -> None: """Type checking stubs""" pass def _typecheckingstub__17011a552de88803e5934a56f5aad8135b029721757069701472a07258d23b6d( value: _cdktf_9a9027ec.IInterpolatingParent, ) -> None: """Type checking stubs""" pass def _typecheckingstub__6bcd6641f32ad34fb70a47e2b3bacc525269adfe710ea6e21fb2d51e080c15d3( value: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__4d8e1faf0bd5fa114cdd072da1133d2b79037b9241a20ae0cebf55f5c476489f( terraform_resource: _cdktf_9a9027ec.IInterpolatingParent, terraform_attribute: builtins.str, complex_object_index: jsii.Number, complex_object_is_from_set: builtins.bool, ) -> None: """Type checking stubs""" pass def _typecheckingstub__b6e284da5561df0326ba76701efd24b6ff50dcfc3a568691c0c284dd0cbaf90e( value: typing.Optional[DataMongodbatlasFederatedSettingsOrgConfigsResultsUserConflicts], ) -> None: """Type checking stubs""" pass
PypiClean
/peek-storage-service-3.4.11.tar.gz/peek-storage-service-3.4.11/peek_storage_service/_private/alembic/objects/object_run_generic_python.py
sql = """ DROP FUNCTION IF EXISTS peek_storage_service.run_generic_python(character varying, character varying, character varying, character varying); CREATE OR REPLACE FUNCTION peek_storage_service.run_generic_python( args_tuple_json_str character varying, class_method_to_run_str_ character varying, class_method_to_import_tuples_ character varying, python_path character varying) RETURNS character varying LANGUAGE 'plpython3u' COST 100 VOLATILE AS $BODY$ import json from base64 import b64decode argsTupleJsonStr = args_tuple_json_str classMethodToRunStr = class_method_to_run_str_ classMethodToImportTuplesStr = class_method_to_import_tuples_ pythonPath = json.loads(python_path) # --------------- # Setup to use the virtual environment import sys sys.path.extend(pythonPath) from importlib.util import find_spec, module_from_spec # --------------- # Dynamically load the import tuple method if classMethodToImportTuplesStr and classMethodToImportTuplesStr != 'None': modName, className, methodName = classMethodToImportTuplesStr.rsplit('.',2) if modName in sys.modules: package_ = sys.modules[modName] else: modSpec = find_spec(modName) if not modSpec: raise Exception("Failed to find package %s," " is the python package installed?" % modName) package_ = modSpec.loader.load_module() Class_ = getattr(package_, className) importTupleMethod = getattr(Class_, methodName) importTupleMethod() # --------------- # Dynamically load the tuple create method modName, className, methodName = classMethodToRunStr.rsplit('.',2) if modName in sys.modules: package_ = sys.modules[modName] else: modSpec = find_spec(modName) if not modSpec: raise Exception("Failed to find package %s," " is the python package installed?" % modName) package_ = modSpec.loader.load_module() Class_ = getattr(package_, className) classMethodToRun = getattr(Class_, methodName) # --------------- # Load the arguments from peek_storage_service.plpython.RunPyInPg import _RunPyInPgResultTuple, _RunPyInPgArgTuple argsTuple = _RunPyInPgArgTuple()._fromJson(argsTupleJsonStr) # --------------- # Run the method result = classMethodToRun(plpy, *argsTuple.args, **argsTuple.kwargs) # --------------- # Return the result return _RunPyInPgResultTuple(result=result)._toJson() $BODY$; ALTER FUNCTION peek_storage_service.run_generic_python(character varying, character varying, character varying, character varying) OWNER TO peek; """
PypiClean
/portmod-2.6.2.tar.gz/portmod-2.6.2/test/_benchmarks/hash.py
import pytest

from portmod.globals import env
from portmodlib._deprecated.vfs import find_file
from portmodlib.portmod import _get_hash


def canimport(name: str) -> bool:
    """Returns true if the given module can be imported"""
    try:
        __import__(name)
        return True
    except ModuleNotFoundError:
        return False


@pytest.mark.skipif(
    not canimport("pytest_benchmark"),
    reason="requires pytest-benchmark",
)
@pytest.mark.parametrize("alg", ["BLAKE2B", "MD5", "SHA512", "BLAKE3"])
@pytest.mark.parametrize(
    "buffer", [16 * 1024, 65536, 5 * 1024 * 1024, 512 * 1024 * 1024]
)
@pytest.mark.parametrize("file", ["TR_Data.bsa", "Quill of Feyfolken.omwaddon"])
def test_get_hash_rust(benchmark, alg, buffer, file):
    """Benchmark hashing a file with a single algorithm at various buffer sizes

    Note: requires an openmw configuration and a prefix named openmw
    """
    env.set_prefix("openmw")
    file = find_file(file)

    def test():
        _get_hash(file, [alg], buffer)

    benchmark(test)


@pytest.mark.skipif(
    not canimport("pytest_benchmark"),
    reason="requires pytest-benchmark",
)
@pytest.mark.parametrize(
    "buffer", [16 * 1024, 65536, 5 * 1024 * 1024, 512 * 1024 * 1024]
)
@pytest.mark.parametrize("file", ["TR_Data.bsa", "Quill of Feyfolken.omwaddon"])
def test_get_hash_rust_multiple(benchmark, buffer, file):
    """Benchmark hashing a file with all supported algorithms in one pass

    Note: requires an openmw configuration and a prefix named openmw
    """
    env.set_prefix("openmw")
    file = find_file(file)

    def test():
        _get_hash(file, ["BLAKE2B", "MD5", "SHA512", "BLAKE3"], buffer)

    benchmark(test)


@pytest.mark.skipif(
    not canimport("pytest_benchmark"),
    reason="requires pytest-benchmark",
)
@pytest.mark.parametrize(
    "buffer",
    [1024 * elem for elem in range(1, 10)]
    + [1024 * 1024 * elem for elem in range(1, 10)]
    + [1024 * 1024 * 10 * elem for elem in range(1, 10)],
)
@pytest.mark.parametrize("file", ["TR_Data.bsa", "Quill of Feyfolken.omwaddon"])
def test_get_hash_rust_blake3(benchmark, buffer, file):
    """Benchmark BLAKE3 hashing across a fine-grained sweep of buffer sizes

    Note: requires an openmw configuration and a prefix named openmw
    """
    env.set_prefix("openmw")
    file = find_file(file)

    def test():
        _get_hash(file, ["BLAKE3"], buffer)

    benchmark(test)
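The `_get_hash` call being benchmarked is a native helper, so the `buffer` parameter controls how much of the file is read per iteration before being fed to each digest. As a point of comparison, a pure-Python equivalent of the same buffered, multi-algorithm pass might look like the sketch below (hashlib only, so BLAKE3 is omitted); this illustrates the access pattern, it is not portmod's actual implementation.

# Standalone illustration of buffered, multi-algorithm hashing; not portmod code.
import hashlib
from pathlib import Path
from typing import Dict, Iterable


def buffered_hashes(path: Path, algorithms: Iterable[str], buffer_size: int) -> Dict[str, str]:
    """Hash a file with several hashlib algorithms in a single buffered pass."""
    digests = {name: hashlib.new(name) for name in algorithms}
    with open(path, "rb") as f:
        while True:
            chunk = f.read(buffer_size)
            if not chunk:
                break
            # One read loop feeds every digest, mirroring what the benchmarks
            # above vary via the ``buffer`` parameter.
            for digest in digests.values():
                digest.update(chunk)
    return {name: digest.hexdigest() for name, digest in digests.items()}


# Example with a hypothetical file path:
# print(buffered_hashes(Path("TR_Data.bsa"), ["blake2b", "md5", "sha512"], 65536))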
PypiClean
/odoo12_addon_repair_discount-12.0.1.0.1-py3-none-any.whl/odoo/addons/repair_discount/models/mrp_repair.py
from odoo import api, fields, models
import odoo.addons.decimal_precision as dp


class RepairFee(models.Model):
    _inherit = 'repair.fee'

    @api.depends(
        'invoiced', 'price_unit', 'repair_id', 'product_uom_qty',
        'product_id', 'discount')  # depend on 'discount' so the subtotal recomputes
    def _compute_price_subtotal(self):
        for record in self:
            taxes = self.env['account.tax'].compute_all(
                record.price_unit,
                record.repair_id.pricelist_id.currency_id,
                record.product_uom_qty,
                record.product_id,
                record.repair_id.partner_id,
            )
            record.price_subtotal = (
                taxes['total_excluded']
                * (1 - (record.discount or 0.0) / 100.0)
            )

    discount = fields.Float(string='Discount (%)')
    price_subtotal = fields.Float(
        'Subtotal', compute='_compute_price_subtotal',
        digits=dp.get_precision('Account'))


class RepairLine(models.Model):
    _inherit = 'repair.line'

    @api.depends(
        'invoiced', 'price_unit', 'repair_id', 'product_uom_qty',
        'product_id', 'discount')
    def _compute_price_subtotal(self):
        for repair_line in self:
            taxes = self.env['account.tax'].compute_all(
                repair_line.price_unit,
                repair_line.repair_id.pricelist_id.currency_id,
                repair_line.product_uom_qty,
                repair_line.product_id,
                repair_line.repair_id.partner_id,
            )
            repair_line.price_subtotal = (
                taxes['total_excluded']
                * (1 - (repair_line.discount or 0.0) / 100.0)
            )

    discount = fields.Float(string='Discount (%)')
    price_subtotal = fields.Float(
        'Subtotal', compute='_compute_price_subtotal',
        digits=dp.get_precision('Account'))


class RepairOrder(models.Model):
    _inherit = 'repair.order'

    @api.multi
    def action_invoice_create(self, group=False):
        res = super(RepairOrder, self).action_invoice_create(group)
        for repair in self.filtered(
                lambda _repair: _repair.invoice_method != 'none'):
            operations = repair.operations
            fees_lines = repair.fees_lines
            # Propagate the repair line discounts onto the generated invoice lines.
            for op in operations.filtered(lambda item: item.invoice_line_id):
                op.invoice_line_id.discount = op.discount
            if operations:
                repair.invoice_id.compute_taxes()
            for fee_line in fees_lines.filtered(
                    lambda item: item.invoice_line_id):
                fee_line.invoice_line_id.discount = fee_line.discount
            if fees_lines:
                repair.invoice_id.compute_taxes()
        return res

    def _calculate_line_base_price(self, line):
        return line.price_unit * (1 - (line.discount or 0.0) / 100.0)

    @api.depends('operations', 'fees_lines',
                 'operations.invoiced', 'fees_lines.invoiced')
    def _amount_tax(self):
        for repair in self:
            taxed_amount = 0.0
            currency = repair.pricelist_id.currency_id
            for line in repair.operations:
                tax_calculate = line.tax_id.compute_all(
                    self._calculate_line_base_price(line),
                    self.pricelist_id.currency_id,
                    line.product_uom_qty,
                    line.product_id,
                    repair.partner_id,
                )
                for c in tax_calculate['taxes']:
                    taxed_amount += c['amount']
            for line in repair.fees_lines:
                tax_calculate = line.tax_id.compute_all(
                    self._calculate_line_base_price(line),
                    self.pricelist_id.currency_id,
                    line.product_uom_qty,
                    line.product_id,
                    repair.partner_id,
                )
                for c in tax_calculate['taxes']:
                    taxed_amount += c['amount']
            repair.amount_tax = currency.round(taxed_amount)
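The subtotal logic above first takes the tax-excluded amount and only then applies the percentage discount, while the tax amount is computed on the already-discounted base price. A small, framework-free illustration of the subtotal arithmetic (plain Python, not Odoo ORM code, with made-up figures) is shown below.

# Standalone illustration of the discount arithmetic used in
# _compute_price_subtotal above; the numbers are made up for the example.
price_unit = 100.0      # unit price of the repair line
product_uom_qty = 3     # quantity
discount = 10.0         # discount in percent

# account.tax.compute_all() returns the untaxed amount as 'total_excluded';
# assuming a tax-exclusive price here, that is simply price * quantity.
total_excluded = price_unit * product_uom_qty

price_subtotal = total_excluded * (1 - (discount or 0.0) / 100.0)
print(round(price_subtotal, 2))  # 270.0 -> 300.0 minus the 10% discount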
PypiClean
/h2o_pysparkling_3.2-3.42.0.2.post1.tar.gz/h2o_pysparkling_3.2-3.42.0.2.post1/h2o/estimators/kmeans.py
from h2o.estimators.estimator_base import H2OEstimator from h2o.exceptions import H2OValueError from h2o.frame import H2OFrame from h2o.utils.typechecks import assert_is_type, Enum, numeric class H2OKMeansEstimator(H2OEstimator): """ K-means Performs k-means clustering on an H2O dataset. """ algo = "kmeans" supervised_learning = False def __init__(self, model_id=None, # type: Optional[Union[None, str, H2OEstimator]] training_frame=None, # type: Optional[Union[None, str, H2OFrame]] validation_frame=None, # type: Optional[Union[None, str, H2OFrame]] nfolds=0, # type: int keep_cross_validation_models=True, # type: bool keep_cross_validation_predictions=False, # type: bool keep_cross_validation_fold_assignment=False, # type: bool fold_assignment="auto", # type: Literal["auto", "random", "modulo", "stratified"] fold_column=None, # type: Optional[str] ignored_columns=None, # type: Optional[List[str]] ignore_const_cols=True, # type: bool score_each_iteration=False, # type: bool k=1, # type: int estimate_k=False, # type: bool user_points=None, # type: Optional[Union[None, str, H2OFrame]] max_iterations=10, # type: int standardize=True, # type: bool seed=-1, # type: int init="furthest", # type: Literal["random", "plus_plus", "furthest", "user"] max_runtime_secs=0.0, # type: float categorical_encoding="auto", # type: Literal["auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"] export_checkpoints_dir=None, # type: Optional[str] cluster_size_constraints=None, # type: Optional[List[int]] ): """ :param model_id: Destination id for this model; auto-generated if not specified. Defaults to ``None``. :type model_id: Union[None, str, H2OEstimator], optional :param training_frame: Id of the training data frame. Defaults to ``None``. :type training_frame: Union[None, str, H2OFrame], optional :param validation_frame: Id of the validation data frame. Defaults to ``None``. :type validation_frame: Union[None, str, H2OFrame], optional :param nfolds: Number of folds for K-fold cross-validation (0 to disable or >= 2). Defaults to ``0``. :type nfolds: int :param keep_cross_validation_models: Whether to keep the cross-validation models. Defaults to ``True``. :type keep_cross_validation_models: bool :param keep_cross_validation_predictions: Whether to keep the predictions of the cross-validation models. Defaults to ``False``. :type keep_cross_validation_predictions: bool :param keep_cross_validation_fold_assignment: Whether to keep the cross-validation fold assignment. Defaults to ``False``. :type keep_cross_validation_fold_assignment: bool :param fold_assignment: Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify the folds based on the response variable, for classification problems. Defaults to ``"auto"``. :type fold_assignment: Literal["auto", "random", "modulo", "stratified"] :param fold_column: Column with cross-validation fold index assignment per observation. Defaults to ``None``. :type fold_column: str, optional :param ignored_columns: Names of columns to ignore for training. Defaults to ``None``. :type ignored_columns: List[str], optional :param ignore_const_cols: Ignore constant columns. Defaults to ``True``. :type ignore_const_cols: bool :param score_each_iteration: Whether to score during each iteration of model training. Defaults to ``False``. :type score_each_iteration: bool :param k: The max. number of clusters. 
If estimate_k is disabled, the model will find k centroids, otherwise it will find up to k centroids. Defaults to ``1``. :type k: int :param estimate_k: Whether to estimate the number of clusters (<=k) iteratively and deterministically. Defaults to ``False``. :type estimate_k: bool :param user_points: This option allows you to specify a dataframe, where each row represents an initial cluster center. The user-specified points must have the same number of columns as the training observations. The number of rows must equal the number of clusters Defaults to ``None``. :type user_points: Union[None, str, H2OFrame], optional :param max_iterations: Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds iteration) Defaults to ``10``. :type max_iterations: int :param standardize: Standardize columns before computing distances Defaults to ``True``. :type standardize: bool :param seed: RNG Seed Defaults to ``-1``. :type seed: int :param init: Initialization mode Defaults to ``"furthest"``. :type init: Literal["random", "plus_plus", "furthest", "user"] :param max_runtime_secs: Maximum allowed runtime in seconds for model training. Use 0 to disable. Defaults to ``0.0``. :type max_runtime_secs: float :param categorical_encoding: Encoding scheme for categorical features Defaults to ``"auto"``. :type categorical_encoding: Literal["auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"] :param export_checkpoints_dir: Automatically export generated models to this directory. Defaults to ``None``. :type export_checkpoints_dir: str, optional :param cluster_size_constraints: An array specifying the minimum number of points that should be in each cluster. The length of the constraints array has to be the same as the number of clusters. Defaults to ``None``. :type cluster_size_constraints: List[int], optional """ super(H2OKMeansEstimator, self).__init__() self._parms = {} self._id = self._parms['model_id'] = model_id self.training_frame = training_frame self.validation_frame = validation_frame self.nfolds = nfolds self.keep_cross_validation_models = keep_cross_validation_models self.keep_cross_validation_predictions = keep_cross_validation_predictions self.keep_cross_validation_fold_assignment = keep_cross_validation_fold_assignment self.fold_assignment = fold_assignment self.fold_column = fold_column self.ignored_columns = ignored_columns self.ignore_const_cols = ignore_const_cols self.score_each_iteration = score_each_iteration self.k = k self.estimate_k = estimate_k self.user_points = user_points self.max_iterations = max_iterations self.standardize = standardize self.seed = seed self.init = init self.max_runtime_secs = max_runtime_secs self.categorical_encoding = categorical_encoding self.export_checkpoints_dir = export_checkpoints_dir self.cluster_size_constraints = cluster_size_constraints @property def training_frame(self): """ Id of the training data frame. Type: ``Union[None, str, H2OFrame]``. :examples: >>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv") >>> predictors = ["AGE", "RACE", "DPROS", "DCAPS", ... "PSA", "VOL", "GLEASON"] >>> train, valid = prostate.split_frame(ratios=[.8], seed=1234) >>> pros_km = H2OKMeansEstimator(seed=1234) >>> pros_km.train(x=predictors, ... training_frame=train, ... 
validation_frame=valid) >>> pros_km.scoring_history() """ return self._parms.get("training_frame") @training_frame.setter def training_frame(self, training_frame): self._parms["training_frame"] = H2OFrame._validate(training_frame, 'training_frame') @property def validation_frame(self): """ Id of the validation data frame. Type: ``Union[None, str, H2OFrame]``. :examples: >>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv") >>> predictors = ["AGE", "RACE", "DPROS", "DCAPS", ... "PSA", "VOL", "GLEASON"] >>> train, valid = prostate.split_frame(ratios=[.8], seed=1234) >>> pros_km = H2OKMeansEstimator(seed=1234) >>> pros_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> pros_km.scoring_history() """ return self._parms.get("validation_frame") @validation_frame.setter def validation_frame(self, validation_frame): self._parms["validation_frame"] = H2OFrame._validate(validation_frame, 'validation_frame') @property def nfolds(self): """ Number of folds for K-fold cross-validation (0 to disable or >= 2). Type: ``int``, defaults to ``0``. :examples: >>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv") >>> predictors = ["AGMT","FNDX","HIGD","DEG","CHK", ... "AGP1","AGMN","LIV","AGLP"] >>> train, valid = benign.split_frame(ratios=[.8], seed=1234) >>> benign_km = H2OKMeansEstimator(nfolds=5, seed=1234) >>> benign_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> benign_km.scoring_history() """ return self._parms.get("nfolds") @nfolds.setter def nfolds(self, nfolds): assert_is_type(nfolds, None, int) self._parms["nfolds"] = nfolds @property def keep_cross_validation_models(self): """ Whether to keep the cross-validation models. Type: ``bool``, defaults to ``True``. :examples: >>> ozone = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/ozone.csv") >>> predictors = ["radiation","temperature","wind"] >>> train, valid = ozone.split_frame(ratios=[.8], seed=1234) >>> ozone_km = H2OKMeansEstimator(keep_cross_validation_models=True, ... nfolds=5, ... seed=1234) >>> ozone_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> ozone_km.scoring_history() """ return self._parms.get("keep_cross_validation_models") @keep_cross_validation_models.setter def keep_cross_validation_models(self, keep_cross_validation_models): assert_is_type(keep_cross_validation_models, None, bool) self._parms["keep_cross_validation_models"] = keep_cross_validation_models @property def keep_cross_validation_predictions(self): """ Whether to keep the predictions of the cross-validation models. Type: ``bool``, defaults to ``False``. :examples: >>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv") >>> predictors = ["AGE", "RACE", "DPROS", "DCAPS", ... "PSA", "VOL", "GLEASON"] >>> train, valid = prostate.split_frame(ratios=[.8], seed=1234) >>> pros_km = H2OKMeansEstimator(keep_cross_validation_predictions=True, ... nfolds=5, ... seed=1234) >>> pros_km.train(x=predictors, ... training_frame=train, ... 
validation_frame=valid) >>> pros_km.scoring_history() """ return self._parms.get("keep_cross_validation_predictions") @keep_cross_validation_predictions.setter def keep_cross_validation_predictions(self, keep_cross_validation_predictions): assert_is_type(keep_cross_validation_predictions, None, bool) self._parms["keep_cross_validation_predictions"] = keep_cross_validation_predictions @property def keep_cross_validation_fold_assignment(self): """ Whether to keep the cross-validation fold assignment. Type: ``bool``, defaults to ``False``. :examples: >>> ozone = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/ozone.csv") >>> predictors = ["radiation","temperature","wind"] >>> train, valid = ozone.split_frame(ratios=[.8], seed=1234) >>> ozone_km = H2OKMeansEstimator(keep_cross_validation_fold_assignment=True, ... nfolds=5, ... seed=1234) >>> ozone_km.train(x=predictors, ... training_frame=train) >>> ozone_km.scoring_history() """ return self._parms.get("keep_cross_validation_fold_assignment") @keep_cross_validation_fold_assignment.setter def keep_cross_validation_fold_assignment(self, keep_cross_validation_fold_assignment): assert_is_type(keep_cross_validation_fold_assignment, None, bool) self._parms["keep_cross_validation_fold_assignment"] = keep_cross_validation_fold_assignment @property def fold_assignment(self): """ Cross-validation fold assignment scheme, if fold_column is not specified. The 'Stratified' option will stratify the folds based on the response variable, for classification problems. Type: ``Literal["auto", "random", "modulo", "stratified"]``, defaults to ``"auto"``. :examples: >>> ozone = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/glm_test/ozone.csv") >>> predictors = ["radiation","temperature","wind"] >>> train, valid = ozone.split_frame(ratios=[.8], seed=1234) >>> ozone_km = H2OKMeansEstimator(fold_assignment="Random", ... nfolds=5, ... seed=1234) >>> ozone_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> ozone_km.scoring_history() """ return self._parms.get("fold_assignment") @fold_assignment.setter def fold_assignment(self, fold_assignment): assert_is_type(fold_assignment, None, Enum("auto", "random", "modulo", "stratified")) self._parms["fold_assignment"] = fold_assignment @property def fold_column(self): """ Column with cross-validation fold index assignment per observation. Type: ``str``. :examples: >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv") >>> predictors = ["displacement","power","weight","acceleration","year"] >>> fold_numbers = cars.kfold_column(n_folds=5, seed=1234) >>> fold_numbers.set_names(["fold_numbers"]) >>> cars = cars.cbind(fold_numbers) >>> print(cars['fold_numbers']) >>> cars_km = H2OKMeansEstimator(seed=1234) >>> cars_km.train(x=predictors, ... training_frame=cars, ... fold_column="fold_numbers") >>> cars_km.scoring_history() """ return self._parms.get("fold_column") @fold_column.setter def fold_column(self, fold_column): assert_is_type(fold_column, None, str) self._parms["fold_column"] = fold_column @property def ignored_columns(self): """ Names of columns to ignore for training. Type: ``List[str]``. """ return self._parms.get("ignored_columns") @ignored_columns.setter def ignored_columns(self, ignored_columns): assert_is_type(ignored_columns, None, [str]) self._parms["ignored_columns"] = ignored_columns @property def ignore_const_cols(self): """ Ignore constant columns. 
Type: ``bool``, defaults to ``True``. :examples: >>> cars = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/junit/cars_20mpg.csv") >>> predictors = ["displacement","power","weight","acceleration","year"] >>> cars["const_1"] = 6 >>> cars["const_2"] = 7 >>> train, valid = cars.split_frame(ratios=[.8], seed=1234) >>> cars_km = H2OKMeansEstimator(ignore_const_cols=True, ... seed=1234) >>> cars_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> cars_km.scoring_history() """ return self._parms.get("ignore_const_cols") @ignore_const_cols.setter def ignore_const_cols(self, ignore_const_cols): assert_is_type(ignore_const_cols, None, bool) self._parms["ignore_const_cols"] = ignore_const_cols @property def score_each_iteration(self): """ Whether to score during each iteration of model training. Type: ``bool``, defaults to ``False``. :examples: >>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv") >>> predictors = ["AGMT","FNDX","HIGD","DEG","CHK", ... "AGP1","AGMN","LIV","AGLP"] >>> train, valid = benign.split_frame(ratios=[.8], seed=1234) >>> benign_km = H2OKMeansEstimator(score_each_iteration=True, ... seed=1234) >>> benign_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> benign_km.scoring_history() """ return self._parms.get("score_each_iteration") @score_each_iteration.setter def score_each_iteration(self, score_each_iteration): assert_is_type(score_each_iteration, None, bool) self._parms["score_each_iteration"] = score_each_iteration @property def k(self): """ The max. number of clusters. If estimate_k is disabled, the model will find k centroids, otherwise it will find up to k centroids. Type: ``int``, defaults to ``1``. :examples: >>> seeds = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/flow_examples/seeds_dataset.txt") >>> predictors = seeds.columns[0:7] >>> train, valid = seeds.split_frame(ratios=[.8], seed=1234) >>> seeds_km = H2OKMeansEstimator(k=3, seed=1234) >>> seeds_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> seeds_km.scoring_history() """ return self._parms.get("k") @k.setter def k(self, k): assert_is_type(k, None, int) self._parms["k"] = k @property def estimate_k(self): """ Whether to estimate the number of clusters (<=k) iteratively and deterministically. Type: ``bool``, defaults to ``False``. :examples: >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv") >>> iris['class'] = iris['class'].asfactor() >>> predictors = iris.columns[:-1] >>> train, valid = iris.split_frame(ratios=[.8], seed=1234) >>> iris_kmeans = H2OKMeansEstimator(k=10, ... estimate_k=True, ... standardize=False, ... seed=1234) >>> iris_kmeans.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> iris_kmeans.scoring_history() """ return self._parms.get("estimate_k") @estimate_k.setter def estimate_k(self, estimate_k): assert_is_type(estimate_k, None, bool) self._parms["estimate_k"] = estimate_k @property def user_points(self): """ This option allows you to specify a dataframe, where each row represents an initial cluster center. The user- specified points must have the same number of columns as the training observations. The number of rows must equal the number of clusters Type: ``Union[None, str, H2OFrame]``. 
:examples: >>> iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv") >>> iris['class'] = iris['class'].asfactor() >>> predictors = iris.columns[:-1] >>> train, valid = iris.split_frame(ratios=[.8], seed=1234) >>> point1 = [4.9,3.0,1.4,0.2] >>> point2 = [5.6,2.5,3.9,1.1] >>> point3 = [6.5,3.0,5.2,2.0] >>> points = h2o.H2OFrame([point1, point2, point3]) >>> iris_km = H2OKMeansEstimator(k=3, ... user_points=points, ... seed=1234) >>> iris_km.train(x=predictors, ... training_frame=iris, ... validation_frame=valid) >>> iris_kmeans.tot_withinss(valid=True) """ return self._parms.get("user_points") @user_points.setter def user_points(self, user_points): self._parms["user_points"] = H2OFrame._validate(user_points, 'user_points') @property def max_iterations(self): """ Maximum training iterations (if estimate_k is enabled, then this is for each inner Lloyds iteration) Type: ``int``, defaults to ``10``. :examples: >>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv") >>> predictors = ["AGMT","FNDX","HIGD","DEG","CHK", ... "AGP1","AGMN","LIV","AGLP"] >>> train, valid = benign.split_frame(ratios=[.8], seed=1234) >>> benign_km = H2OKMeansEstimator(max_iterations=50) >>> benign_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> benign_km.scoring_history() """ return self._parms.get("max_iterations") @max_iterations.setter def max_iterations(self, max_iterations): assert_is_type(max_iterations, None, int) self._parms["max_iterations"] = max_iterations @property def standardize(self): """ Standardize columns before computing distances Type: ``bool``, defaults to ``True``. :examples: >>> boston = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/gbm_test/BostonHousing.csv") >>> predictors = boston.columns[:-1] >>> boston['chas'] = boston['chas'].asfactor() >>> train, valid = boston.split_frame(ratios=[.8]) >>> boston_km = H2OKMeansEstimator(standardize=True) >>> boston_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> boston_km.scoring_history() """ return self._parms.get("standardize") @standardize.setter def standardize(self, standardize): assert_is_type(standardize, None, bool) self._parms["standardize"] = standardize @property def seed(self): """ RNG Seed Type: ``int``, defaults to ``-1``. :examples: >>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv") >>> predictors = ["AGE", "RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"] >>> train, valid = prostate.split_frame(ratios=[.8], seed=1234) >>> pros_w_seed = H2OKMeansEstimator(seed=1234) >>> pros_w_seed.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> pros_wo_seed = H2OKMeansEstimator() >>> pros_wo_seed.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> pros_w_seed.scoring_history() >>> pros_wo_seed.scoring_history() """ return self._parms.get("seed") @seed.setter def seed(self, seed): assert_is_type(seed, None, int) self._parms["seed"] = seed @property def init(self): """ Initialization mode Type: ``Literal["random", "plus_plus", "furthest", "user"]``, defaults to ``"furthest"``. :examples: >>> seeds = h2o.import_file("https://s3.amazonaws.com/h2o-public-test-data/smalldata/flow_examples/seeds_dataset.txt") >>> predictors = seeds.columns[0:7] >>> train, valid = seeds.split_frame(ratios=[.8], seed=1234) >>> seeds_km = H2OKMeansEstimator(k=3, ... 
init='Furthest', ... seed=1234) >>> seeds_km.train(x=predictors, ... training_frame=train, ... validation_frame= valid) >>> seeds_km.scoring_history() """ return self._parms.get("init") @init.setter def init(self, init): assert_is_type(init, None, Enum("random", "plus_plus", "furthest", "user")) self._parms["init"] = init @property def max_runtime_secs(self): """ Maximum allowed runtime in seconds for model training. Use 0 to disable. Type: ``float``, defaults to ``0.0``. :examples: >>> benign = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/logreg/benign.csv") >>> predictors = ["AGMT","FNDX","HIGD","DEG","CHK", ... "AGP1","AGMN","LIV","AGLP"] >>> train, valid = benign.split_frame(ratios=[.8], seed=1234) >>> benign_km = H2OKMeansEstimator(max_runtime_secs=10, ... seed=1234) >>> benign_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> benign_km.scoring_history() """ return self._parms.get("max_runtime_secs") @max_runtime_secs.setter def max_runtime_secs(self, max_runtime_secs): assert_is_type(max_runtime_secs, None, numeric) self._parms["max_runtime_secs"] = max_runtime_secs @property def categorical_encoding(self): """ Encoding scheme for categorical features Type: ``Literal["auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited"]``, defaults to ``"auto"``. :examples: >>> prostate = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/prostate/prostate.csv") >>> predictors = ["AGE", "RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON"] >>> train, valid = prostate.split_frame(ratios=[.8], seed=1234) >>> encoding = "one_hot_explicit" >>> pros_km = H2OKMeansEstimator(categorical_encoding=encoding, ... seed=1234) >>> pros_km.train(x=predictors, ... training_frame=train, ... validation_frame=valid) >>> pros_km.scoring_history() """ return self._parms.get("categorical_encoding") @categorical_encoding.setter def categorical_encoding(self, categorical_encoding): assert_is_type(categorical_encoding, None, Enum("auto", "enum", "one_hot_internal", "one_hot_explicit", "binary", "eigen", "label_encoder", "sort_by_response", "enum_limited")) self._parms["categorical_encoding"] = categorical_encoding @property def export_checkpoints_dir(self): """ Automatically export generated models to this directory. Type: ``str``. :examples: >>> import tempfile >>> from os import listdir >>> airlines = h2o.import_file("http://s3.amazonaws.com/h2o-public-test-data/smalldata/airlines/allyears2k_headers.zip", destination_frame="air.hex") >>> predictors = ["DayofMonth", "DayOfWeek"] >>> checkpoints_dir = tempfile.mkdtemp() >>> air_km = H2OKMeansEstimator(export_checkpoints_dir=checkpoints_dir, ... seed=1234) >>> air_km.train(x=predictors, training_frame=airlines) >>> len(listdir(checkpoints_dir)) """ return self._parms.get("export_checkpoints_dir") @export_checkpoints_dir.setter def export_checkpoints_dir(self, export_checkpoints_dir): assert_is_type(export_checkpoints_dir, None, str) self._parms["export_checkpoints_dir"] = export_checkpoints_dir @property def cluster_size_constraints(self): """ An array specifying the minimum number of points that should be in each cluster. The length of the constraints array has to be the same as the number of clusters. Type: ``List[int]``. :examples: >>> iris_h2o = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris.csv") >>> k=3 >>> start_points = h2o.H2OFrame( ... [[4.9, 3.0, 1.4, 0.2], ... 
[5.6, 2.5, 3.9, 1.1], ... [6.5, 3.0, 5.2, 2.0]]) >>> kmm = H2OKMeansEstimator(k=k, ... user_points=start_points, ... standardize=True, ... cluster_size_constraints=[2, 5, 8], ... score_each_iteration=True) >>> kmm.train(x=list(range(7)), training_frame=iris_h2o) >>> kmm.scoring_history() """ return self._parms.get("cluster_size_constraints") @cluster_size_constraints.setter def cluster_size_constraints(self, cluster_size_constraints): assert_is_type(cluster_size_constraints, None, [int]) self._parms["cluster_size_constraints"] = cluster_size_constraints
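A minimal end-to-end usage sketch for the estimator defined above, assuming a reachable H2O cluster; the dataset URL, split, and scoring calls mirror the docstring examples, so any frame with numeric predictors can be substituted.

import h2o
from h2o.estimators.kmeans import H2OKMeansEstimator

h2o.init()  # connect to (or start) a local H2O cluster

iris = h2o.import_file("http://h2o-public-test-data.s3.amazonaws.com/smalldata/iris/iris_wheader.csv")
predictors = iris.columns[:-1]                      # drop the class label
train, valid = iris.split_frame(ratios=[.8], seed=1234)

# estimate_k lets the algorithm choose k <= 10 deterministically instead of fixing it.
iris_km = H2OKMeansEstimator(k=10, estimate_k=True, standardize=True, seed=1234)
iris_km.train(x=predictors, training_frame=train, validation_frame=valid)

print(iris_km.scoring_history())
print(iris_km.tot_withinss(valid=True))             # total within-cluster sum of squares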
PypiClean
/graph_learn-1.2.0a1-cp37-cp37m-manylinux_2_24_x86_64.whl/graphlearn/python/nn/tf/model/ego_gnn.py
from __future__ import absolute_import from __future__ import division from __future__ import print_function """EgoGraph based GNN model.""" try: # https://www.tensorflow.org/guide/migrate import tensorflow.compat.v1 as tf tf.disable_v2_behavior() except ImportError: import tensorflow as tf from graphlearn.python.nn.tf.config import conf from graphlearn.python.nn.tf.module import Module class EgoGNN(Module): """ Represents `EgoGraph` based GNN models. Args: layers: A list, each element is an `EgoLayer`. bn_func: Batch normalization function for hidden layers' output. Default is None, which means batch normalization will not be performed. act_func: Activation function for hidden layers' output. Default is tf.nn.relu. dropout: Dropout rate for hidden layers' output. Default is 0.0, which means dropout will not be performed. The optional value is a float. """ def __init__(self, layers, bn_func=None, act_func=tf.nn.relu, dropout=0.0, **kwargs): super(EgoGNN, self).__init__() self.layers = layers self.bn_func = bn_func self.active_func = act_func self.dropout = dropout def forward(self, graph): """ Update node embeddings through the given ego layers. h^{i} is a list, 0 <= i <= n, where n is len(layers). h^{i} = [ h_{0}^{i}, h_{1}^{i}, h_{2}^{i}, ... , h_{n - i}^{i} ] For 3 layers, we need nodes and 3-hop neighbors in the graph object. h^{0} = [ h_{0}^{0}, h_{1}^{0}, h_{2}^{0}, h_{3}^{0} ] h^{1} = [ h_{0}^{1}, h_{1}^{1}, h_{2}^{1} ] h^{2} = [ h_{0}^{2}, h_{1}^{2} ] h^{3} = [ h_{0}^{3} ] For initialization, h_{0}^{0} = graph.src h_{1}^{0} = graph.hop_node{1} h_{2}^{0} = graph.hop_node{2} h_{3}^{0} = graph.hop_node{3} Then we apply h^{i} = layer_{i}(h^{i-1}), and h_{0}^{3} is the final returned value. Args: graph: an `EgoGraph` object. Return: A tensor with shape [batch_size, output_dim], where `output_dim` is the same with layers[-1]. """ graph = graph.transform() # feature transformation of `EgoGrpah` # h^{0} h = [graph.src] for i in range(len(self.layers)): h.append(graph.hop_node(i)) hops = graph.nbr_nums for i in range(len(self.layers) - 1): # h^{i} current_hops = hops if i == 0 else hops[:-i] h = self.layers[i].forward(h, current_hops) H = [] for x in h: if self.bn_func is not None: x = self.bn_func(x) if self.active_func is not None: x = self.active_func(x) if self.dropout and conf.training: x = tf.nn.dropout(x, keep_prob=1-self.dropout) H.append(x) h = H # The last layer h = self.layers[-1].forward(h, [hops[0]]) assert len(h) == 1 return h[0]
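The hop-list bookkeeping in forward() can be hard to follow, so here is a toy stand-in in plain NumPy (not the real EgoLayer/EgoGraph API) showing how h^{i} loses one entry per layer until only the src embedding remains; the fanouts and the mean aggregation are illustrative assumptions.

import numpy as np

def toy_ego_layer(h, fanouts):
    # Aggregate each hop into the hop above it: own embedding + mean of its neighbours.
    out = []
    for i in range(len(h) - 1):
        nbrs = h[i + 1].reshape(h[i].shape[0], fanouts[i], -1)
        out.append(h[i] + nbrs.mean(axis=1))
    return out

batch, dim = 4, 8
fanouts = [5, 3]                                    # 5 one-hop, 3 two-hop neighbours per node

h = [np.random.rand(batch, dim),                    # h_{0}^{0}: src nodes
     np.random.rand(batch * 5, dim),                # h_{1}^{0}: 1-hop neighbours
     np.random.rand(batch * 5 * 3, dim)]            # h_{2}^{0}: 2-hop neighbours

h = toy_ego_layer(h, fanouts)        # h^{1}: 3 entries -> 2
h = toy_ego_layer(h, fanouts[:-1])   # h^{2}: 2 entries -> 1
print(h[0].shape)                    # (4, 8) -- one embedding per src node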
PypiClean
/simoptlib-1.0.2.tar.gz/simoptlib-1.0.2/simopt/solvers/spsa.py
import numpy as np from ..base import Solver class SPSA(Solver): """ Simultaneous perturbation stochastic approximation (SPSA) is an algorithm for optimizing systems with multiple unknown parameters. Attributes ---------- name : string name of solver objective_type : string description of objective types: "single" or "multi" constraint_type : string description of constraints types: "unconstrained", "box", "deterministic", "stochastic" variable_type : string description of variable types: "discrete", "continuous", "mixed" gradient_needed : bool indicates if gradient of objective function is needed factors : dict changeable factors (i.e., parameters) of the solver specifications : dict details of each factor (for GUI, data validation, and defaults) rng_list : list of mrg32k3a.mrg32k3a.MRG32k3a objects list of RNGs used for the solver's internal purposes Parameters ---------- name : str user-specified name for solver fixed_factors : dict fixed_factors of the solver See also -------- base.Solver """ def __init__(self, name="SPSA", fixed_factors=None): if fixed_factors is None: fixed_factors = {} self.name = name self.objective_type = "single" self.constraint_type = "box" self.variable_type = "continuous" self.gradient_needed = False self.specifications = { "crn_across_solns": { "description": "use CRN across solutions?", "datatype": bool, "default": True }, "alpha": { "description": "non-negative coefficient in the SPSA gain sequecence ak", "datatype": float, "default": 0.602 }, "gamma": { "description": "non-negative coefficient in the SPSA gain sequence ck", "datatype": float, "default": 0.101 }, "step": { "description": "initial desired magnitude of change in the theta elements", "datatype": float, "default": 0.1 }, "gavg": { "description": "averaged SP gradients used per iteration", "datatype": int, "default": 1 }, "n_reps": { "description": "number of replications takes at each solution", "datatype": int, "default": 30 }, "n_loss": { "description": "number of loss function evaluations used in this gain calculation", "datatype": int, "default": 2 }, "eval_pct": { "description": "percentage of the expected number of loss evaluations per run", "datatype": float, "default": 2 / 3 }, "iter_pct": { "description": "percentage of the maximum expected number of iterations", "datatype": float, "default": 0.1 } } self.check_factor_list = { "alpha": self.check_alpha, "gamma": self.check_gamma, "step": self.check_step, "gavg": self.check_gavg, "n_reps": self.check_n_reps, "n_loss": self.check_n_loss, "eval_pct": self.check_eval_pct, "iter_pct": self.check_iter_pct } super().__init__(fixed_factors) def check_alpha(self): return self.factors["alpha"] > 0 def check_gamma(self): return self.factors["gamma"] > 0 def check_step(self): return self.factors["step"] > 0 def check_gavg(self): return self.factors["gavg"] > 0 def check_n_reps(self): return self.factors["n_reps"] > 0 def check_n_loss(self): return self.factors["n_loss"] > 0 def check_eval_pct(self): return 0 < self.factors["eval_pct"] <= 1 def check_iter_pct(self): return 0 < self.factors["iter_pct"] <= 1 def check_problem_factors(self): # Check divisibility for the for loop. return self.factors["n_loss"] % (2 * self.factors["gavg"]) == 0 def gen_simul_pert_vec(self, dim): """ Generate a new simulatanious pertubation vector with a 50/50 probability discrete distribution, with values of -1 and 1. The vector size is the problem's dimension. The vector components are independent from each other. Parameters ---------- dim : int Length of the vector. 
Returns ------- list Vector of -1's and 1's. """ SP_vect = self.rng_list[2].choices([-1, 1], [.5, .5], k=dim) return SP_vect def solve(self, problem): """ Run a single macroreplication of a solver on a problem. Parameters ---------- problem : Problem object simulation-optimization problem to solve crn_across_solns : bool indicates if CRN are used when simulating different solutions Returns ------- recommended_solns : list of Solution objects list of solutions recommended throughout the budget intermediate_budgets : list of ints list of intermediate budgets when recommended solutions changes """ recommended_solns = [] intermediate_budgets = [] expended_budget = 0 # problem.minmax = [int(i) for i in problem.minmax] # Start at initial solution and record as best. theta = problem.factors["initial_solution"] theta_sol = self.create_new_solution(tuple(theta), problem) recommended_solns.append(theta_sol) intermediate_budgets.append(expended_budget) # Simulate initial solution. problem.simulate(theta_sol, self.factors["n_reps"]) expended_budget = self.factors["n_reps"] # Determine initial value for the parameters c, a, and A (Aalg) (according to Section III.B of Spall (1998)). c = float(max((theta_sol.objectives_var / self.factors["gavg"]) ** 0.5, .0001)) # Calculating the maximum expected number of loss evaluations per run. nEvals = round((problem.factors["budget"] / self.factors["n_reps"]) * self.factors["eval_pct"]) Aalg = self.factors["iter_pct"] * nEvals / (2 * self.factors["gavg"]) gbar = np.zeros((1, problem.dim)) for _ in range(int(self.factors["n_loss"] / (2 * self.factors["gavg"]))): ghat = np.zeros((1, problem.dim)) for _ in range(self.factors["gavg"]): # Generate a random random direction (delta). delta = self.gen_simul_pert_vec(problem.dim) # Determine points forward/backward relative to random direction. thetaplus = np.add(theta, np.dot(c, delta)) thetaminus = np.subtract(theta, np.dot(c, delta)) thetaplus, step_weight_plus = check_cons(thetaplus, theta, problem.lower_bounds, problem.upper_bounds) thetaminus, step_weight_minus = check_cons(thetaminus, theta, problem.lower_bounds, problem.upper_bounds) thetaplus_sol = self.create_new_solution(tuple(thetaplus), problem) thetaminus_sol = self.create_new_solution(tuple(thetaminus), problem) # Evaluate two points and update budget spent. problem.simulate(thetaplus_sol, self.factors["n_reps"]) problem.simulate(thetaminus_sol, self.factors["n_reps"]) expended_budget += 2 * self.factors["n_reps"] # Estimate gradient. # (-minmax is needed to cast this as a minimization problem, # but is not essential here because of the absolute value taken.) ghat += np.dot(-1, problem.minmax) * np.divide((thetaplus_sol.objectives_mean - thetaminus_sol.objectives_mean) / ((step_weight_plus + step_weight_minus) * c), delta) gbar += np.abs(np.divide(ghat, self.factors["gavg"])) meangbar = np.mean(gbar) / (self.factors["n_loss"] / (2 * self.factors["gavg"])) a = self.factors["step"] * ((Aalg + 1) ** self.factors["alpha"]) / meangbar # Run the main algorithm. # Initiate iteration counter. k = 0 while expended_budget < problem.factors["budget"]: k += 1 # Calculate the gain sequences ak and ck. ak = a / (k + Aalg) ** self.factors["alpha"] ck = c / (k ** self.factors["gamma"]) # Generate random direction (delta). delta = self.gen_simul_pert_vec(problem.dim) # Determine points forward/backward relative to random direction. 
thetaplus = np.add(theta, np.dot(ck, delta)) thetaminus = np.subtract(theta, np.dot(ck, delta)) thetaplus, step_weight_plus = check_cons(thetaplus, theta, problem.lower_bounds, problem.upper_bounds) thetaminus, step_weight_minus = check_cons(thetaminus, theta, problem.lower_bounds, problem.upper_bounds) thetaplus_sol = self.create_new_solution(tuple(thetaplus), problem) thetaminus_sol = self.create_new_solution(tuple(thetaminus), problem) # Evaluate two points and update budget spent. problem.simulate(thetaplus_sol, self.factors["n_reps"]) problem.simulate(thetaminus_sol, self.factors["n_reps"]) expended_budget += 2 * self.factors["n_reps"] # Estimate current solution's objective funtion value by weighted average. ftheta = ((thetaplus_sol.objectives_mean * step_weight_minus) + (thetaminus_sol.objectives_mean * step_weight_plus)) / (step_weight_plus + step_weight_minus) # If on the first iteration, record the initial solution as best estimated objective. if k == 1: ftheta_best = ftheta # Check if new solution is better than the best recorded and update accordingly. if np.dot(-1, problem.minmax) * ftheta < np.dot(-1, problem.minmax) * ftheta_best: ftheta_best = ftheta # Record data from the new best solution. recommended_solns.append(theta_sol) intermediate_budgets.append(expended_budget) # Estimate gradient. (-minmax is needed to cast this as a minimization problem.) ghat = np.dot(-1, problem.minmax) * np.divide((thetaplus_sol.objectives_mean - thetaminus_sol.objectives_mean) / ((step_weight_plus + step_weight_minus) * c), delta) # Take step and check feasibility. theta_next = np.subtract(theta, np.dot(ak, ghat)) theta, _ = check_cons(theta_next, theta, problem.lower_bounds, problem.upper_bounds) theta_sol = self.create_new_solution(tuple(theta), problem) return recommended_solns, intermediate_budgets def check_cons(candidate_x, new_x, lower_bound, upper_bound): """Evaluates the distance from the new vector (candiate_x) compared to the current vector (new_x) respecting the vector's boundaries of feasibility. Returns the evaluated vector (modified_x) and the weight (t2 - how much of a full step took) of the new vector. The weight (t2) is used to calculate the weigthed average in the ftheta calculation.""" # The current step. stepV = np.subtract(candidate_x, new_x) # Form a matrix to determine the possible stepsize. tmaxV = np.ones((2, len(candidate_x))) for i in range(0, len(candidate_x)): if stepV[i] > 0: tmaxV[0, i] = (upper_bound[i] - new_x[i]) / stepV[i] elif stepV[i] < 0: tmaxV[1, i] = (lower_bound[i] - new_x[i]) / stepV[i] # Find the minimum stepsize. t2 = tmaxV.min() # Calculate the modified x. modified_x = new_x + t2 * stepV return modified_x, t2
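The two-point gradient estimate and the a_k/c_k gain sequences used above can be seen in isolation on a toy problem; this stand-alone sketch minimizes a noisy quadratic with plain NumPy instead of a simopt Problem object, keeping the same update form (the constants are illustrative, not the solver's tuned defaults).

import numpy as np

rng = np.random.default_rng(0)

def loss(theta):                        # noisy objective to minimize
    return np.sum(theta ** 2) + rng.normal(scale=0.01)

theta = np.array([2.0, -1.5])
a, c, A, alpha, gamma = 0.1, 0.1, 10, 0.602, 0.101

for k in range(1, 201):
    ak = a / (k + A) ** alpha           # step-size gain sequence
    ck = c / k ** gamma                 # perturbation gain sequence
    delta = rng.choice([-1.0, 1.0], size=theta.shape)   # simultaneous perturbation vector
    # Two loss evaluations give an estimate of the full gradient.
    ghat = (loss(theta + ck * delta) - loss(theta - ck * delta)) / (2 * ck * delta)
    theta = theta - ak * ghat

print(theta)                            # close to the optimum [0, 0]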
PypiClean
/aliby_post-0.1.36-py3-none-any.whl/postprocessor/core/processes/bud_metric.py
from typing import Dict, Tuple import numpy as np import pandas as pd from agora.utils.lineage import mb_array_to_dict from postprocessor.core.processes.lineageprocess import ( LineageProcess, LineageProcessParameters, ) class bud_metricParameters(LineageProcessParameters): """ Parameters """ _defaults = {"lineage_location": "postprocessing/lineage_merged"} class bud_metric(LineageProcess): """ Requires mother-bud information to create a new dataframe where the indices are mother ids and values are the daughters' values for a given signal. """ def __init__(self, parameters: bud_metricParameters): super().__init__(parameters) def run( self, signal: pd.DataFrame, mother_bud_ids: Dict[pd.Index, Tuple[pd.Index]] = None, ): if mother_bud_ids is None: filtered_lineage = self.filter_signal_cells(signal) mother_bud_ids = mb_array_to_dict(filtered_lineage) return self.get_bud_metric(signal, mother_bud_ids) @staticmethod def get_bud_metric(signal: pd.DataFrame, md: Dict[Tuple, Tuple[Tuple]]): """ signal: Daughter-inclusive dataframe md: Mother-daughters dictionary where key is mother's index and value a list of daugher indices Get fvi (First Valid Index) for all cells Create empty matrix for every mother: - Get daughters' subdataframe - sort daughters by cell label - get series of fvis - concatenate the values of these ranges from the dataframe Fill the empty matrix Convert matrix into dataframe using mother indices """ mothers_mat = np.zeros((len(md), signal.shape[1])) for i, daughters in enumerate(md.values()): dau_vals = signal.loc[set(daughters)].droplevel("trap") sorted_da_ids = dau_vals.sort_index(level="cell_label") tp_fvt = sorted_da_ids.apply( lambda x: x.last_valid_index(), axis=0 ) tp_fvt = sorted_da_ids.index.get_indexer(tp_fvt) tp_fvt[tp_fvt < 0] = sorted_da_ids.shape[0] - 1 buds_metric = np.choose(tp_fvt, sorted_da_ids.values) # mothers_mat[i, tp_fvt[0] : tp_fvt[0] + len(buds_metric)] = buds_metric mothers_mat[i] = buds_metric df = pd.DataFrame(mothers_mat, index=md.keys(), columns=signal.columns) df.index.names = signal.index.names return df def load_lineage(self, lineage): """ Reshape the lineage information if needed """ self.lineage = lineage
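A much-simplified pandas illustration of what get_bud_metric assembles (it skips the np.choose / first-valid-index machinery above): one output row per mother, filled at each time point from whichever daughter has a value, with a toy (trap, cell_label)-indexed signal standing in for the real data.

import numpy as np
import pandas as pd

idx = pd.MultiIndex.from_tuples([(1, 2), (1, 3), (1, 5)], names=["trap", "cell_label"])
signal = pd.DataFrame(
    [[np.nan, 1.0, 1.2], [np.nan, np.nan, 2.0], [0.5, 0.6, np.nan]],
    index=idx, columns=[0, 1, 2],                   # columns are time points
)

# mother (trap, label) -> tuple of daughter (trap, label) indices
md = {(1, 1): ((1, 2), (1, 3)), (1, 4): ((1, 5),)}

rows = []
for daughters in md.values():
    # take, per time point, the first daughter value that is present
    rows.append(signal.loc[list(daughters)].bfill().iloc[0])
mothers = pd.DataFrame(rows, index=pd.MultiIndex.from_tuples(md.keys(), names=idx.names))
print(mothers)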
PypiClean
/cohesity-sdk-1.1.0.tar.gz/cohesity-sdk-1.1.0/cohesity_sdk/cluster/model/recover_netapp_files_params.py
import re # noqa: F401 import sys # noqa: F401 from cohesity_sdk.cluster.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) def lazy_import(): from cohesity_sdk.cluster.model.netapp_recover_file_and_folder_info import NetappRecoverFileAndFolderInfo globals()['NetappRecoverFileAndFolderInfo'] = NetappRecoverFileAndFolderInfo class RecoverNetappFilesParams(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { ('target_environment',): { 'KELASTIFILE': "kElastifile", 'KFLASHBLADE': "kFlashBlade", 'KGENERICNAS': "kGenericNas", 'KGPFS': "kGPFS", 'KISILON': "kIsilon", 'KNETAPP': "kNetapp", }, } validations = { } additional_properties_type = None _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. 
""" lazy_import() return { 'files_and_folders': ([NetappRecoverFileAndFolderInfo], none_type,), # noqa: E501 'target_environment': (str,), # noqa: E501 'is_from_source_initiated_protection': (bool, none_type,), # noqa: E501 'netapp_target_params': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501 'elastifile_target_params': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501 'flashblade_target_params': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501 'generic_nas_target_params': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501 'gpfs_target_params': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501 'isilon_target_params': ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'files_and_folders': 'filesAndFolders', # noqa: E501 'target_environment': 'targetEnvironment', # noqa: E501 'is_from_source_initiated_protection': 'isFromSourceInitiatedProtection', # noqa: E501 'netapp_target_params': 'netappTargetParams', # noqa: E501 'elastifile_target_params': 'elastifileTargetParams', # noqa: E501 'flashblade_target_params': 'flashbladeTargetParams', # noqa: E501 'generic_nas_target_params': 'genericNasTargetParams', # noqa: E501 'gpfs_target_params': 'gpfsTargetParams', # noqa: E501 'isilon_target_params': 'isilonTargetParams', # noqa: E501 } _composed_schemas = {} required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, files_and_folders, target_environment, *args, **kwargs): # noqa: E501 """RecoverNetappFilesParams - a model defined in OpenAPI Args: files_and_folders ([NetappRecoverFileAndFolderInfo], none_type): Specifies the list of info about the netapp files and folders to be recovered. target_environment (str): Specifies the environment of the recovery target. The corresponding params below must be filled out. Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. 
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) is_from_source_initiated_protection (bool, none_type): Specifies if the snapshot trying to recover is from a source initiated protection.. [optional] # noqa: E501 netapp_target_params ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Specifies the params for a Netapp recovery target.. [optional] # noqa: E501 elastifile_target_params ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Specifies the params for an Elastifile recovery target.. [optional] # noqa: E501 flashblade_target_params ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Specifies the params for a Flashblade recovery target.. [optional] # noqa: E501 generic_nas_target_params ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Specifies the params for a generic NAS recovery target.. [optional] # noqa: E501 gpfs_target_params ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Specifies the params for a GPFS recovery target.. [optional] # noqa: E501 isilon_target_params ({str: (bool, date, datetime, dict, float, int, list, str, none_type)}, none_type): Specifies the params for an Isilon recovery target.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.files_and_folders = files_and_folders self.target_environment = target_environment for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value)
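A construction sketch for the model above; target_environment and the *_target_params keys come straight from allowed_values and openapi_types, but `absolute_path` is only an assumption about what NetappRecoverFileAndFolderInfo needs, since that class is defined in its own module and not shown here.

from cohesity_sdk.cluster.model.netapp_recover_file_and_folder_info import NetappRecoverFileAndFolderInfo
from cohesity_sdk.cluster.model.recover_netapp_files_params import RecoverNetappFilesParams

# NOTE: absolute_path is assumed; check NetappRecoverFileAndFolderInfo's own
# docstring for the fields it actually requires.
file_info = NetappRecoverFileAndFolderInfo(absolute_path="/vol1/reports/q3.csv")

params = RecoverNetappFilesParams(
    files_and_folders=[file_info],
    target_environment="kNetapp",       # must be one of the allowed_values listed above
    netapp_target_params={},            # free-form dict, see openapi_types above
)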
PypiClean
/inigo_py-0.27.8.tar.gz/inigo_py-0.27.8/inigo_py/flask/middleware.py
import ctypes import json import os import sys from importlib import import_module from io import BytesIO from urllib.parse import parse_qs from werkzeug.datastructures import EnvironHeaders from inigo_py import ffi, Query class Middleware: def __init__(self, app): self.app = app.wsgi_app self.instance = 0 if ffi.library is None: # library is not found, skip middleware initialization return # default values self.path = '/graphql' c = ffi.Config() c.disable_response_data = False inigo_settings = {} if 'INIGO' in app.config: inigo_settings = app.config.get('INIGO') if inigo_settings.get('ENABLE') is False: return # process Inigo settings if inigo_settings.get('DEBUG'): c.debug = inigo_settings.get('DEBUG') else: # use regular DEBUG setting if specific is not provided if 'DEBUG' in app.config: c.debug = app.config.get('DEBUG') if inigo_settings.get('TOKEN'): c.token = str.encode(inigo_settings.get('TOKEN')) schema = None if inigo_settings.get('GRAPHENE_SCHEMA'): schema = import_string(inigo_settings.get('GRAPHENE_SCHEMA')) elif inigo_settings.get('SCHEMA_PATH'): if os.path.isfile(inigo_settings.get('SCHEMA_PATH')): with open(inigo_settings.get('SCHEMA_PATH'), 'r') as f: schema = f.read() elif 'GRAPHENE' in app.config and app.config.get('GRAPHENE').get('SCHEMA'): schema = import_string(app.config.get('GRAPHENE').get('SCHEMA')) if schema: c.schema = str.encode(str(schema)) if inigo_settings.get('PATH'): self.path = inigo_settings.get('PATH') # create Inigo instance self.instance = ffi.create(ctypes.byref(c)) error = ffi.check_lasterror() if error: print("INIGO: " + error.decode('utf-8')) if self.instance == 0: print("INIGO: error, instance can not be created") def __call__(self, environ, start_response): # ignore execution if Inigo is not initialized if self.instance == 0: return self.app(environ, start_response) # 'path' guard -> /graphql if environ['PATH_INFO'] != self.path: return self.app(environ, start_response) request_method = environ['REQUEST_METHOD'] # graphiql request if request_method == 'GET' and ("text/html" in environ.get('HTTP_ACCEPT', '*/*')): return self.app(environ, start_response) # support only POST and GET requests if request_method != 'POST' and request_method != 'GET': return self.app(environ, start_response) # parse request g_req: bytes = b'' if request_method == "POST": # Get the request body from the environ as reading from global request object caches it. 
if environ.get('wsgi.input'): content_length = environ.get('CONTENT_LENGTH') if content_length == '-1': g_req = environ.get('wsgi.input').read(-1) else: g_req = environ.get('wsgi.input').read(int(content_length)) # reset request body for the nested app environ['wsgi.input'] = BytesIO(g_req) elif request_method == "GET": # Returns a dictionary in which the values are lists query_params = parse_qs(environ['QUERY_STRING']) data = { 'query': query_params.get('query', [''])[0], 'operationName': query_params.get('operationName', [''])[0], 'variables': query_params.get('variables', [''])[0], } g_req = str.encode(json.dumps(data)) q = Query(self.instance, g_req) headers = dict(EnvironHeaders(environ).to_wsgi_list()) # inigo: process request resp, req = q.process_request(self.headers(headers)) # introspection query if resp: return self.respond(resp, start_response) # modify query if required if req: if request_method == 'GET': query_params = parse_qs(environ['QUERY_STRING']) query_params['query'] = req.get('query') environ['QUERY_STRING'] = '&'.join([f"{k}={v[0]}" for k, v in query_params.items()]) elif request_method == 'POST': content_length = int(environ.get('CONTENT_LENGTH', 0)) body = environ['wsgi.input'].read(content_length) try: payload = json.loads(body) except ValueError: payload = {} payload.update({ 'query': req.get('query'), 'operationName': req.get('operationName'), 'variables': req.get('variables'), }) payload_str = json.dumps(payload).encode('utf-8') environ['wsgi.input'] = BytesIO(payload_str) environ['CONTENT_LENGTH'] = str(len(payload_str)) inner_status = None inner_headers = [] inner_exc_info = None def start_response_collector(status, headers, exc_info=None): # Just collects the inner response headers, to be modified before sending to client nonlocal inner_status, inner_headers, inner_exc_info inner_status = status inner_headers = headers inner_exc_info = exc_info # Not calling start_response(), as we will modify the headers first. return None # forward to request handler # populates the inner_* vars, as triggers inner call of the collector closure response = self.app(environ, start_response_collector) # inigo: process response response = [q.process_response(b"".join(response))] # removes Content-Length from original headers inner_headers = [(key, value) for key, value in inner_headers if key != 'Content-Length'] start_response(inner_status, inner_headers, inner_exc_info) return response @staticmethod def headers(headers_dict): headers = {} for key, value in headers_dict.items(): headers[key] = value.split(", ") return str.encode(json.dumps(headers)) @staticmethod def respond(data, start_response): response = { 'data': data.get('data'), } if data.get('errors'): response['errors'] = data.get('errors') if data.get('extensions'): response['extensions'] = data.get('extensions') status = "200 OK" headers = [("Content-type", "application/json")] start_response(status, headers) return [json.dumps(response).encode("utf-8")] def cached_import(module_path, class_name): # Check whether module is loaded and fully initialized. if not ( (module := sys.modules.get(module_path)) and (spec := getattr(module, "__spec__", None)) and getattr(spec, "_initializing", False) is False ): module = import_module(module_path) return getattr(module, class_name) def import_string(dotted_path): """ Import a dotted module path and return the attribute/class designated by the last name in the path. Raise ImportError if the import failed. 
""" try: module_path, class_name = dotted_path.rsplit(".", 1) except ValueError as err: raise ImportError("%s doesn't look like a module path" % dotted_path) from err try: return cached_import(module_path, class_name) except AttributeError as err: raise ImportError( 'Module "%s" does not define a "%s" attribute/class' % (module_path, class_name) ) from err
PypiClean
/tg_utils-1.0.1.tar.gz/tg_utils-1.0.1/README.rst
=============================== tg-utils =============================== .. image:: https://img.shields.io/pypi/v/tg-utils.svg :target: https://pypi.python.org/pypi/tg-utils .. image:: https://github.com/thorgate/tg-utils/actions/workflows/python-package.yml/badge.svg?branch=master :target: https://github.com/thorgate/tg-utils/actions .. image:: https://readthedocs.org/projects/tg-utils/badge/?version=latest :target: https://readthedocs.org/projects/tg-utils/?badge=latest :alt: Documentation Status Collection of various utils for Django-based projects. This is code that we're using in our projects at Thorgate and we're hoping you'll find some of it useful as well. * Free software: ISC license * Documentation: https://tg-utils.readthedocs.org. Features -------- * Model utils, e.g. timestamped and closable models, QuerySets that send out a signal when objects are modified. * Templated email sending. * Profiling utilities. * Unique filename generation for uploads. * Using hashids for models (instead of exposing primary keys). * System checks for email and Sentry configuration. * Mixin for easier implementation of ordering in Django's generic ListView. * Mixin for making admin view read-only. * Decorator for annotating admin methods. * JS/CSS compressors for `Django Compressor <https://django-compressor.readthedocs.org/en/latest/>`_. * Health-check endpoints (with and without token authentication) Credits --------- This package was created with Cookiecutter_ and the `audreyr/cookiecutter-pypackage`_ project template. .. _Cookiecutter: https://github.com/audreyr/cookiecutter .. _`audreyr/cookiecutter-pypackage`: https://github.com/audreyr/cookiecutter-pypackage
PypiClean
/datadog_api_client-2.16.0-py3-none-any.whl/datadog_api_client/v2/model/ci_app_aggregate_sort.py
from __future__ import annotations from typing import Union, TYPE_CHECKING from datadog_api_client.model_utils import ( ModelNormal, cached_property, unset, UnsetType, ) if TYPE_CHECKING: from datadog_api_client.v2.model.ci_app_aggregation_function import CIAppAggregationFunction from datadog_api_client.v2.model.ci_app_sort_order import CIAppSortOrder from datadog_api_client.v2.model.ci_app_aggregate_sort_type import CIAppAggregateSortType class CIAppAggregateSort(ModelNormal): @cached_property def openapi_types(_): from datadog_api_client.v2.model.ci_app_aggregation_function import CIAppAggregationFunction from datadog_api_client.v2.model.ci_app_sort_order import CIAppSortOrder from datadog_api_client.v2.model.ci_app_aggregate_sort_type import CIAppAggregateSortType return { "aggregation": (CIAppAggregationFunction,), "metric": (str,), "order": (CIAppSortOrder,), "type": (CIAppAggregateSortType,), } attribute_map = { "aggregation": "aggregation", "metric": "metric", "order": "order", "type": "type", } def __init__( self_, aggregation: Union[CIAppAggregationFunction, UnsetType] = unset, metric: Union[str, UnsetType] = unset, order: Union[CIAppSortOrder, UnsetType] = unset, type: Union[CIAppAggregateSortType, UnsetType] = unset, **kwargs, ): """ A sort rule. :param aggregation: An aggregation function. :type aggregation: CIAppAggregationFunction, optional :param metric: The metric to sort by (only used for ``type=measure`` ). :type metric: str, optional :param order: The order to use, ascending or descending. :type order: CIAppSortOrder, optional :param type: The type of sorting algorithm. :type type: CIAppAggregateSortType, optional """ if aggregation is not unset: kwargs["aggregation"] = aggregation if metric is not unset: kwargs["metric"] = metric if order is not unset: kwargs["order"] = order if type is not unset: kwargs["type"] = type super().__init__(kwargs)
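A construction sketch for the sort rule above; every field is optional, and a measure-type sort can be built as below, assuming the usual datadog-api-client enum member spelling (MEASURE, AVG, DESCENDING) and an example measure name.

from datadog_api_client.v2.model.ci_app_aggregate_sort import CIAppAggregateSort
from datadog_api_client.v2.model.ci_app_aggregate_sort_type import CIAppAggregateSortType
from datadog_api_client.v2.model.ci_app_aggregation_function import CIAppAggregationFunction
from datadog_api_client.v2.model.ci_app_sort_order import CIAppSortOrder

# Order facet buckets by the average of a numeric measure, largest first.
sort = CIAppAggregateSort(
    type=CIAppAggregateSortType.MEASURE,
    aggregation=CIAppAggregationFunction.AVG,
    metric="@duration",                 # example measure; only used when type=measure
    order=CIAppSortOrder.DESCENDING,
)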
PypiClean
/aifs_nni-1.9.5-py3-none-manylinux1_x86_64.whl/aifs_nni-1.9.5.data/data/nni/node_modules/url-join/CHANGELOG.md
## 4.0.0 - 2018-02-02 - Ignore empty string arguments and throw an exception for non-string. Closes #36, #18 ([da05242f381bfe1ae09d00b708cfdbdb93c1a85d](https://github.com/jfromaniello/url-join/commit/da05242f381bfe1ae09d00b708cfdbdb93c1a85d)), closes [#36](https://github.com/jfromaniello/url-join/issues/36) [#18](https://github.com/jfromaniello/url-join/issues/18) ## 3.0.0 - 2018-01-12 - add new test ([d65d7c1696cb53b53ceabadf1a77917196967b4c](https://github.com/jfromaniello/url-join/commit/d65d7c1696cb53b53ceabadf1a77917196967b4c)) - Fixed to handle the colon in non-protocol separation role in the first part. ([9212db75f805031a9cc06120b5dd08a6cdd805e4](https://github.com/jfromaniello/url-join/commit/9212db75f805031a9cc06120b5dd08a6cdd805e4)) ## 2.0.5 - 2018-01-10 - revert to previous behavior #30 ([b6943343af7bd723cbca266388e84e036543577d](https://github.com/jfromaniello/url-join/commit/b6943343af7bd723cbca266388e84e036543577d)), closes [#30](https://github.com/jfromaniello/url-join/issues/30) ## 2.0.4 - 2018-01-10 - fix bower.json ([9677895a4afe51d8a1d670980bc6fede71252e9a](https://github.com/jfromaniello/url-join/commit/9677895a4afe51d8a1d670980bc6fede71252e9a)) ## 2.0.3 - 2018-01-09 - 2.0.3 ([7b7806b21cf81a3476e39ddb8a6f51272a276186](https://github.com/jfromaniello/url-join/commit/7b7806b21cf81a3476e39ddb8a6f51272a276186)) - Added a test for simple paths for issue #21 ([be99b10a707b4d22aac015d19eb087fff46d4270](https://github.com/jfromaniello/url-join/commit/be99b10a707b4d22aac015d19eb087fff46d4270)), closes [#21](https://github.com/jfromaniello/url-join/issues/21) - Added some new tests for cases that fail. ([f1afbd62c3149476a9ef099ba523e85fb4839732](https://github.com/jfromaniello/url-join/commit/f1afbd62c3149476a9ef099ba523e85fb4839732)) - Passes all the tests with these changes. ([8cde667f400fa83efc7ed5c2437c7cb25c7d7600](https://github.com/jfromaniello/url-join/commit/8cde667f400fa83efc7ed5c2437c7cb25c7d7600)) - The protocol slashes should be normalized also when the protocol is not alone in the first argument. 
([0ce1239c60f7bbb625d4ccbf1fcf044f37488bd8](https://github.com/jfromaniello/url-join/commit/0ce1239c60f7bbb625d4ccbf1fcf044f37488bd8)) ## 2.0.2 - 2017-05-18 - fix: remove consecutives slashes ([33639364ef186e257b8424620017b9d1ba225539](https://github.com/jfromaniello/url-join/commit/33639364ef186e257b8424620017b9d1ba225539)) ## 2.0.1 - 2017-04-12 - update mocha and bower.json ([ebd3665028b2408d405f9a31f8479e91c4ef52c1](https://github.com/jfromaniello/url-join/commit/ebd3665028b2408d405f9a31f8479e91c4ef52c1)) - feat: add test ([46d3387141e5d2f751da699e02d57fc36bfe37a8](https://github.com/jfromaniello/url-join/commit/46d3387141e5d2f751da699e02d57fc36bfe37a8)) - fix: ignore encoded url when removing consecusive slashes ([711add4e8af8fc97390adef14b9a4722cac5e70a](https://github.com/jfromaniello/url-join/commit/711add4e8af8fc97390adef14b9a4722cac5e70a)) ## 2.0.0 - 2017-04-11 - Add a LICENSE file ([ffd3b2253470cee648152c55dd51c1bf4e688a60](https://github.com/jfromaniello/url-join/commit/ffd3b2253470cee648152c55dd51c1bf4e688a60)) - change copyright year ([9f67671dd8ab23b4d2da6ae775efdf66d594eac3](https://github.com/jfromaniello/url-join/commit/9f67671dd8ab23b4d2da6ae775efdf66d594eac3)) - refactor: use local startsWith function ([a1e1214644cd187f2584b79b4241ac3b8c9b9f1b](https://github.com/jfromaniello/url-join/commit/a1e1214644cd187f2584b79b4241ac3b8c9b9f1b)) - fix: split logic for files ([d7053a99aa40b0c2f4802819f7e0643be8889ac4](https://github.com/jfromaniello/url-join/commit/d7053a99aa40b0c2f4802819f7e0643be8889ac4)) - feat: add file protocol support ([48ebe0d84e8e2eca3a02fe5e3259cdd294e519dc](https://github.com/jfromaniello/url-join/commit/48ebe0d84e8e2eca3a02fe5e3259cdd294e519dc)) ## 1.1.0 - 2016-04-05 - add .travis.yml ([c75e7507f72fd4be101b64bb44539fd249842cc0](https://github.com/jfromaniello/url-join/commit/c75e7507f72fd4be101b64bb44539fd249842cc0)) - added new syntax to allow options, fixed #! 
urls ([b8e5d8372c55187cdd9c6fa5e02830f76858347e](https://github.com/jfromaniello/url-join/commit/b8e5d8372c55187cdd9c6fa5e02830f76858347e)) - added travis, updated version in bower.json ([5a58405d89298e693e8f97a74b14324d83a8a87a](https://github.com/jfromaniello/url-join/commit/5a58405d89298e693e8f97a74b14324d83a8a87a)) - fixed query string handling, closes #9, closes #4 ([e190fe28282287204dbe7877979f18b4570042f9](https://github.com/jfromaniello/url-join/commit/e190fe28282287204dbe7877979f18b4570042f9)), closes [#9](https://github.com/jfromaniello/url-join/issues/9) [#4](https://github.com/jfromaniello/url-join/issues/4) ## 1.0.0 - 2016-03-23 ## 0.1.0 - 2016-03-23 - 0.1.0 ([2db128d268dfd531f1af6c9bd0543458387e94cd](https://github.com/jfromaniello/url-join/commit/2db128d268dfd531f1af6c9bd0543458387e94cd)) - add support for AMD and windows['url-join'] ([b02169596877a1e6cd518f1b0d711f38c721fb02](https://github.com/jfromaniello/url-join/commit/b02169596877a1e6cd518f1b0d711f38c721fb02)) - added comments, fixed leading // ([3f72b6ea6fa84c4b254d0c656815a5df6b89a10a](https://github.com/jfromaniello/url-join/commit/3f72b6ea6fa84c4b254d0c656815a5df6b89a10a)) - added test for leading // ([baac627b2052e1d9b5c05e48c8dc6a05a80e08fa](https://github.com/jfromaniello/url-join/commit/baac627b2052e1d9b5c05e48c8dc6a05a80e08fa)) - bower init ([650dcfe72eee854108dd0832963553eae5ede7c5](https://github.com/jfromaniello/url-join/commit/650dcfe72eee854108dd0832963553eae5ede7c5)) - initial ([af68a208966de3d4be757c9d0f4a918c6dfa360e](https://github.com/jfromaniello/url-join/commit/af68a208966de3d4be757c9d0f4a918c6dfa360e)) - minor ([dde2dc6815f9a0476d7aade1d6848cbc5f3a14a4](https://github.com/jfromaniello/url-join/commit/dde2dc6815f9a0476d7aade1d6848cbc5f3a14a4)) - minor ([4d9d8ee16591da2092739a172145f968f71598dc](https://github.com/jfromaniello/url-join/commit/4d9d8ee16591da2092739a172145f968f71598dc)) - minor ([9ed0161497ee7d7d1b4b04d1735483a6216fe2c6](https://github.com/jfromaniello/url-join/commit/9ed0161497ee7d7d1b4b04d1735483a6216fe2c6)) - simplify normalize function ([d6886a362828eacc028c6167b9ae0efd8b2fbfc8](https://github.com/jfromaniello/url-join/commit/d6886a362828eacc028c6167b9ae0efd8b2fbfc8))
PypiClean
/ttkbwidgets-1.0.4.tar.gz/ttkbwidgets-1.0.4/widgets/ttkb_widgets.py
from abc import abstractmethod from datetime import datetime, date, time from decimal import Decimal import re import tkinter as tk import ttkbootstrap as ttkb import ttkbootstrap.dialogs.dialogs as dialogs from ttkbootstrap.validation import add_validation from ttkbootstrap.constants import * from typing import Any, Callable, Literal, Optional, Union from widgets.ttkb_validators import month_validator, day_validator, year_validator, hour_validator, \ minute_validator class Radiobutton(ttkb.Radiobutton): """ Subclass of ttkbootstrap.Radiobutton that changes it appears when it is in focus, so that the user can tell when that can use the space bar to select or unselect a button """ def __init__(self, parent, text: str, value: str, variable: ttkb.StringVar, command=None, width=None): ttkb.Radiobutton.__init__(self, parent, text=text, value=value, variable=variable, command=command, width=width, bootstyle='primary') self.bind('<FocusIn>', self.focus_in) self.bind('<FocusOut>', self.focus_out) def focus_in(self, event) -> None: """ Handler for the <FocusIn> event. Changes config parm bootstyle to 'toolbutton' :param event: :return: None """ self.configure(bootstyle='toolbutton') def focus_out(self, event): """ Handler for the <FocusOut> event. Changes config parm bootstyle to 'primary' :param event: :return: """ self.configure(bootstyle='primary') class Checkbutton(ttkb.Checkbutton): """ Subclass of ttkbootstrap.Checkbutton that changes it appears when it is in focus, so that the user can tell when that can use the space bar to select or unselect a checkbox """ def __init__(self, parent, text, variable, command=None, padding=None, width=None): ttkb.Checkbutton.__init__(self, parent, text=text, variable=variable, command=command, padding=padding, width=width, bootstyle='primary') self.bind('<FocusIn>', self.focus_in) self.bind('<FocusOut>', self.focus_out) self.row: int = -1 self.column: int = -1 def focus_in(self, event): """ Handler for the <FocusIn> event. Changes config parm bootstyle to 'toolbutton' :param event: :return: None """ self.configure(bootstyle='toolbutton') def focus_out(self, event): """ Handler for the <FocusOut> event. Changes config parm bootstyle to 'primary' :param event: :return: """ self.configure(bootstyle='primary') class EntryWidget: """ An abstract base class for entry label/widget pairs """ def __init__(self, parent, label_text: Optional[str], entry_width: int, regex_str: Optional[str] = None): """ Create an instance of EntryWidget :param parent: the GUI parent of this class :param label_text: the text to be used in creating the label :type label_text: str` :param regex_str: """ self.parent = parent self.label_text = label_text if regex_str is not None: self.regex_pattern: Optional[re.Pattern] = re.compile(regex_str) else: self.regex_pattern = None self.strvar = ttkb.StringVar() self.entry = ttkb.Entry(master=parent, textvariable=self.strvar, validate='focusout', width=entry_width, validatecommand=self.validate, invalidcommand=self.invalid) def invalid(self): """ This method is used as an 'invalidcommand' callback. It is invoked when validation fails. 
It sets the widget background color to red, which gives it a red border, then calls focus_set on the widget :return: None """ self.entry.config(background='red') self.focus_set() def focus_set(self): """ Delegates focus_set calls to the entry widget :return: None """ self.entry.focus_set() self.entry.selection_range('0', tk.END) def bind(self, sequence: str | None = ..., func: Callable[[tk.Event], Any] | None = ..., add: Literal["", "+"] | bool | None = ..., ) -> str: """ Delegate bind calls to the entry widget :param sequence: the name of the event to be bound :type sequence: str | None :param func: the callback to be invoked when the event is captured :type func: Callable[[tk.Event], Any] :param add: allows multiple bindings for single event :type add: Literal["", "+"] | bool | None :return: None """ return self.entry.bind(sequence, func, add) def grid(self, **kwargs): """ Delegate grid calls to the entry widget :param kwargs: key word args :type kwargs: dict[str,Any] :return: None """ self.entry.grid(kwargs) def set_regex(self, regex_str: str) -> None: """ Set the regular expression used to validate user input :param regex_str: a regular expression :type regex_str: str :return: None """ self.regex_pattern = re.compile(regex_str) def apply_regex(self, value=None) -> Optional[tuple[Any, ...]]: """ Apply the widget's regex either to the proved string or to the value entered to the widget prompt :param value: if not None, the regex will be applied to this value :type value: str :return: returns the groups collected by the regex :rtype: Optional[tuple[Any, ...]] """ if value is None: match = self.regex_pattern.match(self.strvar.get()) else: match = self.regex_pattern.match(value) if match is None: return None else: return match.groups() def get_var(self) -> ttkb.StringVar: """ Returns the StringVar associated with the entry widget :return: the StringVar associated with the entry widget :rtype: ttkb.StringVar """ return self.strvar @abstractmethod def get_value(self) -> str: """ An abstract method to be implemented by subclasses to return the entry widget value :return: the entry widget value :rtype: implementation dependant """ return self.strvar.get() @abstractmethod def set_value(self, value: Union[int, Decimal, str]) -> None: """ An abstract method implemented by subclasses to set the entry widget value :param value: value to be set :type value: Union[int, Decimal, str] :return: None """ if isinstance(value, str): self.strvar.set(value) elif isinstance(value, int): self.strvar.set(f'{value:d}') else: self.strvar.set(f'{value:.2f}') @abstractmethod def validate(self) -> int: """ An abstract method implemented by subclasses to set the entry widget value :return: 1 if valid, 0 if not """ ... class TextWidget(EntryWidget): """ A regex validated text entry widget """ def __init__(self, parent, label_text: str, entry_width: int, regex_str: Optional[str] = None): """ Creates and instance of TextWidget :param parent: the GUI parent of this widget :param label_text: the text for the label that will be associated with the widget. 
The label is not created with the entry widget, but the label text is used to create ValueError exception messages :type label_text: str :param regex_str: The regular expression that will be used to validate data entered to the widget :type regex_str: str """ EntryWidget.__init__(self, parent=parent, label_text=label_text, entry_width=entry_width, regex_str=regex_str) def validate(self) -> int: """ A validation callback for the validationcommand parameter of the tkinter Entry widget :return: 1 if validate, otherwise 0 :rtype: int """ if self.regex_pattern is not None: str_value = self.strvar.get().strip() if len(str_value) > 0: groups = self.apply_regex() if groups is not None and len(groups) == 1: return 1 else: return 0 else: return 1 else: return 1 def get_value(self): """ Return the widget's entry value :return: the widget's entry value :rtype: str """ return self.strvar.get() def set_value(self, value: str): """ The argument value is filtered through the validation regex and the resulting regex group is used to set the widget's entry value :param value: the value to be used to set the entry value :type value: str :return: None """ if self.regex_pattern is not None: groups = self.apply_regex(value) if groups is None: raise ValueError(f'{value} is not a valid {self.label_text}') else: self.strvar.set(groups[0]) else: self.strvar.set(value) class LabeledTextWidget: """ A Frame containing a Label and a TextWidget """ def __init__(self, parent, label_text: str, label_width: int, label_grid_args: dict[str, Any], entry_width: int, entry_grid_args: dict[str, Any], regex_str: Optional[str] = None): """ Creates and instance of LabeledTextWidget :param parent: The GUI parent for this Frame :param label_text: the text to be used in creating the label :type label_text: str :param label_width: the width to be used in creating the label :type label_width: int :param label_grid_args: the arguments to be used in gridding the label :type label_grid_args: dict[str, Any] :param entry_width: the width to be used in creating the entry widget :type entry_width: int :param entry_grid_args: the arguments to be used in gridding the entry widet :type entry_grid_args: dict[str, Any] :param regex_str: the regular expression to be used for validation of input: default '\\s*(\\w*)\\s*' :type regex_str: str """ # ttkb.Frame.__init__(self, master=parent) anchor = tk.W if 'row' in label_grid_args and 'row' in entry_grid_args and 'sticky' not in entry_grid_args: if label_grid_args['row'] == entry_grid_args['row']: entry_grid_args['sticky'] = tk.W anchor = tk.E self.label = ttkb.Label(master=parent, text=label_text, width=label_width, anchor=anchor) self.label.grid(**label_grid_args) self.entry = TextWidget(parent=parent, label_text=label_text, entry_width=entry_width, regex_str=regex_str) self.entry.grid(**entry_grid_args) def focus_set(self): """ Delegates calls to focus_set to the entry widget :return: None """ self.entry.focus_set() def bind(self, sequence: str | None = ..., func: Callable[[tk.Event], Any] | None = ..., add: Literal["", "+"] | bool | None = ..., ) -> str: """ Delegate bind calls to the entry widget :param sequence: the name of the event to be bound :type sequence: str | None :param func: the callback to be invoked when the event is captured :type func: Callable[[tk.Event], Any] :param add: allows multiple bindings for single event :type add: Literal["", "+"] | bool | None :return: None """ return self.entry.bind(sequence, func, add) def get_value(self) -> str: """ Return the widget's entry value 
:return: the widget's entry value :rtype: str """ return self.entry.get_value() def set_value(self, value: str) -> None: """ The argument value is filtered through the validation regex and the resulting regex group is used to set the widget's entry value :param value: the value to be used to set the entry value :type value: str :return: None """ self.entry.set_value(value) @staticmethod def label_width(text: str, min_width: int) -> int: """ Calculates a width for the label based on the label text and a minumum width :param text: the label test :type text: str :param min_width: the minimum label width :type min_width: int :return: the calculated label width :rtype: int """ text_width = len(text) + 2 if text_width < min_width: return min_width else: return text_width class IntegerWidget(EntryWidget): def __init__(self, parent, label_text: str, regex_str: Optional[str] = '\\s*(\\d*)\\s*'): """ Creates and instance of IntegerWidget :param parent: the GUI parent of this widget :param label_text: the text for the label that will be associated with the widget. The label is not created with the entry widget, but the label text is used to create ValueError exception messages :type label_text: str :param regex_str: The regular expression used to validate data entered: default '\\s*(\\d*)\\s*' :type regex_str: str """ EntryWidget.__init__(self, parent=parent, label_text=label_text, entry_width=10, regex_str=regex_str) def validate(self): """ A validation callback for the validationcommand parameter of the tkinter Entry widget :return: 1 if validate, otherwise 0 :rtype: int """ str_value = self.strvar.get().strip() if len(str_value) > 0: groups = self.apply_regex() if groups is not None and len(groups) == 1: if groups[0].isnumeric(): return 1 else: return 0 else: return 0 else: return 1 def get_value(self): """ Return the widget's entry value :return: the widget's entry value as an integer :rtype: int """ value = self.strvar.get().strip() if len(value) == 0: return 0 else: return int(self.strvar.get().strip()) def set_value(self, value: Union[int, str]): """ The argument value is filtered through the validation regex and the resulting regex group is used to set the widget's entry value :param value: the value to be used to set the entry value :type value: Union[int, str] :return: None """ if isinstance(value, int): self.strvar.set(f'{value:d}') elif isinstance(value, str) and value.isnumeric(): if len(value.strip()) > 0: re_groups = self.apply_regex(value) int_value: int = int(re_groups[0]) self.strvar.set(f'{int_value:d}') else: self.strvar.set('0') else: raise ValueError(f'{value} is not a valid {self.label_text}') class LabeledIntegerWidget: def __init__(self, parent, label_text: str, label_width: int, label_grid_args: dict[str, Any], entry_width: int, entry_grid_args: dict[str, Any], regex_str: Optional[str] = '\\s*(\\d*)\\s*'): """ Creates and instance of LabeledIntegerWidget :param parent: The GUI parent for this Frame :param label_text: the text to be used in creating the label :type label_text: str :param label_width: the width to be used in creating the label :type label_width: int :param label_grid_args: the arguments to be used in gridding the label :type label_grid_args: dict[str, Any] :param entry_width: the width to be used in creating the entry widget :type entry_width: int :param entry_grid_args: the arguments to be used in gridding the entry widet :type entry_grid_args: dict[str, Any] :param regex_str: the regular expression to be used for validation of input: default value 
'\\s*(\\d*)\\s*' :type regex_str: str """ # ttkb.Frame.__init__(self, master=parent) row_str: str = 'row' sticky_str: str = 'sticky' anchor = tk.W if row_str in label_grid_args and row_str in entry_grid_args and sticky_str not in entry_grid_args: if label_grid_args[row_str] == entry_grid_args[row_str]: anchor = tk.E entry_grid_args[sticky_str] = tk.W self.label = ttkb.Label(master=parent, text=label_text, width=label_width, anchor=anchor) self.label.grid(**label_grid_args) self.entry = IntegerWidget(parent=parent, label_text=label_text, regex_str=regex_str) self.entry.grid(**entry_grid_args) def focus_set(self): """ Delegates calls to focus_set to the entry widget :return: None """ self.entry.focus_set() def bind(self, sequence: str | None = ..., func: Callable[[tk.Event], Any] | None = ..., add: Literal["", "+"] | bool | None = ..., ) -> str: """ Delegate bind calls to the entry widget :param sequence: the name of the event to be bound :type sequence: str | None :param func: the callback to be invoked when the event is captured :type func: Callable[[tk.Event], Any] :param add: allows multiple bindings for single event :type add: Literal["", "+"] | bool | None :return: None """ return self.entry.bind(sequence, func, add) def get_value(self) -> int: """ Return the widget's entry value :return: the widget's entry value as an integer :rtype: int """ return self.entry.get_value() def set_value(self, value: Union[int, str]) -> None: """ The argument value is filtered through the validation regex and the resulting regex group is used to set the widget's entry value :param value: the value to be used to set the entry value :type value: Union[int, str] :return: None """ self.entry.set_value(value) @staticmethod def label_width(text: str, min_width: int) -> int: """ Calculates a width for the label based on the label text and a minumum width :param text: the label test :type text: str :param min_width: the minimum label width :type min_width: int :return: the calculated label width :rtype: int """ text_width = len(text) + 2 if text_width < min_width: return min_width else: return text_width class DecimalWidget(EntryWidget): def __init__(self, parent, label_text: str, regex_str: Optional[str] = '\\s*(\\d+[.]*\\d*)\\s*'): """ Creates and instance of DecimalWidget :param parent: the GUI parent of this widget :param label_text: the text for the label that will be associated with the widget. 
The label is not created with the entry widget, but the label text is used to create ValueError exception messages :type label_text: str :param regex_str: The regular expression used to validate data entered: default '\\s*(\\d+[.]*\\d*)\\s*' :type regex_str: str """ EntryWidget.__init__(self, parent=parent, label_text=label_text, entry_width=10, regex_str=regex_str) def validate(self): """ A validation callback for the validationcommand parameter of the tkinter Entry widget :return: 1 if validate, otherwise 0 :rtype: int """ str_value = self.strvar.get().strip() if len(str_value) > 0: groups = self.apply_regex() if groups is not None and len(groups) == 1: return 1 else: return 0 else: return 1 def get_value(self) -> Decimal: """ Return the widget's entry value :return: the widget's entry value as a Decimal :rtype: decimal.Decimal """ if self.validate() == 1: str_value = self.strvar.get().strip() if len(str_value) == 0: return Decimal(0) else: return Decimal(str_value) else: raise ValueError(f'{self.strvar.get()} is not a valid {self.label_text}') def set_value(self, value: Union[Decimal, str]): """ The argument value is filtered through the validation regex and the resulting regex group is used to set the widget's entry value :param value: the value to be used to set the entry value :type value: Union[decimal.Decimal, str] :return: None """ if isinstance(value, Decimal): self.strvar.set(f'{value:.1f}') elif isinstance(value, str) and value.isnumeric(): if len(value.strip()) > 0: re_groups = self.apply_regex() if re_groups is not None: dec_value: Decimal = Decimal(re_groups[0]) self.strvar.set(f'{dec_value:.1f}') raise ValueError(f'{self.strvar.get()} is not a valid {self.label_text}') else: self.strvar.set('0.0') else: raise ValueError(f'{self.strvar.get()} is not a valid {self.label_text}') class LabeledDecimalWidget: def __init__(self, parent, label_text: str, label_width: int, label_grid_args: dict[str, Any], entry_width: int, entry_grid_args: dict[str, Any], regex_str: Optional[str] = '\\s*(\\d+[.]*\\d*)\\s*'): """ Creates and instance of LabeledDecimalWidget :param parent: The GUI parent for this Frame :param label_text: the text to be used in creating the label :type label_text: str :param label_width: the width to be used in creating the label :type label_width: int :param label_grid_args: the arguments to be used in gridding the label :type label_grid_args: dict[str, Any] :param entry_width: the width to be used in creating the entry widget :type entry_width: int :param entry_grid_args: the arguments to be used in gridding the entry widet :type entry_grid_args: dict[str, Any] :param regex_str: the regular expression to be used for validation of input: default value '\\s*(\\d+[.]*\\d*)\\s*' :type regex_str: str """ # ttkb.Frame.__init__(self, master=parent) row_str: str = 'row' sticky_str: str = 'sticky' anchor = tk.W if row_str in label_grid_args and row_str in entry_grid_args and sticky_str not in entry_grid_args: if label_grid_args[row_str] == entry_grid_args[row_str]: anchor = tk.E entry_grid_args[sticky_str] = tk.W self.label = ttkb.Label(parent, text=label_text, width=label_width, anchor=anchor) self.label.grid(**label_grid_args) self.entry = DecimalWidget(parent=parent, label_text=label_text, regex_str=regex_str) self.entry.grid(**entry_grid_args) def focus_set(self): """ Delegates calls to focus_set to the entry widget :return: None """ self.entry.focus_set() def bind(self, sequence: str | None = ..., func: Callable[[tk.Event], Any] | None = ..., add: Literal["", "+"] | bool 
| None = ..., ) -> str: """ Delegate bind calls to the entry widget :param sequence: the name of the event to be bound :type sequence: str | None :param func: the callback to be invoked when the event is captured :type func: Callable[[tk.Event], Any] :param add: allows multiple bindings for single event :type add: Literal["", "+"] | bool | None :return: None """ return self.entry.bind(sequence, func, add) def get_value(self) -> Decimal: """ Return the widget's entry value :return: the widget's entry value as a Decimal :rtype: decimal.Decimal """ return self.entry.get_value() def set_value(self, value: Union[Decimal, str]) -> None: """ The argument value is filtered through the validation regex and the resulting regex group is used to set the widget's entry value :param value: the value to be used to set the entry value :type value: Union[decimal.Decimal, str] :return: None """ self.entry.set_value(value) @staticmethod def label_width(text: str, min_width: int) -> int: """ Calculates a width for the label based on the label text and a minumum width :param text: the label test :type text: str :param min_width: the minimum label width :type min_width: int :return: the calculated label width :rtype: int """ text_width = len(text) + 2 if text_width < min_width: return min_width else: return text_width class DateWidget(ttkb.Frame): """ A general purpose date entry widget """ def __init__(self, parent, default_value: datetime.date = None): """ Creates widgets.DateWidget instance :param parent: the GUI element that will contain the created widget :param default_value: a datetime.date value to be used as an initial value for the month, day and year entry fields. """ ttkb.Frame.__init__(self, parent) self.month_var = ttkb.StringVar() self.day_var = ttkb.StringVar() self.year_var = ttkb.StringVar() self.prev_entry = None self.next_entry = None if default_value: self.month_var.set(default_value.month) self.day_var.set(default_value.day) self.year_var.set(default_value.year) self.month_entry = ttkb.Entry(self, textvariable=self.month_var, width=3) self.month_entry.grid(column=0, row=0, sticky=tk.NW, ipadx=0, padx=0) ttkb.Label(self, text="/", width=1, font=('courier', 20, 'bold')).grid(column=1, row=0, sticky=tk.NW, padx=0, pady=5) self.month_entry.bind('<KeyPress>', self.month_keypress) self.month_entry.bind('<FocusIn>', self.clear_key_count) add_validation(self.month_entry, month_validator) self.day_entry = ttkb.Entry(self, textvariable=self.day_var, width=3) self.day_entry.grid(column=2, row=0, sticky=tk.NW) self.day_entry.bind('<KeyPress>', self.day_keypress) self.day_entry.bind('<FocusIn>', self.clear_key_count) add_validation(self.day_entry, day_validator) ttkb.Label(self, text="/", width=1, font=('courier', 20, 'bold')).grid(column=3, row=0, sticky=tk.NW, padx=0, pady=5) self.year_entry = ttkb.Entry(self, textvariable=self.year_var, width=6) self.year_entry.grid(column=4, row=0, stick=tk.NW) self.year_entry.bind('<KeyPress>', self.year_keypress) self.year_entry.bind('<FocusIn>', self.clear_key_count) self.year_entry.bind('<FocusOut>', self.validate_date) add_validation(self.year_entry, year_validator) self.grid() self.key_count = 0 self.max_dom: dict[int, int] = { 1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31 } self.prev_key_press = None self.error: bool = False def set_prev_entry(self, entry) -> None: """ Establishes the widget to focus on when the Back Tab key is pressed in the Month field :param entry: :return: None """ self.prev_entry = entry def 
set_next_entry(self, entry) -> None: """ Establishes the widget to focus on when the Tab key is pressed in the Year field :param entry: :return: None """ self.next_entry = entry def focus_set(self) -> None: """ Delegates focus_set to the month entry :return: None """ self.month_entry.focus_set() self.key_count = 0 self.prev_key_press = None def disable(self) -> None: """ Disables the month, day and year fields :return: None """ self.month_entry.configure(state=DISABLED) self.day_entry.configure(state=DISABLED) self.year_entry.configure(state=DISABLED) def enable(self) -> None: """ Enables the month, day and year fields :return: None """ self.month_entry.configure(state=NORMAL) self.day_entry.configure(state=NORMAL) self.year_entry.configure(state=NORMAL) def select_range(self, start, end): """ Delegates select range functionality to the month entry field :param start: :param end: :return: """ self.month_entry.select_range(start, end) def clear_key_count(self, event): """ Sets the keystroke counter to 0. This method is bound to the <FocusIn> event for the month, day and year fields :param event: :return: None """ self.key_count = 0 def month_keypress(self, event): """ Tracks the keystrokes entered to the month field, puts the day entry field in focus after two keystrokes. This method is bound to the <KeyPress> event of the month field. :param event: :return: None """ if event.char.isnumeric(): self.key_count += 1 if self.key_count == 2: self.key_count = 0 self.day_entry.select_range(0, END) self.day_entry.focus_set() self.prev_key_press = None else: match event.keysym: case 'Tab': self.key_count = 0 case 'ISO_Left_Tab': self.key_count = 0 if self.prev_entry is not None: self.prev_entry.focus_set() case 'BackSpace': if self.key_count > 0: self.key_count -= 1 case 'Up': m = int(self.month_var.get()) if m < 12: self.month_var.set(str(m + 1)) else: self.month_var.set('01') self.month_entry.update() case 'KP_Up': m = int(self.month_var.get()) if m < 12: self.month_var.set(str(m + 1)) else: self.month_var.set('01') self.month_entry.update() case 'Down': m = int(self.month_var.get()) if m > 1: self.month_var.set(str(m - 1)) else: self.month_var.set('12') self.month_entry.update() case 'KP_Down': m = int(self.month_var.get()) if m > 1: self.month_var.set(str(m - 1)) else: self.month_var.set('12') self.month_entry.update() self.prev_key_press = event.keysym def day_keypress(self, event): """ Tracks the keystrokes entered to the day field, puts the year entry field in focus after two keystrokes. This method is bound to the <KeyPress> event of the day entry field. 
:param event: :return: None """ if event.char.isnumeric(): self.key_count += 1 if self.key_count == 2: self.key_count = 0 self.year_entry.select_range(0, END) self.year_entry.focus_set() self.prev_key_press = None else: match event.keysym: case 'Tab': self.key_count = 0 case 'ISO_Left_Tab': self.key_count = 0 case 'BackSpace': if self.key_count > 0: self.key_count -= 1 case 'Up': m = int(self.day_var.get()) if m < self.max_dom[int(self.month_var.get())]: self.day_var.set(str(m + 1)) else: self.day_var.set('01') self.day_entry.update() case 'KP_Up': m = int(self.day_var.get()) if m < self.max_dom[int(self.month_var.get())]: self.day_var.set(str(m + 1)) else: self.day_var.set('01') self.day_entry.update() case 'Down': m = int(self.day_var.get()) if m > 1: self.day_var.set(str(m - 1)) else: self.day_var.set(str(self.max_dom[int(self.month_var.get())])) self.day_entry.update() case 'KP_Down': m = int(self.day_var.get()) if m > 1: self.day_var.set(str(m - 1)) else: self.day_var.set(str(self.max_dom[int(self.month_var.get())])) self.day_entry.update() self.prev_key_press = event.keysym def year_keypress(self, event): """ Tracks the keystrokes entered to the year field, puts the next entry field on the GUI in focus after two keystrokes. This method is bound to the <KeyPress> event of the year entry field. :param event: :return: None """ if event.char.isnumeric(): self.key_count += 1 if self.key_count == 4: self.key_count = 0 self.next_entry.focus_set() self.prev_key_press = None else: match event.keysym: case 'Tab': self.key_count = 0 self.next_entry.focus_set() case 'ISO_Left_Tab': self.key_count = 0 case 'BackSpace': if self.key_count > 0: self.key_count -= 1 case 'Up': m = int(self.year_var.get()) self.year_var.set(str(m + 1)) self.year_entry.update() case 'KP_Up': m = int(self.year_var.get()) self.year_var.set(str(m + 1)) self.year_entry.update() case 'Down': m = int(self.year_var.get()) if m > 0: self.year_var.set(str(m - 1)) self.year_entry.update() case 'KP_Down': m = int(self.year_var.get()) if m > 1: self.year_var.set(str(m - 1)) self.year_entry.update() self.prev_key_press = event.keysym def validate_date(self, event): """ Validates the month, day and year elements of the date and presents a Messagebox if any is invalid This method is bound to the Focus Out event of the year entry field. 
:param event: :return: None """ if self.prev_key_press != 'ISO_Left_Tab': valid = True if self.month_var.get().isnumeric(): month: int = int(self.month_var.get()) if not 1 <= month <= 12: self.month_var.set('') self.month_entry.update() self.month_entry.focus_set() valid = False if valid: if self.day_var.get().isnumeric(): try: day: int = int(self.day_var.get()) if not 1 <= day <= self.max_dom[month]: self.day_var.set('') self.day_entry.update() self.day_entry.focus_set() valid = False except KeyError: # This should never happen, as the month has already been validated pass if valid and not self.year_var.get().isnumeric(): self.year_var.set('') self.year_entry.update() self.year_entry.focus_set() valid = False else: # If the day is not numeric self.day_var.set('') self.day_entry.update() self.day_entry.focus_set() valid = False else: # If the month is not numeric self.month_var.set('') self.month_entry.update() self.month_entry.focus_set() valid = False if valid and int(self.year_var.get()) < 100: year = 2000 + int(self.year_var.get()) self.year_var.set(f'{year:4d}') self.year_entry.update() if valid: self.next_entry.focus_set() self.error = False else: if not self.error: self.error = True dialogs.Messagebox.ok("Date entered is not valid.", "Date Entry Error") def get_date(self) -> date: """ Build a date value from the values entered to the month, day and year entry fields. :return: datetime.date instance """ month: int = int(self.month_var.get()) day: int = int(self.day_var.get()) year: int = int(self.year_var.get()) return date(year=year, month=month, day=day) def set_date(self, date_value: date): """ Set the month, day and year field from the provided date :param date_value: the new value for the date widget :type date_value: date :return: None """ self.month_var.set(str(date_value.month)) self.day_var.set(str(date_value.day)) self.year_var.set(str(date_value.year)) class TimeWidget(ttkb.Frame): """ A general purpose date entry widget """ def __init__(self, parent, default_value: time = None): """ Creates widgets.TimeWidget instance :param parent: the GUI element that will contain the created widget :param default_value: a datetime.time value to be used as an initial value for the hour and minute entry fields. 
""" ttkb.Frame.__init__(self, parent) self.columnconfigure(0, weight=2) self.columnconfigure(1, weight=1) self.columnconfigure(2, weight=2) self.columnconfigure(3, weight=2) self.hour_var = ttkb.StringVar() self.minute_var = ttkb.StringVar() self.ampm_var = ttkb.StringVar() self.prev_entry = None self.next_entry = None if default_value is not None: fmt_time = default_value.strftime('%I:%M %p') self.hour_var.set(fmt_time[0:2]) self.minute_var.set(fmt_time[3:5]) self.ampm_var.set(fmt_time[6:8]) self.hour_entry = ttkb.Entry(self, textvariable=self.hour_var, width=3) self.hour_entry.grid(column=0, row=0, sticky=tk.W, ipadx=0, padx=0, pady=5) ttkb.Label(self, text=":", width=1, font=('courier', 20, 'bold')).grid(column=1, row=0, sticky=tk.NW, padx=0, pady=5) self.hour_entry.bind('<KeyPress>', self.hour_keypress) self.hour_entry.bind('<FocusIn>', self.clear_key_count) add_validation(self.hour_entry, hour_validator) self.minute_entry = ttkb.Entry(self, textvariable=self.minute_var, width=3) self.minute_entry.grid(column=2, row=0, sticky=tk.W, ipadx=0, padx=0, pady=5) self.minute_entry.bind('<KeyPress>', self.minute_keypress) self.minute_entry.bind('<FocusIn>', self.clear_key_count) add_validation(self.minute_entry, minute_validator) self.am_button = Radiobutton(self, text='AM', value='AM', variable=self.ampm_var, command=self.validate_time) self.am_button.grid(column=3, row=0, stick=tk.W, padx=5, pady=5) self.pm_button = Radiobutton(self, text='PM', value='PM', variable=self.ampm_var, command=self.validate_time) self.pm_button.bind('<FocusOut>', self.validate_time, '+') self.pm_button.grid(column=4, row=0, sticky=tk.W, padx=5, pady=5) if self.ampm_var.get() not in ['AM', 'PM']: self.ampm_var.set('AM') self.grid() self.key_count = 0 self.prev_key_press = None self.error: bool = False def disable(self) -> None: """ Disables the hour and minute fields and the ap/pm radio buttons. :return: None """ self.hour_entry.configure(state=DISABLED) self.minute_entry.configure(state=DISABLED) self.am_button.configure(state=DISABLED) self.pm_button.configure(state=DISABLED) def enable(self) -> None: """ Enables the hour and minute fields and the am/pm radio buttons. :return: None """ self.hour_entry.configure(state=NORMAL) self.minute_entry.configure(state=NORMAL) self.am_button.configure(state=NORMAL) self.pm_button.configure(state=NORMAL) def set_prev_entry(self, entry) -> None: """ Establishes the widget that will receive focus when the Back Tab key is pressed in the hour field. :param entry: :return: None """ self.prev_entry = entry def set_next_entry(self, entry): """ Establishes the widget that will receive focus after the am/pm radio buttons :param entry: :return: None """ self.next_entry = entry def focus_set(self) -> None: """ Delegates set focus functionality to the hour field :return: None """ self.hour_entry.focus_set() self.key_count = 0 self.prev_key_press = None def select_range(self, start, end) -> None: """ Delegates select range to the hour field :param start: start position for selection :param end: end position for selection :return: None """ self.hour_entry.select_range(start, end) def clear_key_count(self, event): """ Sets the keystroke counter to 0. This method is bound to the <FocusIn> event for the hour and minute entry fields :param event: :return: None """ self.key_count = 0 def hour_keypress(self, event): """ Tracks the keystrokes entered to the hour field, puts the minute entry field on the GUI in focus after two keystrokes. 
This method is bound to the <KeyPress> event of the hour entry field. :param event: :return: None """ if event.char.isnumeric(): self.key_count += 1 if self.key_count == 2: self.key_count = 0 self.minute_entry.select_range(0, END) self.minute_entry.focus_set() self.prev_key_press = None else: match event.keysym: case 'Tab': self.key_count = 0 case 'ISO_Left_Tab': self.key_count = 0 if self.prev_entry is not None: self.prev_entry.select_range(0, END) self.prev_entry.focus_set() case 'BackSpace': if self.key_count > 0: self.key_count -= 1 case 'Up': h = int(self.hour_var.get()) if h < 12: self.hour_var.set(str(h + 1)) else: self.hour_var.set('01') self.hour_entry.update() case 'KP_Up': h = int(self.hour_var.get()) if h < 12: self.hour_var.set(str(h + 1)) else: self.hour_var.set('01') self.hour_entry.update() case 'Down': h = int(self.hour_var.get()) if h > 1: self.hour_var.set(str(h - 1)) else: self.hour_var.set('12') self.hour_entry.update() case 'KP_Down': h = int(self.hour_var.get()) if h > 1: self.hour_var.set(str(h - 1)) else: self.hour_var.set('12') self.hour_entry.update() self.prev_key_press = event.keysym def minute_keypress(self, event): """ Tracks the keystrokes entered to the minute field, puts the next entry field on the GUI on the GUI in focus after two keystrokes. This method is bound to the <KeyPress> event of the hour entry field. :param event: :return: None """ if event.char.isnumeric(): self.key_count += 1 if self.key_count == 2: self.key_count = 0 self.am_button.focus_set() self.prev_key_press = None else: match event.keysym: case 'Tab': self.key_count = 0 case 'ISO_Left_Tab': self.key_count = 0 case 'BackSpace': if self.key_count > 0: self.key_count -= 1 case 'Up': m = int(self.minute_var.get()) if m < 59: self.minute_var.set(str(m + 1)) self.minute_entry.update() else: self.minute_var.set('00') case 'KP_Up': m = int(self.minute_var.get()) if m < 59: self.minute_var.set(str(m + 1)) else: self.minute_var.set('00') self.minute_entry.update() case 'Down': m = int(self.minute_var.get()) if m > 1: self.minute_var.set(str(m - 1)) else: self.minute_var.set('59') self.minute_entry.update() case 'KP_Down': m = int(self.minute_var.get()) if m > 1: self.minute_var.set(str(m - 1)) else: self.minute_var.set('59') self.minute_entry.update() self.prev_key_press = event.keysym def am_keypress(self, event): """ If the Tab key is pressed when the AM radio button is in focus, skip over the PM button and set the focus on the next_entry field if any has been specified :param event: :return: None """ if event.keysym == 'Tab' and self.next_entry is not None: self.next_entry.focus_set() def validate_time(self, event=None): """ Validate the hour and minute values, display a MessageBox if either is invalid. 
This method is bound to the Button-1 events on the AM and PM radio buttons :param event: :return: None """ if self.prev_key_press != 'ISO_Left_Tab': valid = True if self.hour_entry.get().isnumeric(): hour = int(self.hour_var.get()) if not 1 <= hour <= 12: self.hour_var.set('') self.hour_entry.select_range(0, END) self.hour_entry.focus_set() valid = False if valid: if self.minute_entry.get().isnumeric(): minute = int(self.minute_var.get()) if not 0 <= minute <= 60: self.minute_var.set('') self.minute_entry.select_range(0, END) self.minute_entry.focus_set() valid = False else: self.minute_var.set('') self.minute_entry.select_range(0, END) self.minute_entry.focus_set() valid = False else: self.hour_var.set('') self.hour_entry.select_range(0, END) self.hour_entry.focus_set() valid = False if valid: if self.next_entry is not None: self.next_entry.focus_set() self.error = False else: if not self.error: dialogs.Messagebox.ok("Time entered is not valid.", "Time Entry Error") self.error = True def get_time(self) -> Optional[time]: """ Builds a datetime.time instance using the values entered to the hour and minute entry fields. :return: datetime.time instance """ if len(self.hour_var.get().strip()) == 0 or len(self.minute_var.get().strip()) == 0: return None else: hour: int = int(self.hour_var.get()) minute: int = int(self.minute_var.get()) ampm: str = self.ampm_var.get() if ampm == "PM": if hour < 12: hour += 12 return time(hour=hour, minute=minute) def get_datetime(self): """ Returns a datetime instance with a zero date. :return: datetime instance :rtype: datetime """ dummy = date(year=2022, month=10, day=23) return utilities.mk_datetime(dummy, self.get_time()) def set_time(self, time_value: time) -> None: """ Set the hour, minute and am/pm fields to the provided time :param time_value: the new value for the time widget :type time_value: time :return: None """ hour: int = time_value.hour minute: int = time_value.minute ampm: str = 'AM' if hour >= 12: hour -= 12 ampm = 'PM' self.hour_var.set(str(hour)) self.minute_var.set(str(minute)) self.ampm_var.set(ampm) def set_datetime(self, dt_value: datetime): dt, tm = utilities.split_datetime(dt_value) self.set_time(tm)
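Since the module above is a widget library rather than a runnable script, a short usage sketch may help. Everything here other than the widget classes themselves (the `ttkb.Window` root, theme name, grid positions, and default values) is an illustrative assumption, not part of ttkb_widgets.py:

```python
# Illustrative sketch only: window setup, grid arguments and defaults are assumptions.
from datetime import date

import ttkbootstrap as ttkb

from widgets.ttkb_widgets import LabeledTextWidget, LabeledIntegerWidget, DateWidget

root = ttkb.Window(themename="litera")  # assumes ttkbootstrap 1.x

# A regex-validated text entry with its label on the same row.
name = LabeledTextWidget(
    parent=root, label_text="Name", label_width=10,
    label_grid_args={"row": 0, "column": 0},
    entry_width=20, entry_grid_args={"row": 0, "column": 1},
)

# An integer entry; get_value() returns an int (0 when the field is empty).
age = LabeledIntegerWidget(
    parent=root, label_text="Age", label_width=10,
    label_grid_args={"row": 1, "column": 0},
    entry_width=5, entry_grid_args={"row": 1, "column": 1},
)

# A month/day/year widget that advances focus automatically as digits are typed.
visit_date = DateWidget(root, default_value=date.today())
visit_date.grid(row=2, column=0, columnspan=2)
visit_date.set_next_entry(name.entry)  # focus target after the year field is filled

root.mainloop()
```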
PypiClean
/nanorpc-0.0.3.tar.gz/nanorpc-0.0.3/README.md
# NanoRPC

NanoRPC is a Python library for interacting with Nano cryptocurrency nodes via their JSON-RPC API. It provides an easy-to-use interface to execute RPC (Remote Procedure Call) commands supported by different versions of Nano nodes.

## Features

- Compatible with Python 3.7 and higher.
- Supports asynchronous operations using `asyncio` and `aiohttp`.
- Automatically adjusts available RPC methods based on the version of the connected Nano node.
- Provides a simple and intuitive API for executing RPC commands.

## Requirements

- Python 3.7 and higher
- aiohttp library

## Installation

You can install NanoRPC using `pip`:

```bash
pip install nanorpc
```

## Usage

Here's a basic example of how to use NanoRPC to interact with a Nano node:

```python
import asyncio
from nanorpc.client import NanoRpc, NodeVersion

async def main():
    # Connect to a Nano node
    rpc = NanoRpc(url='http://localhost:7076', node_version=NodeVersion.V25_0)

    # Execute an RPC command
    block_count = await rpc.block_count()
    print(f"Current block count: {block_count}")

    # Execute another RPC command
    version_info = await rpc.version()
    print(f"Node version: {version_info}")

    # pass mandatory input as args* and optional as kwargs*
    account = "nano_3msc38fyn67pgio16dj586pdrceahtn75qgnx7fy19wscixrc8dbb3abhbw6"
    account_info = await rpc.account_info(account, include_confirmed="true", representative="true",
                                          receivable="true", weight="true")
    print(f"Account info: {account_info}")

# Run the main function within an asyncio event loop
asyncio.run(main())
```

### Error Handling

NanoRPC includes several error handling mechanisms. If an RPC command fails or if a network issue is encountered, the library will retry the request for a specified number of times. If all retries are exhausted, the library will raise a `MaxRetriesExceededError`.

### Available RPC Commands

NanoRPC provides access to various RPC commands based on the connected Nano node version. For a complete list of available commands and their parameters, please refer to the [versions folder](./nanorpc/versions) in this repository.

### Official RPC Documentation

For detailed documentation on the Nano RPC protocol and available commands, please visit the [Nano RPC Documentation](https://docs.nano.org/commands/rpc-protocol/) page.

## Contributing

Contributions to NanoRPC are welcome! If you find any issues, have suggestions, or would like to contribute enhancements or new features, please open an issue or submit a pull request.

## License

NanoRPC is licensed under the MIT License. See the [LICENSE](LICENSE) file for more information.
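As an addendum to the Usage section above: because every RPC method is a coroutine, independent requests can also be issued concurrently. A minimal sketch using only the calls already shown (the node URL and version are placeholder values):

```python
import asyncio
from nanorpc.client import NanoRpc, NodeVersion

async def main():
    rpc = NanoRpc(url='http://localhost:7076', node_version=NodeVersion.V25_0)

    # Each RPC method is awaitable, so independent calls can run concurrently.
    block_count, version_info = await asyncio.gather(rpc.block_count(), rpc.version())
    print(f"Current block count: {block_count}")
    print(f"Node version: {version_info}")

asyncio.run(main())
```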
PypiClean
/acryl-datahub-0.10.0.2rc2.tar.gz/acryl-datahub-0.10.0.2rc2/src/datahub_provider/hooks/datahub.py
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union from airflow.exceptions import AirflowException from airflow.hooks.base import BaseHook from datahub.metadata.com.linkedin.pegasus2avro.mxe import ( MetadataChangeEvent, MetadataChangeProposal, ) if TYPE_CHECKING: from airflow.models.connection import Connection from datahub.emitter.kafka_emitter import DatahubKafkaEmitter from datahub.emitter.rest_emitter import DatahubRestEmitter from datahub.ingestion.sink.datahub_kafka import KafkaSinkConfig class DatahubRestHook(BaseHook): """ Creates a DataHub Rest API connection used to send metadata to DataHub. Takes the endpoint for your DataHub Rest API in the Server Endpoint(host) field. URI example: :: AIRFLOW_CONN_DATAHUB_REST_DEFAULT='datahub-rest://rest-endpoint' :param datahub_rest_conn_id: Reference to the DataHub Rest connection. :type datahub_rest_conn_id: str """ conn_name_attr = "datahub_rest_conn_id" default_conn_name = "datahub_rest_default" conn_type = "datahub_rest" hook_name = "DataHub REST Server" def __init__(self, datahub_rest_conn_id: str = default_conn_name) -> None: super().__init__() self.datahub_rest_conn_id = datahub_rest_conn_id @staticmethod def get_connection_form_widgets() -> Dict[str, Any]: return {} @staticmethod def get_ui_field_behaviour() -> Dict: """Returns custom field behavior""" return { "hidden_fields": ["port", "schema", "login"], "relabeling": { "host": "Server Endpoint", }, } def _get_config(self) -> Tuple[str, Optional[str], Optional[int]]: conn: "Connection" = self.get_connection(self.datahub_rest_conn_id) host = conn.host if host is None: raise AirflowException("host parameter is required") password = conn.password timeout_sec = conn.extra_dejson.get("timeout_sec") return (host, password, timeout_sec) def make_emitter(self) -> "DatahubRestEmitter": import datahub.emitter.rest_emitter return datahub.emitter.rest_emitter.DatahubRestEmitter(*self._get_config()) def emit_mces(self, mces: List[MetadataChangeEvent]) -> None: emitter = self.make_emitter() for mce in mces: emitter.emit_mce(mce) def emit_mcps(self, mcps: List[MetadataChangeProposal]) -> None: emitter = self.make_emitter() for mce in mcps: emitter.emit_mcp(mce) class DatahubKafkaHook(BaseHook): """ Creates a DataHub Kafka connection used to send metadata to DataHub. Takes your kafka broker in the Kafka Broker(host) field. URI example: :: AIRFLOW_CONN_DATAHUB_KAFKA_DEFAULT='datahub-kafka://kafka-broker' :param datahub_kafka_conn_id: Reference to the DataHub Kafka connection. 
:type datahub_kafka_conn_id: str """ conn_name_attr = "datahub_kafka_conn_id" default_conn_name = "datahub_kafka_default" conn_type = "datahub_kafka" hook_name = "DataHub Kafka Sink" def __init__(self, datahub_kafka_conn_id: str = default_conn_name) -> None: super().__init__() self.datahub_kafka_conn_id = datahub_kafka_conn_id @staticmethod def get_connection_form_widgets() -> Dict[str, Any]: return {} @staticmethod def get_ui_field_behaviour() -> Dict: """Returns custom field behavior""" return { "hidden_fields": ["port", "schema", "login", "password"], "relabeling": { "host": "Kafka Broker", }, } def _get_config(self) -> "KafkaSinkConfig": import datahub.ingestion.sink.datahub_kafka conn = self.get_connection(self.datahub_kafka_conn_id) obj = conn.extra_dejson obj.setdefault("connection", {}) if conn.host is not None: if "bootstrap" in obj["connection"]: raise AirflowException( "Kafka broker specified twice (present in host and extra)" ) obj["connection"]["bootstrap"] = ":".join( map(str, filter(None, [conn.host, conn.port])) ) config = datahub.ingestion.sink.datahub_kafka.KafkaSinkConfig.parse_obj(obj) return config def make_emitter(self) -> "DatahubKafkaEmitter": import datahub.emitter.kafka_emitter sink_config = self._get_config() return datahub.emitter.kafka_emitter.DatahubKafkaEmitter(sink_config) def emit_mces(self, mces: List[MetadataChangeEvent]) -> None: emitter = self.make_emitter() errors = [] def callback(exc, msg): if exc: errors.append(exc) for mce in mces: emitter.emit_mce_async(mce, callback) emitter.flush() if errors: raise AirflowException(f"failed to push some MCEs: {errors}") def emit_mcps(self, mcps: List[MetadataChangeProposal]) -> None: emitter = self.make_emitter() errors = [] def callback(exc, msg): if exc: errors.append(exc) for mcp in mcps: emitter.emit_mcp_async(mcp, callback) emitter.flush() if errors: raise AirflowException(f"failed to push some MCPs: {errors}") class DatahubGenericHook(BaseHook): """ Emits Metadata Change Events using either the DatahubRestHook or the DatahubKafkaHook. Set up a DataHub Rest or Kafka connection to use. :param datahub_conn_id: Reference to the DataHub connection. :type datahub_conn_id: str """ def __init__(self, datahub_conn_id: str) -> None: super().__init__() self.datahub_conn_id = datahub_conn_id def get_underlying_hook(self) -> Union[DatahubRestHook, DatahubKafkaHook]: conn = self.get_connection(self.datahub_conn_id) # We need to figure out the underlying hook type. First check the # conn_type. If that fails, attempt to guess using the conn id name. if conn.conn_type == DatahubRestHook.conn_type: return DatahubRestHook(self.datahub_conn_id) elif conn.conn_type == DatahubKafkaHook.conn_type: return DatahubKafkaHook(self.datahub_conn_id) elif "rest" in self.datahub_conn_id: return DatahubRestHook(self.datahub_conn_id) elif "kafka" in self.datahub_conn_id: return DatahubKafkaHook(self.datahub_conn_id) else: raise AirflowException( f"DataHub cannot handle conn_type {conn.conn_type} in {conn}" ) def make_emitter(self) -> Union["DatahubRestEmitter", "DatahubKafkaEmitter"]: return self.get_underlying_hook().make_emitter() def emit_mces(self, mces: List[MetadataChangeEvent]) -> None: return self.get_underlying_hook().emit_mces(mces)
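A brief, hedged sketch of how these hooks might be called from an Airflow task. The connection ID below is the default declared in the hook classes, and the `mces` list is assumed to be built elsewhere with the DataHub SDK; this is not part of the module itself:

```python
# Sketch only: assumes an Airflow connection named "datahub_rest_default" has been
# configured, and that `mces` is a list of MetadataChangeEvent objects built elsewhere.
from datahub_provider.hooks.datahub import DatahubGenericHook


def push_lineage(mces):
    # Intended to be wired into a PythonOperator or TaskFlow task inside a DAG.
    # The generic hook inspects the connection type and delegates to either the
    # REST hook or the Kafka hook.
    hook = DatahubGenericHook(datahub_conn_id="datahub_rest_default")
    hook.emit_mces(mces)
```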
PypiClean
/pyqtgraph-for-dubble-bubble-2016.11.2.tar.gz/pyqtgraph-for-dubble-bubble-2016.11.2/pyqtgraph/widgets/VerticalLabel.py
from ..Qt import QtGui, QtCore

__all__ = ['VerticalLabel']

#class VerticalLabel(QtGui.QLabel):
    #def paintEvent(self, ev):
        #p = QtGui.QPainter(self)
        #p.rotate(-90)
        #self.hint = p.drawText(QtCore.QRect(-self.height(), 0, self.height(), self.width()), QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter, self.text())
        #p.end()
        #self.setMinimumWidth(self.hint.height())
        #self.setMinimumHeight(self.hint.width())

    #def sizeHint(self):
        #if hasattr(self, 'hint'):
            #return QtCore.QSize(self.hint.height(), self.hint.width())
        #else:
            #return QtCore.QSize(16, 50)


class VerticalLabel(QtGui.QLabel):
    def __init__(self, text, orientation='vertical', forceWidth=True):
        QtGui.QLabel.__init__(self, text)
        self.forceWidth = forceWidth
        self.orientation = None
        self.setOrientation(orientation)

    def setOrientation(self, o):
        if self.orientation == o:
            return
        self.orientation = o
        self.update()
        self.updateGeometry()

    def paintEvent(self, ev):
        p = QtGui.QPainter(self)
        #p.setBrush(QtGui.QBrush(QtGui.QColor(100, 100, 200)))
        #p.setPen(QtGui.QPen(QtGui.QColor(50, 50, 100)))
        #p.drawRect(self.rect().adjusted(0, 0, -1, -1))

        #p.setPen(QtGui.QPen(QtGui.QColor(255, 255, 255)))

        if self.orientation == 'vertical':
            p.rotate(-90)
            rgn = QtCore.QRect(-self.height(), 0, self.height(), self.width())
        else:
            rgn = self.contentsRect()
        align = self.alignment()
        #align = QtCore.Qt.AlignTop|QtCore.Qt.AlignHCenter

        self.hint = p.drawText(rgn, align, self.text())
        p.end()

        if self.orientation == 'vertical':
            self.setMaximumWidth(self.hint.height())
            self.setMinimumWidth(0)
            self.setMaximumHeight(16777215)
            if self.forceWidth:
                self.setMinimumHeight(self.hint.width())
            else:
                self.setMinimumHeight(0)
        else:
            self.setMaximumHeight(self.hint.height())
            self.setMinimumHeight(0)
            self.setMaximumWidth(16777215)
            if self.forceWidth:
                self.setMinimumWidth(self.hint.width())
            else:
                self.setMinimumWidth(0)

    def sizeHint(self):
        if self.orientation == 'vertical':
            if hasattr(self, 'hint'):
                return QtCore.QSize(self.hint.height(), self.hint.width())
            else:
                return QtCore.QSize(19, 50)
        else:
            if hasattr(self, 'hint'):
                return QtCore.QSize(self.hint.width(), self.hint.height())
            else:
                return QtCore.QSize(50, 19)


if __name__ == '__main__':
    app = QtGui.QApplication([])
    win = QtGui.QMainWindow()
    w = QtGui.QWidget()
    l = QtGui.QGridLayout()
    w.setLayout(l)
    l1 = VerticalLabel("text 1", orientation='horizontal')
    l2 = VerticalLabel("text 2")
    l3 = VerticalLabel("text 3")
    l4 = VerticalLabel("text 4", orientation='horizontal')
    l.addWidget(l1, 0, 0)
    l.addWidget(l2, 1, 1)
    l.addWidget(l3, 2, 2)
    l.addWidget(l4, 3, 3)
    win.setCentralWidget(w)
    win.show()
PypiClean
/dimsim-0.2.2.tar.gz/dimsim-0.2.2/README.md
# DimSim - A Chinese Soundex Library (Python version)

DimSim is a library developed by the Scalable Knowledge Intelligence team at IBM Almaden Research Center as part of the [SystemT](https://researcher.watson.ibm.com/researcher/view_group.php?id=1264) project. The PyPi project page can be found [here](https://pypi.org/project/dimsim/).

It was created in collaboration with IBM Center for Open-Source Data and AI Technologies ([CODAIT](codait.org)).

## Overview

We provide a phonetic algorithm for indexing Chinese characters by sound. The technical details can be found in the following [paper](http://aclweb.org/anthology/K18-1043):

Min Li, Marina Danilevsky, Sara Noeman and Yunyao Li. *DIMSIM: An Accurate Chinese Phonetic Similarity Algorithm based on Learned High Dimensional Encoding*. CoNLL 2018.

In this library, we provide a pre-trained model that can perform the following functions:

- Given two Chinese phrases (of the same length), return the phonetic distance of the input phrases. Optionally you can feed in pinyin strings of Chinese phrases too.
- Given a Chinese phrase, return its top-k similar (phonetically) Chinese phrases.

## How to install

**Dependencies**:

- [pypinyin](https://github.com/mozillazg/python-pinyin): used for translating Chinese characters into their corresponding pinyins.

There are two ways to install this library:

- Install from PyPi

```shell
pip install dimsim
```

- Download the source code by cloning this repo and compile it yourself.

```shell
git clone git@github.com:System-T/DimSim.git
cd DimSim/
pip install -e .
```

## How to use

Once you have the package installed you can use it for the two functions as shown below.

- Computing phonetic distance of two Chinese phrases. The optional argument `pinyin` (False by default) can be used to provide a pinyin string list directly. See example usage below.

```python
import dimsim

dist = dimsim.get_distance("大侠","大虾")
0.0002380952380952381

dist = dimsim.get_distance("大侠","大人")
25.001417183349876

dist = dimsim.get_distance(['da4','xia2'],['da4','xia1'], pinyin=True)
0.0002380952380952381

dist = dimsim.get_distance(['da4','xia2'],['da4','ren2'], pinyin=True)
25.001417183349876
```

***

- Return top-k phonetically similar phrases of a given Chinese phrase. Two parameters:
  - **mode** controls the character type of the returned Chinese phrases, where 'simplified' represents simplified Chinese and 'traditional' represents traditional Chinese.
  - **theta** controls the size of the search space for the candidate phrases.

```python
import dimsim

candidates = dimsim.get_candidates("大侠", mode="simplified", theta=1)
['打下', '大虾', '大侠']

candidates = dimsim.get_candidates("粉丝", mode="traditional", theta=1)
['門市', '分時', '焚屍', '粉飾', '粉絲']
```

## Citation

Please cite the library by referencing the published paper:

```
@InProceedings{K18-1043,
  author    = {Li, Min and Danilevsky, Marina and Noeman, Sara and Li, Yunyao},
  title     = {{DIMSIM:} An Accurate Chinese Phonetic Similarity Algorithm Based on Learned High Dimensional Encoding},
  booktitle = {Proceedings of the 22nd Conference on Computational Natural Language Learning},
  year      = {2018},
  publisher = {Association for Computational Linguistics},
  pages     = {444-453},
  location  = {Brussels, Belgium},
  url       = {http://aclweb.org/anthology/K18-1043}
}
```
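As a small addendum to the usage examples above, the two documented functions compose naturally; this sketch (the query phrase is arbitrary) ranks the returned candidates by their phonetic distance from the query:

```python
import dimsim

query = "大侠"
candidates = dimsim.get_candidates(query, mode="simplified", theta=1)

# Rank the candidates by phonetic distance to the query (smaller means more similar).
ranked = sorted(candidates, key=lambda phrase: dimsim.get_distance(query, phrase))
for phrase in ranked:
    print(phrase, dimsim.get_distance(query, phrase))
```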
PypiClean
/Orange3-Survival-Analysis-0.5.1.tar.gz/Orange3-Survival-Analysis-0.5.1/README.md
Orange3-Survival-Analysis
=========================

Orange3 Survival Analysis add-on for the [Orange3](http://orange.biolab.si) data mining suite.

Installation
------------

Install from Orange add-on installer through Options - Add-ons.

To install the add-on from source run

    pip install .

To register this add-on with Orange, but keep the code in the development directory (do not copy it to Python's site-packages directory), run

    pip install -e .

Usage
-----

After the installation, the widget from this add-on is registered with Orange. To run Orange from the terminal, use

    orange-canvas

or

    python -m Orange.canvas

The new widget appears in the toolbox bar under the section Survival Analysis.
PypiClean
/aliyun-python-sdk-rds-2.7.43.tar.gz/aliyun-python-sdk-rds-2.7.43/aliyunsdkrds/request/v20140815/CheckCloudResourceAuthorizedRequest.py
from aliyunsdkcore.request import RpcRequest
from aliyunsdkrds.endpoint import endpoint_data


class CheckCloudResourceAuthorizedRequest(RpcRequest):

    def __init__(self):
        RpcRequest.__init__(self, 'Rds', '2014-08-15', 'CheckCloudResourceAuthorized')
        self.set_method('POST')

        if hasattr(self, "endpoint_map"):
            setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
        if hasattr(self, "endpoint_regional"):
            setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())

    def get_ResourceOwnerId(self):  # Long
        return self.get_query_params().get('ResourceOwnerId')

    def set_ResourceOwnerId(self, ResourceOwnerId):  # Long
        self.add_query_param('ResourceOwnerId', ResourceOwnerId)

    def get_ResourceGroupId(self):  # String
        return self.get_query_params().get('ResourceGroupId')

    def set_ResourceGroupId(self, ResourceGroupId):  # String
        self.add_query_param('ResourceGroupId', ResourceGroupId)

    def get_SecurityToken(self):  # String
        return self.get_query_params().get('SecurityToken')

    def set_SecurityToken(self, SecurityToken):  # String
        self.add_query_param('SecurityToken', SecurityToken)

    def get_DBInstanceId(self):  # String
        return self.get_query_params().get('DBInstanceId')

    def set_DBInstanceId(self, DBInstanceId):  # String
        self.add_query_param('DBInstanceId', DBInstanceId)

    def get_ResourceOwnerAccount(self):  # String
        return self.get_query_params().get('ResourceOwnerAccount')

    def set_ResourceOwnerAccount(self, ResourceOwnerAccount):  # String
        self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)

    def get_OwnerAccount(self):  # String
        return self.get_query_params().get('OwnerAccount')

    def set_OwnerAccount(self, OwnerAccount):  # String
        self.add_query_param('OwnerAccount', OwnerAccount)

    def get_OwnerId(self):  # Long
        return self.get_query_params().get('OwnerId')

    def set_OwnerId(self, OwnerId):  # Long
        self.add_query_param('OwnerId', OwnerId)

    def get_TargetRegionId(self):  # String
        return self.get_query_params().get('TargetRegionId')

    def set_TargetRegionId(self, TargetRegionId):  # String
        self.add_query_param('TargetRegionId', TargetRegionId)
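For context, a minimal sketch of how a request class like this is typically driven through the core SDK client. The credentials, region, and instance ID are placeholders, and the `AcsClient` / `do_action_with_exception` calls follow the usual aliyun-python-sdk-core pattern rather than anything defined in this file:

```python
# Sketch only: credentials, region and DB instance ID below are placeholder values.
from aliyunsdkcore.client import AcsClient
from aliyunsdkrds.request.v20140815.CheckCloudResourceAuthorizedRequest import (
    CheckCloudResourceAuthorizedRequest,
)

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = CheckCloudResourceAuthorizedRequest()
request.set_DBInstanceId("rm-example-instance-id")  # the RDS instance to check
request.set_TargetRegionId("cn-hangzhou")           # optional query parameter

# Sends the signed RPC call and returns the raw response body.
response = client.do_action_with_exception(request)
print(response)
```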
PypiClean
/landsatxplore-0.10.tar.gz/landsatxplore-0.10/README.md
# Description

![CLI Demo](https://raw.githubusercontent.com/yannforget/landsatxplore/master/demo.gif?s=0.5)

The **landsatxplore** Python package provides an interface to the [EarthExplorer](http://earthexplorer.usgs.gov/) portal to search and download [Landsat Collections](https://landsat.usgs.gov/landsat-collections) scenes through a command-line interface or a Python API.

It supports four data sets: `LANDSAT_TM_C1`, `LANDSAT_ETM_C1`, `LANDSAT_8_C1`, and `SENTINEL_2A`.

# Quick start

Searching for Landsat 5 TM scenes that contain the location (12.53, -1.53) and were acquired during the year 1995.

```
landsatxplore search --dataset LANDSAT_TM_C1 --location 12.53 -1.53 \
    --start 1995-01-01 --end 1995-12-31
```

Search for Landsat 7 ETM scenes in Brussels with less than 5% cloud cover. Save the returned results in a `.csv` file.

```
landsatxplore search --dataset LANDSAT_ETM_C1 \
    --location 50.83 4.38 --clouds 5 > results.csv
```

Downloading three Landsat scenes from different datasets into the current directory.

```
landsatxplore download LT51960471995178MPS00 LC80390222013076EDC00 LC82150682015350LGN01
```

To use the package, Earth Explorer credentials are required ([registration](https://ers.cr.usgs.gov/register)).

# Installation

The package can be installed using pip.

```
pip install landsatxplore
```

# Usage

**landsatxplore** can be used both through its command-line interface and as a Python module.

## Command-line interface

```
landsatxplore --help
```

```
Usage: landsatxplore [OPTIONS] COMMAND [ARGS]...

Options:
  --help  Show this message and exit.

Commands:
  download  Download one or several Landsat scenes.
  search    Search for Landsat scenes.
```

### Credentials

Credentials for the Earth Explorer portal can be obtained [here](https://ers.cr.usgs.gov/register/).

`--username` and `--password` can be provided as command-line options or as environment variables:

``` shell
export LANDSATXPLORE_USERNAME=<your_username>
export LANDSATXPLORE_PASSWORD=<your_password>
```

### Searching

```
landsatxplore search --help
```

```
Usage: landsatxplore search [OPTIONS]

  Search for Landsat scenes.

Options:
  -u, --username TEXT             EarthExplorer username.
  -p, --password TEXT             EarthExplorer password.
  -d, --dataset [LANDSAT_TM_C1|LANDSAT_ETM_C1|LANDSAT_8_C1|SENTINEL_2A]
                                  EO data set.
  -l, --location FLOAT...         Point of interest (latitude, longitude).
  -b, --bbox FLOAT...             Bounding box (xmin, ymin, xmax, ymax).
  -c, --clouds INTEGER            Max. cloud cover (1-100).
  -s, --start TEXT                Start date (YYYY-MM-DD).
  -e, --end TEXT                  End date (YYYY-MM-DD).
  -o, --output [scene_id|product_id|json|csv]
                                  Output format.
  -m, --limit INTEGER             Max. results returned.
  --help                          Show this message and exit.
```

### Downloading

```
landsatxplore download --help
```

```
Usage: landsatxplore download [OPTIONS] [SCENES]...

  Download one or several Landsat scenes.

Options:
  -u, --username TEXT    EarthExplorer username.
  -p, --password TEXT    EarthExplorer password.
  -o, --output PATH      Output directory (default to current).
  -t, --timeout INTEGER  Download timeout in seconds (default 300s).
  --help                 Show this message and exit.
```

## API

### EarthExplorer API

**landsatxplore** provides an interface to the Earth Explorer JSON API. Please refer to the official [documentation](https://earthexplorer.usgs.gov/inventory/documentation/json-api) for possible request codes and parameters.

#### Basic usage

``` python
import landsatxplore.api

# Initialize a new API instance and get an access key
api = landsatxplore.api.API(username, password)

# Perform a request. Results are returned in a dictionary.
response = api.request('<request_code>', parameter1=value1, parameter2=value2)

# Log out
api.logout()
```

#### Searching for scenes

``` python
import landsatxplore.api

# Initialize a new API instance and get an access key
api = landsatxplore.api.API(username, password)

# Request
scenes = api.search(
    dataset='LANDSAT_ETM_C1',
    latitude=19.53,
    longitude=-1.53,
    start_date='1995-01-01',
    end_date='1997-01-01',
    max_cloud_cover=10)

print('{} scenes found.'.format(len(scenes)))

for scene in scenes:
    print(scene['acquisitionDate'])

api.logout()
```

Output:

```
8 scenes found.
1995-05-10
1995-05-26
1995-06-11
1995-06-11
1995-06-27
1995-07-29
1995-08-14
1995-08-14
```

#### Downloading scenes

``` python
from landsatxplore.earthexplorer import EarthExplorer

ee = EarthExplorer(username, password)

ee.download(scene_id='LT51960471995178MPS00', output_dir='./data')

ee.logout()
```
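
#### Combining search and download

The two APIs can be chained. The sketch below assumes that each scene record returned by `api.search()` carries an `entityId` field (as in Earth Explorer JSON API responses); that field name is an assumption here, not something shown above.

``` python
import landsatxplore.api
from landsatxplore.earthexplorer import EarthExplorer

username, password = '<your_username>', '<your_password>'

# Search for scenes first...
api = landsatxplore.api.API(username, password)
scenes = api.search(
    dataset='LANDSAT_8_C1',
    latitude=50.83,
    longitude=4.38,
    start_date='2017-01-01',
    end_date='2017-12-31',
    max_cloud_cover=5)
api.logout()

# ...then download each of them by its scene identifier.
ee = EarthExplorer(username, password)
for scene in scenes:
    ee.download(scene_id=scene['entityId'], output_dir='./data')
ee.logout()
```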
PypiClean
/allenact_plugins-0.5.3.tar.gz/allenact_plugins-0.5.3/allenact_plugins/ithor_plugin/ithor_tasks.py
import random from typing import Dict, Tuple, List, Any, Optional, Union, Sequence, cast import gym import numpy as np from allenact.base_abstractions.misc import RLStepResult from allenact.base_abstractions.sensor import Sensor from allenact.base_abstractions.task import Task from allenact.utils.system import get_logger from allenact_plugins.ithor_plugin.ithor_constants import ( MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, LOOK_DOWN, LOOK_UP, END, ) from allenact_plugins.ithor_plugin.ithor_environment import IThorEnvironment from allenact_plugins.ithor_plugin.ithor_util import round_to_factor class ObjectNaviThorGridTask(Task[IThorEnvironment]): """Defines the object navigation task in AI2-THOR. In object navigation an agent is randomly initialized into an AI2-THOR scene and must find an object of a given type (e.g. tomato, television, etc). An object is considered found if the agent takes an `End` action and the object is visible to the agent (see [here](https://ai2thor.allenai.org/documentation/concepts) for a definition of visibiliy in AI2-THOR). The actions available to an agent in this task are: 1. Move ahead * Moves agent ahead by 0.25 meters. 1. Rotate left / rotate right * Rotates the agent by 90 degrees counter-clockwise / clockwise. 1. Look down / look up * Changes agent view angle by 30 degrees up or down. An agent cannot look more than 30 degrees above horizontal or less than 60 degrees below horizontal. 1. End * Ends the task and the agent receives a positive reward if the object type is visible to the agent, otherwise it receives a negative reward. # Attributes env : The ai2thor environment. sensor_suite: Collection of sensors formed from the `sensors` argument in the initializer. task_info : The task info. Must contain a field "object_type" that specifies, as a string, the goal object type. max_steps : The maximum number of steps an agent can take an in the task before it is considered failed. observation_space: The observation space returned on each step from the sensors. """ _actions = (MOVE_AHEAD, ROTATE_LEFT, ROTATE_RIGHT, LOOK_DOWN, LOOK_UP, END) _CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE: Dict[ Tuple[str, str], List[Tuple[float, float, int, int]] ] = {} def __init__( self, env: IThorEnvironment, sensors: List[Sensor], task_info: Dict[str, Any], max_steps: int, **kwargs, ) -> None: """Initializer. See class documentation for parameter definitions. 
""" super().__init__( env=env, sensors=sensors, task_info=task_info, max_steps=max_steps, **kwargs ) self._took_end_action: bool = False self._success: Optional[bool] = False self._subsampled_locations_from_which_obj_visible: Optional[ List[Tuple[float, float, int, int]] ] = None self.task_info["followed_path"] = [self.env.get_agent_location()] self.task_info["action_names"] = self.class_action_names() @property def action_space(self): return gym.spaces.Discrete(len(self._actions)) def reached_terminal_state(self) -> bool: return self._took_end_action @classmethod def class_action_names(cls, **kwargs) -> Tuple[str, ...]: return cls._actions def close(self) -> None: self.env.stop() def _step(self, action: Union[int, Sequence[int]]) -> RLStepResult: assert isinstance(action, int) action = cast(int, action) action_str = self.class_action_names()[action] if action_str == END: self._took_end_action = True self._success = self.is_goal_object_visible() self.last_action_success = self._success else: self.env.step({"action": action_str}) self.last_action_success = self.env.last_action_success if ( not self.last_action_success ) and self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE is not None: self.env.update_graph_with_failed_action(failed_action=action_str) self.task_info["followed_path"].append(self.env.get_agent_location()) step_result = RLStepResult( observation=self.get_observations(), reward=self.judge(), done=self.is_done(), info={"last_action_success": self.last_action_success}, ) return step_result def render(self, mode: str = "rgb", *args, **kwargs) -> np.ndarray: assert mode == "rgb", "only rgb rendering is implemented" return self.env.current_frame def is_goal_object_visible(self) -> bool: """Is the goal object currently visible?""" return any( o["objectType"] == self.task_info["object_type"] for o in self.env.visible_objects() ) def judge(self) -> float: """Compute the reward after having taken a step.""" reward = -0.01 if not self.last_action_success: reward += -0.03 if self._took_end_action: reward += 1.0 if self._success else -1.0 return float(reward) def metrics(self) -> Dict[str, Any]: if not self.is_done(): return {} else: return { "success": self._success, **super(ObjectNaviThorGridTask, self).metrics(), } def query_expert(self, **kwargs) -> Tuple[int, bool]: target = self.task_info["object_type"] if self.is_goal_object_visible(): return self.class_action_names().index(END), True else: key = (self.env.scene_name, target) if self._subsampled_locations_from_which_obj_visible is None: if key not in self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE: obj_ids: List[str] = [] obj_ids.extend( o["objectId"] for o in self.env.last_event.metadata["objects"] if o["objectType"] == target ) assert len(obj_ids) != 0, "No objects to get an expert path to." locations_from_which_object_is_visible: List[ Tuple[float, float, int, int] ] = [] y = self.env.last_event.metadata["agent"]["position"]["y"] positions_to_check_interactionable_from = [ {"x": x, "y": y, "z": z} for x, z in set((x, z) for x, z, _, _ in self.env.graph.nodes) ] for obj_id in set(obj_ids): self.env.controller.step( { "action": "PositionsFromWhichItemIsInteractable", "objectId": obj_id, "positions": positions_to_check_interactionable_from, } ) assert ( self.env.last_action_success ), "Could not get positions from which item was interactable." 
returned = self.env.last_event.metadata["actionReturn"] locations_from_which_object_is_visible.extend( ( round(x, 2), round(z, 2), round_to_factor(rot, 90) % 360, round_to_factor(hor, 30) % 360, ) for x, z, rot, hor, standing in zip( returned["x"], returned["z"], returned["rotation"], returned["horizon"], returned["standing"], ) if standing == 1 ) self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[ key ] = locations_from_which_object_is_visible self._subsampled_locations_from_which_obj_visible = self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[ key ] if len(self._subsampled_locations_from_which_obj_visible) > 5: self._subsampled_locations_from_which_obj_visible = random.sample( self._CACHED_LOCATIONS_FROM_WHICH_OBJECT_IS_VISIBLE[key], 5 ) current_loc_key = self.env.get_key(self.env.last_event.metadata["agent"]) paths = [] for goal_key in self._subsampled_locations_from_which_obj_visible: path = self.env.shortest_state_path( source_state_key=current_loc_key, goal_state_key=goal_key ) if path is not None: paths.append(path) if len(paths) == 0: return 0, False shortest_path_ind = int(np.argmin([len(p) for p in paths])) if len(paths[shortest_path_ind]) == 1: get_logger().warning( "Shortest path computations suggest we are at the target but episode does not think so." ) return 0, False next_key_on_shortest_path = paths[shortest_path_ind][1] return ( self.class_action_names().index( self.env.action_transitioning_between_keys( current_loc_key, next_key_on_shortest_path ) ), True, )
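
# --- Illustration (not part of the plugin) ----------------------------------
# The reward structure described in the class docstring is what judge() above
# implements; the stand-alone function below only restates that schedule for
# clarity and is not used anywhere in the task.
def illustrative_reward(last_action_success, took_end_action, success):
    reward = -0.01                            # per-step penalty
    if not last_action_success:
        reward += -0.03                       # failed-action penalty
    if took_end_action:
        reward += 1.0 if success else -1.0    # terminal reward for the End action
    return float(reward)

# A successful End action on an otherwise successful step:
print(illustrative_reward(True, True, True))  # roughly 0.99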
PypiClean
/custom-e-celery-4.0.2.tar.gz/custom-e-celery-4.0.2/celery/worker/consumer/mingle.py
from __future__ import absolute_import, unicode_literals

from celery import bootsteps
from celery.five import items
from celery.utils.log import get_logger

from .events import Events

__all__ = ['Mingle']

logger = get_logger(__name__)
debug, info, exception = logger.debug, logger.info, logger.exception


class Mingle(bootsteps.StartStopStep):
    """Bootstep syncing state with neighbor workers.

    At startup, or upon consumer restart, this will:

    - Sync logical clocks.
    - Sync revoked tasks.
    """

    label = 'Mingle'
    requires = (Events,)
    compatible_transports = {'amqp', 'redis'}

    def __init__(self, c, without_mingle=False, **kwargs):
        self.enabled = not without_mingle and self.compatible_transport(c.app)
        super(Mingle, self).__init__(
            c, without_mingle=without_mingle, **kwargs)

    def compatible_transport(self, app):
        with app.connection_for_read() as conn:
            return conn.transport.driver_type in self.compatible_transports

    def start(self, c):
        self.sync(c)

    def sync(self, c):
        info('mingle: searching for neighbors')
        replies = self.send_hello(c)
        if replies:
            info('mingle: sync with %s nodes',
                 len([reply for reply, value in items(replies) if value]))
            [self.on_node_reply(c, nodename, reply)
             for nodename, reply in items(replies) if reply]
            info('mingle: sync complete')
        else:
            info('mingle: all alone')

    def send_hello(self, c):
        inspect = c.app.control.inspect(timeout=1.0, connection=c.connection)
        our_revoked = c.controller.state.revoked
        replies = inspect.hello(c.hostname, our_revoked._data) or {}
        replies.pop(c.hostname, None)  # delete my own response
        return replies

    def on_node_reply(self, c, nodename, reply):
        debug('mingle: processing reply from %s', nodename)
        try:
            self.sync_with_node(c, **reply)
        except MemoryError:
            raise
        except Exception as exc:  # pylint: disable=broad-except
            exception('mingle: sync with %s failed: %r', nodename, exc)

    def sync_with_node(self, c, clock=None, revoked=None, **kwargs):
        self.on_clock_event(c, clock)
        self.on_revoked_received(c, revoked)

    def on_clock_event(self, c, clock):
        c.app.clock.adjust(clock) if clock else c.app.clock.forward()

    def on_revoked_received(self, c, revoked):
        if revoked:
            c.controller.state.revoked.update(revoked)
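
# --- Usage sketch (not part of this module) ---------------------------------
# A minimal illustration of the hello exchange that send_hello() performs,
# driven directly through the inspect API used above.  The broker URL and
# hostname are placeholders; the reply is expected to carry the 'clock' and
# 'revoked' keys that sync_with_node() consumes.
from celery import Celery

app = Celery('proj', broker='redis://localhost:6379/0')

inspect = app.control.inspect(timeout=1.0)
replies = inspect.hello('worker1@example.com') or {}

for nodename, reply in replies.items():
    print(nodename, reply.get('clock'), len(reply.get('revoked') or []))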
PypiClean
/AbsBox-0.12.5.tar.gz/AbsBox-0.12.5/absbox/local/component.py
from absbox.local.util import mkTag, DC, mkTs, guess_locale, readTagStr from enum import Enum import itertools import functools import logging import pandas as pd from pyspecter import query, S datePattern = {"月末": "MonthEnd", "季度末": "QuarterEnd", "年末": "YearEnd", "月初": "MonthFirst", "季度初": "QuarterFirst", "年初": "YearFirst", "每年": "MonthDayOfYear", "每月": "DayOfMonth", "每周": "DayOfWeek"} freqMap = {"每月": "Monthly", "每季度": "Quarterly", "每半年": "SemiAnnually", "每年": "Annually", "Monthly": "Monthly", "Quarterly": "Quarterly", "SemiAnnually": "SemiAnnually", "Annually": "Annually", "monthly": "Monthly", "quarterly": "Quarterly", "semiAnnually": "SemiAnnually", "annually": "Annually" } baseMap = {"资产池余额": "CurrentPoolBalance", "资产池期末余额": "CurrentPoolBalance", "资产池期初余额": "CurrentPoolBegBalance", "资产池初始余额": "OriginalPoolBalance", "初始资产池余额": "OriginalPoolBalance", "资产池当期利息": "PoolCollectionInt", "债券余额": "CurrentBondBalance", "债券初始余额": "OriginalBondBalance", "当期已付债券利息": "LastBondIntPaid", "当期已付费用": "LastFeePaid", "当期未付债券利息": "CurrentDueBondInt", "当期未付费用": "CurrentDueFee" } def mkLiq(x): match x: case {"正常余额折价": cf, "违约余额折价": df}: return mkTag(("BalanceFactor", [cf, df])) case {"CurrentFactor": cf, "DefaultFactor": df}: return mkTag(("BalanceFactor", [cf, df])) case {"贴现计价": df, "违约余额回收率": r}: return mkTag(("PV", [df, r])) case {"PV": df, "DefaultRecovery": r}: return mkTag(("PV", [df, r])) def mkDatePattern(x): match x: case ["每月", _d]: return mkTag((datePattern["每月"], _d)) case ["每年", _m, _d]: return mkTag((datePattern["每年"], [_m, _d])) case ["DayOfMonth", _d]: return mkTag(("DayOfMonth", _d)) case ["MonthDayOfYear", _m, _d]: return mkTag(("MonthDayOfYear", _m, _d)) case ["CustomDate", *_ds]: return mkTag(("CustomDate", _ds)) case ["AllDatePattern", *_dps]: return mkTag(("AllDatePattern", [ mkDatePattern(_) for _ in _dps])) case _x if (_x in datePattern.values()): return mkTag((_x)) case _x if (_x in datePattern.keys()): return mkTag((datePattern[x])) case _: raise RuntimeError(f"Failed to match {x}") def mkDate(x): match x: case {"封包日": a, "起息日": b, "首次兑付日": c, "法定到期日": d, "收款频率": pf, "付款频率": bf} | \ {"cutoff": a, "closing": b, "firstPay": c, "stated": d, "poolFreq": pf, "payFreq": bf}: firstCollection = x.get("首次归集日", b) mr = x.get("循环结束日", None) return mkTag(("PreClosingDates", [a, b, mr, d, [firstCollection, mkDatePattern(pf)], [c, mkDatePattern(bf)]])) case {"归集日": (lastCollected, nextCollect), "兑付日": (pp, np), "法定到期日": c, "收款频率": pf, "付款频率": bf} | \ {"collect": (lastCollected, nextCollect), "pay": (pp, np), "stated": c, "poolFreq": pf, "payFreq": bf}: mr = x.get("循环结束日", None) return mkTag(("CurrentDates", [[lastCollected, pp], mr, c, [nextCollect, mkDatePattern(pf)], [np, mkDatePattern(bf)]])) case {"回款日": cdays, "分配日": ddays, "封包日": cutoffDate, "起息日": closingDate} | \ {"poolCollection": cdays, "distirbution": ddays, "cutoff": cutoffDate, "closing": closingDate}: return mkTag(("CustomDates", [cutoffDate, [mkTag(("PoolCollection", [cd, ""])) for cd in cdays], closingDate, [mkTag(("RunWaterfall", [dd, ""])) for dd in ddays]])) case _: raise RuntimeError(f"Failed to match:{x}") def mkFeeType(x): match x: case {"年化费率": [base, rate]} | {"annualPctFee": [base, rate]}: return mkTag(("AnnualRateFee", [mkTag((baseMap[base], '1970-01-01')), rate])) case {"百分比费率": [*desc, rate]} | {"pctFee": [*desc, rate]}: match desc: case ["资产池回款", "利息"] | ["poolCollection", "interest"]: return mkTag(("PctFee", [mkTag(("PoolCollectionIncome", "CollectedInterest")), rate])) case ["已付利息合计", *bns] | ["paidInterest", 
*bns]: return mkTag(("PctFee", [mkTag(("LastBondIntPaid", bns)), rate])) case ["已付本金合计", *bns] | ["paidPrincipal", *bns]: return mkTag(("PctFee", [mkTag(("LastBondPrinPaid", bns)), rate])) case _: raise RuntimeError(f"Failed to match on 百分比费率:{desc,rate}") case {"固定费用": amt} | {"fixFee": amt}: return mkTag(("FixFee", amt)) case {"周期费用": [p, amt]} | {"recurFee": [p, amt]}: return mkTag(("RecurFee", [mkDatePattern(p), amt])) case {"自定义": fflow} | {"customFee": fflow}: return mkTag(("FeeFlow", mkTs("BalanceCurve", fflow))) case {"计数费用": [p, s, amt]} | {"numFee": [p, s, amt]}: return mkTag(("NumFee", [mkDatePattern(p), mkDs(s), amt])) case _: raise RuntimeError(f"Failed to match on fee type:{x}") def mkDateVector(x): match x: case dp if isinstance(dp, str): return mkTag(datePattern[dp]) case [dp, *p] if (dp in datePattern.keys()): return mkTag((datePattern[dp], p)) case _: raise RuntimeError(f"not match found: {x}") def mkDs(x): "Making Deal Stats" match x: case ("债券余额",) | ("bondBalance",): return mkTag("CurrentBondBalance") case ("债券余额", *bnds) | ("bondBalance", *bnds): return mkTag(("CurrentBondBalanceOf", bnds)) case ("初始债券余额",) | ("originalBondBalance",): return mkTag("OriginalBondBalance") case ("到期月份", bn) | ("monthsTillMaturity", bn): return mkTag(("MonthsTillMaturity", bn)) case ("资产池余额",) | ("poolBalance",): return mkTag("CurrentPoolBalance") case ("初始资产池余额",) | ("originalPoolBalance",): return mkTag("OriginalPoolBalance") case ("资产池违约余额",) | ("currentPoolDefaultedBalance",): return mkTag("CurrentPoolDefaultedBalance") case ("资产池累积违约余额",) | ("cumPoolDefaultedBalance",): return mkTag("CumulativePoolDefaultedBalance") case ("资产池累积违约率",) | ("cumPoolDefaultedRate",): return mkTag("CumulativePoolDefaultedRate") case ("债券系数",) | ("bondFactor",): return mkTag("BondFactor") case ("资产池系数",) | ("poolFactor",): return mkTag("PoolFactor") case ("所有账户余额",) | ("accountBalance"): return mkTag("AllAccBalance") case ("账户余额", *ans) | ("accountBalance", *ans): return mkTag(("AccBalance", ans)) case ("债券待付利息", *bnds) | ("bondDueInt", *bnds): return mkTag(("CurrentDueBondInt", bnds)) case ("债券已付利息", *bnds) | ("lastBondIntPaid", *bnds): return mkTag(("LastBondIntPaid", bnds)) case ("债券低于目标余额", bn) | ("behindTargetBalance", bn): return mkTag(("BondBalanceGap", bn)) case ("债务人数量",) | ("borrowerNumber",): return mkTag(("CurrentPoolBorrowerNum")) # , "当期已付债券利息":"LastBondIntPaid" # , "当期已付费用" :"LastFeePaid" # , "当期未付债券利息" :"CurrentDueBondInt" # , "当期未付费用": "CurrentDueFee" case ("待付费用", *fns) | ("feeDue", *fns): return mkTag(("CurrentDueFee", fns)) case ("已付费用", *fns) | ("lastFeePaid", *fns): return mkTag(("LastFeePaid", fns)) case ("系数", ds, f) | ("factor", ds, f): return mkTag(("Factor", [mkDs(ds), f])) case ("Min", ds1, ds2): return mkTag(("Min", [mkDs(ds1), mkDs(ds2)])) case ("Max", ds1, ds2): return mkTag(("Max", [mkDs(ds1), mkDs(ds2)])) case ("合计", *ds) | ("sum", *ds): return mkTag(("Sum", [mkDs(_ds) for _ds in ds])) case ("差额", *ds) | ("substract", *ds): return mkTag(("Substract", [mkDs(_ds) for _ds in ds])) case ("常数", n) | ("constant", n): return mkTag(("Constant", n)) case ("储备账户缺口", *accs) | ("reserveGap", *accs): return mkTag(("ReserveAccGap", accs)) case ("自定义", n) | ("custom", n): return mkTag(("UseCustomData", n)) case legacy if (legacy in baseMap.keys()): return mkDs((legacy,)) case _: raise RuntimeError(f"Failed to match DS/Formula: {x}") def isPre(x): try: return mkPre(x) is not None except RuntimeError as e: return False def mkPre(p): def isIntQuery(y): match y: case ("monthsTillMaturity", 
_): return True case ("到期月份", _): return True case _: return False dealStatusMap = {"摊还": "Current", "加速清偿": "Accelerated", "循环": "Revolving"} match p: case [ds, "=", n]: if isIntQuery(ds): return mkTag(("IfEqInt", [mkDs(ds), n])) else: return mkTag(("IfEqBal", [mkDs(ds), n])) case [ds, ">", amt]: if isIntQuery(ds): return mkTag(("IfGTInt", [mkDs(ds), amt])) else: return mkTag(("IfGT", [mkDs(ds), amt])) case [ds, "<", amt]: if isIntQuery(ds): return mkTag(("IfLTInt", [mkDs(ds), amt])) else: return mkTag(("IfLT", [mkDs(ds), amt])) case [ds, ">=", amt]: if isIntQuery(ds): return mkTag(("IfGETInt", [mkDs(ds), amt])) else: return mkTag(("IfGET", [mkDs(ds), amt])) case [ds, "<=", amt]: if isIntQuery(ds): return mkTag(("IfLETInt", [mkDs(ds), amt])) else: return mkTag(("IfLET", [mkDs(ds), amt])) case [ds, "=", 0]: return mkTag(("IfZero", mkDs(ds))) case [">", _d]: return mkTag(("IfAfterDate", _d)) case ["<", _d]: return mkTag(("IfBeforeDate", _d)) case [">=", _d]: return mkTag(("IfAfterOnDate", _d)) case ["<=", _d]: return mkTag(("IfBeforeOnDate", _d)) case ["状态", _st] | ["status", _st]: return mkTag(("IfDealStatus", mkStatus(_st))) case ["同时满足", _p1, _p2] | ["all", _p1, _p2]: return mkTag(("And", mkPre(_p1), mkPre(_p2))) case ["任一满足", _p1, _p2] | ["any", _p1, _p2]: return mkTag(("Or", mkPre(_p1), mkPre(_p2))) case _: raise RuntimeError(f"Failed to match on Pre: {p}") def mkAccInt(x): match x: case {"周期": _dp, "利率": idx, "利差": spd, "最近结息日": lsd} \ | {"period": _dp, "index": idx, "spread": spd, "lastSettleDate": lsd}: return mkTag(("InvestmentAccount", [idx, spd, lsd, mkDateVector(_dp)])) case {"周期": _dp, "利率": br, "最近结息日": lsd} \ | {"period": _dp, "rate": br, "lastSettleDate": lsd}: return mkTag(("BankAccount", [br, lsd, mkDateVector(_dp)])) case None: return None case _: raise RuntimeError( f"Failed to match on account interest definition: {x}") def mkAccType(x): match x: case {"固定储备金额": amt} | {"fixReserve": amt}: return mkTag(("FixReserve", amt)) case {"目标储备金额": [base, rate]} | {"targetReserve": [base, rate]}: match base: case ["合计", *qs] | ["Sum", *qs]: sumDs = [mkDs(q) for q in qs] return mkTag(("PctReserve", [mkTag(("Sum", sumDs)), rate])) case _: return mkTag(("PctReserve", [mkDs(base), rate])) case {"目标储备金额": {"公式": ds, "系数": rate}} | {"targetReserve": {"formula": ds, "factor": rate}}: return mkTag(("PctReserve", [mkDs(ds), rate])) case {"目标储备金额": {"公式": ds}} | {"targetReserve": {"formula": ds}}: return mkTag(("PctReserve", [mkDs(ds), 1.0])) case {"较高": [a, b]} | {"max": [a, b]}: return mkTag(("Max", [mkAccType(a), mkAccType(b)])) case {"较低": [a, b]} | {"min": [a, b]}: return mkTag(("Min", [mkAccType(a), mkAccType(b)])) case {"分段": [p, a, b]} | {"When": [p, a, b]}: return mkTag(("Either", [mkPre(p), mkAccType(a), mkAccType(b)])) case None: return None case _: raise RuntimeError(f"Failed to match {x} for account reserve type") def mkAccTxn(xs): "AccTxn T.Day Balance Amount Comment" if xs is None: return None else: return [mkTag(("AccTxn", x)) for x in xs] def mkAcc(an, x): match x: case {"余额": b, "类型": t, "计息": i, "记录": tx} | {"balance": b, "type": t, "interest": i, "txn": tx}: return {"accBalance": b, "accName": an, "accType": mkAccType(t), "accInterest": mkAccInt(i), "accStmt": mkAccTxn(tx)} case {"余额": b} | {"balance": b}: return mkAcc(an, x | {"计息": x.get("计息", None), "interest": x.get("interest", None), "记录": x.get("记录", None), "txn": x.get("txn", None), "类型": x.get("类型", None), "type": x.get("type", None)}) case _: raise RuntimeError(f"Failed to match account: {an},{x}") def 
mkBondType(x): match x: case {"固定摊还": schedule} | {"PAC": schedule}: return mkTag(("PAC", mkTag(("BalanceCurve", schedule)))) case {"过手摊还": None} | {"Sequential": None}: return mkTag(("Sequential")) case {"锁定摊还": _after} | {"Lockout": _after}: return mkTag(("Lockout", _after)) case {"权益": _} | {"Equity": _}: return mkTag(("Equity")) case _: raise RuntimeError(f"Failed to match bond type: {x}") def mkRateReset(x): match x: case {"重置期间": interval, "起始": sdate} | {"resetInterval": interval, "starts": sdate}: return mkTag(("ByInterval", [freqMap[interval], sdate])) case {"重置期间": interval} | {"resetInterval": interval}: return mkTag(("ByInterval", [freqMap[interval], None])) case {"重置月份": monthOfYear} | {"resetMonth": monthOfYear}: return mkTag(("MonthOfYear", monthOfYear)) case _: raise RuntimeError(f"Failed to match:{x}: mkRateReset") def mkBondRate(x): indexMapping = {"LPR5Y": "LPR5Y", "LIBOR1M": "LIBOR1M"} match x: case {"浮动": [_index, Spread, resetInterval], "日历": dc} | \ {"floater": [_index, Spread, resetInterval], "dayCount": dc}: return mkTag(("Floater", [indexMapping[_index], Spread, mkRateReset(resetInterval), dc, None, None])) case {"浮动": [_index, Spread, resetInterval]} | {"floater": [_index, Spread, resetInterval]}: return mkBondRate(x | {"日历": DC.DC_ACT_365F.value, "dayCount": DC.DC_ACT_365F.value}) case {"固定": _rate, "日历": dc} | {"fix": _rate, "dayCount": dc}: return mkTag(("Fix", [_rate, dc])) case {"固定": _rate} | {"Fixed": _rate}: return mkTag(("Fix", [_rate, DC.DC_ACT_365F.value])) case {"期间收益": _yield}: return mkTag(("InterestByYield", _yield)) case _: raise RuntimeError(f"Failed to match bond rate type:{x}") def mkBnd(bn, x): match x: case {"当前余额": bndBalance, "当前利率": bndRate, "初始余额": originBalance, "初始利率": originRate, "起息日": originDate, "利率": bndInterestInfo, "债券类型": bndType} | \ {"balance": bndBalance, "rate": bndRate, "originBalance": originBalance, "originRate": originRate, "startDate": originDate, "rateType": bndInterestInfo, "bondType": bndType}: md = x.get("到期日", None) or x.get("maturityDate", None) return {bn: {"bndName": bn, "bndBalance": bndBalance, "bndRate": bndRate, "bndOriginInfo": {"originBalance": originBalance, "originDate": originDate, "originRate": originRate} | {"maturityDate": md}, "bndInterestInfo": mkBondRate(bndInterestInfo), "bndType": mkBondType(bndType), "bndDuePrin": 0, "bndDueInt": 0, "bndDueIntDate": None}} case _: raise RuntimeError(f"Failed to match bond:{bn},{x}:mkBnd") def mkLiqMethod(x): match x: case ["正常|违约", a, b] | ["Cuurent|Defaulted", a, b]: return mkTag(("BalanceFactor", [a, b])) case ["正常|拖欠|违约", a, b, c] | ["Cuurent|Delinquent|Defaulted", a, b, c]: return mkTag(("BalanceFactor2", [a, b, c])) case ["贴现|违约", a, b] | ["PV|Defaulted", a, b]: return mkTag(("PV", [a, b])) case _: raise RuntimeError(f"Failed to match {x}:mkLiqMethod") def mkFeeCapType(x): match x: case {"应计费用百分比": pct} | {"duePct": pct}: return mkTag(("DuePct", pct)) case {"应计费用上限": amt} | {"dueCapAmt": amt}: return mkTag(("DueCapAmt", amt)) case _: raise RuntimeError(f"Failed to match {x}:mkFeeCapType") def mkPDA(x): match x: case {"公式": ds} | {"formula": ds}: return mkTag(("DS", mkDs(ds))) case _: raise RuntimeError(f"Failed to match {x}:mkPDA") def mkAccountCapType(x): match x: case {"余额百分比": pct} | {"balPct": pct}: return mkTag(("DuePct", pct)) case {"金额上限": amt} | {"balCapAmt": amt}: return mkTag(("DueCapAmt", amt)) case _: raise RuntimeError(f"Failed to match {x}:mkAccountCapType") def mkTransferLimit(x): match x: case {"余额百分比": pct} | {"balPct": pct}: return 
mkTag(("DuePct", pct)) case {"金额上限": amt} | {"balCapAmt": amt}: return mkTag(("DueCapAmt", amt)) case {"公式": "ABCD"}: return mkTag(("Formula", "ABCD")) case {"公式": formula} | {"formula": formula}: return mkTag(("DS", mkDs(formula))) case _: raise RuntimeError(f"Failed to match :{x}:mkTransferLimit") def mkAction(x): match x: case ["账户转移", source, target] | ["transfer", source, target]: return mkTag(("Transfer", [source, target])) case ["按公式账户转移", _limit, source, target] | ["transferBy", _limit, source, target]: return mkTag(("TransferBy", [mkTransferLimit(_limit), source, target])) case ["计提费用", *feeNames] | ["calcFee", *feeNames]: return mkTag(("CalcFee", feeNames)) case ["计提利息", *bndNames] | ["calcInt", *bndNames]: return mkTag(("CalcBondInt", bndNames)) case ["支付费用", source, target] | ["payFee", source, target]: return mkTag(("PayFee", [source, target])) case ["支付费用收益", source, target, _limit] | ["payFeeResidual", source, target, _limit]: limit = mkAccountCapType(_limit) return mkTag(("PayFeeResidual", [limit, source, target])) case ["支付费用收益", source, target] | ["payFeeResidual", source, target]: return mkTag(("PayFeeResidual", [None, source, target])) case ["支付费用限额", source, target, _limit] | ["payFeeBy", source, target, _limit]: limit = mkFeeCapType(_limit) return mkTag(("PayFeeBy", [limit, source, target])) case ["支付利息", source, target] | ["payInt", source, target]: return mkTag(("PayInt", [source, target])) case ["支付本金", source, target, _limit] | ["payPrin", source, target, _limit]: pda = mkPDA(_limit) return mkTag(("PayPrinBy", [pda, source, target])) case ["支付本金", source, target] | ["payPrin", source, target]: return mkTag(("PayPrin", [source, target])) case ["支付剩余本金", source, target] | ["payPrinResidual", source, target]: return mkTag(("PayPrinResidual", [source, target])) case ["支付期间收益", source, target]: return mkTag(("PayTillYield", [source, target])) case ["支付收益", source, target, limit] | ["payResidual", source, target, limit]: return mkTag(("PayResidual", [limit, source, target])) case ["支付收益", source, target] | ["payResidual", source, target]: return mkTag(("PayResidual", [None, source, target])) case ["储备账户转移", source, target, satisfy] | ["transferReserve", source, target, satisfy]: _map = {"源储备": "Source", "目标储备": "Target"} return mkTag(("TransferReserve", [_map[satisfy], source, target])) case ["出售资产", liq, target] | ["sellAsset", liq, target]: return mkTag(("LiquidatePool", [mkLiqMethod(liq), target])) case ["流动性支持", source, target, limit] | ["liqSupport", source, target, limit]: return mkTag(("LiqSupport", [mkTag(("DS", mkDs(limit))), source, target])) case ["流动性支持", source, target] | ["liqSupport", source, target]: return mkTag(("LiqSupport", [None, source, target])) case ["流动性支持偿还", source, target] | ["liqRepay", source, target]: return mkTag(("LiqRepay", [None, source, target])) case ["流动性支持报酬", source, target] | ["liqRepayResidual", source, target]: return mkTag(("LiqYield", [None, source, target])) case ["流动性支持计提", target] | ["liqAccrue", target]: return mkTag(("LiqAccrue", target)) case _: raise RuntimeError(f"Failed to match :{x}:mkAction") def mkWaterfall2(x): match x: case (pre, *_action) if isPre(pre) and len(x) > 2: # pre with multiple actions _pre = mkPre(pre) return [[_pre, mkAction(a)] for a in _action] case (pre, _action) if isPre(pre) and len(x) == 2: # pre with 1 actions _pre = mkPre(pre) return [[_pre, mkAction(_action)]] case _: return [[None, mkAction(x)]] def mkStatus(x): match x: case "摊销" | "Amortizing": return mkTag(("Amortizing")) case "循环" | 
"Revolving": return mkTag(("Revolving")) case "加速清偿" | "Accelerated": return mkTag(("DealAccelerated", None)) case "违约" | "Defaulted": return mkTag(("DealDefaulted", None)) case "结束" | "Ended": return mkTag(("Ended")) case "设计" | "PreClosing": return mkTag(("PreClosing")) case _: raise RuntimeError(f"Failed to match :{x}:mkStatus") def readStatus(x, locale): m = {"en": {'amort': "Amortizing", 'def': "Defaulted", 'acc': "Accelerated", 'end': "Ended", 'pre': "PreClosing"}, "cn": {'amort': "摊销", 'def': "违约", 'acc': "加速清偿", 'end': "结束", 'pre': "设计"}} match x: case {"tag": "Amortizing"}: return m[locale]['amort'] case {"tag": "DealAccelerated"}: return m[locale]['acc'] case {"tag": "DealDefaulted"}: return m[locale]['def'] case {"tag": "Ended"}: return m[locale]['end'] case {"tag": "PreClosing"}: return m[locale]['pre'] case _: raise RuntimeError( f"Failed to read deal status:{x} with locale: {locale}") def mkWhenTrigger(x): match x: case "回收后" | "BeforeCollect": return "BeginCollectionWF" case "回收动作后" | "AfterCollect": return "EndCollectionWF" case "分配前" | "BeforeDistribution": return "BeginDistributionWF" case "分配后" | "AfterDistribution": return "EndDistributionWF" case _: raise RuntimeError(f"Failed to match :{x}:mkWhenTrigger") def mkThreshold(x): match x: case ">": return "Above" case ">=": return "EqAbove" case "<": return "Below" case "<=": return "EqBelow" case _: raise RuntimeError(f"Failed to match :{x}:mkThreshold") def _rateTypeDs(x): h = x[0] if h in set(["资产池累积违约率", "cumPoolDefaultedRate", "债券系数", "bondFactor", "资产池系数", "poolFactor"]): return True return False def mkTrigger(x): match x: case [">", _d]: return mkTag(("AfterDate", _d)) case [">=", _d]: return mkTag(("AfterOnDate", _d)) case ["到期日未兑付", _bn] | ["passMaturity", _bn]: return mkTag(("PassMaturityDate", _bn)) case ["所有满足", *trgs] | ["all", *trgs]: return mkTag(("AllTrigger", [mkTrigger(t) for t in trgs])) case ["任一满足", *trgs] | ["any", *trgs]: return mkTag(("AnyTrigger", [mkTrigger(t) for t in trgs])) case ["一直", b] | ["always", b]: return mkTag(("Always", b)) case [ds, cmp, v] if (isinstance(v, float) and _rateTypeDs(ds)): return mkTag(("ThresholdRate", [mkThreshold(cmp), mkDs(ds), v])) case [ds, cmp, ts] if _rateTypeDs(ds): return mkTag(("ThresholdRateCurve", [mkThreshold(cmp), mkDs(ds), mkTs("ThresholdCurve", ts)])) case [ds, cmp, v] if (isinstance(v, float) or isinstance(v, int)): return mkTag(("ThresholdBal", [mkThreshold(cmp), mkDs(ds), v])) case [ds, cmp, ts]: return mkTag(("ThresholdBalCurve", [mkThreshold(cmp), mkDs(ds), mkTs("ThresholdCurve", ts)])) case _: raise RuntimeError(f"Failed to match :{x}:mkTrigger") def mkTriggerEffect(x): match x: case ("新状态", s) | ("newStatus", s): return mkTag(("DealStatusTo", mkStatus(s))) case ["计提费用", *fn] | ["accrueFees", *fn]: return mkTag(("DoAccrueFee", fn)) case ["新增事件", trg] | ["newTrigger", trg]: return mkTag(("AddTrigger", mkTrigger(trg))) case ["结果", *efs] | ["Effects", *efs]: return mkTag(("TriggerEffects", [mkTriggerEffect(e) for e in efs])) case _: raise RuntimeError(f"Failed to match :{x}:mkTriggerEffect") def mkWaterfall(r, x): mapping = { "未违约": "Amortizing", "摊销": "Amortizing", "循环": "Revolving", "加速清偿": "DealAccelerated", "违约": "DealDefaulted", "未设立": "PreClosing", } if len(x) == 0: return {k: list(v) for k, v in r.items()} _k, _v = x.popitem() _w_tag = None match _k: case ("兑付日", "加速清偿") | ("amortizing", "accelerated"): _w_tag = f"DistributionDay (DealAccelerated Nothing)" case ("兑付日", "违约") | ("amortizing", "defaulted"): _w_tag = f"DistributionDay 
(DealDefaulted Nothing)" case ("兑付日", _st) | ("amortizing", _st): _w_tag = f"DistributionDay {mapping.get(_st,_st)}" case "兑付日" | "未违约" | "amortizing": _w_tag = f"DistributionDay Amortizing" case "清仓回购" | "cleanUp": _w_tag = "CleanUp" case "回款日" | "回款后" | "endOfCollection": _w_tag = f"EndOfPoolCollection" case "设立日" | "closingDay": _w_tag = f"OnClosingDay" case _: raise RuntimeError(f"Failed to match :{x}:mkWaterfall") r[_w_tag] = itertools.chain.from_iterable([mkWaterfall2(_a) for _a in _v]) return mkWaterfall(r, x) def mkAssetRate(x): match x: case ["固定", r] | ["fix", r]: return mkTag(("Fix", r)) case ["浮动", r, {"基准": idx, "利差": spd, "重置频率": p}]: return mkTag(("Floater", [idx, spd, r, freqMap[p], None])) case ["floater", r, {"index": idx, "spread": spd, "reset": p}]: return mkTag(("Floater", [idx, spd, r, freqMap[p], None])) case ["Floater", r, {"index": idx, "spread": spd, "reset": p}]: return mkTag(("Floater", [idx, spd, r, freqMap[p], None])) case _: raise RuntimeError(f"Failed to match {x}:mkAssetRate") def mkAmortPlan(x) -> dict: match x: case "等额本息" | "Level" | "level": return mkTag("Level") case "等额本金" | "Even" | "even": return mkTag("Even") case "先息后本" | "I_P" | "i_p": return mkTag("I_P") case "等本等费" | "F_P" | "f_p": return mkTag("F_P") case _: raise RuntimeError(f"Failed to match AmortPlan {x}:mkAmortPlan") def mkAsset(x): _statusMapping = {"正常": mkTag(("Current")), "违约": mkTag(("Defaulted", None)), "current": mkTag(("Current")), "defaulted": mkTag(("Defaulted", None)), "Current": mkTag(("Current")), "Defaulted": mkTag(("Defaulted", None)) } match x: case ["按揭贷款", {"放款金额": originBalance, "放款利率": originRate, "初始期限": originTerm, "频率": freq, "类型": _type, "放款日": startDate}, {"当前余额": currentBalance, "当前利率": currentRate, "剩余期限": remainTerms, "状态": status}] | \ ["Mortgage", {"originBalance": originBalance, "originRate": originRate, "originTerm": originTerm, "freq": freq, "type": _type, "originDate": startDate}, {"currentBalance": currentBalance, "currentRate": currentRate, "remainTerm": remainTerms, "status": status}]: borrowerNum1 = x[2].get("borrowerNum", None) borrowerNum2 = x[2].get("借款数量", None) return mkTag(("Mortgage", [ {"originBalance": originBalance, "originRate": mkAssetRate(originRate), "originTerm": originTerm, "period": freqMap[freq], "startDate": startDate, "prinType": mkAmortPlan(_type) } | mkTag("MortgageOriginalInfo"), currentBalance, currentRate, remainTerms, (borrowerNum1 or borrowerNum2), _statusMapping[status]])) case ["贷款", {"放款金额": originBalance, "放款利率": originRate, "初始期限": originTerm, "频率": freq, "类型": _type, "放款日": startDate}, {"当前余额": currentBalance, "当前利率": currentRate, "剩余期限": remainTerms, "状态": status}] \ | ["Loan", {"originBalance": originBalance, "originRate": originRate, "originTerm": originTerm, "freq": freq, "type": _type, "originDate": startDate}, {"currentBalance": currentBalance, "currentRate": currentRate, "remainTerm": remainTerms, "status": status}]: return mkTag(("PersonalLoan", [ {"originBalance": originBalance, "originRate": mkAssetRate(originRate), "originTerm": originTerm, "period": freqMap[freq], "startDate": startDate, "prinType": mkAmortPlan(_type) } | mkTag("LoanOriginalInfo"), currentBalance, currentRate, remainTerms, _statusMapping[status]])) case ["分期", {"放款金额": originBalance, "放款费率": originRate, "初始期限": originTerm, "频率": freq, "类型": _type, "放款日": startDate, "剩余期限": remainTerms}, {"当前余额": currentBalance, "状态": status}] \ | ["Installment", {"originBalance": originBalance, "feeRate": originRate, "originTerm": originTerm, "freq": freq, 
"type": _type, "originDate": startDate, "remainTerm": remainTerms}, {"currentBalance": currentBalance, "status": status}]: return mkTag(("Installment", [ {"originBalance": originBalance, "originRate": mkAssetRate(originRate), "originTerm": originTerm, "period": freqMap[freq], "startDate": startDate, "prinType": mkAmortPlan(_type) } | mkTag("LoanOriginalInfo"), currentBalance, remainTerms, _statusMapping[status]])) case ["租赁", {"固定租金": dailyRate, "初始期限": originTerm, "频率": dp, "起始日": startDate, "状态": status, "剩余期限": remainTerms}] \ | ["Lease", {"fixRental": dailyRate, "originTerm": originTerm, "freq": dp, "originDate": startDate, "status": status, "remainTerm": remainTerms}]: return mkTag(("RegularLease", [{"originTerm": originTerm, "startDate": startDate, "paymentDates": mkDatePattern(dp), "originRental": dailyRate} | mkTag("LeaseInfo"), 0, remainTerms, _statusMapping[status]])) case ["租赁", {"初始租金": dailyRate, "初始期限": originTerm, "频率": dp, "起始日": startDate, "计提周期": accDp, "涨幅": rate, "状态": status, "剩余期限": remainTerms}] \ | ["Lease", {"initRental": dailyRate, "originTerm": originTerm, "freq": dp, "originDate": startDate, "accrue": accDp, "pct": rate, "status": status, "remainTerm": remainTerms}]: dailyRatePlan = None _stepUpType = "curve" if isinstance(rate, list) else "constant" if _stepUpType == "constant": dailyRatePlan = mkTag( ("FlatRate", [mkDatePattern(accDp), rate])) else: dailyRatePlan = mkTag( ("ByRateCurve", [mkDatePattern(accDp), rate])) return mkTag(("StepUpLease", [{"originTerm": originTerm, "startDate": startDate, "paymentDates": mkDatePattern(dp), "originRental": dailyRate} | mkTag("LeaseInfo"), dailyRatePlan, 0, remainTerms, _statusMapping[status]])) case _: raise RuntimeError(f"Failed to match {x}:mkAsset") def identify_deal_type(x): match x: case {"pool": {"assets": [{'tag': 'PersonalLoan'}, *rest]}}: return "LDeal" case {"pool": {"assets": [{'tag': 'Mortgage'}, *rest]}}: return "MDeal" case {"pool": {"assets": [], "futureCf": cfs}} if cfs[0]['tag'] == 'MortgageFlow': return "MDeal" case {"pool": {"assets": [{'tag': 'Installment'}, *rest]}}: return "IDeal" case {"pool": {"assets": [{'tag': 'Lease'}, *rest]}} | {"pool": {"assets": [{'tag': 'RegularLease'}, *rest]}}: return "RDeal" case {"pool": {"assets": [{'tag': 'StepUpLease'}, *rest]}}: return "RDeal" case _: raise RuntimeError(f"Failed to identify deal type {x}") def mkCallOptions(x): match x: case {"资产池余额": bal} | {"poolBalance": bal}: return mkTag(("PoolBalance", bal)) case {"债券余额": bal} | {"bondBalance": bal}: return mkTag(("BondBalance", bal)) case {"资产池余额剩余比率": factor} | {"poolFactor": factor}: return mkTag(("PoolFactor", factor)) case {"债券余额剩余比率": factor} | {"bondFactor": factor}: return mkTag(("BondFactor", factor)) case {"指定日之后": d} | {"afterDate": d}: return mkTag(("AfterDate", d)) case {"任意满足": xs} | {"or": xs}: return mkTag(("Or", [mkCallOptions(_x) for _x in xs])) case {"全部满足": xs} | {"and": xs}: return mkTag(("And", [mkCallOptions(_x) for _x in xs])) case _: raise RuntimeError(f"Failed to match {x}:mkCallOptions") def mkAssumption(x) -> dict: match x: case {"CPR": cpr} if isinstance(cpr, list): return mkTag(("PrepaymentVec", cpr)) case {"CDR": cdr} if isinstance(cdr, list): return mkTag(("DefaultVec", cdr)) case {"CPR": cpr}: return mkTag(("PrepaymentCPR", cpr)) case {"CPR调整": [*cprAdj, ed]} | {"CPRAdjust": [*cprAdj, ed]}: return mkTag(("PrepaymentFactors", mkTs("FactorCurveClosed", [cprAdj, ed]))) case {"CDR": cdr}: return mkTag(("DefaultCDR", cdr)) case {"CDR调整": [*cdrAdj, ed]} | {"CDRAdjust": [*cdrAdj, 
ed]}: return mkTag(("DefaultFactors", mkTs("FactorCurveClosed", [cdrAdj, ed]))) case {"回收": (rr, rlag)} | {"Recovery": (rr, rlag)}: return mkTag(("Recovery", (rr, rlag))) case {"利率": [idx, rate]} if isinstance(rate, float): return mkTag(("InterestRateConstant", [idx, rate])) case {"Rate": [idx, rate]} if isinstance(rate, float): return mkTag(("InterestRateConstant", [idx, rate])) case {"利率": [idx, *rateCurve]} | {"Rate": [idx, *rateCurve]}: return mkTag(("InterestRateCurve", [idx, *rateCurve])) case {"清仓": opts} | {"CleanUp": opts}: return mkTag(("CallWhen", [mkCallOptions(co) for co in opts])) case {"停止": d} | {"StopAt": d}: return mkTag(("StopRunBy", d)) case {"租赁截止日": d} | {"LeaseProjectEnd": d}: return mkTag(("LeaseProjectionEnd", d)) case {"租赁年涨幅": r} | {"LeaseAnnualIncreaseRate": r} if not isinstance(r, list): return mkTag(("LeaseBaseAnnualRate", r)) case {"租赁年涨幅": r} | {"LeaseAnnualIncreaseRate": r}: return mkTag(("LeaseBaseCurve", mkTs("FloatCurve", r))) case {"租赁间隔": n} | {"LeaseGapDays": n}: return mkTag(("LeaseGapDays", n)) case {"租赁间隔表": (tbl, n)} | {"LeaseGapDaysByAmount": (tbl, n)}: return mkTag(("LeaseGapDaysByAmount", [tbl, n])) case {"查看":inspects} | {"Inspect":inspects}: inspectVars = [ [mkDatePattern(dp),mkDs(ds)] for dp,ds in inspects ] return mkTag(("InspectOn", inspectVars)) case _: raise RuntimeError(f"Failed to match {x}:Assumption") def mkAssumpList(xs): return [mkAssumption(x) for x in xs] def mkAssumption2(x) -> dict: match x: case (assetAssumpList, dealAssump) if isinstance(x, tuple): return mkTag(("ByIndex", [[(ids, mkAssumpList(aps)) for ids, aps in assetAssumpList], mkAssumpList(dealAssump)])) case xs if isinstance(xs, list): return mkTag(("PoolLevel", mkAssumpList(xs))) case None: return None case _: raise RuntimeError(f"Failed to match {x}:mkAssumption2") def mkPool(x): mapping = {"LDeal": "LPool", "MDeal": "MPool", "IDeal": "IPool", "RDeal": "RPool"} match x: case {"清单": assets, "封包日": d} | {"assets": assets, "cutoffDate": d}: _pool = {"assets": [mkAsset(a) for a in assets], "asOfDate": d} _pool_asset_type = identify_deal_type({"pool": _pool}) return mkTag((mapping[_pool_asset_type], _pool)) case _: raise RuntimeError(f"Failed to match {x}:mkPool") def mkCustom(x): match x: case {"常量": n} | {"Constant": n}: return mkTag(("CustomConstant", n)) case {"余额曲线": ts} | {"BalanceCurve": ts}: return mkTag(("CustomCurve", mkTs("BalanceCurve", ts))) case {"公式": ds} | {"Formula": ds}: return mkTag(("CustomDS", mkDs(ds))) def mkLiqProviderType(x): match x: case {"总额度": amt} | {"Total": amt}: return mkTag(("FixSupport")) case {"日期": dp, "限额": amt} | {"Reset": dp, "Quota": amt}: return mkTag(("ReplenishSupport", [mkDatePattern(dp), amt])) case {"日期": dp, "公式": ds,"系数":pct} | {"Reset": dp, "Formula":ds, "Pct":pct}: return mkTag(("ByPct", [mkDatePattern(dp),mkDs(ds),pct])) case {}: return mkTag(("UnLimit")) case _: raise RuntimeError(f"Failed to match LiqProvider Type:{x}") def mkLiqProviderRate(x): match x: case {"fixRate":r ,"rateAccDates":rateAccDates,"lastAccDate":lastAccDate} | \ {"固定利率":r ,"结息日":rateAccDates,"上次结息日":lastAccDate} : return mkTag(("FixRate",[mkDatePattern(rateAccDates),r,lastAccDate])) case _: return None def mkLiqProvider(n, x): match x: case {"类型": "无限制", "起始日": _sd, **p} \ | {"type": "Unlimited", "start": _sd, **p}: return {"liqName": n, "liqType": mkLiqProviderType({}) , "liqBalance": None, "liqCredit": p.get("已提供", 0) | p.get("credit",0), "liqStart": _sd ,"liqRate":mkLiqProviderRate(p)} case {"类型": _sp, "额度": _ab, "起始日": _sd, **p} \ | {"type": _sp, 
"lineOfCredit": _ab, "start": _sd, **p}: return {"liqName": n, "liqType": mkLiqProviderType(_sp) , "liqBalance": _ab, "liqCredit": p.get("已提供", 0) | p.get("credit",0), "liqStart": _sd ,"liqRate":mkLiqProviderRate(p)} case {"额度": _ab, "起始日": _sd, **p} \ | {"lineOfCredit": _ab, "start": _sd, **p}: return {"liqName": n, "liqType": mkTag(("FixSupport")) , "liqBalance": _ab, "liqCredit": p.get("已提供", 0) | p.get("credit",0), "liqStart": _sd ,"liqRate":mkLiqProviderRate(p)} case _: raise RuntimeError(f"Failed to match LiqProvidere:{x}") def mkCf(x): if len(x) == 0: return None else: return [mkTag(("MortgageFlow", _x+[0.0]*5+[None])) for _x in x] def mkCollection(xs): sourceMapping = {"利息回款": "CollectedInterest", "本金回款": "CollectedPrincipal", "早偿回款": "CollectedPrepayment", "回收回款": "CollectedRecoveries", "租金回款": "CollectedRental", "CollectedInterest": "CollectedInterest", "CollectedPrincipal": "CollectedPrincipal", "CollectedPrepayment": "CollectedPrepayment", "CollectedRecoveries": "CollectedRecoveries", "CollectedRental": "CollectedRental" } return [[sourceMapping[x], acc] for (x, acc) in xs] def mkAccTxn(xs): "AccTxn T.Day Balance Amount Comment" if xs is None: return None else: return [mkTag(("AccTxn", x)) for x in xs] def mk(x): match x: case ["资产", assets]: return {"assets": [mkAsset(a) for a in assets]} case ["账户", accName, attrs] | ["account", accName, attrs]: return {accName: mkAcc(accName, attrs)} case ["费用", feeName, {"类型": feeType, **fi}] \ | ["fee", feeName, {"type": feeType, **fi}]: return {feeName: {"feeName": feeName, "feeType": mkFeeType(feeType), "feeStart": fi.get("起算日", None), "feeDueDate": fi.get("计算日", None), "feeDue": 0, "feeArrears": 0, "feeLastPaidDay": None}} case ["债券", bndName, bnd] | ["bond", bndName, bnd]: return mkBnd(bndName, bnd) case ["归集规则", collection]: return mkCollection(collection) def mkPricingAssump(x): match x: case {"贴现日": pricingDay, "贴现曲线": xs} | {"PVDate": pricingDay, "PVCurve": xs}: return mkTag(("DiscountCurve", [pricingDay, mkTs("IRateCurve", xs)])) case {"债券": bnd_with_price, "利率曲线": rdps} | {"bonds": bnd_with_price, "curve": rdps}: return mkTag(("RunZSpread", [mkTs("IRateCurve", rdps), bnd_with_price])) case _: raise RuntimeError(f"Failed to match pricing assumption: {x}") def readPricingResult(x, locale) -> dict: if x is None: return None h = None tag = query(x, [S.MVALS, S.ALL, "tag"])[0] if tag == "PriceResult": h = {"cn": ["估值", "票面估值", "WAL", "久期", "凸性", "应计利息"], "en": ["pricing", "face", "WAL", "duration", "convexity", "accure interest"]} elif tag == "ZSpread": h = {"cn": ["静态利差"], "en": ["Z-spread"]} else: raise RuntimeError( f"Failed to read princing result: {x} with tag={tag}") return pd.DataFrame.from_dict({k: v['contents'] for k, v in x.items()}, orient='index', columns=h[locale]).sort_index() def readRunSummary(x, locale) -> dict: def filter_by_tags(xs, tags): tags_set = set(tags) return [ x for x in xs if x['tag'] in tags_set] r = {} if x is None: return None bndStatus = {'cn': ["本金违约", "利息违约", "起算余额"], 'en': [ "Balance Defaults", "Interest Defaults", "Original Balance"]} bond_defaults = [(_['contents'][0], _['tag'], _['contents'][1], _['contents'][2]) for _ in x if _['tag'] in set(['BondOutstanding', 'BondOutstandingInt'])] _fmap = {"cn": {'BondOutstanding': "本金违约", "BondOutstandingInt": "利息违约"}, "en": { 'BondOutstanding': "Balance Defaults", "BondOutstandingInt": "Interest Defaults"}} bndNames = set([y[0] for y in bond_defaults]) bndSummary = pd.DataFrame(columns=bndStatus[locale], index=list(bndNames)) for bn, amt_type, amt, begBal 
in bond_defaults: bndSummary.loc[bn][_fmap[locale][amt_type]] = amt bndSummary.loc[bn][bndStatus[locale][2]] = begBal bndSummary.fillna(0, inplace=True) bndSummary["Total"] = bndSummary[bndStatus[locale][0]] + \ bndSummary[bndStatus[locale][1]] r['bonds'] = bndSummary dealStatusLog = {'cn': ["日期", "旧状态", "新状态"], 'en': ["Date", "From", "To"]} status_change_logs = [(_['contents'][0], readStatus(_['contents'][1], locale), readStatus(_['contents'][2], locale)) for _ in x if _['tag'] in set(['DealStatusChangeTo'])] r['status'] = pd.DataFrame(data=status_change_logs, columns=dealStatusLog[locale]) # inspection variables def uplift_ds(df): ds_name = readTagStr(df['DealStats'].iloc[0]) df.drop(columns=["DealStats"],inplace=True) df.rename(columns={"Value":ds_name},inplace=True) df.set_index("Date",inplace=True) return df inspect_vars = filter_by_tags(x, ["InspectBal"]) inspect_df = pd.DataFrame(data = [ (c['contents'][0],str(c['contents'][1]),c['contents'][2]) for c in inspect_vars ] ,columns=["Date","DealStats","Value"]) grped_inspect_df = inspect_df.groupby("DealStats") r['inspect'] = {readTagStr(k):uplift_ds(v) for k,v in grped_inspect_df} return r def aggAccs(x, locale): _header = { "cn": {"idx": "日期", "change": "变动额", "bal": ("期初余额", '余额', "期末余额")}, "en": {"idx": "date", "change": "change", "bal": ("begin balance", 'balance', "end balance")} } header = _header[locale] agg_acc = {} for k, v in x.items(): acc_by_date = v.groupby(header["idx"]) acc_txn_amt = acc_by_date.agg(change=(header["change"], sum)) ending_bal_column = acc_by_date.last( )[header["bal"][1]].rename(header["bal"][2]) begin_bal_column = ending_bal_column.shift(1).rename(header["bal"][0]) agg_acc[k] = acc_txn_amt.join([begin_bal_column, ending_bal_column]) if agg_acc[k].empty: agg_acc[k].columns = header["bal"][0], header['change'], header["bal"][2] continue fst_idx = agg_acc[k].index[0] agg_acc[k].at[fst_idx, header["bal"][0]] = round( agg_acc[k].at[fst_idx, header["bal"][2]] - agg_acc[k].at[fst_idx, header['change']], 2) agg_acc[k] = agg_acc[k][[header["bal"][0], header['change'], header["bal"][2]]] return agg_acc def readIssuance(pool): _map = {'cn': "发行", 'en': "Issuance"} lang_flag = None if '发行' in pool.keys(): lang_flag = 'cn' elif 'Issuance' in pool.keys(): lang_flag = 'en' else: return None validIssuanceFields = { "资产池规模": "IssuanceBalance", "IssuanceBalance": "IssuanceBalance" } r = {} for k, v in pool[_map[lang_flag]].items(): if k in validIssuanceFields: r[validIssuanceFields[k]] = v else: logging.warning( "Key {k} is not in pool fields {validIssuanceFields.keys()}") return r def show(r, x="full"): ''' show cashflow of SPV during the projection ''' def _map(y): if y == 'cn': return {"agg_accounts": "账户", "fees": "费用", "bonds": "债券", "pool": "资产池", "idx": "日期"} else: return {"agg_accounts": "Accounts", "fees": "Fees", "bonds": "Bonds", "pool": "Pool", "idx": "date"} _comps = ['agg_accounts', 'fees', 'bonds'] dfs = {c: pd.concat(r[c].values(), axis=1, keys=r[c].keys()) for c in _comps if r[c]} locale = guess_locale(r) _m = _map(locale) dfs2 = {} for k, v in dfs.items(): dfs2[_m[k]] = pd.concat([v], keys=[_m[k]], axis=1) agg_pool = pd.concat([r['pool']['flow']], axis=1, keys=[_m["pool"]]) agg_pool = pd.concat([agg_pool], axis=1, keys=[_m["pool"]]) _full = functools.reduce(lambda acc, x: acc.merge( x, how='outer', on=[_m["idx"]]), [agg_pool]+list(dfs2.values())) match x: case "full": return _full.loc[:, [_m["pool"]]+list(dfs2.keys())].sort_index() case "cash": return None # "" def flow_by_scenario(rs, flowpath, 
annotation=True, aggFunc=None, rnd=2): "pull flows from multiple scenario" scenario_names = rs.keys() locale = guess_locale(list(rs.values())[0]) def _map(y): if y == 'cn': return {"idx": "日期"} else: return {"idx": "date"} m = _map(locale) dflow = None aggFM = {"max": pd.Series.max, "sum": pd.Series.sum, "min": pd.Series.min} if aggFunc is None: dflows = [query(rs, [s]+flowpath) for s in scenario_names] else: dflows = [query(rs, [s]+flowpath).groupby(m['idx']).aggregate( aggFM.get(aggFunc, aggFunc)) for s in scenario_names] if annotation: dflows = [f.rename(f"{s}({flowpath[-1]})") for (s, f) in zip(scenario_names, dflows)] try: return pd.concat(dflows, axis=1).round(rnd) except ValueError as e: return f"need to pass function to `aggFunc` to aggregate duplication rows, options: Min/Max/Sum "
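
# --- Illustration (not part of this module) ----------------------------------
# The builders above accept both the Chinese and the English spelling of the
# deal mini-DSL and map them to the same internal representation (built via
# mkTag from absbox.local.util), so the two calls in each pair below should
# produce equal values.  This is only a usage sketch.
from absbox.local.component import mkDs, mkPre

assert mkDs(("bondBalance",)) == mkDs(("债券余额",))             # CurrentBondBalance
assert mkDs(("accountBalance", "acc01")) == mkDs(("账户余额", "acc01"))

# Predicates combine a formula, a comparator and a value ...
low_bond_balance = mkPre([("bondBalance",), "<", 1000])          # wrapped as IfLT
# ... and can be composed with date conditions:
either = mkPre(["any", [("bondBalance",), "<", 1000], ["<", "2030-01-01"]])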
PypiClean
/das7pad-dns-lexicon-3.5.1.post1.tar.gz/das7pad-dns-lexicon-3.5.1.post1/lexicon/providers/cloudns.py
from __future__ import absolute_import import logging import requests from lexicon.providers.base import Provider as BaseProvider LOGGER = logging.getLogger(__name__) NAMESERVER_DOMAINS = ["cloudns.net"] def provider_parser(subparser): """Configure provider parser for CloudNS""" identity_group = subparser.add_mutually_exclusive_group() identity_group.add_argument("--auth-id", help="specify user id for authentication") identity_group.add_argument( "--auth-subid", help="specify subuser id for authentication" ) identity_group.add_argument( "--auth-subuser", help="specify subuser name for authentication" ) subparser.add_argument( "--auth-password", help="specify password for authentication" ) subparser.add_argument("--weight", help="specify the SRV record weight") subparser.add_argument("--port", help="specify the SRV record port") class Provider(BaseProvider): """Provider class for CloudNS""" def __init__(self, config): super(Provider, self).__init__(config) self.domain_id = None self.api_endpoint = "https://api.cloudns.net" def _authenticate(self): payload = self._get("/dns/get-zone-info.json", {"domain-name": self.domain}) self.domain_id = payload["name"] LOGGER.debug("authenticate: %s", payload) def _create_record(self, rtype, name, content): # Skip execution if such a record already exists existing_records = self._list_records(rtype, name, content) if existing_records: return True # Build parameters for adding a new record params = { "domain-name": self.domain_id, "record-type": rtype, "host": self._relative_name(name), "record": content, } if self._get_lexicon_option("ttl"): params["ttl"] = self._get_lexicon_option("ttl") if self._get_lexicon_option("priority"): params["priority"] = self._get_lexicon_option("priority") if self._get_provider_option("weight"): params["weight"] = self._get_lexicon_option("weight") if self._get_provider_option("port"): params["port"] = self._get_lexicon_option("port") # Add new record by calling the ClouDNS API payload = self._post("/dns/add-record.json", params) LOGGER.debug("create_record: %s", payload) # Error handling is already covered by self._request return True def _list_records(self, rtype=None, name=None, content=None): # Build parameters to make use of the built-in API filtering params = {"domain-name": self.domain_id} if rtype: params["type"] = rtype if name: params["host"] = self._relative_name(name) # Fetch and parse all records for the given zone payload = self._get("/dns/records.json", params) payload = payload if not isinstance(payload, list) else {} records = [] for record in payload.values(): records.append( { "type": record["type"], "name": self._full_name(record["host"]), "ttl": record["ttl"], "content": record["record"], "id": record["id"], } ) # Filter by content manually as API does not support that if content: records = [record for record in records if record["content"] == content] # Print records as debug output and return them LOGGER.debug("list_records: %s", records) return records def _update_record(self, identifier, rtype=None, name=None, content=None): # Try to find record if no identifier was specified if not identifier: identifier = self._find_record_identifier(rtype, name, None) # Build parameters for updating an existing record params = {"domain-name": self.domain_id, "record-id": identifier} if name: params["host"] = self._relative_name(name) if content: params["record"] = content if self._get_lexicon_option("ttl"): params["ttl"] = self._get_lexicon_option("ttl") if self._get_lexicon_option("priority"): params["priority"] 
= self._get_lexicon_option("priority") if self._get_provider_option("weight"): params["weight"] = self._get_provider_option("weight") if self._get_provider_option("port"): params["port"] = self._get_provider_option("port") # Update existing record by calling the ClouDNS API payload = self._post("/dns/mod-record.json", params) LOGGER.debug("update_record: %s", payload) # Error handling is already covered by self._request return True def _delete_record(self, identifier=None, rtype=None, name=None, content=None): # Try to find record if no identifier was specified delete_record_id = [] if not identifier: records = self._list_records(rtype, name, content) delete_record_id = [record["id"] for record in records] else: delete_record_id.append(identifier) LOGGER.debug("delete_records: %s", delete_record_id) for record_id in delete_record_id: # Delete existing record by calling the ClouDNS API self._post( "/dns/delete-record.json", {"domain-name": self.domain_id, "record-id": record_id}, ) LOGGER.debug("delete_record: %s", True) # Error handling is already covered by self._request return True def _build_authentication_data(self): if not self._get_provider_option("auth_password"): raise Exception( "No valid authentication data passed, expected: auth-password" ) if self._get_provider_option("auth_id"): return { "auth-id": self._get_provider_option("auth_id"), "auth-password": self._get_provider_option("auth_password"), } if self._get_provider_option("auth_subid"): return { "sub-auth-id": self._get_provider_option("auth_subid"), "auth-password": self._get_provider_option("auth_password"), } if self._get_provider_option("auth_subuser"): return { "sub-auth-user": self._get_provider_option("auth_subuser"), "auth-password": self._get_provider_option("auth_password"), } if ( self._get_provider_option("auth_id") or self._get_provider_option("auth_subid") or self._get_provider_option("auth_subuser") ): # All the options were passed with a fallback value, return an empty dictionary. return {} raise Exception( "No valid authentication data passed, expected: auth-id, auth-subid, auth-subuser" ) def _find_record_identifier(self, rtype, name, content): records = self._list_records(rtype, name, content) LOGGER.debug("records: %s", records) if len(records) == 1: return records[0]["id"] raise Exception("Record identifier could not be found.") def _request(self, action="GET", url="/", data=None, query_params=None): # Set default values for missing arguments data = data if data else {} query_params = query_params if query_params else {} # Merge authentication data into request if action == "GET": query_params.update(self._build_authentication_data()) else: data.update(self._build_authentication_data()) # Fire request against ClouDNS API and parse result as JSON response = requests.request( action, self.api_endpoint + url, params=query_params, data=data ) response.raise_for_status() payload = response.json() # Check ClouDNS specific status code and description if ( "status" in payload and "statusDescription" in payload and payload["status"] != "Success" ): raise Exception( "ClouDNS API request has failed: " + payload["statusDescription"] ) # Return payload return payload
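To make the request flow above concrete, here is a standalone sketch of the same zone-info call the provider issues in _authenticate, using requests directly. AUTH_ID, AUTH_PASSWORD and DOMAIN are placeholder values, not real credentials; this only illustrates the API shape already used by the code above.

# Standalone sketch of the ClouDNS call made by _authenticate() above.
# AUTH_ID, AUTH_PASSWORD and DOMAIN are placeholders.
import requests

AUTH_ID = "12345"
AUTH_PASSWORD = "secret"
DOMAIN = "example.com"

# GET requests carry the authentication data as query parameters, mirroring
# _build_authentication_data() merged in by _request().
response = requests.get(
    "https://api.cloudns.net/dns/get-zone-info.json",
    params={
        "auth-id": AUTH_ID,
        "auth-password": AUTH_PASSWORD,
        "domain-name": DOMAIN,
    },
)
response.raise_for_status()
payload = response.json()

# The provider treats any payload with a non-"Success" status as an error.
if (
    "status" in payload
    and "statusDescription" in payload
    and payload["status"] != "Success"
):
    raise RuntimeError("ClouDNS API request has failed: " + payload["statusDescription"])

print(payload.get("name"))  # _authenticate() stores this value as the domain id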
PypiClean
/bigml_sensenet-0.7.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl/sensenet/models/settings.py
from sensenet.constants import WARP, PAD, CROP

COLOR_SPACES = ["bgr", "rgb", "bgra", "rgba"]
COLOR_SPACES += [f.upper() for f in COLOR_SPACES]

OPTIONAL = {
    "bounding_box_threshold": [1e-8, 1.0],
    "color_space": COLOR_SPACES,
    "extract_image_features": bool,
    "iou_threshold": [1e-8, 1.0],
    "load_pretrained_weights": bool,
    "max_objects": int,
    "output_unfiltered_boxes": bool,
    "pad_bounding_box_output": bool,
    "regression_normalize": bool,
    "rescale_type": [WARP, PAD, CROP],
}

REQUIRED = {}


class Settings(object):
    _required_attributes = REQUIRED

    _attribute_validators = {}
    _attribute_validators.update(OPTIONAL)
    _attribute_validators.update(REQUIRED)

    def __init__(self, amap):
        for key in self.__class__._attribute_validators.keys():
            if key not in amap:
                self.__setattr__(key, None)
            else:
                self.__setattr__(key, amap[key])

        for key in sorted(amap.keys()):
            if key not in self.__class__._attribute_validators:
                raise AttributeError('"%s" is not a valid field' % key)

    def __setattr__(self, name, value):
        if name not in self.__class__._attribute_validators:
            raise AttributeError('"%s" is not a valid field' % name)

        if value is not None:
            validator = self.__class__._attribute_validators[name]

            if type(validator) == list:
                if len(validator) == 2 and type(validator[0]) in [int, float]:
                    assert validator[0] <= value <= validator[1], (name, value)
                elif validator[0] == list:
                    assert type(value) == list, (name, value)
                    for v in value:
                        assert type(v) == validator[1]
                elif validator[0] == dict:
                    assert type(value) == dict, (name, value)
                    ktype, vtype = validator[1]
                    for key in value:
                        assert type(key) == ktype, (name, ktype, key)
                        assert type(value[key]) == vtype, (vtype, value[key])
                else:
                    assert value in validator, (name, value, validator)
            elif type(validator) == type:
                assert type(value) == validator, (name, value, validator)
            else:
                raise ValueError('Validator is "%s"' % str(validator))

        super().__setattr__(name, value)

    def __getattribute__(self, name):
        value = super().__getattribute__(name)

        if value is None and name in self.__class__._required_attributes:
            raise AttributeError('"%s" not in settings and is required' % name)

        return value


def ensure_settings(avalue):
    if type(avalue) == Settings:
        return avalue
    elif type(avalue) == dict:
        return Settings(avalue)
    elif avalue is None:
        return Settings({})
    else:
        raise ValueError('settings input type is "%s"' % type(avalue))
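For readers of the validator logic above, a minimal usage sketch follows; the option values are hypothetical examples, and the import path is assumed from this wheel's layout (sensenet/models/settings.py).

# Usage sketch for Settings/ensure_settings defined above.
# Option values are hypothetical; the import path is assumed.
from sensenet.models.settings import ensure_settings

settings = ensure_settings({
    "color_space": "rgb",   # must be one of COLOR_SPACES
    "iou_threshold": 0.5,   # range-checked against [1e-8, 1.0]
    "max_objects": 32,      # type-checked as int
})

print(settings.color_space)   # -> "rgb"
print(settings.rescale_type)  # unset optional fields read back as None

# Unknown keys are rejected with AttributeError('"..." is not a valid field')
try:
    ensure_settings({"not_a_real_option": True})
except AttributeError as err:
    print(err)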
PypiClean
/kabirrec-1.0.11.tar.gz/kabirrec-1.0.11/dataset/ml-1m/README
SUMMARY
================================================================================

These files contain 1,000,209 anonymous ratings of approximately 3,900 movies
made by 6,040 MovieLens users who joined MovieLens in 2000.

USAGE LICENSE
================================================================================

Neither the University of Minnesota nor any of the researchers involved can
guarantee the correctness of the data, its suitability for any particular
purpose, or the validity of results based on the use of the data set. The data
set may be used for any research purposes under the following conditions:

     * The user may not state or imply any endorsement from the University of
       Minnesota or the GroupLens Research Group.

     * The user must acknowledge the use of the data set in publications
       resulting from the use of the data set (see below for citation
       information).

     * The user may not redistribute the data without separate permission.

     * The user may not use this information for any commercial or
       revenue-bearing purposes without first obtaining permission from a
       faculty member of the GroupLens Research Project at the University of
       Minnesota.

If you have any further questions or comments, please contact GroupLens
<[email protected]>.

CITATION
================================================================================

To acknowledge use of the dataset in publications, please cite the following
paper:

F. Maxwell Harper and Joseph A. Konstan. 2015. The MovieLens Datasets: History
and Context. ACM Transactions on Interactive Intelligent Systems (TiiS) 5, 4,
Article 19 (December 2015), 19 pages. DOI=http://dx.doi.org/10.1145/2827872

ACKNOWLEDGEMENTS
================================================================================

Thanks to Shyong Lam and Jon Herlocker for cleaning up and generating the
data set.

FURTHER INFORMATION ABOUT THE GROUPLENS RESEARCH PROJECT
================================================================================

The GroupLens Research Project is a research group in the Department of
Computer Science and Engineering at the University of Minnesota. Members of
the GroupLens Research Project are involved in many research projects related
to the fields of information filtering, collaborative filtering, and
recommender systems. The project is led by professors John Riedl and Joseph
Konstan. The project began to explore automated collaborative filtering in
1992, but is most well known for its world wide trial of an automated
collaborative filtering system for Usenet news in 1996. Since then the project
has expanded its scope to research overall information filtering solutions,
integrating in content-based methods as well as improving current
collaborative filtering technology.

Further information on the GroupLens Research project, including research
publications, can be found at the following web site:
        http://www.grouplens.org/

GroupLens Research currently operates a movie recommender based on
collaborative filtering:
        http://www.movielens.org/

RATINGS FILE DESCRIPTION
================================================================================

All ratings are contained in the file "ratings.dat" and are in the following
format:

UserID::MovieID::Rating::Timestamp

- UserIDs range between 1 and 6040
- MovieIDs range between 1 and 3952
- Ratings are made on a 5-star scale (whole-star ratings only)
- Timestamp is represented in seconds since the epoch as returned by time(2)
- Each user has at least 20 ratings

USERS FILE DESCRIPTION
================================================================================

User information is in the file "users.dat" and is in the following format:

UserID::Gender::Age::Occupation::Zip-code

All demographic information is provided voluntarily by the users and is not
checked for accuracy. Only users who have provided some demographic
information are included in this data set.

- Gender is denoted by a "M" for male and "F" for female
- Age is chosen from the following ranges:

     *  1: "Under 18"
     * 18: "18-24"
     * 25: "25-34"
     * 35: "35-44"
     * 45: "45-49"
     * 50: "50-55"
     * 56: "56+"

- Occupation is chosen from the following choices:

     *  0: "other" or not specified
     *  1: "academic/educator"
     *  2: "artist"
     *  3: "clerical/admin"
     *  4: "college/grad student"
     *  5: "customer service"
     *  6: "doctor/health care"
     *  7: "executive/managerial"
     *  8: "farmer"
     *  9: "homemaker"
     * 10: "K-12 student"
     * 11: "lawyer"
     * 12: "programmer"
     * 13: "retired"
     * 14: "sales/marketing"
     * 15: "scientist"
     * 16: "self-employed"
     * 17: "technician/engineer"
     * 18: "tradesman/craftsman"
     * 19: "unemployed"
     * 20: "writer"

MOVIES FILE DESCRIPTION
================================================================================

Movie information is in the file "movies.dat" and is in the following format:

MovieID::Title::Genres

- Titles are identical to titles provided by the IMDB (including year of
  release)
- Genres are pipe-separated and are selected from the following genres:

     * Action
     * Adventure
     * Animation
     * Children's
     * Comedy
     * Crime
     * Documentary
     * Drama
     * Fantasy
     * Film-Noir
     * Horror
     * Musical
     * Mystery
     * Romance
     * Sci-Fi
     * Thriller
     * War
     * Western

- Some MovieIDs do not correspond to a movie due to accidental duplicate
  entries and/or test entries
- Movies are mostly entered by hand, so errors and inconsistencies may exist
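Because all three files share the same "::"-delimited layout described above, a short loading sketch may be useful; pandas is an assumed convenience here (it is not part of the dataset), and the latin-1 encoding is a precaution for non-ASCII movie titles.

# Sketch: loading the ML-1M files described above with pandas (assumed dependency).
import pandas as pd

ratings = pd.read_csv(
    "ratings.dat", sep="::", engine="python",
    names=["UserID", "MovieID", "Rating", "Timestamp"], encoding="latin-1",
)
users = pd.read_csv(
    "users.dat", sep="::", engine="python",
    names=["UserID", "Gender", "Age", "Occupation", "Zip-code"], encoding="latin-1",
)
movies = pd.read_csv(
    "movies.dat", sep="::", engine="python",
    names=["MovieID", "Title", "Genres"], encoding="latin-1",
)

print(ratings.shape)  # expect (1000209, 4) per the summary above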
PypiClean
/ceilometer-18.0.0.0rc1.tar.gz/ceilometer-18.0.0.0rc1/HACKING.rst
Ceilometer Style Commandments
=============================

- Step 1: Read the OpenStack Style Commandments
  https://docs.openstack.org/hacking/latest/
- Step 2: Read on

Ceilometer Specific Commandments
--------------------------------

- [C301] LOG.warn() is not allowed. Use LOG.warning()
- [C302] Deprecated library function os.popen()

Creating Unit Tests
-------------------

For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.

All unittest classes must ultimately inherit from testtools.TestCase.

All setUp and tearDown methods must upcall using the super() method.
tearDown methods should be avoided and addCleanup calls should be preferred.

Never manually create tempfiles. Always use the tempfile fixtures from
the fixture library to ensure that they are cleaned up.
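A minimal sketch of a test written to these commandments is shown below; the class and method names are illustrative, and only testtools and the fixtures library (both already required by the text above) are assumed.

# Illustrative test following the commandments above: inherit from
# testtools.TestCase, upcall setUp via super(), prefer addCleanup over
# tearDown, and use a fixture instead of creating tempfiles by hand.
import fixtures
import testtools


class TestExample(testtools.TestCase):
    def setUp(self):
        super(TestExample, self).setUp()
        # Temporary directory from the fixtures library; cleaned up automatically.
        self.temp_dir = self.useFixture(fixtures.TempDir()).path
        # Prefer addCleanup over defining a tearDown method.
        self.addCleanup(self._reset_state)

    def _reset_state(self):
        pass  # placeholder for test-specific cleanup

    def test_feature(self):
        self.assertTrue(self.temp_dir)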
PypiClean
/batoid-0.3.7.tar.gz/batoid-0.3.7/pybind11/tools/mkdoc.py
import os import sys import platform import re import textwrap from clang import cindex from clang.cindex import CursorKind from collections import OrderedDict from glob import glob from threading import Thread, Semaphore from multiprocessing import cpu_count RECURSE_LIST = [ CursorKind.TRANSLATION_UNIT, CursorKind.NAMESPACE, CursorKind.CLASS_DECL, CursorKind.STRUCT_DECL, CursorKind.ENUM_DECL, CursorKind.CLASS_TEMPLATE ] PRINT_LIST = [ CursorKind.CLASS_DECL, CursorKind.STRUCT_DECL, CursorKind.ENUM_DECL, CursorKind.ENUM_CONSTANT_DECL, CursorKind.CLASS_TEMPLATE, CursorKind.FUNCTION_DECL, CursorKind.FUNCTION_TEMPLATE, CursorKind.CONVERSION_FUNCTION, CursorKind.CXX_METHOD, CursorKind.CONSTRUCTOR, CursorKind.FIELD_DECL ] PREFIX_BLACKLIST = [ CursorKind.TRANSLATION_UNIT ] CPP_OPERATORS = { '<=': 'le', '>=': 'ge', '==': 'eq', '!=': 'ne', '[]': 'array', '+=': 'iadd', '-=': 'isub', '*=': 'imul', '/=': 'idiv', '%=': 'imod', '&=': 'iand', '|=': 'ior', '^=': 'ixor', '<<=': 'ilshift', '>>=': 'irshift', '++': 'inc', '--': 'dec', '<<': 'lshift', '>>': 'rshift', '&&': 'land', '||': 'lor', '!': 'lnot', '~': 'bnot', '&': 'band', '|': 'bor', '+': 'add', '-': 'sub', '*': 'mul', '/': 'div', '%': 'mod', '<': 'lt', '>': 'gt', '=': 'assign', '()': 'call' } CPP_OPERATORS = OrderedDict( sorted(CPP_OPERATORS.items(), key=lambda t: -len(t[0]))) job_count = cpu_count() job_semaphore = Semaphore(job_count) class NoFilenamesError(ValueError): pass def d(s): return s if isinstance(s, str) else s.decode('utf8') def sanitize_name(name): name = re.sub(r'type-parameter-0-([0-9]+)', r'T\1', name) for k, v in CPP_OPERATORS.items(): name = name.replace('operator%s' % k, 'operator_%s' % v) name = re.sub('<.*>', '', name) name = ''.join([ch if ch.isalnum() else '_' for ch in name]) name = re.sub('_$', '', re.sub('_+', '_', name)) return '__doc_' + name def process_comment(comment): result = '' # Remove C++ comment syntax leading_spaces = float('inf') for s in comment.expandtabs(tabsize=4).splitlines(): s = s.strip() if s.startswith('/*'): s = s[2:].lstrip('*') elif s.endswith('*/'): s = s[:-2].rstrip('*') elif s.startswith('///'): s = s[3:] if s.startswith('*'): s = s[1:] if len(s) > 0: leading_spaces = min(leading_spaces, len(s) - len(s.lstrip())) result += s + '\n' if leading_spaces != float('inf'): result2 = "" for s in result.splitlines(): result2 += s[leading_spaces:] + '\n' result = result2 # Doxygen tags cpp_group = '([\w:]+)' param_group = '([\[\w:\]]+)' s = result s = re.sub(r'\\c\s+%s' % cpp_group, r'``\1``', s) s = re.sub(r'\\a\s+%s' % cpp_group, r'*\1*', s) s = re.sub(r'\\e\s+%s' % cpp_group, r'*\1*', s) s = re.sub(r'\\em\s+%s' % cpp_group, r'*\1*', s) s = re.sub(r'\\b\s+%s' % cpp_group, r'**\1**', s) s = re.sub(r'\\ingroup\s+%s' % cpp_group, r'', s) s = re.sub(r'\\param%s?\s+%s' % (param_group, cpp_group), r'\n\n$Parameter ``\2``:\n\n', s) s = re.sub(r'\\tparam%s?\s+%s' % (param_group, cpp_group), r'\n\n$Template parameter ``\2``:\n\n', s) for in_, out_ in { 'return': 'Returns', 'author': 'Author', 'authors': 'Authors', 'copyright': 'Copyright', 'date': 'Date', 'remark': 'Remark', 'sa': 'See also', 'see': 'See also', 'extends': 'Extends', 'throw': 'Throws', 'throws': 'Throws' }.items(): s = re.sub(r'\\%s\s*' % in_, r'\n\n$%s:\n\n' % out_, s) s = re.sub(r'\\details\s*', r'\n\n', s) s = re.sub(r'\\brief\s*', r'', s) s = re.sub(r'\\short\s*', r'', s) s = re.sub(r'\\ref\s*', r'', s) s = re.sub(r'\\code\s?(.*?)\s?\\endcode', r"```\n\1\n```\n", s, flags=re.DOTALL) # HTML/TeX tags s = re.sub(r'<tt>(.*?)</tt>', r'``\1``', s, 
flags=re.DOTALL) s = re.sub(r'<pre>(.*?)</pre>', r"```\n\1\n```\n", s, flags=re.DOTALL) s = re.sub(r'<em>(.*?)</em>', r'*\1*', s, flags=re.DOTALL) s = re.sub(r'<b>(.*?)</b>', r'**\1**', s, flags=re.DOTALL) s = re.sub(r'\\f\$(.*?)\\f\$', r'$\1$', s, flags=re.DOTALL) s = re.sub(r'<li>', r'\n\n* ', s) s = re.sub(r'</?ul>', r'', s) s = re.sub(r'</li>', r'\n\n', s) s = s.replace('``true``', '``True``') s = s.replace('``false``', '``False``') # Re-flow text wrapper = textwrap.TextWrapper() wrapper.expand_tabs = True wrapper.replace_whitespace = True wrapper.drop_whitespace = True wrapper.width = 70 wrapper.initial_indent = wrapper.subsequent_indent = '' result = '' in_code_segment = False for x in re.split(r'(```)', s): if x == '```': if not in_code_segment: result += '```\n' else: result += '\n```\n\n' in_code_segment = not in_code_segment elif in_code_segment: result += x.strip() else: for y in re.split(r'(?: *\n *){2,}', x): wrapped = wrapper.fill(re.sub(r'\s+', ' ', y).strip()) if len(wrapped) > 0 and wrapped[0] == '$': result += wrapped[1:] + '\n' wrapper.initial_indent = \ wrapper.subsequent_indent = ' ' * 4 else: if len(wrapped) > 0: result += wrapped + '\n\n' wrapper.initial_indent = wrapper.subsequent_indent = '' return result.rstrip().lstrip('\n') def extract(filename, node, prefix, output): if not (node.location.file is None or os.path.samefile(d(node.location.file.name), filename)): return 0 if node.kind in RECURSE_LIST: sub_prefix = prefix if node.kind not in PREFIX_BLACKLIST: if len(sub_prefix) > 0: sub_prefix += '_' sub_prefix += d(node.spelling) for i in node.get_children(): extract(filename, i, sub_prefix, output) if node.kind in PRINT_LIST: comment = d(node.raw_comment) if node.raw_comment is not None else '' comment = process_comment(comment) sub_prefix = prefix if len(sub_prefix) > 0: sub_prefix += '_' if len(node.spelling) > 0: name = sanitize_name(sub_prefix + d(node.spelling)) output.append((name, filename, comment)) class ExtractionThread(Thread): def __init__(self, filename, parameters, output): Thread.__init__(self) self.filename = filename self.parameters = parameters self.output = output job_semaphore.acquire() def run(self): print('Processing "%s" ..' % self.filename, file=sys.stderr) try: index = cindex.Index( cindex.conf.lib.clang_createIndex(False, True)) tu = index.parse(self.filename, self.parameters) extract(self.filename, tu.cursor, '', self.output) finally: job_semaphore.release() def read_args(args): parameters = [] filenames = [] if "-x" not in args: parameters.extend(['-x', 'c++']) if not any(it.startswith("-std=") for it in args): parameters.append('-std=c++11') if platform.system() == 'Darwin': dev_path = '/Applications/Xcode.app/Contents/Developer/' lib_dir = dev_path + 'Toolchains/XcodeDefault.xctoolchain/usr/lib/' sdk_dir = dev_path + 'Platforms/MacOSX.platform/Developer/SDKs' libclang = lib_dir + 'libclang.dylib' if os.path.exists(libclang): cindex.Config.set_library_path(os.path.dirname(libclang)) if os.path.exists(sdk_dir): sysroot_dir = os.path.join(sdk_dir, next(os.walk(sdk_dir))[1][0]) parameters.append('-isysroot') parameters.append(sysroot_dir) elif platform.system() == 'Linux': # clang doesn't find its own base includes by default on Linux, # but different distros install them in different paths. # Try to autodetect, preferring the highest numbered version. 
def clang_folder_version(d): return [int(ver) for ver in re.findall(r'(?<!lib)(?<!\d)\d+', d)] clang_include_dir = max(( path for libdir in ['lib64', 'lib', 'lib32'] for path in glob('/usr/%s/clang/*/include' % libdir) if os.path.isdir(path) ), default=None, key=clang_folder_version) if clang_include_dir: parameters.extend(['-isystem', clang_include_dir]) for item in args: if item.startswith('-'): parameters.append(item) else: filenames.append(item) if len(filenames) == 0: raise NoFilenamesError("args parameter did not contain any filenames") return parameters, filenames def extract_all(args): parameters, filenames = read_args(args) output = [] for filename in filenames: thr = ExtractionThread(filename, parameters, output) thr.start() print('Waiting for jobs to finish ..', file=sys.stderr) for i in range(job_count): job_semaphore.acquire() return output def write_header(comments, out_file=sys.stdout): print('''/* This file contains docstrings for the Python bindings. Do not edit! These were automatically extracted by mkdoc.py */ #define __EXPAND(x) x #define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT #define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1)) #define __CAT1(a, b) a ## b #define __CAT2(a, b) __CAT1(a, b) #define __DOC1(n1) __doc_##n1 #define __DOC2(n1, n2) __doc_##n1##_##n2 #define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3 #define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4 #define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5 #define __DOC6(n1, n2, n3, n4, n5, n6) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6 #define __DOC7(n1, n2, n3, n4, n5, n6, n7) __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7 #define DOC(...) __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__)) #if defined(__GNUG__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-variable" #endif ''', file=out_file) name_ctr = 1 name_prev = None for name, _, comment in list(sorted(comments, key=lambda x: (x[0], x[1]))): if name == name_prev: name_ctr += 1 name = name + "_%i" % name_ctr else: name_prev = name name_ctr = 1 print('\nstatic const char *%s =%sR"doc(%s)doc";' % (name, '\n' if '\n' in comment else ' ', comment), file=out_file) print(''' #if defined(__GNUG__) #pragma GCC diagnostic pop #endif ''', file=out_file) def mkdoc(args): args = list(args) out_path = None for idx, arg in enumerate(args): if arg.startswith("-o"): args.remove(arg) try: out_path = arg[2:] or args.pop(idx) except IndexError: print("-o flag requires an argument") exit(-1) break comments = extract_all(args) if out_path: try: with open(out_path, 'w') as out_file: write_header(comments, out_file) except: # In the event of an error, don't leave a partially-written # output file. try: os.unlink(out_path) except: pass raise else: write_header(comments) if __name__ == '__main__': try: mkdoc(sys.argv[1:]) except NoFilenamesError: print('Syntax: %s [.. a list of header files ..]' % sys.argv[0]) exit(-1)
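The script above can also be driven programmatically; the sketch below assumes the file is importable as a module named mkdoc, that the clang Python bindings and libclang are available, and that the header and output paths are placeholders.

# Sketch: calling mkdoc() directly instead of via the command line.
# "include/geometry.h" and "docstrings.h" are placeholder paths.
from mkdoc import mkdoc

# Equivalent to: python mkdoc.py -std=c++14 -Iinclude -o docstrings.h include/geometry.h
mkdoc([
    "-std=c++14",          # forwarded to clang; -std=c++11 is added if omitted
    "-Iinclude",           # any argument starting with '-' is passed through to clang
    "-o", "docstrings.h",  # write the generated header here instead of stdout
    "include/geometry.h",  # positional arguments are parsed as header files
])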
PypiClean
/FAdo3-1.0.tar.gz/FAdo3-1.0/FAdo/witness.py
from __future__ import absolute_import from __future__ import division from __future__ import unicode_literals from __future__ import print_function from future import standard_library standard_library.install_aliases() from builtins import range from builtins import * from past.utils import old_div from . import fa from .comboperations import * # Useful automata def emptyDFA(sigma=None): """ Returns the minimal DFA for emptyset (incomplete) :param sigma: :return: """ d = DFA() if sigma is not None: d.setSigma(sigma) i = d.addState() d.setInitial(i) return d def epsilonDFA(sigma=None): """ Returns the minimal DFA for {epsilon} (incomplete) :param sigma: :return: """ d = DFA() if sigma is not None: d.setSigma(sigma) i = d.addState() d.setInitial(i) d.addFinal(i) return d ### Worst case automata for each operation witnessDFA = {"toDFA": [("toDFAWCMF", "int"), ("toDFAWC2", "int"), ("toDFAWC3", "int")], "reversal": [("reversalWC3M", "int"), ("reversalMB", "int"), ("reversalWC3L", "int"), ("reversalternaryWC", "int"), ("reversalbinaryWC", "int")], "star": [("starWC", "int"), ("starWCM", "int")], "concat": [("concatWC", "int", "int"), ("concatWCM", "int", "int")], "conjunction": [("interWC", "int", "int")], "__or__": [("disjWC", "int", "int")], "shuffle": [("shuffleWC", "int", "int")], "starDisj": [("starDisjWC", "int", "int")], "starInter": [("starInterBC", "int", "int")], "disjWStar": [("disjWStarWC", "int", "int")]} def toDFAWC2MF(m=5): """ Worst case automata for toDFA(NFA) with n > 2, k=2 ..seealso:: A. R. Meyer and M. J. Fischer. Economy of description by automata, grammars, and formal systems. Twelfth Annual Symposium on Switching and Automata Theory, 1971, 188–191. IEEE Society Press. :param m: number of states :type m: integer :return: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") f = NFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial([0]) f.addFinal(0) f.addTransition(0, "a", 1) for i in range(1, m): f.addTransition(i, "a", (i + 1) % m) f.addTransition(i, "b", i) f.addTransition(i, "b", 0) return f def toDFAWC2(m=5): """ Worst case automata for toDFA(NFA) with n > 2, k=2 ..seealso:: F.R. Moore. On the bounds for state-set size in the proofs of equivalence between deterministic, nondeterministic, and two-way finite automata. IEEE Transactions on computers, 2:1211–1214, 1971. :arg m: number of states :type m: integer :returns: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") f = NFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial([0]) f.addFinal(m - 1) f.addTransition(0, "a", 1) f.addTransition(0, "b", 0) f.addTransition(m - 1, "a", 0) f.addTransition(m - 1, "a", 1) for i in range(1, m - 1): f.addTransition(i, "a", i + 1) f.addTransition(i, "b", i + 1) return f def toDFAWC3(m=5): """ Worst case automata for toDFA(NFA) with n > 2, k=3. ..seealso:: O. B. Lupanov. A comparison of two types of finite sources. Problemy Kibernetiki, 9:321–326, 1963. 
:arg m: number of states :type m: integer :returns: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") f = NFA() f.setSigma(["a", "b", "c"]) f.States = list(range(m)) f.setInitial([0]) f.addFinal(0) f.addTransition(0, "a", 1) f.addTransition(0, "b", 1) f.addTransition(1, "b", 0) f.addTransition(1, "c", 0) f.addTransition(1, "c", 1) f.addTransition(1, "a", 2) f.addTransition(m - 1, "a", 0) f.addTransition(m - 1, "b", m - 1) f.addTransition(m - 1, "c", m - 1) for i in range(2, m - 1): f.addTransition(i, "a", i + 1) f.addTransition(i, "b", i) f.addTransition(i, "c", i) return f def reversalWC3M(m=5): """ Worst case automata for reversal(DFA) with m > 2, k=3. ..seealso:: Boris G. Mirkin. On dual automata. Kibernetika, 2:7–10, 1966. :arg m: number of states :type m: integer :returns: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") return toDFAWC3(m).reversal() def starSC(m=5): """ Worst case state complexity for star :arg m: number of states :type m: integer :returns: state complexity :rtype: integer""" if m > 1: return 3 * 2 ** (m - 2) return 1 def starWC(m=5): """ Worst case automata for star(DFA) with m > 2, k=2 ..seealso:: S. Yu, Q. Zhuang, and K. Salomaa. The state complexities of some basic operations on regular languages. Theor. Comput. Sci., 125(2):315–328, 1994. :arg m: number of states :type m: integer :returns: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") # for m=2, L=\{w\in\{a,b\}*| |w|a odd \} f = DFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(m - 1) f.addTransition(0, "a", 1) f.addTransition(0, "b", 0) for i in range(1, m): f.addTransition(i, "a", (i + 1) % m) f.addTransition(i, "b", (i + 1) % m) return f def starWCM(m=5): """ Worst case automata for star(DFA) with m > 2, k=2 ..seealso:: A. N. Maslov. Estimates of the number of states of finite automata. Dokllady Akademii Nauk SSSR, 194:1266–1268, 1970. :arg m: number of states :type m: integer :returns: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") f = DFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(m - 1) f.addTransition(m - 1, "a", 0) f.addTransition(m - 1, "b", m - 2) f.addTransition(0, "b", 0) f.addTransition(0, "a", 1) for i in range(1, m - 1): f.addTransition(i, "a", (i + 1)) f.addTransition(i, "b", (i - 1)) return f def concatSC(m, n, k=1): """Worst case state complecity for concatenation :arg m: number of states :arg n: number of states :arg k: number of letters :type m: integer :type n: integer :type k: integer :returns: state compelxity :rtype: integer""" return m * 2 ** n - k * 2 ** (n - 1) def concatWCM(m=4, n=4): """ Worst case automata for catenation(DFA,DFA) with m,n > 1, k=2, ..seealso:: A. N. Maslov. Estimates of the number of states of finite automata. Dokllady Akademii Nauk SSSR, 194:1266–1268, 1970. 
:arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA, DFA)""" if n < 2 or m < 2: raise TestsError("number of states must be both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(m - 1) d1.addTransition(m - 1, "b", 0) d1.addTransition(m - 1, "a", m - 1) for i in range(m - 1): d1.addTransition(i, "a", i) d1.addTransition(i, "b", i + 1) d2.setSigma(["a", "b"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(n - 1) d2.addTransition(n - 1, "a", n - 1) d2.addTransition(n - 1, "b", n - 2) d2.addTransition(n - 2, "b", n - 1) d2.addTransition(n - 2, "a", n - 1) for i in range(n - 2): d2.addTransition(i, "a", i + 1) d2.addTransition(i, "b", i) return d1, d2 def concatWC(m=6, n=6): """ Worst case automata for catenation(DFA,DFA) with m,n > 1 ..seealso:: S. Yu, Q. Zhuang, and K. Salomaa. The state complexities of some basic operations on regular languages. Theor. Comput. Sci., 125(2):315–328, 1994. :arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA, DFA)""" if n < 2 or m < 2: raise TestsError("number of states must both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b", "c"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(m - 1) for i in range(m): d1.addTransition(i, "a", (i + 1) % m) d1.addTransition(i, "b", 0) d1.addTransition(i, "c", i) d2.setSigma(["a", "b", "c"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(n - 1) for i in range(n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) d2.addTransition(i, "c", 1) return d1, d2 def interWC(m=6, n=5): """ Worst case automata for intersection(DFA,DFA) with m,n >1 ..seealso:: S. Yu, Q. Zhuang, and K. Salomaa. The state complexities of some basic operations on regular languages. Theor. Comput. Sci., 125(2):315–328, 1994. :arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA, DFA)""" if n < 2 or m < 2: raise TestsError("number of states must be both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(0) for i in range(m): d1.addTransition(i, "a", (i + 1) % m) d1.addTransition(i, "b", i) d2.setSigma(["a", "b"]) d2.States = list(range(m)) d2.setInitial(0) d2.addFinal(0) for i in range(n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) return d1, d2 def disjWC(m=6, n=5): """ Worst case automata for disjunction(DFA,DFA) with m,n >1 ..seealso:: S. Yu, Q. Zhuang, and K. Salomaa. The state complexities of some basic operations on regular languages. Theor. Comput. Sci., 125(2):315–328, 1994. :arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA, DFA)""" if n < 2 or m < 2: raise TestsError("number of states must be both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b"]) d1.States = list(range(m)) d1.setInitial(0) d1.addTransition(0, "a", 1) d1.addTransition(0, "b", 0) for i in range(1, m): d1.addTransition(i, "a", (i + 1) % m) d1.addTransition(i, "b", i) d1.addFinal(i) d2.setSigma(["a", "b"]) d2.States = list(range(m)) d2.setInitial(0) d2.addTransition(0, "b", 1) d2.addTransition(0, "a", 0) for i in range(n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) d2.addFinal(0) return d1, d2 def reversalMB(m=8): """Worst case automata for reversal(DFA) ..seealso:: S. Yu, Q. Zhuang, and K. 
Salomaa. The state complexities of some basic operations on regular languages. Theor. Comput. Sci., 125(2):315–328, 1994. :arg m: number of states :type m: integer :returns: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") d = DFA() d.setSigma(["a", "b"]) d.States = list(range(m)) d.setInitial(0) for i in range(m): if i == m - 1: d.addTransition(m - 1, "a", 0) else: d.addTransition(i, "a", i + 1) if i == 2: d.addTransition(2, "b", 0) elif i == 3: d.addTransition(3, "b", 2) else: d.addTransition(i, "b", i) return d def reversalWC3L(m=5): """ Worst case automata for reversal(DFA) with m > 2, k=3 ..seealso:: E. L. Leiss. Succinct representation of regular languages by boolean automata ii. Theor. Comput. Sci., 38:133–136, 1985. :arg m: number of states :type m: integer :returns: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") f = DFA() f.setSigma(["a", "b", "c"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(0) f.addTransition(0, "b", 1) f.addTransition(1, "b", 0) f.addTransition(0, "a", 1) f.addTransition(1, "a", 2) f.addTransition(0, "c", m - 1) f.addTransition(1, "c", 1) for i in range(2, m): f.addTransition(i, "a", (i + 1) % m) f.addTransition(i, "b", i) f.addTransition(i, "c", i) return f def reversalternaryWC(m=5): """Worst case automata for reversal(DFA) ternary alphabet :arg m: number of states :type m: integer :returns: a dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") d = DFA() d.setSigma(["a", "b", "c"]) d.setInitial(0) d.addFinal(0) d.States = list(range(m)) d.addTransition(0, "a", m - 1) d.addTransition(0, "c", 0) d.addTransition(0, "b", 0) d.addTransition(1, "c", m - 1) d.addTransition(1, "b", 0) d.addTransition(1, "a", 0) for i in range(2, m): d.addTransition(i, "a", i - 1) d.addTransition(i, "c", i - 1) d.addTransition(i, "b", i) return d def reversalbinaryWC(m=5): """Worst case automata for reversal(DFA) binary ..seealso:: G. Jir{\'a}skov{\'a} and J. S\v ebej. Note on Reversal of binary regular languages. Proc. DCFS 2011, LNCS 6808, Springer, pp 212-221. @arg m: number of states @type m: integer @returns: a dfa @rtype: DFA""" if m < 2: raise TestsError("number of states must be greater than 1") d = DFA() d.setSigma(["a", "b"]) d.States = list(range(m)) d.setInitial(0) d.addFinal(m - 1) d.addTransition(0, "a", 1) d.addTransition(0, "b", 0) d.addTransition(1, "b", 0) if m == 2: d.addTransition(1, "a", 0) else: d.addTransition(1, "a", 2) d.addTransition(2, "a", 0) if m == 3: d.addTransition(2, "b", 2) else: d.addTransition(2, "b", 3) d.addTransition(3, "b", 2) d.addTransition(3, "a", 4) d.addTransition(m - 1, "a", 3) d.addTransition(m - 1, "b", m - 1) for i in range(4, m - 1): d.addTransition(i, "a", i + 1) d.addTransition(i, "b", i) return d def shuffleWC(m=3, n=3): """Worst case automata for shuffle(DFA,DFA) with m.n>1 ..seealso:: C. Campeanu, K. Salomaa, and S. Yu. Tight lower bound for the state complexity of shuffle of regular languages. Journal of Automata, Languages and Combinatorics, 7(3):303–310, 2002. 
:arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA, DFA)""" if n < 2 or m < 2: raise TestsError("number of states must be both greater than 1") d1, d2 = DFA(), DFA() d1.States = list(range(m)) d1.setSigma(["a", "b", "c", "d", "f"]) d1.setInitial(0) d1.addFinal(0) for i in range(m): d1.addTransition(i, "a", (i + 1) % m) if i != m - 1: d1.addTransition(i, "c", i + 1) d1.addTransition(i, "d", i) if i != 0: d1.addTransition(i, "f", i) d2.States = list(range(n)) d2.setSigma(["a", "b", "c", "d", "f"]) d2.setInitial(0) d2.addFinal(0) for i in range(n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "c", i) if i != n - 1: d2.addTransition(i, "d", i + 1) if i != 0: d2.addTransition(i, "f", i) return d1, d2 def starDisjWC(m=6, n=5): """Worst case automata for starDisj(DFA,DFA) with m.n>1 ..seealso: Arto Salomaa, Kai Salomaa, and Sheng Yu. 'State complexity of combined operations'. Theor. Comput. Sci., 383(2-3):140–152, 2007. :arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA,DFA)""" if n < 2 or m < 2: raise TestsError("number of states must be both greater than 1") d1, d2 = DFA(), DFA() d1.States = list(range(m)) d1.setSigma(["a", "b", "c"]) d1.setInitial(0) d1.addFinal(0) for i in range(m): d1.addTransition(i, "a", (i + 1) % m) d1.addTransition(i, "b", i) if i != 0: d1.addTransition(i, "c", i) d1.addTransition(0, "c", 1) d2.States = list(range(n)) d2.setSigma(["a", "b", "c"]) d2.setInitial(0) d2.addFinal(0) for i in range(n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) if i != 0: d2.addTransition(i, "c", i) d2.addTransition(0, "c", 1) return d1, d2 def starInterBC(m=3, n=3): """Bad case automata for starInter(DFA,DFA) with m,n>1 ..seealso:: Arto Salomaa, Kai Salomaa, and Sheng Yu. 'State complexity of combined operations'. Theor. Comput. Sci., 383(2-3):140–152, 2007. :arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA,DFA)""" if n < 2 or m < 2: raise TestsError("number of states must be both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b", "c", "d", "e"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(m - 1) for i in range(m): d1.addTransition(i, "a", (i + 1) % m) d1.addTransition(i, "b", i) d1.addTransition(i, "c", i) d1.addTransition(i, "d", i) d1.addTransition(i, "e", i) d2.setSigma(["a", "b", "c", "d", "e"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(n - 1) for i in range(n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) d2.addTransition(i, "c", n - 2) if i == n - 2: d2.addTransition(i, "d", n - 1) elif i == n - 1: d2.addTransition(i, "d", n - 2) else: d2.addTransition(i, "d", i) if i > n - 4: d2.addTransition(i, "e", i) else: d2.addTransition(i, "e", i + 1) return d1, d2 def disjWStarWC(m=6, n=5): """ ..seealso:: Yuan Gao and Sheng Yu. 'State complexity of union and intersection combined with star and reversal'. CoRR, abs/1006.3755, 2010. 
:arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA,DFA)""" if n < 3 or m < 3: raise TestsError("number of states must be greater than 2") f1 = DFA() f1.setSigma(["a", "b", "c"]) f1.States = list(range(m)) f1.setInitial(0) f1.addFinal(m - 1) f1.addTransition(0, "a", 1) f1.addTransition(0, "b", 0) f1.addTransition(0, "c", 0) for i in range(1, m): f1.addTransition(i, "a", (i + 1) % m) f1.addTransition(i, "b", (i + 1) % m) f1.addTransition(i, "c", i) f2 = DFA() f2.setSigma(["a", "b", "c"]) f2.States = list(range(n)) f2.setInitial(0) f2.addFinal(n - 1) for i in range(n): f2.addTransition(i, "a", i) f2.addTransition(i, "b", i) f2.addTransition(i, "c", (i + 1) % n) return f1, f2 ### worst cases for transition complexity ####### UNION ###### def unionWCTk2(m=6, n=6): """ @ worst-case family union where @m>=2 and n>=2 and k=2 ..seealso:: Gao, Y., Salomaa, K., Yu, S.: Transition complexity of incomplete dfas. Fundam. Inform. 110(1-4), 143–158 (2011) @ the conjecture in this article fails for this family :arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA,DFA)""" if n < 2 or m < 2: raise TestsError("number of states must both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(0) d1.addTransition(m - 1, "a", 0) for i in range(0, m - 1): d1.addTransition(i, "b", i + 1) d2.setSigma(["a", "b"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(n - 1) d2.addTransition(n - 1, "b", n - 1) for i in range(0, n - 1): d2.addTransition(i, "a", i + 1) d2.addTransition(i, "b", i) return d1, d2 def unionWCT2(n=6): """ @ worst-case family union where @m=1 and n>=2 and k=3 @ Note that the same happens to m>=2 and n=1 :arg n: number of states :type n: integer :returns: two dfas :rtype: (DFA,DFA)""" m = 1 if n < 2: raise TestsError("number of states must both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b", "c"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(0) d1.addTransition(0, "b", 0) d1.addTransition(0, "c", 0) d2.setSigma(["a", "b", "c"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(n - 1) d2.addTransition(0, "a", 0) d2.addTransition(0, "b", 1) for i in range(1, n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) d2.addTransition(i, "c", 1) return d1, d2 def unionWCT(m=6, n=6): """ @ worst-case family union where @m>=2 and n>=2 and k=3 :arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA,DFA)""" if n < 2 or m < 2: raise TestsError("number of states must both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b", "c"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(m - 1) d1.addTransition(0, "a", 1) d1.addTransition(0, "c", 0) for i in range(1, m): d1.addTransition(i, "a", (i + 1) % m) d1.addTransition(i, "b", 0) d1.addTransition(i, "c", i) d2.setSigma(["a", "b", "c"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(n - 1) d2.addTransition(0, "a", 0) d2.addTransition(0, "b", 1) for i in range(1, n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) d2.addTransition(i, "c", 1) return d1, d2 ### CONCAT def concatWCT2(n=6): """ @ worst-case family concatenation where @m=1 and n>=2 and k=3 :arg n: number of states :type n: integer :returns: two dfas :rtype: (DFA,DFA)""" m = 1 if n < 2: raise TestsError("number of states must both greater than 1") d1, d2 = DFA(), 
DFA() d1.setSigma(["a", "b", "c"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(0) d1.addTransition(0, "b", 0) d1.addTransition(0, "c", 0) d2.setSigma(["a", "b", "c"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(n - 1) d2.addTransition(0, "a", 0) d2.addTransition(0, "b", 1) for i in range(1, n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) d2.addTransition(i, "c", (i + 1) % n) return d1, d2 def concatWCT3(m=6): """ @ worst-case family concatenation where @m>=2 and n=1 and k=3 :arg m: number of states :type m: integer :returns: two dfas :rtype: (DFA,DFA)""" n = 1 if m < 2: raise TestsError("number of states must both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b", "c"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(m - 1) d1.addTransition(0, "a", 0) d1.addTransition(0, "b", 1) d1.addTransition(0, "c", 1) d1.addTransition(1, "a", 1) d1.addTransition(1, "b", 2) for i in range(2, m): d1.addTransition(i, "b", (i + 1) % m) d1.addTransition(i, "c", (i + 1) % m) d1.addTransition(i, "a", i) d2.setSigma(["a", "b", "c"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(0) d2.addTransition(0, "c", 0) d2.addTransition(0, "b", 0) return d1, d2 def concatWCT(m=6, n=6): """ @ worst-case family concatenation where @m>=2 and n>=2 and k=3 :arg m: number of states :arg n: number of states :type m: integer :type n: integer :returns: two dfas :rtype: (DFA,DFA)""" if n < 2 or m < 2: raise TestsError("number of states must both greater than 1") d1, d2 = DFA(), DFA() d1.setSigma(["a", "b", "c"]) d1.States = list(range(m)) d1.setInitial(0) d1.addFinal(m - 1) d1.addTransition(0, "a", 1) d1.addTransition(0, "c", 0) for i in range(1, m): d1.addTransition(i, "a", (i + 1) % m) d1.addTransition(i, "b", 0) d1.addTransition(i, "c", i) d2.setSigma(["a", "b", "c"]) d2.States = list(range(n)) d2.setInitial(0) d2.addFinal(n - 1) d2.addTransition(0, "a", 0) d2.addTransition(0, "b", 1) for i in range(1, n): d2.addTransition(i, "b", (i + 1) % n) d2.addTransition(i, "a", i) d2.addTransition(i, "c", 1) return d1, d2 ##### Star def starWCT(m=5): """ @ worst-case family star where @m>=2 and k=2 :arg m: number of states :type m: integer :returns: dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") f = DFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(m - 1) f.addTransition(0, "a", 1) for i in range(1, m): f.addTransition(i, "a", (i + 1) % m) f.addTransition(i, "b", (i + 1) % m) return f def starWCT1(m=5): """ @ worst-case family star where @m>=2 and k=2 :arg m: number of states :type m: integer :returns: dfa :rtype: DFA""" if m < 3: raise TestsError("number of states must be greater than 2") f = DFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(m - 1) f.addTransition(0, "b", 0) f.addTransition(0, "a", 1) f.addTransition(m - 2, "a", m - 1) f.addTransition(m - 1, "a", 0) for i in range(1, m - 2): f.addTransition(i, "a", (i + 1) % m) f.addTransition(i, "b", (i + 1) % m) return f def universal(n, l=["a", "b", "c"], Finals=None, dialect=False, d=None): """Universal witness for state compelxity :arg n: number of states :type n: integer :arg l: alphabet :type l: list of strings :arg Finals: list of final states :type Finals: list of integers :returns: dfa :rtype: DFA """ u = DFA() u.States = list(range(n)) u.setSigma(l) u.setInitial(0) u.addFinal(n - 1) u.addTransition(0, "b", 1) u.addTransition(1, "b", 0) u.addTransition(n - 1, "c", 0) for i in range(n): 
u.addTransition(i, "a", (i + 1) % n) if i >= 2: u.addTransition(i, "b", i) if i != n - 1: u.addTransition(i, "c", i) return u def nCr(n, r): import math if r > n: return 0 else: f = math.factorial return old_div(f(n), (f(r) * f(n - r))) def boundarySC(n, k): return 4 ** (n - 1) - nCr(n - 1, k - 1) + 2 ** (n - k) * 2 ** (n - 1) - 3 ** (n - k) * 2 ** (k - 1) + 2 ** ( k - 1) * 2 ** (n - 1) - 3 ** (k - 1) * 2 ** (n - k) + 1 # Don't care automata def dcMilano1(n): """Return the special dcNFA to prove the titness of proposed bound .. versionadded:: 0.9.8 :param n: number of 'columns' :type n: int :rtype: NFA""" new = fa.NFA() st = [] for _ in range(3 * n): s = new.addState() st.append(s) new.setInitial([st[n - 1]]) for s in range(n): new.addTransition(st[n - 1], 'c', st[s]) for s in range(3): for r in range(n - 1): new.addTransition(st[(n * s) + r], 'a', st[(n * s) + r + 1]) new.addTransition(st[(n * s) + n - 1], 'a', st[0]) for s in range(n): new.addTransition(st[s], 'b', st[s + n]) new.addTransition(st[s + n], 'b', st[s + 2 * n]) new.addTransition(st[s + 2 * n], 'b', st[s]) new.addFinal(st[n - 1]) return new def dcMilano2(n): """Return the special dcNFA to prove the titness of proposed bound .. versionadded:: 0.9.8 :param n: number of 'columns' :type n: int :rtype: NFA""" new = fa.NFA() st = [] for _ in range(3 * n): s = new.addState() st.append(s) for s in range(n): new.addInitial(st[s]) for s in range(3): for r in range(n - 1): new.addTransition(st[(n * s) + r], 'a', st[(n * s) + r + 1]) new.addTransition(st[(n * s) + n - 1], 'a', st[0]) for s in range(n): new.addTransition(st[s], 'b', st[s + n]) new.addTransition(st[s + n], 'b', st[s + 2 * n]) new.addTransition(st[s + 2 * n], 'b', st[s]) new.addFinal(st[n - 1]) return new # Closure operations def suffWCe(m=3): """Witness for suff(L) when L does not have empty as a quotient :rtype: DFA ..seealso: Janusz A. Brzozowski, Galina Jirásková, Chenglong Zou, Quotient Complexity of Closed Languages. Theory Comput. Syst. 
54(2): 277-292 (2014) """ if m< 3: raise TestsError("number of states must be greater than 2") f = DFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(0) f.addTransition(0, "a", 1) f.addTransition(1, "a", 2) f.addTransition(0, "b", 0) f.addTransition(1, "b", 0) for i in range(2, m): f.addTransition(i, "a", (i + 1) % m) f.addTransition(i, "b", i) return f def suffWCd(m=3): """Witness for suff(L) when L has empty as a quotient :rtype: DFA ..seealso: as above """ if m< 3: raise TestsError("number of states must be greater than 2") f = DFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(0) f.addTransition(0, "a", 1) f.addTransition(1, "a", 2) f.addTransition(0, "b", m-1) f.addTransition(1, "b", 0) f.addTransition(m-1, "b", m-1) f.addTransition(m-1, "a", m-1) for i in range(2, m-1): f.addTransition(i, "a", (i + 1) % (m-1)) f.addTransition(i, "b", i) return f def suffWCsynt(m=3): """ Worst case witness for synt of suff(L) """ if m< 3: raise TestsError("number of states must be greater than 2") f = DFA() f.setSigma(["a", "b", "c", "d", "e"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(m-1) f.addTransition(0, "a", 0) f.addTransition(0, "b", 0) f.addTransition(0, "c", 0) f.addTransition(0, "d", 0) f.addTransition(0, "e", 1) f.addTransition(1, "a", 2) f.addTransition(1, "b", 2) f.addTransition(1, "c", 1) f.addTransition(1, "d", 1) f.addTransition(1, "e", 1) f.addTransition(2, "b", 1) f.addTransition(2, "e", 1) f.addTransition(2, "c", 2) f.addTransition(2, "d", 2) f.addTransition(2, "a", 3) for i in range(3,m-1): f.addTransition(i, "a", (i+1)% m) f.addTransition(i,"b",i) f.addTransition(i,"c",i) f.addTransition(i,"d",i) f.addTransition(i,"e",1) f.addTransition(m-1,"a",1) f.addTransition(m-1,"c",1) f.addTransition(m-1,"e",1) f.addTransition(m-1,"d",0) return f def booleanWCSymGrp(m=3): """Witness for symmetric group :rtype: DFA ..seealso: Jason Bell, Janusz A. Brzozowski, Nelma Moreira, Rogério Reis. Symmetric Groups and Quotient Complexity of Boolean Operations. ICALP (2) 2014: 1-12 """ if m< 3: raise TestsError("number of states must be greater than 2") f = DFA() f.setSigma(["a", "b"]) f.States = list(range(m)) f.setInitial(0) f.addFinal(0) f.addFinal(1) f.addTransition(0, "a", 1) f.addTransition(1, "a", 0) f.addTransition(0, "b", 1) f.addTransition(1, "b", 2) for i in range(2, m): f.addTransition(i, "b", (i + 1) % m) f.addTransition(i, "a", i) return f
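The witness constructors above return ordinary FAdo automata, so pairing a family with its state-complexity bound is direct; the sketch below assumes the module installs as FAdo.witness and uses only functions defined above.

# Sketch: the concatenation witness pair and its matching bound (assumed
# import path FAdo.witness; both functions are defined in this module).
from FAdo.witness import concatSC, concatWC

d1, d2 = concatWC(6, 6)
print(len(d1.States), len(d2.States))  # 6 6
print(concatSC(6, 6))                  # 6 * 2**6 - 2**5 == 352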
PypiClean
/Funspec-0.1.0.tar.gz/Funspec-0.1.0/src/funspec/grader.py
from faker import Factory
from lazyutils import delegate_to

from funspec.exceptions import BuildError
from funspec.mod import FunspecMod, TestMod
from funspec.result import Result, ErrorResult

fake = Factory.create()


class Grader:
    """
    Represents a Funspec testing module.

    Usage:

        >>> grader = Grader(mod_source)
        >>> results = grader.run_tests(input_source, lang='python')
        >>> results.is_correct()
        True
    """

    source = delegate_to('funspec')
    num_cases = delegate_to('funspec')

    def __init__(self, source, num_cases=100):
        self.funspec = FunspecMod(source, num_cases)

        # Create a code object associated with the module source code
        try:
            self.funspec.build()
            self.error = None
            self.error_message = ''
            self._valid = True
        except BuildError as ex:
            self.error_message = ex.message
            self.error = ex
            self._valid = False

    def is_valid(self):
        """
        Returns True if the input funspec mod source code is valid.

        Only valid modules can test code.
        """

        return self._valid

    def supports_language(self, lang):
        """
        Returns True if the given programming language is supported by the
        given funspec module.
        """

        # For now, we only support python
        return self._valid and lang == 'python'

    def run_tests(self, source, lang, force_result=False, stop_at_error=False):
        """
        Test the given source code.

        Args:
            source (str): Input source code string.
            lang (str): Input programming language.
            force_result (bool): Return an ErrorResult object even if the
                module is invalid.

        Returns:
            A :cls:`funspec.TestResult` object.
        """

        if not self.is_valid():
            if force_result:
                return ErrorResult('module-error', self.error_message)
            raise ValueError('cannot test code based on an invalid funspec '
                             'module')

        if not self.supports_language(lang):
            msg = 'this module does not support %s' % lang
            if force_result:
                return ErrorResult('module-error', 'LanguageError: ' + msg)
            raise ValueError(msg)

        test_mod = TestMod(source, self.funspec, lang=lang)
        try:
            test_mod.build()
        except BuildError as ex:
            return ErrorResult('build-error', ex.message)
        return Result(test_mod.run_tests(stop_at_error))


def format_error(ex):
    """
    Format a syntax error into an error message string.
    """

    return str(ex)
PypiClean
/xnni-0.7.4-py3-none-manylinux1_x86_64.whl/xnni-0.7.4.data/data/nni/node_modules/reflect-metadata/Reflect.d.ts
declare module "reflect-metadata" { // The "reflect-metadata" module has no imports or exports, but can be used by modules to load the polyfill. } declare namespace Reflect { /** * Applies a set of decorators to a target object. * @param decorators An array of decorators. * @param target The target object. * @returns The result of applying the provided decorators. * @remarks Decorators are applied in reverse order of their positions in the array. * @example * * class Example { } * * // constructor * Example = Reflect.decorate(decoratorsArray, Example); * */ function decorate(decorators: ClassDecorator[], target: Function): Function; /** * Applies a set of decorators to a property of a target object. * @param decorators An array of decorators. * @param target The target object. * @param targetKey The property key to decorate. * @param descriptor A property descriptor * @remarks Decorators are applied in reverse order. * @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod() { } * method() { } * } * * // property (on constructor) * Reflect.decorate(decoratorsArray, Example, "staticProperty"); * * // property (on prototype) * Reflect.decorate(decoratorsArray, Example.prototype, "property"); * * // method (on constructor) * Object.defineProperty(Example, "staticMethod", * Reflect.decorate(decoratorsArray, Example, "staticMethod", * Object.getOwnPropertyDescriptor(Example, "staticMethod"))); * * // method (on prototype) * Object.defineProperty(Example.prototype, "method", * Reflect.decorate(decoratorsArray, Example.prototype, "method", * Object.getOwnPropertyDescriptor(Example.prototype, "method"))); * */ function decorate(decorators: (PropertyDecorator | MethodDecorator)[], target: Object, targetKey: string | symbol, descriptor?: PropertyDescriptor): PropertyDescriptor; /** * A default metadata decorator factory that can be used on a class, class member, or parameter. * @param metadataKey The key for the metadata entry. * @param metadataValue The value for the metadata entry. * @returns A decorator function. * @remarks * If `metadataKey` is already defined for the target and target key, the * metadataValue for that key will be overwritten. * @example * * // constructor * @Reflect.metadata(key, value) * class Example { * } * * // property (on constructor, TypeScript only) * class Example { * @Reflect.metadata(key, value) * static staticProperty; * } * * // property (on prototype, TypeScript only) * class Example { * @Reflect.metadata(key, value) * property; * } * * // method (on constructor) * class Example { * @Reflect.metadata(key, value) * static staticMethod() { } * } * * // method (on prototype) * class Example { * @Reflect.metadata(key, value) * method() { } * } * */ function metadata(metadataKey: any, metadataValue: any): { (target: Function): void; (target: Object, targetKey: string | symbol): void; }; /** * Define a unique metadata entry on the target. * @param metadataKey A key used to store and retrieve metadata. * @param metadataValue A value that contains attached metadata. * @param target The target object on which to define metadata. * @example * * class Example { * } * * // constructor * Reflect.defineMetadata("custom:annotation", options, Example); * * // decorator factory as metadata-producing annotation. 
* function MyAnnotation(options): ClassDecorator { * return target => Reflect.defineMetadata("custom:annotation", options, target); * } * */ function defineMetadata(metadataKey: any, metadataValue: any, target: Object): void; /** * Define a unique metadata entry on the target. * @param metadataKey A key used to store and retrieve metadata. * @param metadataValue A value that contains attached metadata. * @param target The target object on which to define metadata. * @param targetKey The property key for the target. * @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod(p) { } * method(p) { } * } * * // property (on constructor) * Reflect.defineMetadata("custom:annotation", Number, Example, "staticProperty"); * * // property (on prototype) * Reflect.defineMetadata("custom:annotation", Number, Example.prototype, "property"); * * // method (on constructor) * Reflect.defineMetadata("custom:annotation", Number, Example, "staticMethod"); * * // method (on prototype) * Reflect.defineMetadata("custom:annotation", Number, Example.prototype, "method"); * * // decorator factory as metadata-producing annotation. * function MyAnnotation(options): PropertyDecorator { * return (target, key) => Reflect.defineMetadata("custom:annotation", options, target, key); * } * */ function defineMetadata(metadataKey: any, metadataValue: any, target: Object, targetKey: string | symbol): void; /** * Gets a value indicating whether the target object or its prototype chain has the provided metadata key defined. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @returns `true` if the metadata key was defined on the target object or its prototype chain; otherwise, `false`. * @example * * class Example { * } * * // constructor * result = Reflect.hasMetadata("custom:annotation", Example); * */ function hasMetadata(metadataKey: any, target: Object): boolean; /** * Gets a value indicating whether the target object or its prototype chain has the provided metadata key defined. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @param targetKey The property key for the target. * @returns `true` if the metadata key was defined on the target object or its prototype chain; otherwise, `false`. * @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod(p) { } * method(p) { } * } * * // property (on constructor) * result = Reflect.hasMetadata("custom:annotation", Example, "staticProperty"); * * // property (on prototype) * result = Reflect.hasMetadata("custom:annotation", Example.prototype, "property"); * * // method (on constructor) * result = Reflect.hasMetadata("custom:annotation", Example, "staticMethod"); * * // method (on prototype) * result = Reflect.hasMetadata("custom:annotation", Example.prototype, "method"); * */ function hasMetadata(metadataKey: any, target: Object, targetKey: string | symbol): boolean; /** * Gets a value indicating whether the target object has the provided metadata key defined. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. 
* @returns `true` if the metadata key was defined on the target object; otherwise, `false`. * @example * * class Example { * } * * // constructor * result = Reflect.hasOwnMetadata("custom:annotation", Example); * */ function hasOwnMetadata(metadataKey: any, target: Object): boolean; /** * Gets a value indicating whether the target object has the provided metadata key defined. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @param targetKey The property key for the target. * @returns `true` if the metadata key was defined on the target object; otherwise, `false`. * @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod(p) { } * method(p) { } * } * * // property (on constructor) * result = Reflect.hasOwnMetadata("custom:annotation", Example, "staticProperty"); * * // property (on prototype) * result = Reflect.hasOwnMetadata("custom:annotation", Example.prototype, "property"); * * // method (on constructor) * result = Reflect.hasOwnMetadata("custom:annotation", Example, "staticMethod"); * * // method (on prototype) * result = Reflect.hasOwnMetadata("custom:annotation", Example.prototype, "method"); * */ function hasOwnMetadata(metadataKey: any, target: Object, targetKey: string | symbol): boolean; /** * Gets the metadata value for the provided metadata key on the target object or its prototype chain. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @returns The metadata value for the metadata key if found; otherwise, `undefined`. * @example * * class Example { * } * * // constructor * result = Reflect.getMetadata("custom:annotation", Example); * */ function getMetadata(metadataKey: any, target: Object): any; /** * Gets the metadata value for the provided metadata key on the target object or its prototype chain. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @param targetKey The property key for the target. * @returns The metadata value for the metadata key if found; otherwise, `undefined`. * @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod(p) { } * method(p) { } * } * * // property (on constructor) * result = Reflect.getMetadata("custom:annotation", Example, "staticProperty"); * * // property (on prototype) * result = Reflect.getMetadata("custom:annotation", Example.prototype, "property"); * * // method (on constructor) * result = Reflect.getMetadata("custom:annotation", Example, "staticMethod"); * * // method (on prototype) * result = Reflect.getMetadata("custom:annotation", Example.prototype, "method"); * */ function getMetadata(metadataKey: any, target: Object, targetKey: string | symbol): any; /** * Gets the metadata value for the provided metadata key on the target object. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @returns The metadata value for the metadata key if found; otherwise, `undefined`. 
* @example * * class Example { * } * * // constructor * result = Reflect.getOwnMetadata("custom:annotation", Example); * */ function getOwnMetadata(metadataKey: any, target: Object): any; /** * Gets the metadata value for the provided metadata key on the target object. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @param targetKey The property key for the target. * @returns The metadata value for the metadata key if found; otherwise, `undefined`. * @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod(p) { } * method(p) { } * } * * // property (on constructor) * result = Reflect.getOwnMetadata("custom:annotation", Example, "staticProperty"); * * // property (on prototype) * result = Reflect.getOwnMetadata("custom:annotation", Example.prototype, "property"); * * // method (on constructor) * result = Reflect.getOwnMetadata("custom:annotation", Example, "staticMethod"); * * // method (on prototype) * result = Reflect.getOwnMetadata("custom:annotation", Example.prototype, "method"); * */ function getOwnMetadata(metadataKey: any, target: Object, targetKey: string | symbol): any; /** * Gets the metadata keys defined on the target object or its prototype chain. * @param target The target object on which the metadata is defined. * @returns An array of unique metadata keys. * @example * * class Example { * } * * // constructor * result = Reflect.getMetadataKeys(Example); * */ function getMetadataKeys(target: Object): any[]; /** * Gets the metadata keys defined on the target object or its prototype chain. * @param target The target object on which the metadata is defined. * @param targetKey The property key for the target. * @returns An array of unique metadata keys. * @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod(p) { } * method(p) { } * } * * // property (on constructor) * result = Reflect.getMetadataKeys(Example, "staticProperty"); * * // property (on prototype) * result = Reflect.getMetadataKeys(Example.prototype, "property"); * * // method (on constructor) * result = Reflect.getMetadataKeys(Example, "staticMethod"); * * // method (on prototype) * result = Reflect.getMetadataKeys(Example.prototype, "method"); * */ function getMetadataKeys(target: Object, targetKey: string | symbol): any[]; /** * Gets the unique metadata keys defined on the target object. * @param target The target object on which the metadata is defined. * @returns An array of unique metadata keys. * @example * * class Example { * } * * // constructor * result = Reflect.getOwnMetadataKeys(Example); * */ function getOwnMetadataKeys(target: Object): any[]; /** * Gets the unique metadata keys defined on the target object. * @param target The target object on which the metadata is defined. * @param targetKey The property key for the target. * @returns An array of unique metadata keys. 
* @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod(p) { } * method(p) { } * } * * // property (on constructor) * result = Reflect.getOwnMetadataKeys(Example, "staticProperty"); * * // property (on prototype) * result = Reflect.getOwnMetadataKeys(Example.prototype, "property"); * * // method (on constructor) * result = Reflect.getOwnMetadataKeys(Example, "staticMethod"); * * // method (on prototype) * result = Reflect.getOwnMetadataKeys(Example.prototype, "method"); * */ function getOwnMetadataKeys(target: Object, targetKey: string | symbol): any[]; /** * Deletes the metadata entry from the target object with the provided key. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @returns `true` if the metadata entry was found and deleted; otherwise, false. * @example * * class Example { * } * * // constructor * result = Reflect.deleteMetadata("custom:annotation", Example); * */ function deleteMetadata(metadataKey: any, target: Object): boolean; /** * Deletes the metadata entry from the target object with the provided key. * @param metadataKey A key used to store and retrieve metadata. * @param target The target object on which the metadata is defined. * @param targetKey The property key for the target. * @returns `true` if the metadata entry was found and deleted; otherwise, false. * @example * * class Example { * // property declarations are not part of ES6, though they are valid in TypeScript: * // static staticProperty; * // property; * * static staticMethod(p) { } * method(p) { } * } * * // property (on constructor) * result = Reflect.deleteMetadata("custom:annotation", Example, "staticProperty"); * * // property (on prototype) * result = Reflect.deleteMetadata("custom:annotation", Example.prototype, "property"); * * // method (on constructor) * result = Reflect.deleteMetadata("custom:annotation", Example, "staticMethod"); * * // method (on prototype) * result = Reflect.deleteMetadata("custom:annotation", Example.prototype, "method"); * */ function deleteMetadata(metadataKey: any, target: Object, targetKey: string | symbol): boolean; }
PypiClean
/woo-0.1.0.tar.gz/woo-0.1.0/README.md
📦 setup.py (for humans)
=======================

This repo exists to provide [an example setup.py] file that can be used to bootstrap your next Python project. It includes some advanced patterns and best practices for `setup.py`, as well as some commented-out nice-to-haves.

For example, this `setup.py` provides a `$ python setup.py upload` command, which creates a *universal wheel* (and *sdist*) and uploads your package to [PyPi] using [Twine], without the need for an annoying `setup.cfg` file. It also creates/uploads a new git tag, automatically.

In short, `setup.py` files can be daunting to approach when first starting out; even Guido has been heard saying "everyone cargo cults them". It's true, so I want this repo to be the best place to copy-paste from :)

[Check out the example!][an example setup.py]

Installation
------------

```bash
cd your_project

# Download the setup.py file:

# download with wget
wget https://raw.githubusercontent.com/navdeep-G/setup.py/master/setup.py -O setup.py

# download with curl
curl -O https://raw.githubusercontent.com/navdeep-G/setup.py/master/setup.py
```

To Do
-----

- Tests via `$ setup.py test` (if it's concise).

Pull requests are encouraged!

More Resources
--------------

- [What is setup.py?] on Stack Overflow
- [Official Python Packaging User Guide](https://packaging.python.org)
- [The Hitchhiker's Guide to Packaging]
- [Cookiecutter template for a Python package]

License
-------

This is free and unencumbered software released into the public domain.

Anyone is free to copy, modify, publish, use, compile, sell, or distribute this software, either in source code form or as a compiled binary, for any purpose, commercial or non-commercial, and by any means.

[an example setup.py]: https://github.com/navdeep-G/setup.py/blob/master/setup.py
[PyPi]: https://docs.python.org/3/distutils/packageindex.html
[Twine]: https://pypi.python.org/pypi/twine
[image]: https://farm1.staticflickr.com/628/33173824932_58add34581_k_d.jpg
[What is setup.py?]: https://stackoverflow.com/questions/1471994/what-is-setup-py
[The Hitchhiker's Guide to Packaging]: https://the-hitchhikers-guide-to-packaging.readthedocs.io/en/latest/creation.html
[Cookiecutter template for a Python package]: https://github.com/audreyr/cookiecutter-pypackage
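For orientation, the example file boils down to collecting package metadata and handing it to `setuptools.setup()`. The snippet below is a minimal sketch of that shape only; the names, URL, and version are placeholders rather than the values used in the real file, and the `$ python setup.py upload` behaviour described above comes from a custom command class registered via `cmdclass`, which is omitted here.

```python
from setuptools import setup, find_packages

setup(
    name="mypackage",                      # placeholder project name
    version="0.1.0",
    description="Short one-line summary.",
    long_description=open("README.md").read(),
    long_description_content_type="text/markdown",
    author="Your Name",
    python_requires=">=3.6.0",
    url="https://github.com/you/mypackage",
    packages=find_packages(exclude=("tests",)),
    install_requires=[],                   # runtime dependencies go here
    license="Unlicense",
)
```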
PypiClean
/Ax_Handoff-3.1.0.tar.gz/Ax_Handoff-3.1.0/axonchisel/handoff/protocol/__init__.py
import math
import hashlib
import hmac

import axonchisel.handoff.config as config
import axonchisel.handoff.util as util
import axonchisel.handoff.error as error


# ----------------------------------------------------------------------------

def get_variant(encstr):
    """Given encoded object (string), return the variant identifier (char)."""
    MAGIC = "XH"
    if not isinstance(encstr, str):
        raise TypeError("Unable to determine variant from non-string {0}".format(type(encstr)))
    if len(encstr) < 3:
        raise error.UnserializeError("Not enough data (length {0})".format(len(encstr)))
    if encstr[0:2] != MAGIC:
        raise error.UnserializeError("Header magic '{0}' is not '{1}'".format(encstr[0:2], MAGIC))
    variant = encstr[2:3]
    return variant


# ----------------------------------------------------------------------------
#
# Protocol Elements Superclass.
#

class ProtocolElement(object):
    """Abstract superclass for protocol elements."""
    pass


# ----------------------------------------------------------------------------
#
# Protocol Elements: Header, Body, Footer.
#

class Header(ProtocolElement):
    """Header protocol element (Abstract Superclass)."""

    MAGIC = "XH"
    VARIANT = "?"  # (specified by subclasses)
    HMAC_BITS = config.HMAC_DIGEST_BITS  # (160 bits for SHA1)
    LENGTH_HMAC = int(math.ceil(HMAC_BITS/8*4/3))  # 27
    LENGTH = len(MAGIC) + len(VARIANT) + LENGTH_HMAC  # 30=2+1+27

    def __init__(self):
        self.body_hmac = ""  # byte string

    @classmethod
    def from_serialized(cls, encstr):
        """Construct and return new object by unserializing encoded string.
        Raise UnserializeError on errors."""
        o = Header()
        o.unserialize(encstr)
        return o

    def serialize(self):
        """Serialize self into string and return.
        Raise SerializeError on errors."""
        if len(self.body_hmac) != self.HMAC_BITS/8:
            raise error.SerializeError("Header body_hmac size {0}B is not {1}B".format(len(self.body_hmac), self.HMAC_BITS/8))
        return "{magic}{variant}{hmac}".format(
            magic=self.MAGIC,
            variant=self.VARIANT,
            hmac=util.ub64encode(self.body_hmac))

    def unserialize(self, encstr):
        """Unserialize encoded string into self.
        Raise UnserializeError on errors."""
        if not isinstance(encstr, str):
            raise TypeError("Header can only unserialize from strings, but {0} given".format(type(encstr)))
        if len(encstr) != self.LENGTH:
            raise error.UnserializeError("Header length {0} is not {1}".format(len(encstr), self.LENGTH))
        if encstr[0:2] != self.MAGIC:
            raise error.UnserializeError("Header magic '{0}' is not '{1}'".format(encstr[0:2], self.MAGIC))
        if encstr[2:3] != self.VARIANT:
            raise error.UnserializeError("Header variant '{0}' is not '{1}'".format(encstr[2:3], self.VARIANT))
        try:
            self.body_hmac = util.ub64decode(encstr[3:])
        except (TypeError, ValueError) as e:
            raise error.UnserializeError("Error decoding body_hmac: {0!r}".format(e))

    def compute_body_hmac(self, serialized_body, secret=""):
        """Compute and return the body hmac based on serialized Body ProtocolElement passed."""
        return hmac.new(self._hmac_key(secret), serialized_body.encode('utf-8'), config.HMAC_DIGEST).digest()

    def _hmac_key(self, secret=""):
        """Generate and return HMAC key (bytes) based on secret."""
        secret_hash_512 = config.HMAC_SECRET_HASH(secret.encode('utf-8')).digest()  # make lots of bits
        return secret_hash_512[0:(config.HMAC_KEY_BYTES)]  # use all 512 bits


class Body(ProtocolElement):
    """Body protocol element (Abstract Superclass)."""
    pass


class Footer(ProtocolElement):
    """Footer protocol element (Abstract Superclass)."""

    MAGIC = "HX"
    LENGTH = len(MAGIC)  # 2

    def __init__(self):
        pass

    @classmethod
    def from_serialized(cls, encstr):
        """Construct and return new object by unserializing encoded string.
        Raise UnserializeError on errors."""
        o = Footer()
        o.unserialize(encstr)
        return o

    def serialize(self):
        """Serialize self into string and return.
        Raise SerializeError on errors."""
        return "{magic}".format(magic=self.MAGIC)

    def unserialize(self, encstr):
        """Unserialize encoded string into self.
        Raise UnserializeError on errors."""
        if not isinstance(encstr, str):
            raise TypeError("Footer can only unserialize from strings, but {0} given".format(type(encstr)))
        if len(encstr) != self.LENGTH:
            raise error.UnserializeError("Footer length {0} is not {1}".format(len(encstr), self.LENGTH))
        if encstr[0:2] != self.MAGIC:
            raise error.UnserializeError("Footer magic '{0}' is not '{1}'".format(encstr[0:2], self.MAGIC))


# ----------------------------------------------------------------------------
#
# Protocol Envelope -- wraps Header, Body, Footer.
#

class Envelope(ProtocolElement):
    """Protocol envelope wrapping header, body, footer (Abstract Superclass)."""

    def __init__(self):
        self.header = Header()
        self.body = Body()
        self.footer = Footer()

    @classmethod
    def from_serialized(cls, encstr, secret=""):
        """Construct and return new object by unserializing encoded string.
        Raise UnserializeError on errors."""
        o = Envelope()
        o.unserialize(encstr, secret=secret)
        return o

    def serialize(self, secret=""):
        """Serialize self into string and return.
        Raise SerializeError on errors."""
        if secret == "":
            raise error.SerializeError("No secret specified for envelope serialization")

        # Serialize body and calc/store HMAC in header:
        s_body = self.body.serialize(secret=secret)
        try:
            self.header.body_hmac = self.header.compute_body_hmac(s_body, secret=secret)
        except TypeError as e:
            raise error.SerializeError("Unable to compute body HMAC: {0!r}".format(e))

        # Serialize remainder:
        s_header = self.header.serialize()
        s_footer = self.footer.serialize()

        # Combine and return:
        envelope = "{h}{b}{f}".format(h=s_header, b=s_body, f=s_footer)
        return envelope

    def unserialize(self, encstr, secret=""):
        """Unserialize encoded string into self.
        Raise UnserializeError on errors."""
        if not isinstance(encstr, str):
            raise TypeError("Envelope can only unserialize from strings, but {0} given".format(type(encstr)))
        if secret == "":
            raise error.UnserializeError("No secret specified for envelope unserialization")

        # Break envelope into chunks (fixed size header and footer, variable body):
        enc_header = encstr[:Header.LENGTH]
        enc_footer = encstr[-Footer.LENGTH:]
        enc_body = encstr[Header.LENGTH:-Footer.LENGTH]

        # Unserialize header and verify HMAC against computed body HMAC:
        self.header.unserialize(enc_header)
        try:
            body_hmac = self.header.compute_body_hmac(enc_body, secret=secret)
        except TypeError as e:
            raise error.UnserializeError("Unable to compute body HMAC: {0!r}".format(e))
        if self.header.body_hmac != body_hmac:
            raise error.DataTamperedError("Body HMAC ({0!r}) does not match header's ({1!r}).".format(body_hmac, self.header.body_hmac))

        # Unserialize and verify remainder:
        self.body.unserialize(enc_body, secret=secret)
        self.footer.unserialize(enc_footer)
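# ----------------------------------------------------------------------------
# Editor's note: usage sketch (not part of the original module).
# A serialized envelope is the magic "XH", one variant character, and a
# 27-character base64 HMAC (Header.LENGTH == 30), followed by the body and
# the footer magic "HX".  get_variant() only reads the first three
# characters, so the variant can be inspected without knowing the secret.
# The string below is an invented example, not output from a concrete
# Envelope subclass:
#
#     encoded = "XHA" + "m" * 27 + "<body>" + "HX"
#     get_variant(encoded)   # -> "A"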
PypiClean
/confine-controller-1.0.2.tar.gz/confine-controller-1.0.2/controller/apps/firmware/migrations/0040_datamigration__b620_drop_legacy_fw_config.py
from south.utils import datetime_utils as datetime from south.db import db from south.v2 import DataMigration from django.db import models class Migration(DataMigration): def forwards(self, orm): "Drop legacy firmware configuration. #245 note-25" # Note: Don't use "from appname.models import ModelName". # Use orm.ModelName to refer to models in this application, # and orm['appname.ModelName'] for models in other applications. orm.Config.objects.update(version='0.3.2') # remove legacy UCI entries orm.ConfigUCI.objects.filter(section='server server', option='base_path').delete() orm.ConfigUCI.objects.filter(section='tinc-net confine', option='enabled').delete() # remove legacy Config files orm.ConfigFile.objects.filter(path='/etc/config/tinc').delete() orm.ConfigFile.objects.filter(path='/etc/tinc/confine/tinc-down').delete() orm.ConfigFile.objects.filter(path='/etc/tinc/confine/tinc-up').delete() # update confine Config file: don't render 'server server' section cfile = orm.ConfigFile.objects.get(path='/etc/config/confine') cfile.content = "self.config.render_uci(node, sections=['node node', 'registry registry', 'testbed testbed'])" cfile.save() def backwards(self, orm): "Restore legacy firmware configuration." orm.Config.objects.update(version='0.3.1') cfg = orm.Config.objects.get() # restore legacy UCI entries orm.ConfigUCI.objects.create(config=cfg, section='server server', option='base_path', value="'/api'") orm.ConfigUCI.objects.create(config=cfg, section='tinc-net confine', option='enabled', value="'1'") # restore legacy Config files orm.ConfigFile.objects.create( config=cfg, path='/etc/config/tinc', content="self.config.render_uci(node, sections=['tinc-net confine'])", is_active=False ) orm.ConfigFile.objects.create( config=cfg, path='/etc/tinc/confine/tinc-down', content="node.tinc.get_tinc_down()", is_active=False, mode='+x' ) orm.ConfigFile.objects.create( config=cfg, path='/etc/tinc/confine/tinc-up', content="node.tinc.get_tinc_up()", is_active=False, mode='+x' ) # restore confine Config file: render 'server server' section cfile = orm.ConfigFile.objects.get(path='/etc/config/confine') cfile.content = ("self.config.render_uci(node, sections=['node node', " "'registry registry', 'server server', 'testbed testbed'])") cfile.save() models = { u'firmware.baseimage': { 'Meta': {'object_name': 'BaseImage'}, 'architectures': ('controller.models.fields.MultiSelectField', [], {'max_length': '250'}), 'config': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': u"orm['firmware.Config']"}), 'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}) }, u'firmware.build': { 'Meta': {'ordering': "['-date']", 'object_name': 'Build'}, 'base_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100'}), 'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}), 'image': ('privatefiles.models.fields.PrivateFileField', [], {'max_length': '256'}), 'kwargs': ('django.db.models.fields.TextField', [], {}), 'node': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'firmware_build'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['nodes.Node']"}), 'task_id': ('django.db.models.fields.CharField', [], {'max_length': '36', 
'unique': 'True', 'null': 'True'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, u'firmware.buildfile': { 'Meta': {'unique_together': "(('build', 'path'),)", 'object_name': 'BuildFile'}, 'build': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': u"orm['firmware.Build']"}), 'config': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': u"orm['firmware.ConfigFile']"}), 'content': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'path': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, u'firmware.config': { 'Meta': {'object_name': 'Config'}, 'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'image_name': ('django.db.models.fields.CharField', [], {'default': "'firmware-%(node_name)s-%(arch)s-%(version)s-%(build_id)d.img.gz'", 'max_length': '255'}), 'version': ('django.db.models.fields.CharField', [], {'max_length': '64'}) }, u'firmware.configfile': { 'Meta': {'ordering': "['-priority']", 'unique_together': "(['config', 'path'],)", 'object_name': 'ConfigFile'}, 'config': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': u"orm['firmware.Config']"}), 'content': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_optional': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'mode': ('django.db.models.fields.CharField', [], {'max_length': '6', 'blank': 'True'}), 'path': ('django.db.models.fields.CharField', [], {'max_length': '256'}), 'priority': ('django.db.models.fields.IntegerField', [], {'default': '0'}) }, u'firmware.configfilehelptext': { 'Meta': {'object_name': 'ConfigFileHelpText'}, 'config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['firmware.Config']"}), 'file': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'help_text'", 'unique': 'True', 'to': u"orm['firmware.ConfigFile']"}), 'help_text': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}) }, u'firmware.configplugin': { 'Meta': {'object_name': 'ConfigPlugin'}, 'config': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'plugins'", 'to': u"orm['firmware.Config']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128', 'blank': 'True'}), 'module': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}) }, u'firmware.configuci': { 'Meta': {'ordering': "['section', 'option']", 'unique_together': "(['config', 'section', 'option'],)", 'object_name': 'ConfigUCI'}, 'config': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['firmware.Config']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'option': ('django.db.models.fields.CharField', [], {'max_length': '32'}), 'section': ('django.db.models.fields.CharField', [], {'default': "'node'", 'max_length': '32'}), 'value': ('django.db.models.fields.TextField', [], {'max_length': '255'}) }, 
u'firmware.nodebuildfile': { 'Meta': {'unique_together': "(('node', 'path'),)", 'object_name': 'NodeBuildFile'}, 'content': ('django.db.models.fields.TextField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'node': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'files'", 'to': u"orm['nodes.Node']"}), 'path': ('django.db.models.fields.CharField', [], {'max_length': '256'}) }, u'firmware.nodekeys': { 'Meta': {'object_name': 'NodeKeys'}, 'allow_node_admins': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'node': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'keys'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['nodes.Node']"}), 'ssh_auth': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}), 'ssh_pass': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}), 'sync_node_admins': ('django.db.models.fields.BooleanField', [], {'default': 'False'}) }, u'nodes.island': { 'Meta': {'object_name': 'Island'}, 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}) }, u'nodes.node': { 'Meta': {'object_name': 'Node'}, 'arch': ('django.db.models.fields.CharField', [], {'default': "'i686'", 'max_length': '16'}), 'boot_sn': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nodes'", 'to': u"orm['users.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'island': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['nodes.Island']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'local_iface': ('django.db.models.fields.CharField', [], {'default': "'eth0'", 'max_length': '16'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}), 'priv_ipv4_prefix': ('controller.models.fields.NullableCharField', [], {'max_length': '19', 'null': 'True', 'blank': 'True'}), 'set_state': ('django.db.models.fields.CharField', [], {'default': "'debug'", 'max_length': '16'}), 'sliver_mac_prefix': ('controller.models.fields.NullableCharField', [], {'max_length': '5', 'null': 'True', 'blank': 'True'}), 'sliver_pub_ipv4': ('django.db.models.fields.CharField', [], {'default': "'dhcp'", 'max_length': '8'}), 'sliver_pub_ipv4_range': ('controller.models.fields.NullableCharField', [], {'default': "'#8'", 'max_length': '256', 'null': 'True', 'blank': 'True'}), 'sliver_pub_ipv6': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '8'}) }, u'users.group': { 'Meta': {'ordering': "['name']", 'object_name': 'Group'}, 'allow_nodes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'allow_slices': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}) } } complete_apps = ['firmware'] symmetrical = True
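# Editor's note: verification sketch (not part of the original migration).
# After South applies this migration forwards, its effects can be spot-checked
# from a Django shell roughly as below; the model import path is an assumption
# based on this package's layout, not something stated in the migration itself.
#
#     from controller.apps.firmware.models import Config, ConfigUCI, ConfigFile
#     assert Config.objects.get().version == '0.3.2'
#     assert not ConfigUCI.objects.filter(section='server server',
#                                         option='base_path').exists()
#     assert not ConfigFile.objects.filter(path='/etc/config/tinc').exists()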
PypiClean
/scikit-diveMove-0.3.0.tar.gz/scikit-diveMove-0.3.0/docs/source/demo_simulbouts.rst
.. _demo_simulbouts-label: ================== Simulating bouts ================== This follows the simulation of mixed Poisson distributions in `Luque & Guinet (2007)`_, and the comparison of models for characterizing such distributions. .. _Luque & Guinet (2007): https://doi.org/10.1163/156853907782418213 Set up the environment. .. jupyter-execute:: # Set up import numpy as np import pandas as pd import matplotlib.pyplot as plt import skdiveMove.bouts as skbouts # For figure sizes _FIG3X1 = (9, 12) .. jupyter-execute:: :hide-code: :hide-output: pd.set_option("display.precision", 3) np.set_printoptions(precision=3, sign="+") %matplotlib inline Generate two-process mixture ============================ For a mixed distribution of two random Poisson processes with a mixing parameter :math:`p=0.7`, and density parameters :math:`\lambda_f=0.05`, and :math:`\lambda_s=0.005`, we use the `random_mixexp` function to generate samples. Define the true values described above, grouping the parameters into a `Series` to simplify further operations. .. jupyter-execute:: :linenos: p_true = 0.7 lda0_true = 0.05 lda1_true = 0.005 pars_true = pd.Series({"lambda0": lda0_true, "lambda1": lda1_true, "p": p_true}) Declare the number of simulations and the number of samples to generate: .. jupyter-execute:: :linenos: # Number of simulations nsims = 500 # Size of each sample nsamp = 1000 Set up variables to accumulate simulations: .. jupyter-execute:: :linenos: # Set up NLS simulations coefs_nls = [] # Set up MLE simulations coefs_mle = [] # Fixed bounds fit 1 p_bnd = (-2, None) lda0_bnd = (-5, None) lda1_bnd = (-10, None) opts1 = dict(method="L-BFGS-B", bounds=(p_bnd, lda0_bnd, lda1_bnd)) # Fixed bounds fit 2 p_bnd = (1e-1, None) lda0_bnd = (1e-3, None) lda1_bnd = (1e-6, None) opts2 = dict(method="L-BFGS-B", bounds=(p_bnd, lda0_bnd, lda1_bnd)) Perform the simulations in a loop, fitting the nonlinear least squares (NLS) model, and the alternative maximum likelihood (MLE) model at each iteration. .. jupyter-execute:: :linenos: # Set up a random number generator for efficiency rng = np.random.default_rng() # Estimate parameters `nsims` times for i in range(nsims): x = skbouts.random_mixexp(nsamp, pars_true["p"], (pars_true[["lambda0", "lambda1"]] .to_numpy()), rng=rng) # NLS xbouts = skbouts.BoutsNLS(x, 5) init_pars = xbouts.init_pars([80], plot=False) coefs, _ = xbouts.fit(init_pars) p_i = skbouts.bouts.calc_p(coefs)[0][0] # only one here coefs_i = pd.concat([coefs.loc["lambda"], pd.Series({"p": p_i})]) coefs_nls.append(coefs_i.to_numpy()) # MLE xbouts = skbouts.BoutsMLE(x, 5) init_pars = xbouts.init_pars([80], plot=False) fit1, fit2 = xbouts.fit(init_pars, fit1_opts=opts1, fit2_opts=opts2) coefs_mle.append(np.roll(fit2.x, -1)) Non-linear least squares (NLS) ============================== Collect and display NLS results from the simulations: .. jupyter-execute:: :linenos: nls_coefs = pd.DataFrame(np.row_stack(coefs_nls), columns=["lambda0", "lambda1", "p"]) # Centrality and variance nls_coefs.describe() Maximum likelihood estimation (MLE) =================================== Collect and display MLE results from the simulations: .. jupyter-execute:: :linenos: mle_coefs = pd.DataFrame(np.row_stack(coefs_mle), columns=["lambda0", "lambda1", "p"]) # Centrality and variance mle_coefs.describe() Comparing NLS vs MLE ==================== The bias relative to the true values of the mixed distribution can be readily assessed for NLS: .. jupyter-execute:: nls_coefs.mean() - pars_true and for MLE: .. 
jupyter-execute:: mle_coefs.mean() - pars_true To visualize the estimates obtained throughout the simulations, we can compare density plots, along with the true parameter values: .. jupyter-execute:: :hide-code: # Combine results coefs_merged = pd.concat((mle_coefs, nls_coefs), keys=["mle", "nls"], names=["method", "idx"]) # Density plots kwargs = dict(alpha=0.8) fig, axs = plt.subplots(3, 1, figsize=_FIG3X1) lda0 = (coefs_merged["lambda0"].unstack(level=0) .plot(ax=axs[0], kind="kde", legend=False, **kwargs)) axs[0].set_ylabel(r"Density $[\lambda_f]$") # True value axs[0].axvline(pars_true["lambda0"], linestyle="dashed", color="k") lda1 = (coefs_merged["lambda1"].unstack(level=0) .plot(ax=axs[1], kind="kde", legend=False, **kwargs)) axs[1].set_ylabel(r"Density $[\lambda_s]$") # True value axs[1].axvline(pars_true["lambda1"], linestyle="dashed", color="k") p_coef = (coefs_merged["p"].unstack(level=0) .plot(ax=axs[2], kind="kde", legend=False, **kwargs)) axs[2].set_ylabel(r"Density $[p]$") # True value axs[2].axvline(pars_true["p"], linestyle="dashed", color="k") axs[0].legend(["MLE", "NLS"], loc=8, bbox_to_anchor=(0.5, 1), frameon=False, borderaxespad=0.1, ncol=2); Three-process mixture ===================== We generate a mixture of "fast", "slow", and "very slow" processes. The probabilities considered for modeling this mixture are :math:`p0` and :math:`p1`, representing the proportion of "fast" to "slow" events, and the proportion of "slow" to "slow" *and* "very slow" events, respectively. .. jupyter-execute:: p_fast = 0.6 p_svs = 0.7 # prop of slow to (slow + very slow) procs p_true = [p_fast, p_svs] lda_true = [0.05, 0.01, 8e-4] pars_true = pd.Series({"lambda0": lda_true[0], "lambda1": lda_true[1], "lambda2": lda_true[2], "p0": p_true[0], "p1": p_true[1]}) Mixtures with more than two processes require careful choice of constraints to avoid numerical issues to fit the models; even the NLS model may require help. .. jupyter-execute:: # Bounds for NLS fit; flattened, two per process (a, lambda). Two-tuple # with lower and upper bounds for each parameter. nls_opts = dict(bounds=( ([100, 1e-3, 100, 1e-3, 100, 1e-6]), ([5e4, 1, 5e4, 1, 5e4, 1]))) # Fixed bounds MLE fit 1 p0_bnd = (-5, None) p1_bnd = (-5, None) lda0_bnd = (-6, None) lda1_bnd = (-8, None) lda2_bnd = (-12, None) opts1 = dict(method="L-BFGS-B", bounds=(p0_bnd, p1_bnd, lda0_bnd, lda1_bnd, lda2_bnd)) # Fixed bounds MLE fit 2 p0_bnd = (1e-3, 9.9e-1) p1_bnd = (1e-3, 9.9e-1) lda0_bnd = (2e-2, 1e-1) lda1_bnd = (3e-3, 5e-2) lda2_bnd = (1e-5, 1e-3) opts2 = dict(method="L-BFGS-B", bounds=(p0_bnd, p1_bnd, lda0_bnd, lda1_bnd, lda2_bnd)) x = skbouts.random_mixexp(nsamp, [pars_true["p0"], pars_true["p1"]], [pars_true["lambda0"], pars_true["lambda1"], pars_true["lambda2"]], rng=rng) We fit the three-process data with the two models: .. jupyter-execute:: x_nls = skbouts.BoutsNLS(x, 5) init_pars = x_nls.init_pars([75, 220], plot=False) coefs, _ = x_nls.fit(init_pars, **nls_opts) x_mle = skbouts.BoutsMLE(x, 5) init_pars = x_mle.init_pars([75, 220], plot=False) fit1, fit2 = x_mle.fit(init_pars, fit1_opts=opts1, fit2_opts=opts2) Plot both fits and BECs: .. jupyter-execute:: fig, axs = plt.subplots(1, 2, figsize=(13, 5)) x_nls.plot_fit(coefs, ax=axs[0]) x_mle.plot_fit(fit2, ax=axs[1]); Compare cumulative frequency distributions: .. 
jupyter-execute:: fig, axs = plt.subplots(1, 2, figsize=(13, 5)) axs[0].set_title("NLS") x_nls.plot_ecdf(coefs, ax=axs[0]) axs[1].set_title("MLE") x_mle.plot_ecdf(fit2, ax=axs[1]); Feel free to download a copy of this demo (:jupyter-download:script:`demo_simulbouts`).
PypiClean
/mplleaflet-0.0.5.tar.gz/mplleaflet-0.0.5/mplexporter/mplexporter/renderers/base.py
import warnings import itertools from contextlib import contextmanager import numpy as np from matplotlib import transforms from .. import utils from .. import _py3k_compat as py3k class Renderer(object): @staticmethod def ax_zoomable(ax): return bool(ax and ax.get_navigate()) @staticmethod def ax_has_xgrid(ax): return bool(ax and ax.xaxis._gridOnMajor and ax.yaxis.get_gridlines()) @staticmethod def ax_has_ygrid(ax): return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines()) @property def current_ax_zoomable(self): return self.ax_zoomable(self._current_ax) @property def current_ax_has_xgrid(self): return self.ax_has_xgrid(self._current_ax) @property def current_ax_has_ygrid(self): return self.ax_has_ygrid(self._current_ax) @contextmanager def draw_figure(self, fig, props): if hasattr(self, "_current_fig") and self._current_fig is not None: warnings.warn("figure embedded in figure: something is wrong") self._current_fig = fig self._fig_props = props self.open_figure(fig=fig, props=props) yield self.close_figure(fig=fig) self._current_fig = None self._fig_props = {} @contextmanager def draw_axes(self, ax, props): if hasattr(self, "_current_ax") and self._current_ax is not None: warnings.warn("axes embedded in axes: something is wrong") self._current_ax = ax self._ax_props = props self.open_axes(ax=ax, props=props) yield self.close_axes(ax=ax) self._current_ax = None self._ax_props = {} @contextmanager def draw_legend(self, legend, props): self._current_legend = legend self._legend_props = props self.open_legend(legend=legend, props=props) yield self.close_legend(legend=legend) self._current_legend = None self._legend_props = {} # Following are the functions which should be overloaded in subclasses def open_figure(self, fig, props): """ Begin commands for a particular figure. Parameters ---------- fig : matplotlib.Figure The Figure which will contain the ensuing axes and elements props : dictionary The dictionary of figure properties """ pass def close_figure(self, fig): """ Finish commands for a particular figure. Parameters ---------- fig : matplotlib.Figure The figure which is finished being drawn. """ pass def open_axes(self, ax, props): """ Begin commands for a particular axes. Parameters ---------- ax : matplotlib.Axes The Axes which will contain the ensuing axes and elements props : dictionary The dictionary of axes properties """ pass def close_axes(self, ax): """ Finish commands for a particular axes. Parameters ---------- ax : matplotlib.Axes The Axes which is finished being drawn. """ pass def open_legend(self, legend, props): """ Beging commands for a particular legend. Parameters ---------- legend : matplotlib.legend.Legend The Legend that will contain the ensuing elements props : dictionary The dictionary of legend properties """ pass def close_legend(self, legend): """ Finish commands for a particular legend. Parameters ---------- legend : matplotlib.legend.Legend The Legend which is finished being drawn """ pass def draw_marked_line(self, data, coordinates, linestyle, markerstyle, label, mplobj=None): """Draw a line that also has markers. If this isn't reimplemented by a renderer object, by default, it will make a call to BOTH draw_line and draw_markers when both markerstyle and linestyle are not None in the same Line2D object. 
""" if linestyle is not None: self.draw_line(data, coordinates, linestyle, label, mplobj) if markerstyle is not None: self.draw_markers(data, coordinates, markerstyle, label, mplobj) def draw_line(self, data, coordinates, style, label, mplobj=None): """ Draw a line. By default, draw the line via the draw_path() command. Some renderers might wish to override this and provide more fine-grained behavior. In matplotlib, lines are generally created via the plt.plot() command, though this command also can create marker collections. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the line. mplobj : matplotlib object the matplotlib plot element which generated this line """ pathcodes = ['M'] + (data.shape[0] - 1) * ['L'] pathstyle = dict(facecolor='none', **style) pathstyle['edgecolor'] = pathstyle.pop('color') pathstyle['edgewidth'] = pathstyle.pop('linewidth') self.draw_path(data=data, coordinates=coordinates, pathcodes=pathcodes, style=pathstyle, mplobj=mplobj) @staticmethod def _iter_path_collection(paths, path_transforms, offsets, styles): """Build an iterator over the elements of the path collection""" N = max(len(paths), len(offsets)) if path_transforms is None or len(path_transforms) == 0: path_transforms = [np.eye(3)] edgecolor = styles['edgecolor'] if np.size(edgecolor) == 0: edgecolor = ['none'] facecolor = styles['facecolor'] if np.size(facecolor) == 0: facecolor = ['none'] elements = [paths, path_transforms, offsets, edgecolor, styles['linewidth'], facecolor] it = itertools return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N) def draw_path_collection(self, paths, path_coordinates, path_transforms, offsets, offset_coordinates, offset_order, styles, mplobj=None): """ Draw a collection of paths. The paths, offsets, and styles are all iterables, and the number of paths is max(len(paths), len(offsets)). By default, this is implemented via multiple calls to the draw_path() function. For efficiency, Renderers may choose to customize this implementation. Examples of path collections created by matplotlib are scatter plots, histograms, contour plots, and many others. Parameters ---------- paths : list list of tuples, where each tuple has two elements: (data, pathcodes). See draw_path() for a description of these. path_coordinates: string the coordinates code for the paths, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. path_transforms: array_like an array of shape (*, 3, 3), giving a series of 2D Affine transforms for the paths. These encode translations, rotations, and scalings in the standard way. offsets: array_like An array of offsets of shape (N, 2) offset_coordinates : string the coordinates code for the offsets, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. offset_order : string either "before" or "after". This specifies whether the offset is applied before the path transform, or after. The matplotlib backend equivalent is "before"->"data", "after"->"screen". styles: dictionary A dictionary in which each value is a list of length N, containing the style(s) for the paths. 
mplobj : matplotlib object the matplotlib plot element which generated this collection """ if offset_order == "before": raise NotImplementedError("offset before transform") for tup in self._iter_path_collection(paths, path_transforms, offsets, styles): (path, path_transform, offset, ec, lw, fc) = tup vertices, pathcodes = path path_transform = transforms.Affine2D(path_transform) vertices = path_transform.transform(vertices) # This is a hack: if path_coordinates == "figure": path_coordinates = "points" style = {"edgecolor": utils.color_to_hex(ec), "facecolor": utils.color_to_hex(fc), "edgewidth": lw, "dasharray": "10,0", "alpha": styles['alpha'], "zorder": styles['zorder']} self.draw_path(data=vertices, coordinates=path_coordinates, pathcodes=pathcodes, style=style, offset=offset, offset_coordinates=offset_coordinates, mplobj=mplobj) def draw_markers(self, data, coordinates, style, label, mplobj=None): """ Draw a set of markers. By default, this is done by repeatedly calling draw_path(), but renderers should generally overload this method to provide a more efficient implementation. In matplotlib, markers are created using the plt.plot() command. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the markers. mplobj : matplotlib object the matplotlib plot element which generated this marker collection """ vertices, pathcodes = style['markerpath'] pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor', 'facecolor', 'zorder', 'edgewidth']) pathstyle['dasharray'] = "10,0" for vertex in data: self.draw_path(data=vertices, coordinates="points", pathcodes=pathcodes, style=pathstyle, offset=vertex, offset_coordinates=coordinates, mplobj=mplobj) def draw_text(self, text, position, coordinates, style, text_type=None, mplobj=None): """ Draw text on the image. Parameters ---------- text : string The text to draw position : tuple The (x, y) position of the text coordinates : string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the text. text_type : string or None if specified, a type of text such as "xlabel", "ylabel", "title" mplobj : matplotlib object the matplotlib plot element which generated this text """ raise NotImplementedError() def draw_path(self, data, coordinates, pathcodes, style, offset=None, offset_coordinates="data", mplobj=None): """ Draw a path. In matplotlib, paths are created by filled regions, histograms, contour plots, patches, etc. Parameters ---------- data : array_like A shape (N, 2) array of datapoints. coordinates : string A string code, which should be either 'data' for data coordinates, 'figure' for figure (pixel) coordinates, or "points" for raw point coordinates (useful in conjunction with offsets, below). pathcodes : list A list of single-character SVG pathcodes associated with the data. Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't', 'S', 's', 'C', 'c', 'Z', 'z'] See the SVG specification for details. Note that some path codes consume more than one datapoint (while 'Z' consumes none), so in general, the length of the pathcodes list will not be the same as that of the data array. style : dictionary a dictionary specifying the appearance of the line. offset : list (optional) the (x, y) offset of the path. 
If not given, no offset will be used. offset_coordinates : string (optional) A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. mplobj : matplotlib object the matplotlib plot element which generated this path """ raise NotImplementedError() def draw_image(self, imdata, extent, coordinates, style, mplobj=None): """ Draw an image. Parameters ---------- imdata : string base64 encoded png representation of the image extent : list the axes extent of the image: [xmin, xmax, ymin, ymax] coordinates: string A string code, which should be either 'data' for data coordinates, or 'figure' for figure (pixel) coordinates. style : dictionary a dictionary specifying the appearance of the image mplobj : matplotlib object the matplotlib plot object which generated this image """ raise NotImplementedError()
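# Editor's note: illustrative subclass sketch (not part of the original
# module).  Because draw_line() and draw_markers() above fall back to
# draw_path(), a renderer can get useful behaviour by overriding only a few
# primitives.  The class and attribute names below are invented; a real
# renderer would translate these calls into its output format (and would
# typically be driven through mplexporter's Exporter).

class _RecordingRenderer(Renderer):
    """Collects draw calls in a list instead of rendering them (sketch only)."""

    def __init__(self):
        self.calls = []

    def open_figure(self, fig, props):
        self.calls.append(("figure", props))

    def open_axes(self, ax, props):
        self.calls.append(("axes", props))

    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        # Lines, markers, and patches all arrive here as vertices + SVG codes.
        self.calls.append(("path", len(data), coordinates, style.get("edgecolor")))

    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        self.calls.append(("text", text, text_type))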
PypiClean
/discordbot_bc_editor-0.0.1.tar.gz/discordbot_bc_editor-0.0.1/discordbot_bc_editor/edits/other/meow_medals.py
from enum import Enum import json from typing import Any, Optional from ... import helper, user_input_handler, game_data_getter def get_medal_names(is_jp: bool) -> Optional[list[str]]: """Get all medal names""" file_data = game_data_getter.get_file_latest("resLocal", "medalname.tsv", is_jp) if file_data is None: helper.error_text("Failed to get medal names") return None medal_names = file_data.decode("utf-8").splitlines() names: list[str] = [] for line in medal_names: line_split = line.split("\t") name = ( line_split[0] .rstrip("\n") .replace("&", "and") .replace("★", "") .lstrip(" ") ) names.append(name) return names def set_medals(medal_stats: dict[str, Any], ids: list[int]) -> dict[str, Any]: """Set the medal stats of a set of medals""" for medal_id in ids: if medal_id == 0: continue medal_id -= 1 if medal_id not in medal_stats["medal_data_1"]: if medal_id not in medal_stats["medal_data_2"]: medal_stats["medal_data_1"].append(medal_id) medal_stats["medal_data_2"][medal_id] = 0 return medal_stats def remove_medals(medal_stats: dict[str, Any], ids: list[int]) -> dict[str, Any]: """Remove the medal stats of a set of medals""" for medal_id in ids: if medal_id == 0: continue medal_id -= 1 if medal_id in medal_stats["medal_data_1"]: medal_stats["medal_data_1"].remove(medal_id) if medal_id in medal_stats["medal_data_2"]: medal_stats["medal_data_2"].pop(medal_id) return medal_stats class BaseMapIds(Enum): """Base map IDs""" STORY_CHAPTERS = 3000 OUTBREAKS_EOC = 20000 OUTBREAKS_ITF = 21000 OUTBREAKS_COTC = 22000 FILIBUSTER = 23000 LEGEND_STAGES = 0 EVENT_STAGES = 1000 TOWER_STAGES = 7000 LEGEND_QUEST = 16000 IDI_RE = 4026 AKU_REALM = 4042 GAUNTLETS = 24000 class ActionTypes(Enum): """Action types""" EARN_CENT = 0 GAMATOTO_EXPLORE = 1 CAT_BASE_WEAPONS = 2 USER_RANK = 3 RECRUIT_GAMATOTO_ASSISTANT = 4 class Medal: """Medal""" def __init__(self, medal_id: int, grade: int, line: int): self.medal_id = medal_id self.grade = grade self.line = line class StageMedal(Medal): """Stage medal""" def __init__( self, medal_id: int, grade: int, line: int, maps: Optional[list[int]], condition: Optional[dict[str, Any]] = None, star: Optional[int] = None, ): super().__init__(medal_id, grade, line) self.maps = maps self.condition = condition self.star = star class TreasureMedal(StageMedal): """Treasure medal""" def __init__( self, medal_id: int, grade: int, line: int, maps: Optional[list[int]], treasure: int, condition: Optional[dict[str, Any]] = None, ): super().__init__(medal_id, grade, line, maps, condition) self.treasure = treasure class ActionMedal(Medal): """Action medal""" def __init__(self, medal_id: int, grade: int, line: int, action: ActionTypes): super().__init__(medal_id, grade, line) self.action = action class CharacterMedal(StageMedal): """Character medal""" def __init__( self, medal_id: int, grade: int, line: int, maps: Optional[list[int]], chara: int, condition: Optional[dict[str, Any]] = None, ): super().__init__(medal_id, grade, line, maps, condition) self.chara = chara class Medals: """Medals""" def __init__( self, treasures: list[TreasureMedal], characters: list[CharacterMedal], actions: list[ActionMedal], stages: list[StageMedal], ): self.treasures = treasures self.characters = characters self.actions = actions self.stages = stages def get_medal_data(is_jp: bool) -> Optional[Medals]: """Get the medal data""" file_data = game_data_getter.get_file_latest("DataLocal", "medallist.json", is_jp) if file_data is None: helper.error_text("Failed to get medal data") return None medal_data = 
json.loads(file_data.decode("utf-8"))["iconID"] treasures: list[TreasureMedal] = [] characters: list[CharacterMedal] = [] actions: list[ActionMedal] = [] stages: list[StageMedal] = [] for i, medal in enumerate(medal_data): if "condition" not in medal: medal["condition"] = None if "treasure" in medal: treasures.append( TreasureMedal( i, medal["grade"], medal["line"], medal["map"], medal["treasure"], medal["condition"], ) ) elif "chara" in medal: characters.append( CharacterMedal( i, medal["grade"], medal["line"], None, medal["chara"], medal["condition"], ) ) elif "action" in medal: actions.append( ActionMedal( i, medal["grade"], medal["line"], ActionTypes(medal["action"]), ) ) else: if "star" not in medal: medal["star"] = None stages.append( StageMedal( i, medal["grade"], medal["line"], medal["map"], medal["condition"], medal["star"], ) ) return Medals(treasures, characters, actions, stages) def medals(save_stats: dict[str, Any]) -> dict[str, Any]: """Handler for editting meow medals""" medal_stats = save_stats["medals"] remove = ( user_input_handler.colored_input( "Do you want to add or remove medals? (&a&/&r&):" ) == "r" ) names = get_medal_names(helper.check_data_is_jp(save_stats)) if names is None: return save_stats helper.colored_list(names) ids = user_input_handler.get_range( user_input_handler.colored_input( "Enter medal ids (You can enter all to get &all&, a range e.g &1&-&50&, or ids separate by spaces e.g &5 4 7&):" ), len(names) + 1, ) if remove: medal_stats = remove_medals(medal_stats, ids) else: medal_stats = set_medals(medal_stats, ids) save_stats["medals"] = medal_stats print(f"Successfully {'gave' if not remove else 'removed'} medals") return save_stats
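# Editor's note: bookkeeping sketch (not part of the original module).
# "medal_data_1" stores the 0-based ids of owned medals and "medal_data_2"
# maps each id added here to 0; user-facing ids are 1-based, so set_medals()
# subtracts 1 and skips id 0.  With an invented empty save:
#
#     medal_stats = {"medal_data_1": [], "medal_data_2": {}}
#     set_medals(medal_stats, [1, 3])
#     # -> {"medal_data_1": [0, 2], "medal_data_2": {0: 0, 2: 0}}
#     remove_medals(medal_stats, [3])
#     # -> {"medal_data_1": [0], "medal_data_2": {0: 0}}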
PypiClean
/aws_service_catalog_puppet-0.242.0.tar.gz/aws_service_catalog_puppet-0.242.0/servicecatalog_puppet/workflow/portfolio/portfolio_management/copy_into_spoke_local_portfolio_task.py
import time import luigi from servicecatalog_puppet import constants from servicecatalog_puppet.workflow.dependencies import tasks class CopyIntoSpokeLocalPortfolioTask(tasks.TaskWithReference): account_id = luigi.Parameter() region = luigi.Parameter() portfolio_task_reference = luigi.Parameter() portfolio_get_all_products_and_their_versions_ref = luigi.Parameter() portfolio_get_all_products_and_their_versions_for_hub_ref = luigi.Parameter() cachable_level = constants.CACHE_LEVEL_RUN def params_for_results_display(self): return { "task_reference": self.task_reference, "puppet_account_id": self.puppet_account_id, "region": self.region, "account_id": self.account_id, } def run(self): spoke_portfolio_details = self.get_output_from_reference_dependency( self.portfolio_task_reference ) spoke_portfolio_id = spoke_portfolio_details.get("Id") spoke_products_and_their_versions = self.get_output_from_reference_dependency( self.portfolio_get_all_products_and_their_versions_ref ) hub_products_and_their_versions = self.get_output_from_reference_dependency( self.portfolio_get_all_products_and_their_versions_for_hub_ref ) copy_product_tokens = list() versions_requiring_updates = dict() products_requiring_adding_to_portfolio = dict() with self.spoke_regional_client("servicecatalog") as servicecatalog: for ( hub_product_name, hub_product_details, ) in hub_products_and_their_versions.items(): versions_to_copy = list() args_to_use = dict( SourceProductArn=hub_product_details.get("ProductArn"), SourceProvisioningArtifactIdentifiers=versions_to_copy, CopyOptions=["CopyTags",], ) hub_versions_details = hub_product_details.get("Versions", {}) if spoke_products_and_their_versions.get(hub_product_name): args_to_use[ "TargetProductId" ] = spoke_products_and_their_versions.get(hub_product_name).get( "ProductId" ) else: products_requiring_adding_to_portfolio[hub_product_name] = True spoke_product_details = spoke_products_and_their_versions.get( hub_product_name, {} ) spoke_versions_details = spoke_product_details.get("Versions", {}) version_names_to_ignore = ["-"] + list(spoke_versions_details.keys()) for ( hub_version_name, hub_version_details, ) in hub_versions_details.items(): if hub_version_name not in version_names_to_ignore: versions_to_copy.append(dict(Id=hub_version_details.get("Id"),)) else: if hub_version_name == "-": continue spoke_product_id = spoke_product_details["ProductId"] if not versions_requiring_updates.get(spoke_product_id): versions_requiring_updates[spoke_product_id] = dict() spoke_version_id = spoke_versions_details[hub_version_name][ "Id" ] versions_requiring_updates[spoke_product_id][ spoke_version_id ] = dict( Active=hub_version_details.get("Active"), Guidance=hub_version_details.get("Guidance"), Description=hub_version_details.get("Description"), ) if len(versions_to_copy) > 0: copy_product_tokens.append( ( hub_product_name, servicecatalog.copy_product(**args_to_use).get( "CopyProductToken" ), ) ) self.info("Finished copying products") while len(copy_product_tokens) > 0: first_item_in_list = copy_product_tokens[0] product_name, copy_product_token_to_check = first_item_in_list response = servicecatalog.describe_copy_product_status( CopyProductToken=copy_product_token_to_check ) copy_product_status = response.get("CopyProductStatus") if copy_product_status == "SUCCEEDED": if products_requiring_adding_to_portfolio.get(product_name): products_requiring_adding_to_portfolio[ product_name ] = response.get("TargetProductId") copy_product_tokens.remove(first_item_in_list) elif copy_product_status 
== "FAILED": raise Exception(f"Failed to copy product {copy_product_status}") elif copy_product_status == "IN_PROGRESS": time.sleep(1) else: raise Exception(f"Not handled copy product status {response}") self.info("Finished waiting for copy products") for product_name, product_id in products_requiring_adding_to_portfolio.items(): servicecatalog.associate_product_with_portfolio( ProductId=product_id, PortfolioId=spoke_portfolio_id, ) self.info("Finished associating products") for product_id, product_details in versions_requiring_updates.items(): for version_id, version_details in product_details.items(): servicecatalog.update_provisioning_artifact( ProductId=product_id, ProvisioningArtifactId=version_id, **version_details, ) self.info("Finished updating versions that were copied") products_to_check = list(products_requiring_adding_to_portfolio.values()) n_products_to_check = len(products_to_check) products_found = 0 while products_found < n_products_to_check: response = servicecatalog.search_products_as_admin_single_page( # TODO optimise = swap for paginator PortfolioId=spoke_portfolio_id, ) products_ids = [ product_view_detail.get("ProductViewSummary").get("ProductId") for product_view_detail in response.get("ProductViewDetails") ] products_found = 0 for product_to_check in products_to_check: if product_to_check in products_ids: products_found += 1 self.info("Finished waiting for association of products to portfolio") self.write_empty_output()
PypiClean
/Auptimizer-2.0.tar.gz/Auptimizer-2.0/src/aup/Proposer/hpbandster/optimizers/randomsearch.py
import os
import time
import math
import copy
import logging

import numpy as np

import ConfigSpace as CS

from hpbandster.core.master import Master
from hpbandster.optimizers.iterations import SuccessiveHalving
from hpbandster.optimizers.config_generators.random_sampling import RandomSampling as RS


class RandomSearch(Master):
    def __init__(self, configspace=None, eta=3, min_budget=1, max_budget=1, **kwargs):
        """
        Implements a random search across the search space for comparison.
        Candidates are sampled at random and run on the maximum budget.

        Parameters
        ----------
        configspace: ConfigSpace object
            valid representation of the search space
        eta : float
            In each iteration, a complete run of sequential halving is executed. In it,
            after evaluating each configuration on the same subset size, only a fraction
            of 1/eta of them 'advances' to the next round. Must be greater than or equal to 2.
        min_budget : float
            lower bound of the budget range (random search itself only evaluates on max_budget)
        max_budget : float
            budget on which every sampled configuration is evaluated
        """

        # TODO: Proper check for ConfigSpace object!
        if configspace is None:
            raise ValueError("You have to provide a valid ConfigSpace object")

        cg = RS(configspace=configspace)

        super().__init__(config_generator=cg, **kwargs)

        # Hyperband related stuff
        self.eta = eta
        self.min_budget = max_budget
        self.max_budget = max_budget

        # precompute some HB stuff
        self.max_SH_iter = -int(np.log(min_budget / max_budget) / np.log(eta)) + 1
        self.budgets = max_budget * np.power(eta, -np.linspace(self.max_SH_iter - 1, 0, self.max_SH_iter))

        # max total budget for one iteration
        self.budget_per_iteration = sum([b * self.eta ** i for i, b in enumerate(self.budgets[::-1])])

        self.config.update({
            'eta': eta,
            'min_budget': max_budget,
            'max_budget': max_budget,
        })

    def get_next_iteration(self, iteration, iteration_kwargs={}):
        """
        Returns a SH iteration with only evaluations on the biggest budget

        Parameters
        ----------
        iteration: int
            the index of the iteration to be instantiated

        Returns
        -------
        SuccessiveHalving: the SuccessiveHalving iteration with the
            corresponding number of configurations
        """
        budgets = [self.max_budget]
        ns = [self.budget_per_iteration // self.max_budget]

        return SuccessiveHalving(HPB_iter=iteration, num_configs=ns, budgets=budgets,
                                 config_sampler=self.config_generator.get_config,
                                 **iteration_kwargs)
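

def _example_random_search():
    # Illustrative sketch (not part of the original module): the minimal wiring
    # needed to use RandomSearch, following the usual hpbandster pattern. A
    # Worker subclass implementing compute() must be started separately (not
    # shown here) before run() will make any progress; names and values below
    # are placeholders.
    import ConfigSpace as CS
    import hpbandster.core.nameserver as hpns

    config_space = CS.ConfigurationSpace()
    config_space.add_hyperparameter(CS.UniformFloatHyperparameter('x', lower=0.0, upper=1.0))

    name_server = hpns.NameServer(run_id='rs_example', host='127.0.0.1', port=None)
    name_server.start()
    try:
        rs = RandomSearch(configspace=config_space, run_id='rs_example',
                          nameserver='127.0.0.1', min_budget=1, max_budget=1)
        result = rs.run(n_iterations=2)  # blocks until the registered workers finish
        rs.shutdown(shutdown_workers=True)
    finally:
        name_server.shutdown()
    return result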
PypiClean
/ory_client-1.1.51-py3-none-any.whl/ory_client/model/successful_native_login.py
import re # noqa: F401 import sys # noqa: F401 from ory_client.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from ory_client.exceptions import ApiAttributeError def lazy_import(): from ory_client.model.session import Session globals()['Session'] = Session class SuccessfulNativeLogin(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ lazy_import() return { 'session': (Session,), # noqa: E501 'session_token': (str,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'session': 'session', # noqa: E501 'session_token': 'session_token', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, session, *args, **kwargs): # noqa: E501 """SuccessfulNativeLogin - a model defined in OpenAPI Args: session (Session): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. 
For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) session_token (str): The Session Token A session token is equivalent to a session cookie, but it can be sent in the HTTP Authorization Header: Authorization: bearer ${session-token} The session token is only issued for API flows, not for Browser flows!. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', True) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.session = session for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, session, *args, **kwargs): # noqa: E501 """SuccessfulNativeLogin - a model defined in OpenAPI Args: session (Session): Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. 
Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) session_token (str): The Session Token A session token is equivalent to a session cookie, but it can be sent in the HTTP Authorization Header: Authorization: bearer ${session-token} The session token is only issued for API flows, not for Browser flows!. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: for arg in args: if isinstance(arg, dict): kwargs.update(arg) else: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) self.session = session for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
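

def _example_use_session_token(successful_native_login):
    # Illustrative sketch (not part of the original package): as the docstring
    # above notes, the session_token returned by a native login flow can be sent
    # as a bearer token. Assumes the `requests` package is available; the base
    # URL below is a placeholder for your Ory project URL.
    import requests

    token = successful_native_login.session_token
    return requests.get(
        "https://<your-project>.projects.oryapis.com/sessions/whoami",  # placeholder
        headers={"Authorization": f"Bearer {token}"},
        timeout=10,
    )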
PypiClean
/azure-cli-2.51.0.tar.gz/azure-cli-2.51.0/azure/cli/command_modules/keyvault/vendored_sdks/azure_keyvault_t1/http_bearer_challenge.py
try:
    import urllib.parse as parse
except ImportError:
    import urlparse as parse  # pylint: disable=import-error


class HttpBearerChallenge(object):

    def __init__(self, request_uri, challenge):
        """ Parses an HTTP WWW-Authenticate Bearer challenge from a server. """
        self.source_authority = self._validate_request_uri(request_uri)
        self.source_uri = request_uri
        self._parameters = {}

        trimmed_challenge = self._validate_challenge(challenge)

        # split trimmed challenge into comma-separated name=value pairs. Values are expected
        # to be surrounded by quotes which are stripped here.
        for item in trimmed_challenge.split(','):
            # process name=value pairs
            comps = item.split('=')
            if len(comps) == 2:
                key = comps[0].strip(' "')
                value = comps[1].strip(' "')
                if key:
                    self._parameters[key] = value

        # minimum set of parameters
        if not self._parameters:
            raise ValueError('Invalid challenge parameters')

        # must specify authorization or authorization_uri
        if 'authorization' not in self._parameters and 'authorization_uri' not in self._parameters:
            raise ValueError('Invalid challenge parameters')

    # pylint: disable=no-self-use
    @staticmethod
    def is_bearer_challenge(authentication_header):
        """ Tests whether an authentication header is a Bearer challenge.

        :param authentication_header: the authentication header to test
        :rtype: bool
        """
        if not authentication_header:
            return False

        return authentication_header.strip().startswith('Bearer ')

    def get_value(self, key):
        return self._parameters.get(key)

    def get_authorization_server(self):
        """ Returns the URI for the authorization server if present, otherwise empty string. """
        value = ''
        for key in ['authorization_uri', 'authorization']:
            value = self.get_value(key) or ''
            if value:
                break
        return value

    def get_resource(self):
        """ Returns the resource if present, otherwise empty string. """
        return self.get_value('resource') or ''

    def get_scope(self):
        """ Returns the scope if present, otherwise empty string. """
        return self.get_value('scope') or ''

    # pylint: disable=no-self-use
    def _validate_challenge(self, challenge):
        """ Verifies that the challenge is a Bearer challenge and returns the key=value pairs. """
        bearer_string = 'Bearer '
        if not challenge:
            raise ValueError('Challenge cannot be empty')
        challenge = challenge.strip()
        if not challenge.startswith(bearer_string):
            raise ValueError('Challenge is not Bearer')

        return challenge[len(bearer_string):]

    # pylint: disable=no-self-use
    def _validate_request_uri(self, uri):
        """ Extracts the host authority from the given URI. """
        if not uri:
            raise ValueError('request_uri cannot be empty')
        uri = parse.urlparse(uri)
        if not uri.netloc:
            raise ValueError('request_uri must be an absolute URI')
        if uri.scheme.lower() not in ['http', 'https']:
            raise ValueError('request_uri must be HTTP or HTTPS')

        return uri.netloc
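

def _example_parse_challenge():
    # Illustrative sketch (not part of the original module): parsing a typical
    # Key Vault style WWW-Authenticate header. The vault URL and header value
    # below are made-up placeholders that follow the documented shape.
    header = ('Bearer authorization="https://login.windows.net/mytenant", '
              'resource="https://vault.azure.net"')
    if not HttpBearerChallenge.is_bearer_challenge(header):
        return None
    challenge = HttpBearerChallenge(
        'https://myvault.vault.azure.net/secrets/mysecret', header)
    # -> ('https://login.windows.net/mytenant', 'https://vault.azure.net')
    return challenge.get_authorization_server(), challenge.get_resource()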
PypiClean
/uhfReaderApi-0.0.9.tar.gz/uhfReaderApi-0.0.9/uhf/reader/protocol/base_writeEpc.py
from uhf.reader.protocol import *
from uhf.reader.utils import *


class MsgBaseWriteEpc(Message):

    def __init__(self, antennaEnable: int, area: int, start: int, hexWriteData: str, **kwargs):
        super().__init__()
        self.mt_8_11 = EnumG.Msg_Type_Bit_Base.value
        self.msgId = EnumG.BaseMid_WriteEpc.value
        self.antennaEnable = antennaEnable
        self.area = area
        self.start = start
        self.hexWriteData = hexWriteData
        self.filter = kwargs.get("filter", None)  # type:ParamEpcFilter
        self.hexPassword = kwargs.get("hexPassword", None)
        self.block = kwargs.get("block", None)
        self.errorIndex = None

    def bytesToClass(self):
        pass

    def pack(self):
        buffer = DynamicBuffer()
        buffer.putLong(self.antennaEnable)
        buffer.putInt(self.area)
        buffer.putShort(self.start)
        if self.hexWriteData:
            to_bytes = hexToBytes(self.hexWriteData)
            buffer.putShort(len(to_bytes))
            buffer.putBytes(to_bytes)
        if self.filter is not None:
            buffer.putInt(0x01)
            filter_bytes = self.filter.toBytes()
            buffer.putShort(len(filter_bytes))
            buffer.putBytes(filter_bytes)
        if self.hexPassword is not None:
            buffer.putInt(0x02)
            buffer.putBytes(hexToBytes(self.hexPassword))
        if self.block is not None:
            buffer.putInt(0x03)
            buffer.putInt(self.block)
        self.cData = buffer.tobytes()
        self.dataLen = buffer.len / 8

    def unPack(self):
        if self.cData:
            dirMsg = {0: "Success", 1: "Port parameter error.", 2: "Filter parameter error.",
                      3: "Write parameter error.", 4: "CRC check error.", 5: "Underpower error.",
                      6: "Data area overflow.", 7: "Data area is locked.", 8: "Access password error.",
                      9: "Other error.", 10: "Label is missing.", 11: "Command error."}
            self.rtCode = self.cData[0]
            if self.rtCode in dirMsg:
                self.rtMsg = dirMsg.get(self.rtCode, None)
            if len(self.cData) > 1:
                errBuffer = DynamicBuffer("0x" + bytesToHex(self.cData))
                errBuffer.pos = 8
                if errBuffer.readInt() == 1:
                    self.errorIndex = errBuffer.readShort()
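

def _example_build_write_epc():
    # Illustrative sketch (not part of the original module): build and serialize
    # a write-EPC request. The antenna mask, bank, start word, data words and
    # access password below are placeholder values, not ones mandated by the
    # reader protocol.
    msg = MsgBaseWriteEpc(1, 1, 2, "11112222", hexPassword="00000000")
    msg.pack()  # fills msg.cData with the serialized payload
    return msg.cData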
PypiClean
/comuneimola.compensi-1.3.zip/comuneimola.compensi-1.3/comuneimola/compensi/content/atareacompensi.py
from zope.interface import implements

from Products.Archetypes import atapi
from Products.ATContentTypes.content import folder
from Products.ATContentTypes.content import schemata

from comuneimola.compensi.interfaces.atareacompensi import IATAreaCompensi
from comuneimola.compensi.config import PROJECTNAME
from comuneimola.compensi import compensiMessageFactory as _

ATAreaCompensiSchema = folder.ATFolderSchema.copy() + atapi.Schema((

    atapi.LinesField(
        name='elenco_uffici',
        widget=atapi.LinesWidget(
            label=_(u"office_list", default=u"Office List"),
            description=_(u"office_list_description",
                          default=u"List here offices for current area"),
        ),
        required=False,
    ),

    atapi.LinesField(
        name='modalita_affidamento',
        widget=atapi.LinesWidget(
            label=_(u"award_procedures_label", default=u"Procedures for the award"),
            description=_(u"relied_modality_description",
                          default=u"List here the procedures for the award"),
        ),
        required=False,
    ),

    atapi.LinesField(
        name='natura_importo',
        widget=atapi.LinesWidget(
            label=_(u"type_of_amount_label", default=u"Type of amount"),
            description=_(u"type_of_amount_description",
                          default=u"List here the types of amount"),
        ),
        required=False,
    ),

    atapi.LinesField(
        name='norma_o_titolo',
        widget=atapi.LinesWidget(
            label=_(u"type_of_norm", default=u"Type of norm or title"),
            description=_(u"type_of_norm_description",
                          default=u"List here the norm or title"),
        ),
        required=False,
    ),

))

ATAreaCompensiSchema['title'].storage = atapi.AnnotationStorage()
ATAreaCompensiSchema['description'].storage = atapi.AnnotationStorage()

schemata.finalizeATCTSchema(
    ATAreaCompensiSchema,
    folderish=True,
    moveDiscussion=False
)


class ATAreaCompensi(folder.ATFolder):
    """Area Compensi"""
    implements(IATAreaCompensi)

    portal_type = "ATAreaCompensi"
    meta_type = "ATAreaCompensi"
    schema = ATAreaCompensiSchema

    title = atapi.ATFieldProperty('title')
    description = atapi.ATFieldProperty('description')


atapi.registerType(ATAreaCompensi, PROJECTNAME)
PypiClean
/FlowTutor-0.9.0.tar.gz/FlowTutor-0.9.0/src/flowtutor/flowchart/declarations.py
from __future__ import annotations

import math
from typing import TYPE_CHECKING, Any, Optional

import dearpygui.dearpygui as dpg

from flowtutor.flowchart.node import FLOWCHART_TAG, Node

if TYPE_CHECKING:
    from flowtutor.flowchart.flowchart import Flowchart


class Declarations(Node):

    def __init__(self) -> None:
        super().__init__()
        self._declarations = [
            self.new_declaration()
        ]

    @property
    def shape_width(self) -> int:
        return 150

    @property
    def shape_height(self) -> int:
        _, height = dpg.get_text_size(self.label)
        return 57 + int(math.floor(height))

    @property
    def raw_in_points(self) -> list[tuple[float, float]]:
        return [(75, 0)]

    @property
    def raw_out_points(self) -> list[tuple[float, float]]:
        return [(75, self.shape_height)]

    @property
    def color(self) -> tuple[int, int, int]:
        return (255, 255, 170) if self.is_initialized else (255, 0, 0)

    @property
    def shape_points(self) -> list[tuple[float, float]]:
        return [
            (0, 0),
            (150, 0),
            (150, self.shape_height),
            (0, self.shape_height),
            (0, 0)
        ]

    @property
    def declarations(self) -> list[dict[str, Any]]:
        return self._declarations

    @declarations.setter
    def declarations(self, declarations: list[dict[str, Any]]) -> None:
        self._declarations = declarations

    @property
    def label(self) -> str:
        if all(map(lambda d: d['var_name'], self.declarations)):
            return '\n'.join(map(lambda d: ''.join([
                'static ' if d['is_static'] else '',
                d['var_type'],
                ' ',
                '*' if d['is_pointer'] else '',
                d['var_name'],
                f'[{d["array_size"]}]' if d['is_array'] else '',
                f' = {d["var_value"]}' if len(d['var_value']) > 0 else '',
                ';']), self.declarations))
        else:
            return self.__class__.__name__

    def draw(self,
             flowchart: Flowchart,
             mouse_pos: Optional[tuple[int, int]],
             is_selected: bool = False) -> None:  # pragma: no cover
        super().draw(flowchart, mouse_pos, is_selected)
        pos_x, pos_y = self.pos
        tag = self.tag+'$'
        if dpg.does_item_exist(tag):
            return
        # Draw extra lines for the declaration node
        with dpg.draw_node(
                tag=tag,
                parent=FLOWCHART_TAG):
            text_color = (0, 0, 0)
            dpg.draw_line(
                (pos_x + 10 + self.get_left_x(), pos_y),
                (pos_x + 10 + self.get_left_x(), pos_y + self.shape_height),
                color=text_color,
                thickness=1)
            dpg.draw_line(
                (pos_x + self.get_left_x(), pos_y + 10),
                (pos_x + self.get_right_x(), pos_y + 10),
                color=text_color,
                thickness=1)

    def delete(self) -> None:  # pragma: no cover
        super().delete()
        tag = self.tag+'$'
        if dpg.does_item_exist(tag):
            dpg.delete_item(tag)

    @property
    def is_initialized(self) -> bool:
        for d in self.declarations:
            if d['is_array'] and len(d['array_size']) <= 0:
                return self.is_comment
            elif len(d['var_name']) <= 0:
                return self.is_comment
        return True

    def new_declaration(self) -> dict[str, Any]:
        return {
            'var_name': '',
            'var_type': 'int',
            'var_value': '',
            'array_size': '',
            'is_array': False,
            'is_pointer': False,
            'is_static': False
        }

    def add_declaration(self) -> None:
        self.declarations.append(self.new_declaration())

    def delete_declaration(self, index: int) -> None:
        del self.declarations[index]
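

def _example_declaration_dicts() -> list[dict[str, Any]]:
    # Illustrative sketch (not part of the original module): the dictionary shape
    # consumed by the declarations setter above. With these two entries the label
    # property renders "int count = 0;" and "static char buffer[16];" on separate
    # lines. The variable names and values are placeholders.
    return [
        {'var_name': 'count', 'var_type': 'int', 'var_value': '0',
         'array_size': '', 'is_array': False, 'is_pointer': False, 'is_static': False},
        {'var_name': 'buffer', 'var_type': 'char', 'var_value': '',
         'array_size': '16', 'is_array': True, 'is_pointer': False, 'is_static': True},
    ]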
PypiClean
/fds.sdk.StocksAPIforDigitalPortals-0.10.12-py3-none-any.whl/fds/sdk/StocksAPIforDigitalPortals/model/inline_response2001_data_balance_sheet_total_deposits.py
import re # noqa: F401 import sys # noqa: F401 from fds.sdk.StocksAPIforDigitalPortals.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, OpenApiModel ) from fds.sdk.StocksAPIforDigitalPortals.exceptions import ApiAttributeError class InlineResponse2001DataBalanceSheetTotalDeposits(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. """ return { 'value': (float, none_type,), # noqa: E501 'growth': (float, none_type,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'value': 'value', # noqa: E501 'growth': 'growth', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 """InlineResponse2001DataBalanceSheetTotalDeposits - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. 
For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) value (float, none_type): Value of the total amount of deposits.. [optional] # noqa: E501 growth (float, none_type): Annual growth rate of the total amount of deposits.. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """InlineResponse2001DataBalanceSheetTotalDeposits - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) value (float, none_type): Value of the total amount of deposits.. [optional] # noqa: E501 growth (float, none_type): Annual growth rate of the total amount of deposits.. 
[optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
PypiClean
/puresec-cli-1.2.1.tar.gz/puresec-cli-1.2.1/puresec_cli/resources/node_modules/typescript/lib/lib.webworker.d.ts
///////////////////////////// /// Worker APIs ///////////////////////////// interface AddEventListenerOptions extends EventListenerOptions { once?: boolean; passive?: boolean; } interface Algorithm { name: string; } interface CacheQueryOptions { cacheName?: string; ignoreMethod?: boolean; ignoreSearch?: boolean; ignoreVary?: boolean; } interface ClientQueryOptions { includeReserved?: boolean; includeUncontrolled?: boolean; type?: ClientTypes; } interface CloseEventInit extends EventInit { code?: number; reason?: string; wasClean?: boolean; } interface ErrorEventInit extends EventInit { colno?: number; error?: any; filename?: string; lineno?: number; message?: string; } interface EventInit { bubbles?: boolean; cancelable?: boolean; scoped?: boolean; } interface EventListenerOptions { capture?: boolean; } interface ExtendableEventInit extends EventInit { } interface ExtendableMessageEventInit extends ExtendableEventInit { data?: any; lastEventId?: string; origin?: string; ports?: MessagePort[] | null; source?: Client | ServiceWorker | MessagePort | null; } interface FetchEventInit extends ExtendableEventInit { clientId?: string; request: Request; reservedClientId?: string; targetClientId?: string; } interface GetNotificationOptions { tag?: string; } interface IDBIndexParameters { multiEntry?: boolean; unique?: boolean; } interface IDBObjectStoreParameters { autoIncrement?: boolean; keyPath?: string | string[]; } interface KeyAlgorithm { name: string; } interface MessageEventInit extends EventInit { channel?: string; data?: any; lastEventId?: string; origin?: string; ports?: MessagePort[]; source?: object | null; } interface NotificationEventInit extends ExtendableEventInit { action?: string; notification: Notification; } interface NotificationOptions { body?: string; data?: any; dir?: NotificationDirection; icon?: string; lang?: string; tag?: string; } interface ObjectURLOptions { oneTimeOnly?: boolean; } interface ProgressEventInit extends EventInit { lengthComputable?: boolean; loaded?: number; total?: number; } interface PushEventInit extends ExtendableEventInit { data?: Int8Array | Int16Array | Int32Array | Uint8Array | Uint16Array | Uint32Array | Uint8ClampedArray | Float32Array | Float64Array | DataView | ArrayBuffer | string | null; } interface PushSubscriptionChangeInit extends ExtendableEventInit { newSubscription?: PushSubscription; oldSubscription?: PushSubscription; } interface PushSubscriptionOptionsInit { applicationServerKey?: Int8Array | Int16Array | Int32Array | Uint8Array | Uint16Array | Uint32Array | Uint8ClampedArray | Float32Array | Float64Array | DataView | ArrayBuffer | string | null; userVisibleOnly?: boolean; } interface RequestInit { body?: Blob | Int8Array | Int16Array | Int32Array | Uint8Array | Uint16Array | Uint32Array | Uint8ClampedArray | Float32Array | Float64Array | DataView | ArrayBuffer | FormData | string | null; cache?: RequestCache; credentials?: RequestCredentials; headers?: HeadersInit; integrity?: string; keepalive?: boolean; method?: string; mode?: RequestMode; redirect?: RequestRedirect; referrer?: string; referrerPolicy?: ReferrerPolicy; signal?: object; window?: any; } interface ResponseInit { headers?: HeadersInit; status?: number; statusText?: string; } interface SyncEventInit extends ExtendableEventInit { lastChance?: boolean; tag: string; } interface EventListener { (evt: Event): void; } interface AbstractWorkerEventMap { "error": ErrorEvent; } interface AbstractWorker { onerror: ((this: AbstractWorker, ev: ErrorEvent) => any) | null; 
addEventListener<K extends keyof AbstractWorkerEventMap>(type: K, listener: (this: AbstractWorker, ev: AbstractWorkerEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof AbstractWorkerEventMap>(type: K, listener: (this: AbstractWorker, ev: AbstractWorkerEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } interface AudioBuffer { readonly duration: number; readonly length: number; readonly numberOfChannels: number; readonly sampleRate: number; copyFromChannel(destination: Float32Array, channelNumber: number, startInChannel?: number): void; copyToChannel(source: Float32Array, channelNumber: number, startInChannel?: number): void; getChannelData(channel: number): Float32Array; } declare var AudioBuffer: { prototype: AudioBuffer; new(): AudioBuffer; }; interface Blob { readonly size: number; readonly type: string; msClose(): void; msDetachStream(): any; slice(start?: number, end?: number, contentType?: string): Blob; } declare var Blob: { prototype: Blob; new (blobParts?: any[], options?: BlobPropertyBag): Blob; }; interface BlobPropertyBag { endings?: string; type?: string; } interface Body { readonly bodyUsed: boolean; arrayBuffer(): Promise<ArrayBuffer>; blob(): Promise<Blob>; formData(): Promise<FormData>; json(): Promise<any>; text(): Promise<string>; } interface Cache { add(request: Request | string): Promise<void>; addAll(requests: (Request | string)[]): Promise<void>; delete(request: Request | string, options?: CacheQueryOptions): Promise<boolean>; keys(request?: Request | string, options?: CacheQueryOptions): Promise<Request[]>; match(request: Request | string, options?: CacheQueryOptions): Promise<Response>; matchAll(request?: Request | string, options?: CacheQueryOptions): Promise<Response[]>; put(request: Request | string, response: Response): Promise<void>; } declare var Cache: { prototype: Cache; new(): Cache; }; interface CacheStorage { delete(cacheName: string): Promise<boolean>; has(cacheName: string): Promise<boolean>; keys(): Promise<string[]>; match(request: Request | string, options?: CacheQueryOptions): Promise<any>; open(cacheName: string): Promise<Cache>; } declare var CacheStorage: { prototype: CacheStorage; new(): CacheStorage; }; interface Client { readonly id: string; readonly reserved: boolean; readonly type: ClientTypes; readonly url: string; postMessage(message: any, transfer?: any[]): void; } declare var Client: { prototype: Client; new(): Client; }; interface Clients { claim(): Promise<void>; get(id: string): Promise<any>; matchAll(options?: ClientQueryOptions): Promise<Client[]>; openWindow(url: string): Promise<WindowClient | null>; } declare var Clients: { prototype: Clients; new(): Clients; }; interface CloseEvent extends Event { readonly code: number; readonly reason: string; readonly wasClean: boolean; /** @deprecated */ initCloseEvent(typeArg: string, canBubbleArg: boolean, cancelableArg: boolean, wasCleanArg: boolean, codeArg: number, reasonArg: string): void; } declare var CloseEvent: { prototype: CloseEvent; new(type: string, eventInitDict?: CloseEventInit): CloseEvent; }; interface Console { memory: any; assert(condition?: boolean, message?: string, ...data: any[]): void; clear(): void; count(label?: string): void; 
debug(message?: any, ...optionalParams: any[]): void; dir(value?: any, ...optionalParams: any[]): void; dirxml(value: any): void; error(message?: any, ...optionalParams: any[]): void; exception(message?: string, ...optionalParams: any[]): void; group(groupTitle?: string, ...optionalParams: any[]): void; groupCollapsed(groupTitle?: string, ...optionalParams: any[]): void; groupEnd(): void; info(message?: any, ...optionalParams: any[]): void; log(message?: any, ...optionalParams: any[]): void; markTimeline(label?: string): void; msIsIndependentlyComposed(element: object): boolean; profile(reportName?: string): void; profileEnd(): void; select(element: object): void; table(...tabularData: any[]): void; time(label?: string): void; timeEnd(label?: string): void; timeStamp(label?: string): void; timeline(label?: string): void; timelineEnd(label?: string): void; trace(message?: any, ...optionalParams: any[]): void; warn(message?: any, ...optionalParams: any[]): void; } declare var Console: { prototype: Console; new(): Console; }; interface Coordinates { readonly accuracy: number; readonly altitude: number | null; readonly altitudeAccuracy: number | null; readonly heading: number | null; readonly latitude: number; readonly longitude: number; readonly speed: number | null; } declare var Coordinates: { prototype: Coordinates; new(): Coordinates; }; interface CryptoKey { readonly algorithm: KeyAlgorithm; readonly extractable: boolean; readonly type: string; readonly usages: string[]; } declare var CryptoKey: { prototype: CryptoKey; new(): CryptoKey; }; interface DOMError { readonly name: string; toString(): string; } declare var DOMError: { prototype: DOMError; new(): DOMError; }; interface DOMException { readonly code: number; readonly message: string; readonly name: string; toString(): string; readonly ABORT_ERR: number; readonly DATA_CLONE_ERR: number; readonly DOMSTRING_SIZE_ERR: number; readonly HIERARCHY_REQUEST_ERR: number; readonly INDEX_SIZE_ERR: number; readonly INUSE_ATTRIBUTE_ERR: number; readonly INVALID_ACCESS_ERR: number; readonly INVALID_CHARACTER_ERR: number; readonly INVALID_MODIFICATION_ERR: number; readonly INVALID_NODE_TYPE_ERR: number; readonly INVALID_STATE_ERR: number; readonly NAMESPACE_ERR: number; readonly NETWORK_ERR: number; readonly NOT_FOUND_ERR: number; readonly NOT_SUPPORTED_ERR: number; readonly NO_DATA_ALLOWED_ERR: number; readonly NO_MODIFICATION_ALLOWED_ERR: number; readonly PARSE_ERR: number; readonly QUOTA_EXCEEDED_ERR: number; readonly SECURITY_ERR: number; readonly SERIALIZE_ERR: number; readonly SYNTAX_ERR: number; readonly TIMEOUT_ERR: number; readonly TYPE_MISMATCH_ERR: number; readonly URL_MISMATCH_ERR: number; readonly VALIDATION_ERR: number; readonly WRONG_DOCUMENT_ERR: number; } declare var DOMException: { prototype: DOMException; new(message?: string, name?: string): DOMException; readonly ABORT_ERR: number; readonly DATA_CLONE_ERR: number; readonly DOMSTRING_SIZE_ERR: number; readonly HIERARCHY_REQUEST_ERR: number; readonly INDEX_SIZE_ERR: number; readonly INUSE_ATTRIBUTE_ERR: number; readonly INVALID_ACCESS_ERR: number; readonly INVALID_CHARACTER_ERR: number; readonly INVALID_MODIFICATION_ERR: number; readonly INVALID_NODE_TYPE_ERR: number; readonly INVALID_STATE_ERR: number; readonly NAMESPACE_ERR: number; readonly NETWORK_ERR: number; readonly NOT_FOUND_ERR: number; readonly NOT_SUPPORTED_ERR: number; readonly NO_DATA_ALLOWED_ERR: number; readonly NO_MODIFICATION_ALLOWED_ERR: number; readonly PARSE_ERR: number; readonly QUOTA_EXCEEDED_ERR: number; 
readonly SECURITY_ERR: number; readonly SERIALIZE_ERR: number; readonly SYNTAX_ERR: number; readonly TIMEOUT_ERR: number; readonly TYPE_MISMATCH_ERR: number; readonly URL_MISMATCH_ERR: number; readonly VALIDATION_ERR: number; readonly WRONG_DOCUMENT_ERR: number; }; interface DOMStringList { readonly length: number; contains(str: string): boolean; item(index: number): string | null; [index: number]: string; } declare var DOMStringList: { prototype: DOMStringList; new(): DOMStringList; }; interface DedicatedWorkerGlobalScopeEventMap extends WorkerGlobalScopeEventMap { "message": MessageEvent; } interface DedicatedWorkerGlobalScope extends WorkerGlobalScope { onmessage: ((this: DedicatedWorkerGlobalScope, ev: MessageEvent) => any) | null; close(): void; postMessage(message: any, transfer?: any[]): void; addEventListener<K extends keyof DedicatedWorkerGlobalScopeEventMap>(type: K, listener: (this: DedicatedWorkerGlobalScope, ev: DedicatedWorkerGlobalScopeEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof DedicatedWorkerGlobalScopeEventMap>(type: K, listener: (this: DedicatedWorkerGlobalScope, ev: DedicatedWorkerGlobalScopeEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var DedicatedWorkerGlobalScope: { prototype: DedicatedWorkerGlobalScope; new(): DedicatedWorkerGlobalScope; }; interface ErrorEvent extends Event { readonly colno: number; readonly error: any; readonly filename: string; readonly lineno: number; readonly message: string; initErrorEvent(typeArg: string, canBubbleArg: boolean, cancelableArg: boolean, messageArg: string, filenameArg: string, linenoArg: number): void; } declare var ErrorEvent: { prototype: ErrorEvent; new(typeArg: string, eventInitDict?: ErrorEventInit): ErrorEvent; }; interface Event { readonly bubbles: boolean; cancelBubble: boolean; readonly cancelable: boolean; readonly currentTarget: EventTarget | null; readonly defaultPrevented: boolean; readonly eventPhase: number; readonly isTrusted: boolean; returnValue: boolean; readonly scoped: boolean; readonly srcElement: object | null; readonly target: EventTarget | null; readonly timeStamp: number; readonly type: string; deepPath(): EventTarget[]; initEvent(type: string, bubbles?: boolean, cancelable?: boolean): void; preventDefault(): void; stopImmediatePropagation(): void; stopPropagation(): void; readonly AT_TARGET: number; readonly BUBBLING_PHASE: number; readonly CAPTURING_PHASE: number; readonly NONE: number; } declare var Event: { prototype: Event; new(typeArg: string, eventInitDict?: EventInit): Event; readonly AT_TARGET: number; readonly BUBBLING_PHASE: number; readonly CAPTURING_PHASE: number; readonly NONE: number; }; interface EventListenerObject { handleEvent(evt: Event): void; } interface EventTarget { addEventListener(type: string, listener: EventListenerOrEventListenerObject | null, options?: boolean | AddEventListenerOptions): void; dispatchEvent(evt: Event): boolean; removeEventListener(type: string, listener?: EventListenerOrEventListenerObject | null, options?: EventListenerOptions | boolean): void; } declare var EventTarget: { prototype: EventTarget; new(): EventTarget; }; interface ExtendableEvent extends Event { waitUntil(f: Promise<any>): void; } declare 
var ExtendableEvent: { prototype: ExtendableEvent; new(type: string, eventInitDict?: ExtendableEventInit): ExtendableEvent; }; interface ExtendableMessageEvent extends ExtendableEvent { readonly data: any; readonly lastEventId: string; readonly origin: string; readonly ports: ReadonlyArray<MessagePort> | null; readonly source: Client | ServiceWorker | MessagePort | null; } declare var ExtendableMessageEvent: { prototype: ExtendableMessageEvent; new(type: string, eventInitDict?: ExtendableMessageEventInit): ExtendableMessageEvent; }; interface FetchEvent extends ExtendableEvent { readonly clientId: string; readonly request: Request; readonly reservedClientId: string; readonly targetClientId: string; respondWith(r: Promise<Response>): void; } declare var FetchEvent: { prototype: FetchEvent; new(type: string, eventInitDict: FetchEventInit): FetchEvent; }; interface File extends Blob { readonly lastModified: number; /** @deprecated */ readonly lastModifiedDate: Date; readonly name: string; readonly webkitRelativePath: string; } declare var File: { prototype: File; new (parts: (ArrayBuffer | ArrayBufferView | Blob | string)[], filename: string, properties?: FilePropertyBag): File; }; interface FileList { readonly length: number; item(index: number): File | null; [index: number]: File; } declare var FileList: { prototype: FileList; new(): FileList; }; interface FilePropertyBag extends BlobPropertyBag { lastModified?: number; } interface FileReaderEventMap { "abort": ProgressEvent; "error": ProgressEvent; "load": ProgressEvent; "loadend": ProgressEvent; "loadstart": ProgressEvent; "progress": ProgressEvent; } interface FileReader extends EventTarget { readonly error: DOMException | null; onabort: ((this: FileReader, ev: FileReaderProgressEvent) => any) | null; onerror: ((this: FileReader, ev: FileReaderProgressEvent) => any) | null; onload: ((this: FileReader, ev: FileReaderProgressEvent) => any) | null; onloadend: ((this: FileReader, ev: FileReaderProgressEvent) => any) | null; onloadstart: ((this: FileReader, ev: FileReaderProgressEvent) => any) | null; onprogress: ((this: FileReader, ev: FileReaderProgressEvent) => any) | null; readonly readyState: number; readonly result: any; abort(): void; readAsArrayBuffer(blob: Blob): void; readAsBinaryString(blob: Blob): void; readAsDataURL(blob: Blob): void; readAsText(blob: Blob, label?: string): void; readonly DONE: number; readonly EMPTY: number; readonly LOADING: number; addEventListener<K extends keyof FileReaderEventMap>(type: K, listener: (this: FileReader, ev: FileReaderEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof FileReaderEventMap>(type: K, listener: (this: FileReader, ev: FileReaderEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var FileReader: { prototype: FileReader; new(): FileReader; readonly DONE: number; readonly EMPTY: number; readonly LOADING: number; }; interface FileReaderProgressEvent extends ProgressEvent { readonly target: FileReader | null; } interface FileReaderSync { readAsArrayBuffer(blob: Blob): any; readAsBinaryString(blob: Blob): void; readAsDataURL(blob: Blob): string; readAsText(blob: Blob, encoding?: string): string; } declare var FileReaderSync: { prototype: 
FileReaderSync; new(): FileReaderSync; }; interface FormData { append(name: string, value: string | Blob, fileName?: string): void; delete(name: string): void; get(name: string): FormDataEntryValue | null; getAll(name: string): FormDataEntryValue[]; has(name: string): boolean; set(name: string, value: string | Blob, fileName?: string): void; } declare var FormData: { prototype: FormData; new(): FormData; new(form: object): FormData; }; interface GlobalFetch { fetch(input?: Request | string, init?: RequestInit): Promise<Response>; } interface Headers { append(name: string, value: string): void; delete(name: string): void; forEach(callback: Function, thisArg?: any): void; get(name: string): string | null; has(name: string): boolean; set(name: string, value: string): void; } declare var Headers: { prototype: Headers; new(init?: HeadersInit): Headers; }; interface IDBArrayKey extends Array<number | string | Date | IDBArrayKey> { } interface IDBCursor { readonly direction: IDBCursorDirection; readonly key: IDBKeyRange | number | string | Date | IDBArrayKey; readonly primaryKey: any; readonly source: IDBObjectStore | IDBIndex; advance(count: number): void; continue(key?: IDBKeyRange | number | string | Date | IDBArrayKey): void; delete(): IDBRequest; update(value: any): IDBRequest; readonly NEXT: string; readonly NEXT_NO_DUPLICATE: string; readonly PREV: string; readonly PREV_NO_DUPLICATE: string; } declare var IDBCursor: { prototype: IDBCursor; new(): IDBCursor; readonly NEXT: string; readonly NEXT_NO_DUPLICATE: string; readonly PREV: string; readonly PREV_NO_DUPLICATE: string; }; interface IDBCursorWithValue extends IDBCursor { readonly value: any; } declare var IDBCursorWithValue: { prototype: IDBCursorWithValue; new(): IDBCursorWithValue; }; interface IDBDatabaseEventMap { "abort": Event; "error": Event; } interface IDBDatabase extends EventTarget { readonly name: string; readonly objectStoreNames: DOMStringList; onabort: ((this: IDBDatabase, ev: Event) => any) | null; onerror: ((this: IDBDatabase, ev: Event) => any) | null; onversionchange: ((this: IDBDatabase, ev: Event) => any) | null; readonly version: number; close(): void; createObjectStore(name: string, optionalParameters?: IDBObjectStoreParameters): IDBObjectStore; deleteObjectStore(name: string): void; transaction(storeNames: string | string[], mode?: IDBTransactionMode): IDBTransaction; addEventListener<K extends keyof IDBDatabaseEventMap>(type: K, listener: (this: IDBDatabase, ev: IDBDatabaseEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof IDBDatabaseEventMap>(type: K, listener: (this: IDBDatabase, ev: IDBDatabaseEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var IDBDatabase: { prototype: IDBDatabase; new(): IDBDatabase; }; interface IDBFactory { cmp(first: any, second: any): number; deleteDatabase(name: string): IDBOpenDBRequest; open(name: string, version?: number): IDBOpenDBRequest; } declare var IDBFactory: { prototype: IDBFactory; new(): IDBFactory; }; interface IDBIndex { readonly keyPath: string | string[]; multiEntry: boolean; readonly name: string; readonly objectStore: IDBObjectStore; readonly unique: boolean; count(key?: IDBKeyRange | number | string | Date | IDBArrayKey): 
IDBRequest; get(key: IDBKeyRange | number | string | Date | IDBArrayKey): IDBRequest; getKey(key: IDBKeyRange | number | string | Date | IDBArrayKey): IDBRequest; openCursor(range?: IDBKeyRange | number | string | Date | IDBArrayKey, direction?: IDBCursorDirection): IDBRequest; openKeyCursor(range?: IDBKeyRange | number | string | Date | IDBArrayKey, direction?: IDBCursorDirection): IDBRequest; } declare var IDBIndex: { prototype: IDBIndex; new(): IDBIndex; }; interface IDBKeyRange { readonly lower: any; readonly lowerOpen: boolean; readonly upper: any; readonly upperOpen: boolean; } declare var IDBKeyRange: { prototype: IDBKeyRange; new(): IDBKeyRange; bound(lower: any, upper: any, lowerOpen?: boolean, upperOpen?: boolean): IDBKeyRange; lowerBound(lower: any, open?: boolean): IDBKeyRange; only(value: any): IDBKeyRange; upperBound(upper: any, open?: boolean): IDBKeyRange; }; interface IDBObjectStore { autoIncrement: boolean; readonly indexNames: DOMStringList; readonly keyPath: string | string[] | null; readonly name: string; readonly transaction: IDBTransaction; add(value: any, key?: IDBKeyRange | number | string | Date | IDBArrayKey): IDBRequest; clear(): IDBRequest; count(key?: IDBKeyRange | number | string | Date | IDBArrayKey): IDBRequest; createIndex(name: string, keyPath: string | string[], optionalParameters?: IDBIndexParameters): IDBIndex; delete(key: IDBKeyRange | number | string | Date | IDBArrayKey): IDBRequest; deleteIndex(indexName: string): void; get(key: any): IDBRequest; index(name: string): IDBIndex; openCursor(range?: IDBKeyRange | number | string | Date | IDBArrayKey, direction?: IDBCursorDirection): IDBRequest; put(value: any, key?: IDBKeyRange | number | string | Date | IDBArrayKey): IDBRequest; } declare var IDBObjectStore: { prototype: IDBObjectStore; new(): IDBObjectStore; }; interface IDBOpenDBRequestEventMap extends IDBRequestEventMap { "blocked": Event; "upgradeneeded": IDBVersionChangeEvent; } interface IDBOpenDBRequest extends IDBRequest { onblocked: ((this: IDBOpenDBRequest, ev: Event) => any) | null; onupgradeneeded: ((this: IDBOpenDBRequest, ev: IDBVersionChangeEvent) => any) | null; addEventListener<K extends keyof IDBOpenDBRequestEventMap>(type: K, listener: (this: IDBOpenDBRequest, ev: IDBOpenDBRequestEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof IDBOpenDBRequestEventMap>(type: K, listener: (this: IDBOpenDBRequest, ev: IDBOpenDBRequestEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var IDBOpenDBRequest: { prototype: IDBOpenDBRequest; new(): IDBOpenDBRequest; }; interface IDBRequestEventMap { "error": Event; "success": Event; } interface IDBRequest extends EventTarget { readonly error: DOMException; onerror: ((this: IDBRequest, ev: Event) => any) | null; onsuccess: ((this: IDBRequest, ev: Event) => any) | null; readonly readyState: IDBRequestReadyState; readonly result: any; readonly source: IDBObjectStore | IDBIndex | IDBCursor; readonly transaction: IDBTransaction; addEventListener<K extends keyof IDBRequestEventMap>(type: K, listener: (this: IDBRequest, ev: IDBRequestEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: 
EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof IDBRequestEventMap>(type: K, listener: (this: IDBRequest, ev: IDBRequestEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var IDBRequest: { prototype: IDBRequest; new(): IDBRequest; }; interface IDBTransactionEventMap { "abort": Event; "complete": Event; "error": Event; } interface IDBTransaction extends EventTarget { readonly db: IDBDatabase; readonly error: DOMException; readonly mode: IDBTransactionMode; onabort: ((this: IDBTransaction, ev: Event) => any) | null; oncomplete: ((this: IDBTransaction, ev: Event) => any) | null; onerror: ((this: IDBTransaction, ev: Event) => any) | null; abort(): void; objectStore(name: string): IDBObjectStore; readonly READ_ONLY: string; readonly READ_WRITE: string; readonly VERSION_CHANGE: string; addEventListener<K extends keyof IDBTransactionEventMap>(type: K, listener: (this: IDBTransaction, ev: IDBTransactionEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof IDBTransactionEventMap>(type: K, listener: (this: IDBTransaction, ev: IDBTransactionEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var IDBTransaction: { prototype: IDBTransaction; new(): IDBTransaction; readonly READ_ONLY: string; readonly READ_WRITE: string; readonly VERSION_CHANGE: string; }; interface IDBVersionChangeEvent extends Event { readonly newVersion: number | null; readonly oldVersion: number; } declare var IDBVersionChangeEvent: { prototype: IDBVersionChangeEvent; new(): IDBVersionChangeEvent; }; interface ImageBitmap { readonly height: number; readonly width: number; close(): void; } interface ImageBitmapOptions { colorSpaceConversion?: "none" | "default"; imageOrientation?: "none" | "flipY"; premultiplyAlpha?: "none" | "premultiply" | "default"; resizeHeight?: number; resizeQuality?: "pixelated" | "low" | "medium" | "high"; resizeWidth?: number; } interface ImageData { readonly data: Uint8ClampedArray; readonly height: number; readonly width: number; } declare var ImageData: { prototype: ImageData; new(width: number, height: number): ImageData; new(array: Uint8ClampedArray, width: number, height: number): ImageData; }; interface MessageChannel { readonly port1: MessagePort; readonly port2: MessagePort; } declare var MessageChannel: { prototype: MessageChannel; new(): MessageChannel; }; interface MessageEvent extends Event { readonly data: any; readonly origin: string; readonly ports: ReadonlyArray<MessagePort>; readonly source: object | null; initMessageEvent(type: string, bubbles: boolean, cancelable: boolean, data: any, origin: string, lastEventId: string, source: object): void; } declare var MessageEvent: { prototype: MessageEvent; new(type: string, eventInitDict?: MessageEventInit): MessageEvent; }; interface MessagePortEventMap { "message": MessageEvent; } interface MessagePort extends EventTarget { onmessage: ((this: MessagePort, ev: MessageEvent) => any) | null; close(): void; postMessage(message?: any, transfer?: any[]): void; start(): void; 
addEventListener<K extends keyof MessagePortEventMap>(type: K, listener: (this: MessagePort, ev: MessagePortEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof MessagePortEventMap>(type: K, listener: (this: MessagePort, ev: MessagePortEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var MessagePort: { prototype: MessagePort; new(): MessagePort; }; interface NavigatorBeacon { sendBeacon(url: string, data?: Blob | Int8Array | Int16Array | Int32Array | Uint8Array | Uint16Array | Uint32Array | Uint8ClampedArray | Float32Array | Float64Array | DataView | ArrayBuffer | FormData | string | null): boolean; } interface NavigatorConcurrentHardware { readonly hardwareConcurrency: number; } interface NavigatorID { readonly appCodeName: string; readonly appName: string; readonly appVersion: string; readonly platform: string; readonly product: string; readonly productSub: string; readonly userAgent: string; readonly vendor: string; readonly vendorSub: string; } interface NavigatorOnLine { readonly onLine: boolean; } interface NotificationEventMap { "click": Event; "close": Event; "error": Event; "show": Event; } interface Notification extends EventTarget { readonly body: string | null; readonly data: any; readonly dir: NotificationDirection; readonly icon: string | null; readonly lang: string | null; onclick: ((this: Notification, ev: Event) => any) | null; onclose: ((this: Notification, ev: Event) => any) | null; onerror: ((this: Notification, ev: Event) => any) | null; onshow: ((this: Notification, ev: Event) => any) | null; readonly permission: NotificationPermission; readonly tag: string | null; readonly title: string; close(): void; addEventListener<K extends keyof NotificationEventMap>(type: K, listener: (this: Notification, ev: NotificationEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof NotificationEventMap>(type: K, listener: (this: Notification, ev: NotificationEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var Notification: { prototype: Notification; new(title: string, options?: NotificationOptions): Notification; requestPermission(callback?: NotificationPermissionCallback): Promise<NotificationPermission>; }; interface NotificationEvent extends ExtendableEvent { readonly action: string; readonly notification: Notification; } declare var NotificationEvent: { prototype: NotificationEvent; new(type: string, eventInitDict: NotificationEventInit): NotificationEvent; }; interface Performance { /** @deprecated */ readonly navigation: PerformanceNavigation; readonly timeOrigin: number; /** @deprecated */ readonly timing: PerformanceTiming; clearMarks(markName?: string): void; clearMeasures(measureName?: string): void; clearResourceTimings(): void; getEntries(): any; getEntriesByName(name: string, type?: string): any; getEntriesByType(type: string): any; /** @deprecated */ getMarks(markName?: string): any; /** 
@deprecated */ getMeasures(measureName?: string): any; mark(markName: string): void; measure(measureName: string, startMarkName?: string, endMarkName?: string): void; now(): number; setResourceTimingBufferSize(maxSize: number): void; toJSON(): any; } declare var Performance: { prototype: Performance; new(): Performance; }; interface PerformanceNavigation { readonly redirectCount: number; readonly type: number; toJSON(): any; readonly TYPE_BACK_FORWARD: number; readonly TYPE_NAVIGATE: number; readonly TYPE_RELOAD: number; readonly TYPE_RESERVED: number; } declare var PerformanceNavigation: { prototype: PerformanceNavigation; new(): PerformanceNavigation; readonly TYPE_BACK_FORWARD: number; readonly TYPE_NAVIGATE: number; readonly TYPE_RELOAD: number; readonly TYPE_RESERVED: number; }; interface PerformanceTiming { readonly connectEnd: number; readonly connectStart: number; readonly domComplete: number; readonly domContentLoadedEventEnd: number; readonly domContentLoadedEventStart: number; readonly domInteractive: number; readonly domLoading: number; readonly domainLookupEnd: number; readonly domainLookupStart: number; readonly fetchStart: number; readonly loadEventEnd: number; readonly loadEventStart: number; readonly msFirstPaint: number; readonly navigationStart: number; readonly redirectEnd: number; readonly redirectStart: number; readonly requestStart: number; readonly responseEnd: number; readonly responseStart: number; readonly secureConnectionStart: number; readonly unloadEventEnd: number; readonly unloadEventStart: number; toJSON(): any; } declare var PerformanceTiming: { prototype: PerformanceTiming; new(): PerformanceTiming; }; interface Position { readonly coords: Coordinates; readonly timestamp: number; } declare var Position: { prototype: Position; new(): Position; }; interface PositionError { readonly code: number; readonly message: string; toString(): string; readonly PERMISSION_DENIED: number; readonly POSITION_UNAVAILABLE: number; readonly TIMEOUT: number; } declare var PositionError: { prototype: PositionError; new(): PositionError; readonly PERMISSION_DENIED: number; readonly POSITION_UNAVAILABLE: number; readonly TIMEOUT: number; }; interface ProgressEvent extends Event { readonly lengthComputable: boolean; readonly loaded: number; readonly total: number; initProgressEvent(typeArg: string, canBubbleArg: boolean, cancelableArg: boolean, lengthComputableArg: boolean, loadedArg: number, totalArg: number): void; } declare var ProgressEvent: { prototype: ProgressEvent; new(typeArg: string, eventInitDict?: ProgressEventInit): ProgressEvent; }; interface PushEvent extends ExtendableEvent { readonly data: PushMessageData | null; } declare var PushEvent: { prototype: PushEvent; new(type: string, eventInitDict?: PushEventInit): PushEvent; }; interface PushManager { readonly supportedContentEncodings: ReadonlyArray<string>; getSubscription(): Promise<PushSubscription | null>; permissionState(options?: PushSubscriptionOptionsInit): Promise<PushPermissionState>; subscribe(options?: PushSubscriptionOptionsInit): Promise<PushSubscription>; } declare var PushManager: { prototype: PushManager; new(): PushManager; }; interface PushMessageData { arrayBuffer(): ArrayBuffer; blob(): Blob; json(): any; text(): string; } declare var PushMessageData: { prototype: PushMessageData; new(): PushMessageData; }; interface PushSubscription { readonly endpoint: string; readonly expirationTime: number | null; readonly options: PushSubscriptionOptions; getKey(name: PushEncryptionKeyName): ArrayBuffer | 
null; toJSON(): any; unsubscribe(): Promise<boolean>; } declare var PushSubscription: { prototype: PushSubscription; new(): PushSubscription; }; interface PushSubscriptionChangeEvent extends ExtendableEvent { readonly newSubscription: PushSubscription | null; readonly oldSubscription: PushSubscription | null; } declare var PushSubscriptionChangeEvent: { prototype: PushSubscriptionChangeEvent; new(type: string, eventInitDict?: PushSubscriptionChangeInit): PushSubscriptionChangeEvent; }; interface PushSubscriptionOptions { readonly applicationServerKey: ArrayBuffer | null; readonly userVisibleOnly: boolean; } declare var PushSubscriptionOptions: { prototype: PushSubscriptionOptions; new(): PushSubscriptionOptions; }; interface ReadableStream { readonly locked: boolean; cancel(): Promise<void>; getReader(): ReadableStreamReader; } declare var ReadableStream: { prototype: ReadableStream; new(): ReadableStream; }; interface ReadableStreamReader { cancel(): Promise<void>; read(): Promise<any>; releaseLock(): void; } declare var ReadableStreamReader: { prototype: ReadableStreamReader; new(): ReadableStreamReader; }; interface Request extends Body { readonly cache: RequestCache; readonly credentials: RequestCredentials; readonly destination: RequestDestination; readonly headers: Headers; readonly integrity: string; readonly keepalive: boolean; readonly method: string; readonly mode: RequestMode; readonly redirect: RequestRedirect; readonly referrer: string; readonly referrerPolicy: ReferrerPolicy; readonly signal: object | null; readonly type: RequestType; readonly url: string; clone(): Request; } declare var Request: { prototype: Request; new(input: Request | string, init?: RequestInit): Request; }; interface Response extends Body { readonly body: ReadableStream | null; readonly headers: Headers; readonly ok: boolean; readonly redirected: boolean; readonly status: number; readonly statusText: string; readonly type: ResponseType; readonly url: string; clone(): Response; } declare var Response: { prototype: Response; new(body?: Blob | Int8Array | Int16Array | Int32Array | Uint8Array | Uint16Array | Uint32Array | Uint8ClampedArray | Float32Array | Float64Array | DataView | ArrayBuffer | FormData | string | null, init?: ResponseInit): Response; error(): Response; redirect(url: string, status?: number): Response; }; interface ServiceWorkerEventMap extends AbstractWorkerEventMap { "statechange": Event; } interface ServiceWorker extends EventTarget, AbstractWorker { onstatechange: ((this: ServiceWorker, ev: Event) => any) | null; readonly scriptURL: string; readonly state: ServiceWorkerState; postMessage(message: any, transfer?: any[]): void; addEventListener<K extends keyof ServiceWorkerEventMap>(type: K, listener: (this: ServiceWorker, ev: ServiceWorkerEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof ServiceWorkerEventMap>(type: K, listener: (this: ServiceWorker, ev: ServiceWorkerEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var ServiceWorker: { prototype: ServiceWorker; new(): ServiceWorker; }; interface ServiceWorkerGlobalScopeEventMap extends WorkerGlobalScopeEventMap { "activate": ExtendableEvent; "fetch": FetchEvent; "install": ExtendableEvent; 
"message": ExtendableMessageEvent; "messageerror": MessageEvent; "notificationclick": NotificationEvent; "notificationclose": NotificationEvent; "push": PushEvent; "pushsubscriptionchange": PushSubscriptionChangeEvent; "sync": SyncEvent; } interface ServiceWorkerGlobalScope extends WorkerGlobalScope { readonly clients: Clients; onactivate: ((this: ServiceWorkerGlobalScope, ev: ExtendableEvent) => any) | null; onfetch: ((this: ServiceWorkerGlobalScope, ev: FetchEvent) => any) | null; oninstall: ((this: ServiceWorkerGlobalScope, ev: ExtendableEvent) => any) | null; onmessage: ((this: ServiceWorkerGlobalScope, ev: ExtendableMessageEvent) => any) | null; onmessageerror: ((this: ServiceWorkerGlobalScope, ev: MessageEvent) => any) | null; onnotificationclick: ((this: ServiceWorkerGlobalScope, ev: NotificationEvent) => any) | null; onnotificationclose: ((this: ServiceWorkerGlobalScope, ev: NotificationEvent) => any) | null; onpush: ((this: ServiceWorkerGlobalScope, ev: PushEvent) => any) | null; onpushsubscriptionchange: ((this: ServiceWorkerGlobalScope, ev: PushSubscriptionChangeEvent) => any) | null; onsync: ((this: ServiceWorkerGlobalScope, ev: SyncEvent) => any) | null; readonly registration: ServiceWorkerRegistration; skipWaiting(): Promise<void>; addEventListener<K extends keyof ServiceWorkerGlobalScopeEventMap>(type: K, listener: (this: ServiceWorkerGlobalScope, ev: ServiceWorkerGlobalScopeEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof ServiceWorkerGlobalScopeEventMap>(type: K, listener: (this: ServiceWorkerGlobalScope, ev: ServiceWorkerGlobalScopeEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var ServiceWorkerGlobalScope: { prototype: ServiceWorkerGlobalScope; new(): ServiceWorkerGlobalScope; }; interface ServiceWorkerRegistrationEventMap { "updatefound": Event; } interface ServiceWorkerRegistration extends EventTarget { readonly active: ServiceWorker | null; readonly installing: ServiceWorker | null; onupdatefound: ((this: ServiceWorkerRegistration, ev: Event) => any) | null; readonly pushManager: PushManager; readonly scope: string; readonly sync: SyncManager; readonly waiting: ServiceWorker | null; getNotifications(filter?: GetNotificationOptions): Promise<Notification[]>; showNotification(title: string, options?: NotificationOptions): Promise<void>; unregister(): Promise<boolean>; update(): Promise<void>; addEventListener<K extends keyof ServiceWorkerRegistrationEventMap>(type: K, listener: (this: ServiceWorkerRegistration, ev: ServiceWorkerRegistrationEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof ServiceWorkerRegistrationEventMap>(type: K, listener: (this: ServiceWorkerRegistration, ev: ServiceWorkerRegistrationEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var ServiceWorkerRegistration: { prototype: ServiceWorkerRegistration; new(): ServiceWorkerRegistration; }; interface SyncEvent extends 
ExtendableEvent { readonly lastChance: boolean; readonly tag: string; } declare var SyncEvent: { prototype: SyncEvent; new(type: string, init: SyncEventInit): SyncEvent; }; interface SyncManager { getTags(): Promise<string[]>; register(tag: string): Promise<void>; } declare var SyncManager: { prototype: SyncManager; new(): SyncManager; }; interface URL { hash: string; host: string; hostname: string; href: string; readonly origin: string; password: string; pathname: string; port: string; protocol: string; search: string; readonly searchParams: URLSearchParams; username: string; toString(): string; } declare var URL: { prototype: URL; new(url: string, base?: string | URL): URL; createObjectURL(object: any, options?: ObjectURLOptions): string; revokeObjectURL(url: string): void; }; interface URLSearchParams { /** * Appends a specified key/value pair as a new search parameter. */ append(name: string, value: string): void; /** * Deletes the given search parameter, and its associated value, from the list of all search parameters. */ delete(name: string): void; /** * Returns the first value associated to the given search parameter. */ get(name: string): string | null; /** * Returns all the values association with a given search parameter. */ getAll(name: string): string[]; /** * Returns a Boolean indicating if such a search parameter exists. */ has(name: string): boolean; /** * Sets the value associated to a given search parameter to the given value. If there were several values, delete the others. */ set(name: string, value: string): void; } declare var URLSearchParams: { prototype: URLSearchParams; new (init?: string | URLSearchParams): URLSearchParams; }; interface WebSocketEventMap { "close": CloseEvent; "error": Event; "message": MessageEvent; "open": Event; } interface WebSocket extends EventTarget { binaryType: BinaryType; readonly bufferedAmount: number; readonly extensions: string; onclose: ((this: WebSocket, ev: CloseEvent) => any) | null; onerror: ((this: WebSocket, ev: Event) => any) | null; onmessage: ((this: WebSocket, ev: MessageEvent) => any) | null; onopen: ((this: WebSocket, ev: Event) => any) | null; readonly protocol: string; readonly readyState: number; readonly url: string; close(code?: number, reason?: string): void; send(data: string | ArrayBufferLike | Blob | ArrayBufferView): void; readonly CLOSED: number; readonly CLOSING: number; readonly CONNECTING: number; readonly OPEN: number; addEventListener<K extends keyof WebSocketEventMap>(type: K, listener: (this: WebSocket, ev: WebSocketEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof WebSocketEventMap>(type: K, listener: (this: WebSocket, ev: WebSocketEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var WebSocket: { prototype: WebSocket; new(url: string, protocols?: string | string[]): WebSocket; readonly CLOSED: number; readonly CLOSING: number; readonly CONNECTING: number; readonly OPEN: number; }; interface WindowBase64 { atob(encodedString: string): string; btoa(rawString: string): string; } interface WindowClient extends Client { readonly ancestorOrigins: ReadonlyArray<string>; readonly focused: boolean; readonly visibilityState: VisibilityState; focus(): 
Promise<WindowClient>; navigate(url: string): Promise<WindowClient>; } declare var WindowClient: { prototype: WindowClient; new(): WindowClient; }; interface WindowConsole { readonly console: Console; } interface WorkerEventMap extends AbstractWorkerEventMap { "message": MessageEvent; } interface Worker extends EventTarget, AbstractWorker { onmessage: ((this: Worker, ev: MessageEvent) => any) | null; /** @deprecated */ postMessage(message: any, transfer?: any[]): void; terminate(): void; addEventListener<K extends keyof WorkerEventMap>(type: K, listener: (this: Worker, ev: WorkerEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof WorkerEventMap>(type: K, listener: (this: Worker, ev: WorkerEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var Worker: { prototype: Worker; new(stringUrl: string): Worker; }; interface WorkerGlobalScopeEventMap { "error": ErrorEvent; } interface WorkerGlobalScope extends EventTarget, WorkerUtils, WindowConsole, GlobalFetch { readonly caches: CacheStorage; readonly isSecureContext: boolean; readonly location: WorkerLocation; onerror: ((this: WorkerGlobalScope, ev: ErrorEvent) => any) | null; readonly performance: Performance; readonly self: WorkerGlobalScope; createImageBitmap(image: ImageBitmap | ImageData | Blob, options?: ImageBitmapOptions): Promise<ImageBitmap>; createImageBitmap(image: ImageBitmap | ImageData | Blob, sx: number, sy: number, sw: number, sh: number, options?: ImageBitmapOptions): Promise<ImageBitmap>; msWriteProfilerMark(profilerMarkName: string): void; addEventListener<K extends keyof WorkerGlobalScopeEventMap>(type: K, listener: (this: WorkerGlobalScope, ev: WorkerGlobalScopeEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof WorkerGlobalScopeEventMap>(type: K, listener: (this: WorkerGlobalScope, ev: WorkerGlobalScopeEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var WorkerGlobalScope: { prototype: WorkerGlobalScope; new(): WorkerGlobalScope; }; interface WorkerLocation { readonly hash: string; readonly host: string; readonly hostname: string; readonly href: string; readonly origin: string; readonly pathname: string; readonly port: string; readonly protocol: string; readonly search: string; toString(): string; } declare var WorkerLocation: { prototype: WorkerLocation; new(): WorkerLocation; }; interface WorkerNavigator extends NavigatorID, NavigatorOnLine, NavigatorBeacon, NavigatorConcurrentHardware { } declare var WorkerNavigator: { prototype: WorkerNavigator; new(): WorkerNavigator; }; interface WorkerUtils extends WindowBase64 { readonly indexedDB: IDBFactory; readonly msIndexedDB: IDBFactory; readonly navigator: WorkerNavigator; clearImmediate(handle: number): void; clearInterval(handle: number): void; clearTimeout(handle: number): void; importScripts(...urls: string[]): void; setImmediate(handler: any, ...args: any[]): number; setInterval(handler: any, 
timeout?: any, ...args: any[]): number; setTimeout(handler: any, timeout?: any, ...args: any[]): number; } interface XMLHttpRequestEventMap extends XMLHttpRequestEventTargetEventMap { "readystatechange": Event; } interface XMLHttpRequest extends EventTarget, XMLHttpRequestEventTarget { msCaching: string; onreadystatechange: ((this: XMLHttpRequest, ev: Event) => any) | null; readonly readyState: number; readonly response: any; readonly responseText: string; responseType: XMLHttpRequestResponseType; readonly responseURL: string; readonly responseXML: object | null; readonly status: number; readonly statusText: string; timeout: number; readonly upload: XMLHttpRequestUpload; withCredentials: boolean; abort(): void; getAllResponseHeaders(): string; getResponseHeader(header: string): string | null; msCachingEnabled(): boolean; open(method: string, url: string, async?: boolean, user?: string | null, password?: string | null): void; overrideMimeType(mime: string): void; send(data?: any): void; setRequestHeader(header: string, value: string): void; readonly DONE: number; readonly HEADERS_RECEIVED: number; readonly LOADING: number; readonly OPENED: number; readonly UNSENT: number; addEventListener<K extends keyof XMLHttpRequestEventMap>(type: K, listener: (this: XMLHttpRequest, ev: XMLHttpRequestEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof XMLHttpRequestEventMap>(type: K, listener: (this: XMLHttpRequest, ev: XMLHttpRequestEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var XMLHttpRequest: { prototype: XMLHttpRequest; new(): XMLHttpRequest; readonly DONE: number; readonly HEADERS_RECEIVED: number; readonly LOADING: number; readonly OPENED: number; readonly UNSENT: number; }; interface XMLHttpRequestEventTargetEventMap { "abort": Event; "error": ErrorEvent; "load": Event; "loadend": ProgressEvent; "loadstart": Event; "progress": ProgressEvent; "timeout": ProgressEvent; } interface XMLHttpRequestEventTarget { onabort: ((this: XMLHttpRequest, ev: Event) => any) | null; onerror: ((this: XMLHttpRequest, ev: ErrorEvent) => any) | null; onload: ((this: XMLHttpRequest, ev: Event) => any) | null; onloadend: ((this: XMLHttpRequest, ev: ProgressEvent) => any) | null; onloadstart: ((this: XMLHttpRequest, ev: Event) => any) | null; onprogress: ((this: XMLHttpRequest, ev: ProgressEvent) => any) | null; ontimeout: ((this: XMLHttpRequest, ev: ProgressEvent) => any) | null; addEventListener<K extends keyof XMLHttpRequestEventTargetEventMap>(type: K, listener: (this: XMLHttpRequestEventTarget, ev: XMLHttpRequestEventTargetEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof XMLHttpRequestEventTargetEventMap>(type: K, listener: (this: XMLHttpRequestEventTarget, ev: XMLHttpRequestEventTargetEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } interface XMLHttpRequestUpload extends EventTarget, XMLHttpRequestEventTarget { addEventListener<K 
extends keyof XMLHttpRequestEventTargetEventMap>(type: K, listener: (this: XMLHttpRequestUpload, ev: XMLHttpRequestEventTargetEventMap[K]) => any, options?: boolean | AddEventListenerOptions): void; addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; removeEventListener<K extends keyof XMLHttpRequestEventTargetEventMap>(type: K, listener: (this: XMLHttpRequestUpload, ev: XMLHttpRequestEventTargetEventMap[K]) => any, options?: boolean | EventListenerOptions): void; removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; } declare var XMLHttpRequestUpload: { prototype: XMLHttpRequestUpload; new(): XMLHttpRequestUpload; }; declare type EventListenerOrEventListenerObject = EventListener | EventListenerObject; interface DecodeErrorCallback { (error: DOMException): void; } interface DecodeSuccessCallback { (decodedData: AudioBuffer): void; } interface ErrorEventHandler { (event: Event | string, source?: string, fileno?: number, columnNumber?: number, error?: Error): void; } interface ForEachCallback { (keyId: Int8Array | Int16Array | Int32Array | Uint8Array | Uint16Array | Uint32Array | Uint8ClampedArray | Float32Array | Float64Array | DataView | ArrayBuffer | null, status: MediaKeyStatus): void; } interface FunctionStringCallback { (data: string): void; } interface NotificationPermissionCallback { (permission: NotificationPermission): void; } interface PositionCallback { (position: Position): void; } interface PositionErrorCallback { (error: PositionError): void; } declare var onmessage: ((this: DedicatedWorkerGlobalScope, ev: MessageEvent) => any) | null; declare function close(): void; declare function postMessage(message: any, transfer?: any[]): void; declare function dispatchEvent(evt: Event): boolean; declare var caches: CacheStorage; declare var isSecureContext: boolean; declare var location: WorkerLocation; declare var onerror: ((this: DedicatedWorkerGlobalScope, ev: ErrorEvent) => any) | null; declare var performance: Performance; declare var self: WorkerGlobalScope; declare function createImageBitmap(image: ImageBitmap | ImageData | Blob, options?: ImageBitmapOptions): Promise<ImageBitmap>; declare function createImageBitmap(image: ImageBitmap | ImageData | Blob, sx: number, sy: number, sw: number, sh: number, options?: ImageBitmapOptions): Promise<ImageBitmap>; declare function msWriteProfilerMark(profilerMarkName: string): void; declare function dispatchEvent(evt: Event): boolean; declare var indexedDB: IDBFactory; declare var msIndexedDB: IDBFactory; declare var navigator: WorkerNavigator; declare function clearImmediate(handle: number): void; declare function clearInterval(handle: number): void; declare function clearTimeout(handle: number): void; declare function importScripts(...urls: string[]): void; declare function setImmediate(handler: any, ...args: any[]): number; declare function setInterval(handler: any, timeout?: any, ...args: any[]): number; declare function setTimeout(handler: any, timeout?: any, ...args: any[]): number; declare function atob(encodedString: string): string; declare function btoa(rawString: string): string; declare var console: Console; declare function fetch(input?: Request | string, init?: RequestInit): Promise<Response>; declare function addEventListener<K extends keyof DedicatedWorkerGlobalScopeEventMap>(type: K, listener: (this: DedicatedWorkerGlobalScope, ev: DedicatedWorkerGlobalScopeEventMap[K]) => 
any, options?: boolean | AddEventListenerOptions): void; declare function addEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | AddEventListenerOptions): void; declare function removeEventListener<K extends keyof DedicatedWorkerGlobalScopeEventMap>(type: K, listener: (this: DedicatedWorkerGlobalScope, ev: DedicatedWorkerGlobalScopeEventMap[K]) => any, options?: boolean | EventListenerOptions): void; declare function removeEventListener(type: string, listener: EventListenerOrEventListenerObject, options?: boolean | EventListenerOptions): void; type FormDataEntryValue = string | File; type HeadersInit = Headers | string[][] | { [key: string]: string }; type AlgorithmIdentifier = string | Algorithm; type AAGUID = string; type BodyInit = any; type ByteString = string; type CryptoOperationData = ArrayBufferView; type GLbitfield = number; type GLboolean = boolean; type GLbyte = number; type GLclampf = number; type GLenum = number; type GLfloat = number; type GLint = number; type GLintptr = number; type GLshort = number; type GLsizei = number; type GLsizeiptr = number; type GLubyte = number; type GLuint = number; type GLushort = number; type IDBKeyPath = string; type RequestInfo = Request | string; type USVString = string; type payloadtype = number; type ClientTypes = "window" | "worker" | "sharedworker" | "all"; type BinaryType = "blob" | "arraybuffer"; type IDBCursorDirection = "next" | "nextunique" | "prev" | "prevunique"; type IDBRequestReadyState = "pending" | "done"; type IDBTransactionMode = "readonly" | "readwrite" | "versionchange"; type KeyFormat = "raw" | "spki" | "pkcs8" | "jwk"; type KeyType = "public" | "private" | "secret"; type KeyUsage = "encrypt" | "decrypt" | "sign" | "verify" | "deriveKey" | "deriveBits" | "wrapKey" | "unwrapKey"; type MediaKeyStatus = "usable" | "expired" | "output-downscaled" | "output-not-allowed" | "status-pending" | "internal-error"; type NotificationDirection = "auto" | "ltr" | "rtl"; type NotificationPermission = "default" | "denied" | "granted"; type PushEncryptionKeyName = "p256dh" | "auth"; type PushPermissionState = "granted" | "denied" | "prompt"; type ReferrerPolicy = "" | "no-referrer" | "no-referrer-when-downgrade" | "origin-only" | "origin-when-cross-origin" | "unsafe-url"; type RequestCache = "default" | "no-store" | "reload" | "no-cache" | "force-cache"; type RequestCredentials = "omit" | "same-origin" | "include"; type RequestDestination = "" | "document" | "sharedworker" | "subresource" | "unknown" | "worker"; type RequestMode = "navigate" | "same-origin" | "no-cors" | "cors"; type RequestRedirect = "follow" | "error" | "manual"; type RequestType = "" | "audio" | "font" | "image" | "script" | "style" | "track" | "video"; type ResponseType = "basic" | "cors" | "default" | "error" | "opaque" | "opaqueredirect"; type ServiceWorkerState = "installing" | "installed" | "activating" | "activated" | "redundant"; type VisibilityState = "hidden" | "visible" | "prerender" | "unloaded"; type XMLHttpRequestResponseType = "" | "arraybuffer" | "blob" | "document" | "json" | "text";
PypiClean
/modules/events.py
import pytz

from metabot.util import adminui
from metabot.util import eventutil
from metabot.util import html
from metabot.util import humanize
from metabot.util import icons

ALIASES = ('calendar', 'event', 'events')


def modhelp(unused_ctx, unused_modconf, sections):  # pylint: disable=missing-docstring
    sections['commands'].add('/events \u2013 Display recent and upcoming events')


def moddispatch(ctx, msg, modconf):  # pylint: disable=missing-docstring
    if ctx.type in ('message', 'callback_query') and ctx.command in ALIASES:
        if ctx.chat['type'] != 'private':
            return group(ctx, msg)
        if ctx.prefix == 'set':
            return settings(ctx, msg, modconf)
        return private(ctx, msg, modconf)

    if ctx.type == 'inline_query' and ctx.prefix.lstrip('/') in ALIASES:
        return inline(ctx, modconf)

    return False


def group(ctx, msg):
    """Handle /events in a group chat."""

    group_id = '%s' % ctx.chat['id']
    groupconf = ctx.bot.config['issue37']['moderator'][group_id]
    calcodes, tzinfo, count, days, unused_hour, unused_dow = eventutil.get_group_conf(groupconf)

    if not calcodes or not tzinfo:
        missing = []
        if not calcodes:
            missing.append('choose one or more calendars')
        if not tzinfo:
            missing.append('set the time zone')
        return msg.add(
            "I'm not configured for this group! Ask a bot admin to go into the <b>moderator</b> "
            'module settings, group <b>%s</b>, and %s.', group_id, humanize.list(missing))

    events, unused_alerts = eventutil.get_group_events(ctx.bot, calcodes, tzinfo, count, days)
    if not events:
        msg.add('No events in the next %s days!', days)
    else:
        url = icons.match(events[0]['summary']) or icons.match(events[0]['description'])
        if url:
            msg.add('photo:' + url)
        msg.add('\n'.join(
            eventutil.format_event(ctx.bot, event, tzinfo, full=False) for event in events))


def private(ctx, msg, modconf):  # pylint: disable=too-many-locals
    """Handle /events in a private chat."""

    eventid, timezone = ctx.split(2)
    if ':' in eventid and timezone:
        suffix = ' ' + timezone
        calcodes = eventid.split(':', 1)[0]
    else:
        suffix = ''
        user_id = '%s' % ctx.user['id']
        userconf = modconf['users'][user_id]
        calcodes = userconf.get('calendars')
        timezone = userconf.get('timezone')

        if not calcodes or not timezone:
            missing = []
            if not calcodes:
                missing.append('choose one or more calendars')
            if not timezone:
                missing.append('set your time zone')
            msg.add('Please %s!', humanize.list(missing))
            return msg.button('Settings', '/events set')

    calendar_view = ctx.bot.multibot.multical.view(calcodes.split())
    tzinfo = pytz.timezone(timezone)

    prevev, event, nextev = calendar_view.get_event(eventid)
    if not event:
        prevev, event, nextev = calendar_view.get_event()
    if not event:
        msg.add('No upcoming events!')
    else:
        msg.add(eventutil.format_event(ctx.bot, event, tzinfo, full=True))

    buttons = [None, ('Settings', '/events set'), None]
    if prevev:
        buttons[0] = ('Prev', '/events %s%s' % (prevev['local_id'], suffix))
    if suffix:
        buttons[1] = ('My Events', '/events')
    elif event and event['local_id'] != calendar_view.current_local_id:
        buttons[1] = ('Current', '/events')
    if nextev:
        buttons[2] = ('Next', '/events %s%s' % (nextev['local_id'], suffix))
    msg.buttons(buttons)


def inline(ctx, modconf):  # pylint: disable=too-many-branches,too-many-locals
    """Handle @BOTNAME events."""

    user_id = '%s' % ctx.user['id']
    userconf = modconf['users'][user_id]
    calcodes = userconf.get('calendars')
    timezone = userconf.get('timezone')

    if not calcodes or not timezone:
        missing = []
        if not calcodes:
            missing.append('choose one or more calendars')
        if not timezone:
            missing.append('set your time zone')
        return ctx.reply_inline(
            [],
            is_personal=True,
            cache_time=30,
            switch_pm_text='Click to %s!' % humanize.list(missing),
            switch_pm_parameter='L2V2ZW50cw')

    calendar_view = ctx.bot.multibot.multical.view(calcodes.split())
    tzinfo = pytz.timezone(timezone)

    terms = ctx.text.lower().split()[1:]
    full = False
    if terms and terms[0].lower() == 'full':
        terms.pop(0)
        full = True

    nextid = None
    results = []
    while len(results) < 25:
        _, event, nextev = calendar_view.get_event(nextid)
        nextid = nextev and nextev['local_id']
        if not event:
            break
        if full:
            text = ('%s %s' % (event['summary'], event['description'])).lower()
        else:
            text = event['summary'].lower()
        for term in terms:
            if term not in text:
                break
        else:
            subtitle = eventutil.humanize_range(event['start'], event['end'], tzinfo)
            if event['location']:
                subtitle = '%s @ %s' % (subtitle, event['location'].split(',', 1)[0])
            if full and event['description']:
                title = '%s \u2022 %s' % (event['summary'], subtitle)
                description = html.sanitize(event['description'], strip=True)
            else:
                title = event['summary']
                description = subtitle
            results.append({
                'description': description,
                'input_message_content': {
                    'disable_web_page_preview': True,
                    'message_text': eventutil.format_event(ctx.bot, event, tzinfo, full=full),
                    'parse_mode': 'HTML',
                },
                'id': event['local_id'],
                #'thumb_url': icon,
                'title': title,
                'type': 'article',
            })
        if not nextid:
            break

    ctx.reply_inline(
        results,
        is_personal=True,
        cache_time=30,
        switch_pm_text='Settings',
        switch_pm_parameter='L2V2ZW50cyBzZXQ')


def settings(ctx, msg, modconf):
    """Handle /events set."""

    _, text = ctx.split(2)
    msg.path('/events', 'Events')
    msg.path('set', 'Settings')
    user_id = '%s' % ctx.user['id']
    adminui.Menu(
        ('calendars', adminui.calendars, 'Which calendars do you want to see?'),
        ('timezone', adminui.timezone, 'What time zone are you in?'),
    ).handle(adminui.Frame(ctx, msg, modconf['users'], user_id, None, text))
PypiClean
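Editor's note: the two switch_pm_parameter values in the module above ('L2V2ZW50cw' and 'L2V2ZW50cyBzZXQ') appear to be unpadded base64 encodings of the bot's deep-link payloads, since Telegram restricts that field to the characters A-Z, a-z, 0-9, _ and -. A minimal check using only the standard library:

import base64

# Re-add the stripped '=' padding, then decode the deep-link payloads.
print(base64.b64decode('L2V2ZW50cw=='))      # b'/events'
print(base64.b64decode('L2V2ZW50cyBzZXQ='))  # b'/events set'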
/letter-tools-0.2.4.tar.gz/letter-tools-0.2.4/letter_tools/__init__.py
import random
import requests
import re


def range(start = 1, stop = 1, step = 1):
    """Find a range from one letter to another.

    Args:
        start: the start of the range,
        stop: the end of the range,
        step: each increment it goes up by

    Returns:
        the range from start to stop

    Raises:
        Exception: string start or string stop is above Z
        Exception: step is not integer and is thus invalid
        TypeError: str or int is required
    """
    if isinstance(step, int) == False:
        raise Exception("step is {} and is thus invalid.".format(type(step)))
    if isinstance(start, int) == True:
        if start == 1 and stop == 1:
            return [1]
        else:
            if stop == 1:
                stop = start
                start = 0
            if start == stop:
                return [start]
            else:
                res = []
                while start < stop + 1:
                    res.append(start)
                    start += 1
                return res[0:len(res):step]
    elif isinstance(start, str) == True:
        if stop == 1:
            stop = start
            start = 1
        re = """abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"""
        letter_list = list(re)
        n = 0
        for i in letter_list:
            n += 1
            if i == stop:
                stop = n
            if i == start:
                start = n
        if start > 52:
            raise Exception(str(start) + " is more than 52 and is invalid")
        if stop > 52:
            raise Exception(str(stop) + " is more than 52 and is invalid")
        full = "".join(re.split())
        full = full[0:]
        stop = int(stop)
        if start != 1:
            return full[start - 1:stop:step]
        return full[start - 1:stop:step]
    else:
        raise TypeError("int or str are required.")


def rand(a = 1, b = 1):
    """Randomly pick letters or numbers from a range.

    Args:
        a: start letter/number in rand range
        b: end letter/number in rand range

    Returns:
        Randomly picked number from a to b

    Raises:
        Exception: a or b is more than Z
    """
    if isinstance(a, int) == True:
        return random.randint(a, b)
    else:
        re = """abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"""
        if b == 1:
            b = a
            a = 1
        letter_list = list(re)
        n = 0
        for i in letter_list:
            n += 1
            if i == b:
                b = n
            if i == a:
                a = n
        if a > 52:
            raise Exception(str(a) + " is more than 52 and is invalid")
        if b > 52:
            raise Exception(str(b) + " is more than 52 and is invalid")
        full = "".join(re.split())
        full = full[0:]
        b = int(b)
        a -= 1
        b -= 1
        return full[random.randint(a, b)]


def word_score(word, opt=None):
    """Count up the score of a word. a=1, b=2, c=3

    Args:
        word: the word to get the score of
        opt: if opt does not equal None z will be 1 and a will be 26

    Returns:
        The score of the word

    Raises:
        KeyError: character is invalid
    """
    if opt == None:
        arr = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9,
               'j': 10, 'k': 11, 'l': 12, 'm': 13, 'n': 14, 'o': 15, 'p': 16, 'q': 17,
               'r': 18, 's': 19, 't': 20, 'u': 21, 'v': 22, 'w': 23, 'x': 24, 'y': 25, 'z': 26}
        score = 0
        for i in list(word):
            score += arr[i]
        return score
    else:
        arr = {'a': 26, 'b': 25, 'c': 24, 'd': 23, 'e': 22, 'f': 21, 'g': 20, 'h': 19, 'i': 18,
               'j': 17, 'k': 16, 'l': 15, 'm': 14, 'n': 13, 'o': 12, 'p': 11, 'q': 10, 'r': 9,
               's': 8, 't': 7, 'u': 6, 'v': 5, 'w': 4, 'x': 3, 'y': 2, 'z': 1}
        score = 0
        for i in list(word):
            score += arr[i]
        return score


def scrabble_score(word):
    """Count up the score of a word in scrabble points.

    Args:
        word: the word to get the score of

    Returns:
        The score of the word in scrabble points

    Raises:
        KeyError: character is invalid
    """
    arr = {'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8,
           'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1,
           'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10}
    score = 0
    for i in list(word):
        score += arr[i]
    return score


def randomize(word):
    """Mix up the letters in a word.

    Args:
        word: the word to be changed

    Returns:
        The word in a different order
    """
    letters = list(word)
    return "".join(random.sample(letters, len(letters)))


def derandomize(Word):
    """Show all possible words that Word can be.

    Args:
        Word: the word to be derandomized

    Returns:
        All valid words with the same letters
    """
    words = requests.get("https://raw.githubusercontent.com/hostedposted/letter-tools/master/words.json").json()
    return [word for word in words
            if sorted(Word) == sorted(word) or re.search("^" + Word.replace("?", ".") + "$", word)]


def custom_score(word, opt):
    """Make your custom word score. Just put in a dictionary each letter being assigned a score.

    Args:
        word: the word to get the score of
        opt: the options to use.

    Returns:
        The word score that opt gave.

    Raises:
        Exception: if opt is not a dict
    """
    if isinstance(opt, dict) == False:
        raise Exception("options are not a dict.")
    score = 0
    for i in list(word):
        score += opt[i]
    return score
PypiClean
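A brief usage sketch for the letter_tools module above (the import name letter_tools follows the package layout shown in the path; the expected outputs in the comments follow from the function bodies as written):

import letter_tools as lt

print(lt.range('a', 'e'))         # 'abcde' -- letter ranges come back as a string slice
print(lt.range(3, 9, 2))          # [3, 5, 7, 9] -- integer ranges are inclusive lists
print(lt.word_score('cab'))       # 6  (c=3 + a=1 + b=2)
print(lt.scrabble_score('quiz'))  # 22 (q=10 + u=1 + i=1 + z=10)
print(lt.rand('a', 'z'))          # a randomly picked lowercase letter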
/FunID-0.3.16.2.tar.gz/FunID-0.3.16.2/funid/src/manage_input.py
import os import sys import shutil import re import subprocess import json import pandas as pd import numpy as np import logging from copy import deepcopy from unidecode import unidecode from Bio import SeqIO from funid.src.tool import ( initialize_path, get_genus_species, get_id, manage_unicode, mkdir, ) from pathlib import Path from funid.src import save from funid.src.logics import isnewicklegal, isuniquecolumn, isvalidcolor from funid.src.hasher import decode, newick_legal, hash_funinfo_list ## newick illegal characters # fmt: off NEWICK_ILLEGAL = ("(",'"',"[",":",";","/","[","]","{","}","(",")",",","]","+",'"',")"," ",) # fmt: on # default funinfo class class Funinfo: def __init__(self): self.original_id = "" # original id, can be newick illegal self.id = "" # newick illegal characters removed self.hash = "" # hash : HSXXHE self.description = "" # full description from fasta self.ori_genus = "" # original genus self.genus = "" # final genus self.ori_species = "" # original species self.bygene_species = {"concatenated": ""} # species name designated by gene self.final_species = "" # final species designated by concatenated analysis # species identifier if multiple branches with same species exists - ambiguous in result self.species_identifier = 0 self.source = "" self.datatype = "" # DB or Query self.group = "" # original taxonomic group self.adjusted_group = "" # adjusted taxonomic group by group clustering self.seq = {} self.unclassified_seq = [] self.color = None # color for highlighting in phylogenetic tree self.flat = [] # list of flat genes def update_seqrecord(self, seq, gene=None): flag = 0 self.description = seq.description self.genus, self.ori_species = get_genus_species(seq.description) if gene in self.seq: logging.error( f"More than 1 sequence for {gene} found for {self.id} during update_seqrecord" ) flag = -1 elif gene is None: self.unclassified_seq.append(str(seq.seq.ungap("-"))) else: self.seq[gene] = str(seq.seq.ungap("-")) self.bygene_species[gene] = self.ori_species return flag def update_seq(self, gene, seq): # get input as Entrez seqrecord! Important! 
flag = 0 if gene in self.seq: if ( self.seq[gene] != seq ): # if more than 1 sequence per gene gets in, and if they are different logging.error( f"More than 1 sequence for {gene} found for {self.id} during update_seq" ) flag = -1 else: pass else: self.seq[gene] = seq self.bygene_species[gene] = self.ori_species # Update concatenated self.bygene_species["concatenated"] = self.ori_species return flag def update_description(self, description): self.description = description def update_genus(self, genus): flag = 0 # Try to solve illegal unicode characters if pd.isnull(genus): genus = "" # Genus with space causes error while mafft genus = genus.strip().replace(" ", "_") genus = manage_unicode(genus, column="Genus") # Check ambiguity if self.genus != "" and self.genus != genus: flag = -1 logging.error( f"Colliding genus info found for {self.original_id}, {self.genus} and {genus}" ) # Update original if should if self.ori_genus == "": self.ori_genus = genus # Update genus self.genus = genus return flag def update_ori_species(self, species): flag = 0 # Try to solve illegal unicode characters if pd.isnull(species): species = "" species = species.strip() species = manage_unicode(species, column="Species") # Check ambiguity if self.ori_species != "" and self.ori_species != species: flag = -1 logging.error( f"Colliding species info found for {self.original_id}, {self.ori_species} and {species}" ) # Update original if should if self.ori_species == "": self.ori_species = species return flag def update_species(self, gene, species): self.bygene_species[gene] = species def update_group(self, group): flag = 0 # Try to solve illegal unicode characters if pd.isnull(group): group = "" # Group with space causes error while mafft group = group.strip().replace(" ", "_") group = manage_unicode(group, column="Group") # Check ambiguity if self.group != "" and self.group != group: logging.error( f"Colliding group info found for {self.original_id}, {self.group} and {group}" ) flag = -1 # Update group self.group = group return flag def update_color(self, color): if pd.isnull(color): color = None self.color = color else: color = manage_unicode(str(color).strip(), column="Color") if isvalidcolor(color) is True: self.color = color else: logging.error( f"Color {color} does not seems to be valid svg color nor hex code" ) raise Exception logging.debug(f"Updated color {color}") def update_datatype(self, datatype): flag = 0 # Available datatypes : db, query if not (datatype in ("db", "query", "outgroup")): logging.error(f"DEVELOPMENTAL ERROR: {datatype} is not available datatype") raise Exception # Check ambiguity if self.datatype != "" and self.datatype != datatype: flag = -1 logging.error( f"Colliding datatype found for {self.original_id}, {self.datatype} and {datatype}" ) self.datatype = datatype return flag def update_id(self, id_, regexs=None): if not regexs == None: id_ = get_id(id_, tuple(regexs)) # if cannot find id by regex id_ = str(id_) self.original_id = id_ id_ = newick_legal(id_) self.id = id_ def update_hash(self, n): self.hash = f"HS{n}HE" def __repr__(self): return f"FI: {self.id}" def __hash__(self): return hash((self.original_id, self.hash, self.description)) def __eq__(self, other): if not isinstance(other, type(self)): return NotImplemented return ( self.original_id == other.original_id and self.hash == other.hash and self.description == other.description ) # getting data input from fasta file def input_fasta(path, opt, fasta_list, funinfo_dict, datatype): # initialize path to use function 
"get_genus_species" initialize_path(path) # Fasta files only for file in fasta_list: # Copy input files to designation if datatype == "query": shutil.copy(file, path.out_query) elif datatype == "db": shutil.copy(file, path.out_db) tmp_list = [] full = ".".join(file.split(".")[:-1]) # full file path name = ".".join(file.split("/")[-1].split(".")[:-1]) # name logging.info(f"{file}: Fasta file") error_flag = 0 try: fasta_list = list(SeqIO.parse(file, "fasta")) for seq in fasta_list: if not opt.regex == None: id_ = get_id(seq.description, tuple(opt.regex)) else: id_ = seq.description id_ = newick_legal(id_) if id_ in funinfo_dict: error_flag += funinfo_dict[id_].update_seqrecord(seq) error_flag += funinfo_dict[id_].update_datatype(datatype) error_flag += funinfo_dict[id_].update_group("") if get_genus_species(seq.description)[0] != "": error_flag += funinfo_dict[id_].update_genus( get_genus_species(seq.description)[0] ) if get_genus_species(seq.description)[1] != "": error_flag += funinfo_dict[id_].update_ori_species( get_genus_species(seq.description)[1] ) # For new Funinfo else: newinfo = Funinfo() error_flag += newinfo.update_seqrecord(seq) error_flag += newinfo.update_datatype(datatype) error_flag += newinfo.update_group( "" ) # because group not designated yet # id by regex match newinfo.update_id(seq.description, regexs=opt.regex) if get_genus_species(seq.description)[0] != "": error_flag += newinfo.update_genus( get_genus_species(seq.description)[0] ) if get_genus_species(seq.description)[1] != "": error_flag += newinfo.update_ori_species( get_genus_species(seq.description)[1] ) funinfo_dict[id_] = deepcopy(newinfo) except: logging.warning(f"{file} does not seems to be valid fasta file skipping") if error_flag < 0: raise Exception return funinfo_dict # getting datafile from excel or tabular file def input_table(funinfo_dict, path, opt, table_list, datatype): string_error = 0 initialize_path(path) # this one is ugly df_list = [] # extensionto filetype translation dict_extension = { ".csv": "csv", ".tsv": "csv", ".xlsx": "excel", ".xls": "excel", ".parquet": "parquet", ".ftr": "feather", ".feather": "feather", } # Running table by table operations for table in table_list: # Read each of the table by each of the extensions flag_read_table = 0 for extension in dict_extension: if table.endswith(extension): try: if dict_extension[extension] == "csv": df = pd.read_csv(table) flag_read_table = 1 elif dict_extension[extension] == "excel": df = pd.read_excel(table) flag_read_table = 1 elif dict_extension[extension] == "parquet": df = pd.read_parquet(table, engine="pyarrow") flag_read_table = 1 elif dict_extension[extension] == "feather": df = pd.read_feather(table, use_threads=True) flag_read_table = 1 except: logging.error( f"Table {table} cannot be read as {dict_extension[extension]} file. Please check files, extensions and seperators" ) raise Exception if flag_read_table == 0: logging.error( f"Table {table} cannot recognized as either csv, xlsx, feather or parquet. 
Please check if extensions endswith .csv, .tsv, .xlsx, .parquet, .ftr or .feather" ) raise Exception # Lower case column names df.columns = df.columns.str.lower() df.columns = df.columns.str.strip() df_list.append(df) # Clean up columns # Check if "id" column exists and unique flag_id = isuniquecolumn( list_column=list(df.columns), column=("accession", "id"), table_name=table ) # If old accession column, change to id if flag_id == "accession": df.rename(columns={"accession": "id"}, inplace=True) # Check if "genus" column exists and unique # Column "genus" is mandatory in db, and optional in query check_none = True if datatype == "db" else False flag_genus = isuniquecolumn( list_column=list(df.columns), column=tuple(("genus",)), table_name=table, check_none=check_none, ) # Check if "species" column exists and unique # Column "species" is mandatory in db, and optional in query check_none = True if datatype == "db" else False flag_species = isuniquecolumn( list_column=list(df.columns), column=tuple(("species",)), table_name=table, check_none=check_none, ) # Check "opt.level" column exists and unique # Column "opt.level" is mandatory in db, and optional inquery check_none = True if datatype == "db" else False flag_level = isuniquecolumn( list_column=list(df.columns), column=tuple((opt.level,)), table_name=table, check_none=check_none, ) # Check column color # color column is optional flag_color = isuniquecolumn( list_column=list(df.columns), column=tuple(("color",)), table_name=table, check_none=False, ) # Sequence column operations, download sequences with GenMine if 1: # If download on/off option added, change this part download_dict = {} # for downloaded sequences download_set = set() # 1 letter + 5 digit regex should be last, because they overlap with 2 letter + 6 digit ids / shotgun regex_genbank = r"(([A-Z]{1}[0-9]{5})(\.[0-9]{1}){0,1})|(([A-Z]{2}[\_]{0,1}[0-9]{6}){1}([\.][0-9]){0,1})|(([A-Z]{4}[0-9]{8})(\.[0-9]{1}){0,1})|(([A-Z]{6}[0-9]{9,})(\.[0-9]{1}){0,1})" # if gene name were not designated by user, use seq opt.gene = list(set([gene.lower().strip() for gene in opt.gene])) # find all NCBI accessions in seq for gene in opt.gene: if isuniquecolumn( list_column=df.columns, column=tuple((gene,)), table_name=table, check_none=False, ): for n, _ in enumerate(df[gene]): if not (pd.isna(df[gene][n])): if re.search(regex_genbank, df[gene][n]): # remove unexpected indents with strip download_set.add(df[gene][n].strip()) # if NCBI accessions detected in sequence part, download it if len(download_set) > 0: print(f"{path.GenMine}") logging.info( f"Running GenMine to download {len(download_set)} sequences from GenBank" ) logging.info(download_set) # Write GenMine input file with open(f"{path.GenMine}/Accessions.txt", "w") as fg: for acc in download_set: fg.write(f"{acc.strip()}\n") # Run GenMine accession_path = f"{path.GenMine}/Accessions.txt" # To prevent space errors in windows if " " in path.GenMine: accession_path = f'"{accession_path}"' GenMine_path = f'"{path.GenMine}"' else: GenMine_path = path.GenMine cmd = f"GenMine -c {accession_path} -o {GenMine_path} -e {opt.email}" logging.info(f"{cmd}") return_code = subprocess.call(cmd, shell=True) if return_code != 0: logging.error(f"GenMine failed with return_code: {return_code}") logging.error(f"This is usually NCBI server connection error") logging.error( f"Check your network problems, or replace all accession numbers in your db file with actual sequences to run locally" ) raise Exception GenMine_df_list = [ file for file in 
os.listdir(path.GenMine) if file.endswith("_result.xlsx") ] if len(GenMine_df_list) == 1: download_df = pd.read_excel(f"{path.GenMine}/{GenMine_df_list[0]}") # Generate download_dict (I think this can be done with pandas operation, but a bit tricky. Will be done later) for n, acc in enumerate(download_df["acc"]): download_dict[acc.strip()] = download_df["seq"][n] # replace accession to sequence downloaded def update_from_GenMine(string): string = str(string) accession_wo_version = string.strip().split(".")[0] accession_w_version = string.strip() # Do not consider sequence version if accession_wo_version in download_dict: return download_dict[accession_wo_version] # Remove accessions which failed download to prevent conflict with sequence validation elif not (accession_wo_version in download_dict) and ( accession_w_version in download_set ): logging.warning(f"Failed updating {string}") return "" # For sequence input else: return string for gene in opt.gene: if gene in df.columns: df[gene] = df[gene].apply(update_from_GenMine) # Remove GenMine results to prevent collision with next set for file in os.listdir(path.GenMine): if "_result.xlsx" in file: os.remove(f"{path.GenMine}/{file}") elif len(GenMine_df_list) == 0: logging.warning( f"None of the GenMine results were succesfully parsed" ) else: logging.error( f"DEVELOPMENTAL ERROR: Multiple GenMine result colliding!" ) raise Exception error_flag = 0 # Manage unicode for ID df["id"] = df["id"].apply( lambda x: manage_unicode(str(x), column="ID/Accession") ) # Generate funinfo by each row for n, acc in enumerate(df["id"]): # Check if each of the ids are unique # Remove non-unicode first new_acc = True # Generate funinfo for each id if df["id"][n] in funinfo_dict: newinfo = funinfo_dict[df["id"][n]] new_acc = False logging.warning(f"Duplicate id {df['id'][n]} found!") else: funinfo_dict[df["id"][n]] = Funinfo() newinfo = funinfo_dict[df["id"][n]] newinfo.update_id(df["id"][n]) # if flag_genus is true, try to parse genus if not (flag_genus is None or flag_genus is False): error_flag += newinfo.update_genus(df["genus"][n]) # if flag_species is true, try to parse species if not (flag_species is None or flag_species is False): error_flag += newinfo.update_ori_species(df["species"][n]) # if flag_level is true, try to parse the optimal taxonomic group if not (flag_level is None or flag_level is False): error_flag += newinfo.update_group(df[flag_level][n]) # if flag_color is true, try to parse color for taxon if not (flag_color is None or flag_color is False): newinfo.update_color(df[flag_color][n]) # update datatype error_flag += newinfo.update_datatype(datatype) # parse each of the genes for gene in opt.gene: seq_error = 0 if gene in df.columns: if not (pd.isna(df[gene][n])) or str(df[gene][n]).strip() == "": # skip blank sequences if df[gene][n].startswith(">"): # remove fasta header seq_string = "".join(df[gene][n].split("\n")[1:]) else: seq_string = df[gene][n] # adjust seq_string seq_string = seq_string.replace("\n", "").replace(" ", "") # Finding if DNA sequence contains error seq_error_cnt = 0 seq_error_list = [] seq_string = manage_unicode(seq_string) for x in seq_string: # x is every character of sequence if not x.lower() in "acgtryswkmbdhvn-.": seq_error_cnt += 1 seq_error_list.append(x) if seq_error_cnt > 0: logging.warning( f"Illegal DNA character {seq_error_list} found in {gene} of {datatype} {df['id'][n]}" ) elif seq_string.lower().strip() in ("nan", "na"): logging.warning( f"Sequence {df['id'][n]} {seq_string} detected as nan, removing 
it" ) elif seq_error_cnt == 0: # remove gaps for preventing BLAST error error_flag += newinfo.update_seq( gene, seq_string.replace("-", "").replace(".", "") ) if error_flag < 0: raise Exception # After successfully parsing this table, save it save.save_df( df, f"{path.out_db}/Saved_{'.'.join(table.split('/')[-1].split('.')[:-1])}.{opt.matrixformat}", fmt=opt.matrixformat, ) return funinfo_dict def db_input(funinfo_dict, opt, path) -> dict: # Get DB input logging.info(f"Input DB list: {opt.db}") funinfo_dict = input_table( funinfo_dict=funinfo_dict, path=path, opt=opt, table_list=opt.db, datatype="db" ) # validate dataset # if only one group exists, outgroup cannot work group_set = set(funinfo_dict[key].group for key in funinfo_dict) group_set.discard("") if len(group_set) <= 1: logging.error( f"Only one group detected: {group_set}. Please add outgroup sequences" ) raise Exception # check that each group has at least opt.maxoutgroup (MINIMUM_OUTGROUP_COUNT) sequences group_cnt_dict = {x: 0 for x in group_set} for key in funinfo_dict: if type(funinfo_dict[key].group) is str: if funinfo_dict[key].group in group_set: group_cnt_dict[funinfo_dict[key].group] += 1 if any(group_cnt_dict[x] < opt.maxoutgroup for x in group_cnt_dict): logging.warning( f"Some groups in the database have fewer sequences than MINIMUM_OUTGROUP_COUNT. This may cause errors during outgroup selection, or a less appropriate outgroup may be chosen for a group. Please lower MINIMUM_OUTGROUP_COUNT in the options or add more sequences to these groups" ) return funinfo_dict def query_input(funinfo_dict, opt, path): query_fasta = [ file for file in opt.query if any(file.endswith(x) for x in (".fa", ".fna", ".fas", ".fasta", ".txt")) ] query_table = [ file for file in opt.query if any( file.endswith(x) for x in (".csv", ".tsv", ".xlsx", ".ftr", ".feather", ".parquet") ) ] funinfo_dict = input_table( funinfo_dict=funinfo_dict, path=path, opt=opt, table_list=query_table, datatype="query", ) funinfo_dict = input_fasta( path=path, opt=opt, fasta_list=query_fasta, funinfo_dict=funinfo_dict, datatype="query", ) # Save query initially in raw format, because genes have not been assigned yet for file in query_table: shutil.copy(f"{file}", f"{path.out_query}") for file in query_fasta: shutil.copy(f"{file}", f"{path.out_query}") logging.info( f"Total {len([key for key in funinfo_dict if funinfo_dict[key].datatype == 'query'])} sequences parsed from query" ) return funinfo_dict # combined db and query input def data_input(V, R, opt, path): funinfo_dict = {} # get database input funinfo_dict = db_input(funinfo_dict=funinfo_dict, opt=opt, path=path) # get query input funinfo_dict = query_input(funinfo_dict=funinfo_dict, opt=opt, path=path) # combine all data V.list_FI = [funinfo_dict[key] for key in funinfo_dict] # hashing data for safety in tree analysis V.list_FI = hash_funinfo_list(V.list_FI) # make hash dict for FI in V.list_FI: V.dict_hash_FI[FI.hash] = FI return V, R, opt # This function updates initial input query files to saved files with matrices and downloaded sequences def update_queryfile(V, path, opt): # Dictionary to generate dataframe out_dict = {"ID": []} for gene in opt.gene: out_dict[gene] = [] for FI in V.list_FI: if FI.datatype == "query": out_dict["ID"].append(FI.original_id) for gene in opt.gene: if gene in FI.seq: out_dict[gene].append(FI.seq[gene]) else: out_dict[gene].append("") df_out = pd.DataFrame(out_dict) save.save_df( df_out, f"{path.out_query}/Saved_query.{opt.matrixformat}", fmt=opt.matrixformat )
PypiClean
/OnedriveCMD-0.1.8.1.tar.gz/OnedriveCMD-0.1.8.1/onedrivecmd/utils/uploader.py
import json from progress.bar import Bar import requests from collections import OrderedDict try: from static import * from helper_file import * from helper_item import * from session import * from helper_print import * except ImportError: from .static import * from .helper_file import * from .helper_item import * from .session import * from .helper_print import * ## Upload related def upload_one_piece(uploadUrl = '', token = '', source_file = '', range_this = [], file_size = 0, requests_session = None): """list->int Post one piece of file to Onedrive via API. """ if requests_session is None: requests_session = requests.Session() # this is how everything calculated content_length = range_this[1] - range_this[0] + 1 file_piece = file_read_seek_len(source_file, range_this[0], content_length) # Since we are setting up the header by ourselves, we must make sure # the DATA TYPE of the headers are correct (i.e., everything in string) # or sometimes requests is not able to do # auto data type converting. # On OS X everything works fine; Ubuntu would throw an # Header value 10485760 must be of type str or bytes, not <type 'int'> headers = {'Authorization': 'bearer {access_token}'.format(access_token = token), 'Content-Range': 'bytes {start}-{to}/{total}'.format(start = range_this[0], to = range_this[1], total = str(file_size)), 'Content-Length': str(content_length), } req = requests_session.put(uploadUrl, data = file_piece, headers = headers) return req.status_code def upload_self(client, source_file = '', dest_path = '', chunksize = 10247680): """OneDriveClient, str, str, int->Bool Upload a file/dir via the API, instead of the SDK. Ref: https://dev.onedrive.com/items/upload_post.htm """ ## get upload URL if not dest_path.endswith('/'): dest_path += '/' if source_file.endswith('/') and source_file is not "/": source_file=source_file[:-1] # check if it's a file if os.path.isfile(source_file): # Prepare API call # token expires in 3600s, just refresh it if TTL<50min. if token_time_to_live(client) < 50*60: refresh_token(client) dest_path = ('' if path_to_remote_path(dest_path)=='/' else path_to_remote_path(dest_path)) + '/' + path_to_name(source_file) # Stamps print(" ") print_time() print_job_binary(source_file,"od:"+dest_path) info_json = json.dumps({'item': OrderedDict([('@name.conflictBehavior', 'rename'), ('name', path_to_name(source_file))])}) api_url = client.base_url + 'drive/root:{dest_path}:/upload.createSession'.format(dest_path = dest_path) req = requests.post(api_url, data = info_json, headers = {'Authorization': 'bearer {access_token}'.format(access_token = get_access_token(client)), 'content-type': 'application/json'}) if req.status_code > 201: # Avoid print message exaclty after the bar. print(" ") print_error("Request", str(req.status_code)+" "+req.json()['error']['message']) return False req = convert_utf8_dict_to_dict(req.json()) uploadUrl = req['uploadUrl'] # filesize cannot > 10GiB file_size = os.path.getsize(source_file) # API may be unable to cope with empty files, as I tested by uploading with range_list [[0,0]]. 
if file_size==0: print("Empty file detected, trying SDK...") client.item(drive = "me", path = dest_path).upload_async(source_file) return True range_list = [[i, i + chunksize - 1] for i in range(0, file_size, chunksize)] range_list[-1][-1] = file_size - 1 # Upload with a progress bar bar = Bar('Uploading', max = len(range_list), suffix = '%(percent).1f%% - %(eta)ds') bar.next() # nessesery to init the Bar # Session reuse when uploading, hopefully will kill some overhead requests_session = requests.Session() for i in range_list: for j in range(0,6): if j==5: print_error(note="Trial limit exceeded, skip this file.") return False try: upload_one_piece(uploadUrl = uploadUrl, token = get_access_token(client), source_file = source_file, range_this = i, file_size = file_size, requests_session = requests_session) break except Exception as e: print_error("Upload",str(e)+", will try again later.") continue bar.next() bar.finish() # So it's a dir, upload it recursively. else: new_dest_path=dest_path+path_to_name(source_file) for new_source_file in os.listdir(source_file): upload_self(client, source_file+"/"+new_source_file, new_dest_path, chunksize) return True def upload_self_hack(client, source_file = '', dest_path = ''): """OneDriveClient, str, str->Bool Upload a file/dir via the SDK. """ if not dest_path.endswith('/'): dest_path += '/' if source_file.endswith('/'): source_file=source_file[:-1] # check if it's a file if os.path.isfile(source_file): # token refresh if token_time_to_live(client) < 50*60: refresh_token(client) dest_path = ('' if path_to_remote_path(dest_path)=='/' else path_to_remote_path(dest_path)) + '/' + path_to_name(source_file) # Stamps print(" ") print_time() print_job_binary(source_file,dest_path) # upload with SDK. This is the only difference with upload_self(...) for j in range(0,6): if j==5: print_error(note="Trial limit exceeded, skip this file.") return False try: client.item(drive = "me", path = dest_path).upload_async(source_file) break except Exception as e: print_error("Upload",str(e)+", will try again later.") continue # so it's a directory else: new_dest_path=dest_path+path_to_name(source_file)+"/" for new_source_file in os.listdir(source_file): upload_self_hack(client, source_file+"/"+new_source_file, new_dest_path) return True if __name__ == '__main__': pass
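# --- Usage sketch (not part of the original onedrivecmd source; added for illustration) ---
# A minimal example of how the chunked-upload helpers above fit together. The
# authenticated OneDrive client, the local file path and the destination folder
# below are hypothetical placeholders.
#
#     from onedrivecmd.utils.uploader import upload_self
#     upload_self(client, source_file='./big_video.mkv',
#                 dest_path='/Videos/', chunksize=10247680)
#
# upload_self() first creates an upload session for the target path, then PUTs each
# byte range with upload_one_piece(), reusing one requests.Session and retrying a
# failed chunk up to five times before skipping the file. Empty files fall back to
# the SDK's upload_async(), since the session API cannot handle a zero-length range.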
PypiClean
/love_course_2016_2019-2023.3.1.0-py3-none-any.whl/LoveCourse20162019/docs/wu-ya-jiu-shu/连招1.0学习训练系统完整版:乌鸦连招1.0学习训练系统完整版:女性性格分类:女性性格分类1.md
# 连招1.0 学习训练系统完整版:乌鸦 连招1.0学习训练系统完整版:女性性格分类:女性性格分类1 嗯好,就是大家都看过我的文章吧,女性性格分来没看过的打个一,没看过的打个一,我的女性性格分来,再没看过,然后,身边有看过,打直到看了吧,性格分来你们看的肯定感觉,特别难特别难懂。 主要确实是英文自动比较多,女性格分来就是,在我体系里面,也是第一步,也是定位节套必须做的,就是所以我们,第一个要给一个自己一个定位,因为就是每个女人都是不一样的,我们在炮牛过程当中。 根据不同女人所使的手段,要一定的调整,所以就是说,今天作业我可以先按布置下去,就是你们自己,有适合教全个方面分析,觉得你能炮的卖的是哪几种,是以后一个训练的阶段,比如说你现在还是个学生,那你炮那种。 是说白负买不能炮,是炮牛的女乔人,那几乎是没戏的,不过你机场特别的牛逼,已经在我高节套里,会低位心眼的情况下,才可以炮到,正常情况,你们这学案的几数个根本就是不可能,以一个学生身份,炮一个就是。 无坏强、企业的,总经是以女人的,我只是举个例子,就是不可能的,所以就是说,我们就是要有个定位,不要就是每天,偏卖的蝦片,有个最主要,像你是学生,你炮学生最靠谱的,另外还有几个分支点,比如说食堂的小博。 什么也是可以的,其实我就是你只能炮学生,但是我们要有一个范围,它比较适合我们,炮牛是我,我学会炮牛能炮所有女人,这纯属于扯蛋,好吧,尤其是,就哪怕是我,炮跟自身价值,就是不相符的人,都是特别困难的。 懂吧,那不是说不可能,但是比较难,就是我失败率会很高,但是一般人,不会喜欢就是,分自己差距特别大的,不过这个就是,让你们不要觉得,这个东西特别神话了,但是没中女人,确实有定的分类,我打开那刻。 然后主要今天,就是我先讲一下,m sr,这三种指数的判定,然后这个文件,我看在这个头,这个只是,这个只是,突略版知道吧,这是我最终版,别三张头发在外面晚上,然后第一张头,大家可以打开看,这是第 s。 第 s 指什么,就是传统跟开放的态度,我们要怎么判断,对待陌生男人,主要就是我们,不可能,就是说你跟他熟了,这肯定就非常好判断了,对吧,那用其他方式,我是说你刚刚碰见一个,你怎么判断的,他是传统开放的。 你要看他对陌生男人的态度,不是说对熟人啊,对爸妈没人生,你们看不到头吗,我发文字吧,对,对陌生男人的态度,还有就是对陌生男人,肢体出口,这个我们都是顺着,这个我们都是顺着就可以测试完了。 而且知道他是怎样的,就比如说,手机上看不到,第 s 指的判断,就是觉得,太多还要支付反应,后期我还会讲,其他判断方式,这只是一个初期的判断方式,所以也就是我,1。0判断方式,有 2。0 3。0还有。 有待总结,比如说他对陌生男人,他都是不理惨,就是得基础分0。5,因为就是最低最低都一动吧,然后他是领帮性的招呼,按他加一份,主动说话,加两份,对 陌生肢体出风的反应,我是验法,这个是 名点 5。 欣威躲避,是 1,无反应是 2,一个合适 3,正常姑娘一般都是,欣威躲避,家里毛性招呼,或者无反应家里毛性招呼,这就是属于正常显的,所以我们判断的时候,就可以用这两个,一想你别看虽然简单。 我的东西就是简单,粗暴有效,我们咱们一下子可以锁面这个女生,在这种的观点,就社会的,开放愁愁观点,然后剩下就是看她性格,懂吧,就是一个从来不说话的女的,她再当了,当不当哪去,懂吧,一个特别爱说话。 女儿她在,传统也不是特别传统,就比如说话劳就很,上能发展成来不理采,验反正我们一般情况就是这个,比如说她第 S 值比较,第 S 值,高也不会高到哪,或者第 S 值比较高,她第 S 值低也低不当哪,懂吧。 我这个可能有点复杂,其实不难的,然后第 S 值不说,大家可以看我打出来的东西,对就是,然后计算出这两个主要判断方式,还是传统开放,这个性格自逼开网,说实话影响不大,就像自逼的女孩,这样其实很多女孩。 都是正常聊天的,但主要还是看她对她,对她的态度跟反应懂吧,她就甚至不说话,女孩其实有些时候,并不是那么的,就是一个很自逼的女孩,她也有时候,并不一定是那么传统,想法会很开放,这个你们如果接触女人多的会。 了解我说,这个比例一定是对的,对就是说,她就是她的后天,形成她的那种观念的,原来就是零价,就是她先前的观念,对就是说,她后天的东西,大概是比她先前重,就是比重大到两倍只一只上,懂吧,你再算谁。 这个我会有罪,没关系,怎么很骚的,是怎样的,方方,1加2,可能3。25,为什么有1加2成3。25加1,BHC只是你要把这两个,就是对陌生男的态度,跟直体出风的反应,加起来再成一1。25。 不是说一直接可以加,1加2成1。25,懂吧,是这样的,就等于6,然后聚集我那个表格,就在我那早就列出来了,我也没表格,就是在重复的,我的表格就是,我都已经工质于重了,这个已经没有什么,秘密可言。 大家就可以参加我那个表格,就是对,有那种看着不说话,所以说你一定要看对陌生男人的态度,但是这个也不是完全准的,还有一个判定指数,我没有写,就是有一种女孩,这个种情况就是。 跟你后一种指数在挂钩的时候谈恋判冠,就有一种女孩,经历丰富的女孩,懂吧,有可能对陌生人,人的态度特别的底处,这不是说话,而且从来也不是特别爱说话,但是其实很骚,你就看后面。 我就是正常情况按我觉得判定就可以,所以我说是1。0,这个基本上90%都是准的,没有百分之百准的,那人只要一个人要是想为一庄,那他永远有偉庄的办法,永远是看不出来的懂吧,但是正常人不会天先。 是因为做一个人都会偉庄的,所以咱们这个钱可以不用考虑,然后就是第2个,嗯,第S指判转,就是一个M指的判定啊,2到15,就是最终计算出来的答案,我这个算出来的范围一定是5到25的懂吧,你不可能就是我。 就是特别保守,25是特别开放,然后具体5到多少那个,是嗯,多少刀刀刀刀,正常多少刀刀,多少是保守,多少刀多少是开放,然后1S指就是判定方法主要取决于它的感情经历,这个的感情经历的就是后天因素。 比它先天因素更要重要,感情经历账刀是它的四倍以上,就是一个人年龄,其实跟它的实际上,它的就是同属度关系并不是特别的,只有它经历的四分之一,然后具体怎么看呢,这个算法比较麻烦些,首先你要看它的恋爱次数啊。 是说真正恋爱,不是说那种的玩两天的,是它真喜欢过的男人,走吧,长期关系懂的男朋友,是它确实喜欢,而不是说那种的不喜欢的,怎么人掉了呀,对这个次数是站到最主要的,就是它谈恋爱次数,其次就是它跟男人对吧。 上传,SAC就上传了意思,上传的次数,这个只有0。1,其实上传不是那么关键,但是最关键是它,就是跟人感情糾葛的次数,看到0。4,还有就是这个我是,夏娥伯也说了吗,当恋爱次数大于,SAC次数五倍以上。 直接定义为最高,这个你们可能不理解,为什么,这如果一个女人,你们可能没这样的女人,也许甚至是处理,或者说只跟一两个男的睡觉,但是她谈过的男朋友,却特别多,这种女人是真正特别厉害的,所以就是。 说当恋爱次数大于,SAC就上传次数五倍以上,直接就可以定义成五,当然没必要再细纷了,这个其实我应该弄一个,就是更复杂点的公司,其实应该就是那个,上面处意下面再成的,因为它这个,但是我想想没必要。 因为人吧,比如说有些时候你谈一次恋爱,有时候甚至顶别人谈十次恋爱,这都是有可能发生的,所以我相信那种算公司反而,更复杂,但是更不准,还不如按我这种粗练算出来的,比较标准一些,所以我就没有更复杂的公司。 本来我开始想要更复杂公司,但是我算出来的结论总是不准的,所以我就没有那种公司算,大家只要在脑子里形成,它还是一个怎样的这个概念,概念就是,它真正谈恋爱的次数,才是决定一个女人感情精力的真正。 还有就是它跟睡觉的男人次数,其实有些女人把你别看睡过,很多男人,但是其实还是感情精力特别的少,那只是它当一些而已,所以说跟男人上床,并不是特别重要,所以说关键还是它的真恋爱,不是说那种玩意儿的那样。 那也不算,唐其恋爱次数,冯云亮丽尔斯加上,特色上床次数,称一零点一,这个公司有个错的地方,就是,再加上,这个床完略再加上零点四,然后零到九吧,不用零到十,不是,基础有一个零点五的得分,但行十分钟就走。 然后就年龄这个大家看下就行了,我跟大家说这个成长比例,就是十四岁十六岁,大概也就是这么个增长,为什么就是,前期女人成熟特盘,女人成熟主要是在一到二十岁之前成熟的,其实就一到十八岁。 
为什么十八到二十十二点五了,因为二点五就是十八到二十十数,于正常性的女人,好吧,幼稚的女人之后在十八岁下才存在,但是这个并不是特别关键,但是也可以做的康好指标,所以成熟主要M值就等于,ES值成一。 四加上一,对啊,这个只能判断啊,而且这个不重要啊,但是你一个女人跟多少男人睡过,其实我们是可以感觉出来的,这就是什么感觉,但是它就是在床上的反应啊,是不是很熟练啊对吧,这种的,五到二十五。 还就是最终得分是五到二十五,然后还有最后一个数字啊,稍等一下,这个,我讲最后一个好,这次全了,我讲完大家不要就是光看共诚,讲完我会给大家在系统讲一下,我只是先按理论性的给大家讲一下。 我讲完大家不要就是光看共诚,讲完我会给大家在系统讲一下,只是先按理论性的给大家讲一下,平常是永远不懂理论来分析的,但是这个有变于我们了解,然后就是,第三个就是它的儿子就是所谓的家庭,儿子怎么判断。 一个人解决不了文宝,就是穷的丁当小,那你就变成你,就是每天上乱吃不了,下乱这种的女孩,这种比较傻一般没见过,然后比较穷,勉强能上学,而且比如说那种的上学还得自己出去打工的。 正常就是三就比如说家庭小康这样的,生活也比较正常,就是说我们通常碰见女孩其实都是三的,还有一些就是家里稍微调点好一点,比如说父母玩,基本上一万以上就是,咱们这带人父母能蒸到一万以上的壳。 现在你就按照你们现在周围的比例,是当往上挑,比如说你们现在,当然你比如说你在一下城市生活,比如在上海北京,你可能蒸一万以上也就算一个三,懂吧,那你就可能要蒸的更多才能算到四。 这个不能完全就是按照我这个固定的比例,你们要进行自己的微调,我只给你们大概的一个感觉,然后就是而是,是一个人的性格方面,它哪个跟扛卡,就是你不要说是,他吃了的给你花多少钱来判定,这个不是不准的。 我们要想判定一个女人,她是扛卡还是她哪个要判定,她愿意付出自己身上所拥有的多少东西,比如说,她没跟一个男人情谱啊,愿意跟你情,就是一种付出,比如说她只有十块钱,愿意跟你花十块钱。 那这个女人就给你付出了五,而且不见你不付出,如果说一个女人有1000万,那她给你100块钱,这也不算什么,懂吧,所以说你要看一个人,他身上付出自己的百分比,来看,具体就是看,有的其实很多家里。 家庭条件不错的人,你就完全不能说是安特爱,对啊,看每次出去吃吃饭,一两百一两百的请大家吃饭,感觉挺扛卡,其实这不是啊,不是这样看的,这样看就不准确了,真正看法是,特愿意付出她的多少东西,比如说有钱人。 安特你叫站用空的时间,比如说这个女孩挺有钱,你看她愿意没有花时间,给你再一块,而不是说这个女孩,从来不用我掏钱,每次出去还跟我,这也胖那么帅什么,然后这个吧,我一直是,只怀疑态度,就是她社会家人需求。 其实往往是跟,RS是Ri,还有那个RS值,差不多对等,所以你看我写的是约等约,我不说等约,只是粗略估计出来,这是我现在唯一一个,就是算数值,算的算是最不准的一点,因为这个就是,我可能就是。 还是就是数量太少,我还要经历长期的就是这个时辈,我再胖断确实的,暂时就是大家可以,按照2。5,跟2。5,也就是1比1的比例,来看她的家庭,跟她们康徘她的,也就是说,一个人是否对那种,一个女人对一个男人。 社会的价值需求而止,其实跟家庭跟,跟她康徘这个比例是1比1的,约等约1比,但是不完全是,并不是帅,一个特别有钱的女的就,不会去穷孝子,但是也不是说,一个特别穷的,这种女孩,就是一定要让女,觉得你不负有。 就不错,从女孩放而特别,需求,特别就是那个,费男男朋友特别有钱,这些都是跟人,长期思维决定的,只是说大价,粗略胖胖出来,可以帮我们以后,方便的胖胖很多东西,我再发会头讲,传统跟开放,传统跟开放。 我们不能单单,就是用我刚才的公事,自编开朗对吧,从来不主动说话,那你要胖断她,这个女孩是,女孩跟你说话,还是她从来不说话,她如果是见谁都不说话,那她还是自编的,如果她是见谁,都说话就见你不说话。 你说这女孩是自编,那是扯的,那只是她对你讨厌而已,那也许人家是勿,所以我说的是对陌生男人,不是,那个Ds值得怕你,是非陌生男人,知道,从来不主动说话,肯定没有,跟朋友她都不喜欢主动说话。 不说只有一两个好朋友,主动说话,这是可以的,经常问问好好的,就像这种的,比高冷其实已经,觉得你们高冷女孩一般是二分左右,正常聊天就是正常能跟你互动,按钮女孩就是,像我是谁那块。 跟你聊那个女孩其实就属于似,因为我觉得那个女孩自己挺能发起,聊天的,就是跟你聊,你上次让我给你胖断那个案例,你就是似,然后我跟大家说,什么是悶骚女,刚才已经跟你们说了,悶骚女人其实基本上都是。 所有属性为中等的,我跟说悶骚女人什么,就是礼貌性打招火,清为躲鼻,无反应,然后正常聊天,因为中国女人大部分都很悶骚,在表面上表现的正常女人就是悶骚的,这个其实是具有普遍性的,然后下来我在接的。 说感情精力方面,感情精力方面我刚才其实已经说了,有些女人就是,如果弹厂特别,就这个就是弹压自数五,是一方面,或者是她弹压自数时候,她的对手,比如说这个,她有我的男朋友特别就是,感情方面特别牛逼。 比如说她跟一个PLA弹过脸,那你就可以就是,算做是跟三个男朋友弹过脸,就不要算一个,你走吧,如果她跟一个那种特别二B的人弹压,你说甚至可以说这不叫弹压,这都是可以的,比如说很多女生说我弹过两次。 我一看前两个男朋友基本都,是那种的往聊性的,也算也尝起,但是就是冰长往聊也没见过面,这都可以不算弹压,走吧,并是要有客顾明星的弹压才算是,情感经历也是非常能判断于一个女性,她对于感情的认知的。 其实高手往往都是,举举于她这个数值特别高的情况,那年龄关系比我们大,其实我也将过年龄特别小,但是手玩特别强的人,所以说我们就是,如果算出这个是,图略的数值,然后跟我表格上的,边缘比较接近,我举个例子。 在不这个,比以前的东西发生了,比如说你看,你算出来一个女生,对吧,性开放度是19,或者22,或者是,这你就不能判断她是正常还是开放了,你要再根据其他更多的测试,来之前下来不测试,来判断她是哪种类型。 比如说你算成熟度,挨9或者12,这就是有浮动,出于边缘情况下,或者11,这些数字都是有争议的,你看她更应该往哪边靠,往这靠,我说了吗,这些东西,你不能完全看数字,数字是给咱们一个基本的概念。 咱们能有一个大概的判定,但不是绝对的,如果你完全就像机械音,你让你利用这些数字,数字还完全想算出一个性格,然后再给它定外,只几乎不可能的,然后我今天在简单讲一下,就是,因为下面还有两节课。 这个是比较重要的,我先讲一下,讲一下就是传统文开放区别,传统的女生,挺好的,知道比较重要,传统女生在我们进挪的时候,更需要采用更多的暗示,并且,几乎的不能用性暗示,对于就是传统的女生。 我们今天要做暗示的时候,明白没有,用感情暗示,不要用性暗示,然后进挪我们要更加的缓嘛,更加的自然,对吧,记住了,这非常重要的,判定指数,判定完了我们干啥,你们肯定是优勇的,不是判定了,知道是这个行来。 我给你指定方案,不是的,是说判定她的每个指数,都对我们接下来的行为,跟判每个女人的时候,是有很大区别的,比如说开放的女孩,比如开放女孩,我们进挪的时候,更应该采取直接进挪,你如果显得微微弄。 她会觉得你很不男人,好吧,没有需要,必须就是比如说,就在夜间,我直接见一个女人,上去直接楼窑,这是无所谓的,你要是还不敢碰,或者说就抓人家手,这都是非常楼的,或者抓人家,就是那种,比如说哪两个指头。 那种特别维促的进挪,你也人家头发,但是比如说哪两个指头,你也人家头发,这就感觉有点维促,但是你用这招对付传统女孩,是可以的,反而她觉得,用的小艾埋,非常的有感觉,但是你要对于开放,就正常想法。 对于正常女孩的进挪,就屈于她们两个之间,其实大多数人用的惯例,都很适合于正常性女生,正常性女生,就是,正常这个我反而瞎不回事,就是正常的进挪,我们比如说上去,爱拍下的腿,拍下在肩,就正常不清服的动作。 都可以去做,但是对于传统的那个,你们不知道见过没有,就是用碰一下就做看你的,也有是装的,但大多数是真的,为什么,因为她觉得这样不好,其实我问过,你我找过这样女朋友,问她你为什么要做,她说爱我觉得不好。 不舒服,其实具体怎么样,她也不知道,然后就是自力跟开朗的,我跟你说,这不是S值啊,我刚才讲的,我刚才讲的是第S值,明白没有,紧先传统看,我代讲第S值的应用,第S值应用是自力跟开朗,即使一个女生很开放。 
她说我是自力的,这种是非常困难的,为什么要,具体我分到六种数值呢,那肯定有用的,对于第S值的女生,如果她开放的话,那你就要更多的去引领,那你比如说,我一直做心安时的这趟,我一直,但是你一定要,你主动说。 她可能就是不怎么说话,但是你要代理话题,就是这样的,最难的就是属于,哎又传统又自逼的女生,你代理话的时候,特别的麻烦,因为你稍微做点,二十是有可能她就觉得你,不好不跟你说话了,所以。 更加多的使用幽默式的聊天,让她感觉你有趣,然后缓慢的进行,爱美式聊天,我讲的东西可能有点太多了,也记不住啊,就是对于又传统,又自逼的,就是第S值跟第S值都低的,对于你那种就是说,比如说第S值比较高。 比如说第S值是五第S值是一的,那想法还简单点,虽然她很传统,但是她愿意跟你交流,往往都跟你交流过程中,偷偷很多的信息,你通过那些信息,再给她进行幽默搞笑,只是说你少进行信案是多进行感情上的案是一般。 还是可以结少的东吧,这样的女孩,往往是开得起玩笑的,只是说思想比较传统,而且这种我跟你们说吧,还再给你们说一个,就是第S值跟第S值,就是第S值是可变的,第S值也是可变的,但是第S值的可变度比较低。 所以我说,一个艾术花的女孩,除了你给她很大次,她才会变成不艾术花的女孩,但是一个很传统,跟一个很开放之间之间,可以互换的,但是在单位时间内,她是不会变的,比如说在我们做团队游戏的时候,我可以通过空场。 来达到平时,第S值只有,第S值只有三的人变成四,但是我不可能让一个第S值有1的人,顺间变成五,最多让她变成二,比如说很多女孩,没玩过那么开的游戏,你要是直接玩那种的轻作尔的,那可能不行,但是你要是对吧。 抱一下,那她可能还能接受,能适当的微调,而且第S值,是可以经过长期训练来改变,比如说你被1亿调到五,都是有可能的,当然我没那么做过,我一般跨的过,比如说不二调到五,已经是很高的,我还没有被1调到五。 或者1调到三还可以,这个就是要经过我们长期的调校来进去,训练,然后就是说自兵跟开朗,自兵开朗这个就是,几乎不变,所以我们就是刚才,已经说了好多,有问题可以先来打出吧,因为这个比较难懂一点,你想想。 想我没种女生的性格不同,特别有助于我们,以后使用机巧的时候的成功率,如果你设计要直约过的,你用我的方法是非常有用的,因为你在旁边一个,你能够在我身边,因为你在旁边一个,你就可以直接根据我的。 这些给你们的公式,判断出这个女生喜欢哪一集,哪一集来的方法,不喜欢怎样去做,我刚才在传统开放,自兵开了你也举了很多,很多的例子了,我说你没有有不懂的机器训练,没有就跳过下一话题了,这个,有录音。 你们可以回去反复挺,反正这个才是1。0,后面会给你讲2。0,3。0的,然后警觉就是勤感,经历啊,这个非常重要,性格同样重要,其实儿子相对来说,更不是特别重要,儿子往往是决定他的长期走势,跟价值投资管。 动吧,但是前两者,直接会决定一个女人,就是,s指决定用的机巧类型,动吧,然后,经历儿子决定这个女生的难度,我跟你们说啊,经历,你们可能觉得,没谈过这儿的女孩难泡,我不觉得,我觉得没谈过这儿的女孩。 是最好泡的,正经事,我说走进她心啊,儿子说跟她上场,她上场过去很简单,但是跟她让她爱上你,让经历丰富人爱上的特别的,必须对,因为她每谈过一次恋爱,她心里的就那种,包括的冰,会更加的完整一些。 就说谈过十次恋爱,就是真爱的,比较真爱,比较客古明星恋爱你,就是特色,心整个被冰包住了,你要想找到,一个位置彻底的扎进去,特别的呢,你要戳痛头的内型狠吗,你想要再让她受到,伤害非常闷的,所以说。 我们怕的这种网友决定和技术,女玩家,就是出在情感厅,特别丰富的前提下,或者就是,女玩家还有这种情况,特别聪明,我们小有很多男生,追逃,我跟你说,没有说是一个女生,没谈过这儿,她有特别牛逼。 为什么有的女生,甚至一次恋爱都没谈过,你感觉她手玩特别强,那也是有丰富男人去追逃,才有的,当然了,情感经历丰富,往往分数就会高,但这个必定,为什么情感,为什么分数,我能决定一个人技巧。 你们总觉得美女技巧高,其实也不是的,美女最开始技巧也不行,她往是因为,如果加较阳的美女还可以,因为加较阳的美女不会,亲自跟别人男的谈恋爱的,然后,当她谈恋爱的时候,她已经被很多男人追过。 所以她技巧想得高,记住啊,这美女句话都很重要,但是,正常情况美女在最开始,也是跟仇女一样,所以一个仇女要天天去谈恋爱,她的技巧还是可以的,其实你们在生活中,你们可能没有发现,有些女的长得其实并不怎么样。 但是她可能狗到不少男的,就是她的手,有手外,往往最有手外的其实还是美,在美女这种出的比较多,也畢竟她们,能更多的跟一些男生进行锻炼,也就是我说的,实战才是关键,她的实战量大,而且,但是想反她的上床数量。 反而不重要,你们再技术这一点,一个女人谈过来来自数越多,她的上床数量越少,就是这两者的差值越大,也就是倍数越高,这个女人的技巧越强,那你去看她吧,你在吧,她说你会有感冒了,她应该去看她。 你这个我已经判断了,所以我们在判断的时候,除了我给你的以上公事,就比如说没有到五百吧,但是她可能就跟一个男的上床,或者跟两个男的上床,但是她谈过四个或者八个男朋友,那你也应该在感情经历的这个数值上。 也就是儿子上,适当的往高加,知道吧,我想我现在说的已经很香了,你们听懂没有,就是谈恋来自数越多的上床,当然数到越少,反而决定一个女人越强,就是这么个意思,就是儿子的判断上,就是不单以我这个公事为标准。 你还可以再往上加,但是如果一个女人只谈过一次恋爱,她跟十个男的上床,她跟十个男的睡觉,那就可以稍微减点,明白了吗,开荒和破冰,对了,我理解小胖说这个意思,其实这个,别看这是我基础之上,但是其实这个。 反而你们理解来比较难,但是你们能做到判断一个女生类型,使用正确方法就够了,小胖理解到什么意思,她说你找一个初女的话,是吧,直说,比较浪费时间,但你要破冰,是你不一个女人已经就是,特别有丰富的精恋的女人。 然后再把它玩动在手掌里,不太都冰敲碎,非常难,所以你看小胖,因为小胖其实技术比较高,我们初级课程,她大部分都不提高,然后她讲的这个是非常对的,开荒,你就找一个初女,原来要比你,拿到一个。 感情精静丰富的女人,就是把她把握在手里头,等男的多,等男的多,懂吧,然后儿子,儿子讲完了,那我就讲这个,刚才讲的是M指,她讲的是M指,我说错了,大家不要,记错了,她讲的是M指成熟,而现在她讲儿子。 儿子是最麻烦的,因为这个我研究都不是特别偷车,因为你知道为什么,因为我接触的层面,我也不是特别窮的人,接触特别富人,所以我只能按自己的,大概印象把给订用一下,但是这个也是有用的,因为这,你想我。 我可能天天跟一群窮人,玩完再跟一堆富二代玩,我的圈子,没那么广泛,没办法,我经过分代的研究,还有就是,根据各种学员的案例,来判定的,所以这个,数值跟很多人相比,还是我是相对准确的,但是这个数值。 你可以自己,如果有见解的话,你再跟我说,我来更加的,玩上一些,也就是说,这个也就是,属于我这个技巧一头,并不玩上一点,等我二点零的时候,我想不这一点,完全玩上了,就是,首先我们看一个人的家庭。 一个人家庭,原因其实不光是窮干富啊,我为什么说,一个家庭,原因其实决定和,扛卡跟他了,扛卡跟他完往,后天,先前就是的,一个家庭窮的人,往往就是,会更加的对,就是未来的经济,要扛大,因为人。 永远是对他缺少的东西,有这口,但也有那种家庭,很窮,但我还要当师人的,你们应该听过,不是特意当往往,家里月窮,以后月需要钱,就比如说,一个女孩特别窮,你要是有钱的话,你搞定她,她帮个大款,她很开心的。 甚至当小三的愿意,对你这个小方不在吗,其实你觉得,你说后宫管理,这是属于简单性后宫万,就是这个女生的儿,儿值,太敌的情况,你这是最好的,然后儿值高,其实我,也跟不少有钱人接触过,这些有钱人。 也是非常小气的,就是他们算有钱,但是却很购门,就是,给点小人小会行,但是,不过真的,给你华特别多的钱,很多人都说,有钱人从你,其实不是,一种,怎么说呢,人从的习惯,可能就是被人坑多了,所以就是。 不想付出特别多,这种情况,就是也比较多,然后现在我就讲一下,康海的康海,康海跟康海跟康海,我刚才已经说了,就是,不是说,具体多少,而是她能付出的百分比,你应对康海的女人的时候,一定不能注动投资。 明白没有,因为康海女人的时候,就是儿值比较高的,一定不能注动投资,哪怕你很有钱,你有钱,那你就,投资,投,比如说你你你你,给她投,100块钱吧,那你也让她投30块钱进了,不是说,你投100块钱对于。 
你来说,是张有10%的,比如说你投100块钱,是我总资厂的1%,那你也让她投,相信她的,这一部分,这个跟有钱没钱,没没关系,关键还是看,看来就是投资的比例,你用,要尊须用我的黄金比例,甚至你。 一个人越贪了,她来你要投资让她比你多,这种付出才重要,相反,如果一个人特别康海,我不建议你让她投资特别多,除非你也想踹回来,要不然她容易就是,以后看透你是神药,什么样的人,对一个康海的人。 我们更加用康海来对头,对于她来的人,我们要抓住她的这一点,因为一个康海的人,你对她康海,她是会理解你的,这个其实,拿人也在某种程度上,试用,但是我现在主要说泡牛,比如说我以前有个女朋友,上大学的时候。 我出去划多少钱,她一定要划多少,其实我意思是我家有钱,会多套点操作不愿意,这是属于康海的人,然后我就偷偷地,给她放卡上从钱,各种,就办这样的事情,非常地感动,然后就拿出生活非的,一个月八百生活非。 晚上就拿出五百,然后多满衣服,这就是她所做的,其实并不一定你对一个女人好,她就对你不好知道,钱其实这个人比较康海,虽然很穷,但是很康海,但是如果她,哪怕是这个付加女,你天天给她花钱,也屁用没有。 那你一定要让她投资,懂吧,康海跟她来是关系到她投资,这个家庭,RS值,关系到你们长期关系,之中的稳定性,懂了没有,这两个数值,虽然说是决定她的社会需求而止,但是决定的地方是不一样,一个是决定投资。 一个是决定未来,明白了吧,搞定小三容易,这个懂以后再说,这个你还得跟我说,你正宫娘娘们价值了,这也得通过新更分类分,然后今天的课程主要不可能基本讲完了,后面吧,还有两三节课可能。 主要就是分类讲具体美类的处理,然后大家有问题想可以问一下
PypiClean
/fluidon-doepy-0.0.6.tar.gz/fluidon-doepy-0.0.6/README.md
Welcome to DOEPY ================= Design of Experiments Generator in Python (`pip install fluidon-doepy`) ------------------------------------------------------------------ ## Introduction --- [Design of Experiments (DOE)](https://en.wikipedia.org/wiki/Design_of_experiments) is an important activity for any scientist, engineer, or statistician planning to conduct experimental analysis. This exercise has become **critical in this age of the rapidly expanding field of data science and its associated statistical modeling and machine learning**. A well-planned DOE can give a researcher a meaningful data set to act upon with the optimal number of experiments, thus preserving critical resources. > After all, the aim of Data Science is essentially to conduct the highest quality scientific investigation and modeling with real world data. And to do good science with data, one needs to collect it through carefully thought-out experiments to cover all corner cases and reduce any possible bias. ## How to use it? ### What supporting packages are required? First make sure you have all the necessary packages installed. You can simply run the .bash (Unix/Linux) and .bat (Windows) files provided in the repo to install those packages from your command line interface. They contain the following commands, ``` pip install numpy pip install pandas pip install pydoe pip install diversipy ``` ### How to install the package? (On Linux and Windows) You can use ``pip`` to install ``doepy``:: pip install fluidon-doepy (On Mac OS) First install pip, ``` curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py python get-pip.py ``` Then proceed as above. ### Github The package is hosted at this **[Github repo](https://github.com/fluidon-sw/doepy)**. ### Quick start Let's say you have a design problem with the following table for the parameter ranges. Imagine this as a generic example of a chemical process in a manufacturing plant. You have 3 levels of `Pressure`, 3 levels of `Temperature`, 2 levels of `FlowRate`, and 2 levels of `Time`. Pressure | Temperature | FlowRate | Time ------------ | ------------- | -------------|----------------- 40 | 290 | 0.2 | 5 55 | 320 | 0.4 | 8 70 | 350 | - | - First, import the `build` module from the package, ```from doepy import build``` Then, try a simple example by building a **full factorial design**. We will use the `build.full_fact()` function for this. You have to pass a dictionary object to the function which encodes your experimental data. ``` build.full_fact( {'Pressure':[40,55,70], 'Temperature':[290, 320, 350], 'Flow rate':[0.2,0.4], 'Time':[5,8]} ) ``` If you build a full-factorial DOE out of this, you should get a table with 3x3x2x2 = 36 entries. Pressure | Temperature | FlowRate | Time ------------ | ------------- | -------------|----------------- 40 | 290 | 0.2 | 5 55 | 290 | 0.2 | 5 70 | 290 | 0.2 | 5 40 | 320 | 0.2 | 5 ... | ... | ... | ... ...| ... | ... | ... 40 | 290 | 0.4 | 8 55 | 290 | 0.4 | 8 70 | 290 | 0.4 | 8 40 | 320 | 0.4 | 8 ... | ... | ... | ... ...| ... | ... | ... There are, of course, half-factorial designs to try! ### Latin Hypercube design Sometimes, a set of **randomized design points within a given range** could be attractive for the experimenter to assess the impact of the process variables on the output. [Monte Carlo simulations](https://en.wikipedia.org/wiki/Monte_Carlo_method) are a close example of this approach.
However, a Latin Hypercube design is a better choice for experimental design rather than building a complete random matrix, as it tries to subdivide the sample space in smaller cells and choose only one element out of each subcell. This way, a more **uniform spreading of the random sample points** can be obtained. User can choose the density of sample points. For example, if we choose to generate a Latin Hypercube of 12 experiments from the same input files, that could look like, ``` build.space_filling_lhs( {'Pressure':[40,55,70], 'Temperature':[290, 320, 350], 'Flow rate':[0.2,0.4], 'Time':[5,11]}, num_samples = 12 ) ``` Pressure | Temperature | FlowRate | Time ------------ | ------------- | -------------|----------------- 63.16 | 313.32 | 0.37 | 10.52 61.16 | 343.88 | 0.23 | 5.04 57.83 | 327.46 | 0.35 | 9.47 68.61 | 309.81 | 0.35 | 8.39 66.01 | 301.29 | 0.22 | 6.34 45.76 | 347.97 | 0.27 | 6.94 40.48 | 320.72 | 0.29 | 9.68 51.46 | 293.35 | 0.20 | 7.11 43.63 | 334.92 | 0.30 | 7.66 47.87 | 339.68 | 0.26 | 8.59 55.28 | 317.68 | 0.39 | 5.61 53.99 | 297.07 | 0.32 | 10.43 Of course, there is no guarantee that you will get the same matrix if you run this function because this are randomly sampled, but you get the idea! ### Other functions to try Try any one of the following designs, * Full factorial: `build.full_fact()` * 2-level fractional factorial: `build.frac_fact_res()` * Plackett-Burman: `build.plackett_burman()` * Sukharev grid: `build.sukharev()` * Box-Behnken: ``build.box_behnken()`` * Box-Wilson (Central-composite) with center-faced option: ``build.central_composite()`` with ``face='ccf'`` option * Box-Wilson (Central-composite) with center-inscribed option: ``build.central_composite()`` with ``face='cci'`` option * Box-Wilson (Central-composite) with center-circumscribed option: ``build.central_composite()`` with ``face='ccc'`` option * Latin hypercube (simple): ``build.lhs()`` * Latin hypercube (space-filling): ``build.space_filling_lhs()`` * Random k-means cluster: ``build.random_k_means()`` * Maximin reconstruction: ``build.maximin()`` * Halton sequence based: ``build.halton()`` * Uniform random matrix: ``build.uniform_random()`` ### Read from and write to CSV files Internally, you pass on a dictionary object and get back a Pandas DataFrame. But, for reading from and writing to CSV files, you have to use the `read_write` module of the package. ``` from doepy import read_write data_in=read_write.read_variables_csv('../Data/params.csv') ``` Then you can use this `data_in` object in the DOE generating functions. For writing back to a CSV, ``` df_lhs=build.space_filling_lhs(data_in,num_samples=100) filename = 'lhs' read_write.write_csv(df_lhs,filename=filename) ``` You should see a `lhs.csv` file in your directory. ### A simple pipeline for building a DOE table Combining the `build` functions and the `read_write` module, one can devise a simple pipeline to build a DOE from a CSV file input. Suppose you have a file in your directory called `ranges.csv` that contains min/max values of an arbitrary number of parameters. Just two lines of code will generate a space-filling Latin hypercube design, based on this file, with 100 randomized samples spanning over the min/max ranges and save it to a file called `DOE_table.csv`. 
``` from doepy import build, read_write read_write.write_csv( build.space_filling_lhs(read_write.read_variables_csv('ranges.csv'), num_samples=100), filename='DOE_table.csv' ) ``` ## Features At its heart, `doepy` is just a collection of functions, which wrap around the core packages (mentioned below) and generate **design-of-experiment (DOE) matrices** for a statistician or engineer from an arbitrary range of input variables. ### Limitation of the foundation packages used Both the core packages, which act as foundations to this repo, are not complete in the sense that they do not cover all the necessary functions to generate a DOE table that a design engineer may need while planning an experiment. Also, they offer only low-level APIs in the sense that the standard output from them are normalized numpy arrays. It was felt that users, who may not be comfortable in dealing with Python objects directly, should be able to take advantage of their functionalities through a simplified user interface. ### Simplified user interface There are other DOE generators out there, but they generate n-dimensional arrays. `doepy` is built on the simple theme of being intuitive and easy to work with - for researchers, engineers, and social scientists of all background - not just the ones who can code. **User just needs to provide a simple CSV file with a single table of variables and their ranges (2-level i.e. min/max or 3-level).** Some of the functions work with 2-level min/max range while some others need 3-level ranges from the user (low-mid-high). Intelligence is built into the code to handle the case if the range input is not appropriate and to generate levels by simple linear interpolation from the given input. The code will generate the DOE as per user's choice and write the matrix in a CSV file on to the disk. In this way, **the only API user needs to be exposed to, are input and output CSV files**. These files then can be used in any engineering simulator, software, process-control module, or fed into process equipments. ### Pandas DataFrame support Under the hood, `doepy` generates Numpy arrays and convert them to Pandas DataFrame. Therefore, programmatically, it is simple to get those Numpy arrays or DataFrames to do more, if the user wishes so. ### Coming in a future release - support for more types of files Support for more input/output types will come in future releases - MS Excel, JSON, etc. ### Designs available * Full factorial, * 2-level fractional factorial, * Plackett-Burman, * Sukharev grid, * Box-Behnken, * Box-Wilson (Central-composite) with center-faced option, * Box-Wilson (Central-composite) with center-inscribed option, * Box-Wilson (Central-composite) with center-circumscribed option, * Latin hypercube (simple), * Latin hypercube (space-filling), * Random k-means cluster, * Maximin reconstruction, * Halton sequence based, * Uniform random matrix ## About Design of Experiment ### What is a scientific experiment? 
In its simplest form, a scientific experiment aims to predict the outcome by introducing a change of the preconditions, which is represented by one or more [independent variables](https://en.wikipedia.org/wiki/Dependent_and_independent_variables), also referred to as “input variables” or “predictor variables.” The change in one or more independent variables is generally hypothesized to result in a change in one or more [dependent variables](https://en.wikipedia.org/wiki/Dependent_and_independent_variables), also referred to as “output variables” or “response variables.” The experimental design may also identify [control variables](https://en.wikipedia.org/wiki/Controlling_for_a_variable) that must be held constant to prevent external factors from affecting the results. ### What is Experimental Design? Experimental design involves not only the selection of suitable independent, dependent, and control variables, but planning the delivery of the experiment under statistically optimal conditions given the constraints of available resources. There are multiple approaches for determining the set of design points (unique combinations of the settings of the independent variables) to be used in the experiment. Main concerns in experimental design include the establishment of [validity](https://en.wikipedia.org/wiki/Validity_%28statistics%29), [reliability](https://en.wikipedia.org/wiki/Reliability_%28statistics%29), and [replicability](https://en.wikipedia.org/wiki/Reproducibility). For example, these concerns can be partially addressed by carefully choosing the independent variable, reducing the risk of measurement error, and ensuring that the documentation of the method is sufficiently detailed. Related concerns include achieving appropriate levels of [statistical power](https://en.wikipedia.org/wiki/Statistical_power) and [sensitivity](https://en.wikipedia.org/wiki/Sensitivity_and_specificity). The need for careful design of experiment arises in all fields of serious scientific, technological, and even social science investigation — *computer science, physics, geology, political science, electrical engineering, psychology, business marketing analysis, financial analytics*, etc… ### Options for open-source DOE builder package in Python? Unfortunately, the majority of the state-of-the-art DOE generators are part of commercial statistical software packages like [JMP (SAS)](https://www.jmp.com/) or [Minitab](www.minitab.com/en-US/default.aspx). However, a researcher will surely benefit if there is open-source code that presents an intuitive user interface for generating an experimental design plan from a simple list of input variables. There are a couple of DOE builder Python packages but individually they don’t cover all the necessary DOE methods and they lack a simplified user API, where one can just input a CSV file of input variables’ range and get back the DOE matrix in another CSV file. ## Acknowledgements and Requirements The code was written in Python 3.7. It uses following external packages that needs to be installed on your system to use it, * **`pydoe`**: A package designed to help the scientist, engineer, statistician, etc., to construct appropriate experimental designs. [Check the docs here](https://pythonhosted.org/pyDOE/). * **`diversipy`**: A collection of algorithms for sampling in hypercubes, selecting diverse subsets, and measuring diversity. [Check the docs here](https://www.simonwessing.de/diversipy/doc/). * **`numpy`** * **`pandas`**
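### A quick sketch of the other builders

The designs listed under *Other functions to try* follow the same pattern as the examples above: pass a dictionary of parameter ranges, get back a Pandas DataFrame. The following is a minimal sketch, not part of the original documentation, assuming `build.box_behnken()` and `build.plackett_burman()` accept the same dictionary input as `build.full_fact()` (exact optional arguments may differ):

```
from doepy import build

# 3-level ranges, reusing the chemical-process example from the Quick start
ranges_3level = {'Pressure': [40, 55, 70],
                 'Temperature': [290, 320, 350],
                 'FlowRate': [0.2, 0.3, 0.4],
                 'Time': [5, 6.5, 8]}

# Box-Behnken is a 3-level design; Plackett-Burman only needs min/max levels
df_bb = build.box_behnken(ranges_3level)
df_pb = build.plackett_burman({k: [v[0], v[-1]] for k, v in ranges_3level.items()})

print(df_bb.shape, df_pb.shape)
```

Either DataFrame can then be written out with `read_write.write_csv()` exactly as in the pipeline example above.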
PypiClean
/shimao-nosidebar-frontend-20180926.0.tar.gz/shimao-nosidebar-frontend-20180926.0/hass_frontend_es5/870fcc2320e7d91600a1.chunk.js
(window.webpackJsonp=window.webpackJsonp||[]).push([[44],{138:function(e,t){!function(e){function t(e,t){if("function"==typeof window.CustomEvent)return new CustomEvent(e,t);var n=document.createEvent("CustomEvent");return n.initCustomEvent(e,!!t.bubbles,!!t.cancelable,t.detail),n}function n(e){if(s)return e.ownerDocument!==document?e.ownerDocument:null;var t=e.__importDoc;if(!t&&e.parentNode){if("function"==typeof(t=e.parentNode).closest)t=t.closest("link[rel=import]");else for(;!c(t)&&(t=t.parentNode););e.__importDoc=t}return t}function r(e){function t(){"loading"!==document.readyState&&document.body&&(document.removeEventListener("readystatechange",t),e())}document.addEventListener("readystatechange",t),t()}function o(e){r(function(){return function(e){var t=u(document,"link[rel=import]:not([import-dependency])"),n=t.length;n?d(t,function(t){return i(t,function(){0==--n&&e()})}):e()}(function(){return e&&e()})})}function i(e,t){if(e.__loaded)t&&t();else if("script"===e.localName&&!e.src||"style"===e.localName&&!e.firstChild)e.__loaded=!0,t&&t();else{var n=function n(r){e.removeEventListener(r.type,n),e.__loaded=!0,t&&t()};e.addEventListener("load",n),E&&"style"===e.localName||e.addEventListener("error",n)}}function c(e){return e.nodeType===Node.ELEMENT_NODE&&"link"===e.localName&&"import"===e.rel}function a(){var e=this;this.a={},this.b=0,this.g=new MutationObserver(function(t){return e.w(t)}),this.g.observe(document.head,{childList:!0,subtree:!0}),this.loadImports(document)}function u(e,t){return e.childNodes.length?e.querySelectorAll(t):p}function d(e,t,n){var r=e?e.length:0,o=n?-1:1;for(n=n?r-1:0;n<r&&0<=n;n+=o)t(e[n],n)}var l=document.createElement("link"),s="import"in l,p=l.querySelectorAll("*"),m=null;0=="currentScript"in document&&Object.defineProperty(document,"currentScript",{get:function(){return m||("complete"!==document.readyState?document.scripts[document.scripts.length-1]:null)},configurable:!0});var f=/(url\()([^)]*)(\))/g,h=/(@import[\s]+(?!url\())([^;]*)(;)/g,v=/(<link[^>]*)(rel=['|"]?stylesheet['|"]?[^>]*>)/g,y={u:function(e,t){if(e.href&&e.setAttribute("href",y.c(e.getAttribute("href"),t)),e.src&&e.setAttribute("src",y.c(e.getAttribute("src"),t)),"style"===e.localName){var n=y.o(e.textContent,t,f);e.textContent=y.o(n,t,h)}},o:function(e,t,n){return e.replace(n,function(e,n,r,o){return e=r.replace(/["']/g,""),t&&(e=y.c(e,t)),n+"'"+e+"'"+o})},c:function(e,t){if(void 0===y.f){y.f=!1;try{var n=new URL("b","http://a");n.pathname="c%20d",y.f="http://a/c%20d"===n.href}catch(e){}}return y.f?new URL(e,t).href:((n=y.s)||(n=document.implementation.createHTMLDocument("temp"),y.s=n,n.i=n.createElement("base"),n.head.appendChild(n.i),n.h=n.createElement("a")),n.i.href=t,n.h.href=e,n.h.href||e)}},b={async:!0,load:function(e,t,n){if(e)if(e.match(/^data:/)){var r=(e=e.split(","))[1];r=-1<e[0].indexOf(";base64")?atob(r):decodeURIComponent(r),t(r)}else{var o=new XMLHttpRequest;o.open("GET",e,b.async),o.onload=function(){var e=o.responseURL||o.getResponseHeader("Location");e&&0===e.indexOf("/")&&(e=(location.origin||location.protocol+"//"+location.host)+e);var r=o.response||o.responseText;304===o.status||0===o.status||200<=o.status&&300>o.status?t(r,e):n(r)},o.send()}else n("error: href must be specified")}},E=/Trident/.test(navigator.userAgent)||/Edge\/\d./i.test(navigator.userAgent);a.prototype.loadImports=function(e){var t=this;d(u(e,"link[rel=import]"),function(e){return t.l(e)})},a.prototype.l=function(e){var t=this,n=e.href;if(void 0!==this.a[n]){var 
r=this.a[n];r&&r.__loaded&&(e.__import=r,this.j(e))}else this.b++,this.a[n]="pending",b.load(n,function(e,r){e=t.A(e,r||n),t.a[n]=e,t.b--,t.loadImports(e),t.m()},function(){t.a[n]=null,t.b--,t.m()})},a.prototype.A=function(e,t){if(!e)return document.createDocumentFragment();E&&(e=e.replace(v,function(e,t,n){return-1===e.indexOf("type=")?t+" type=import-disable "+n:e}));var n=document.createElement("template");if(n.innerHTML=e,n.content)!function e(t){d(u(t,"template"),function(t){d(u(t.content,'script:not([type]),script[type="application/javascript"],script[type="text/javascript"]'),function(e){var t=document.createElement("script");d(e.attributes,function(e){return t.setAttribute(e.name,e.value)}),t.textContent=e.textContent,e.parentNode.replaceChild(t,e)}),e(t.content)})}(e=n.content);else for(e=document.createDocumentFragment();n.firstChild;)e.appendChild(n.firstChild);(n=e.querySelector("base"))&&(t=y.c(n.getAttribute("href"),t),n.removeAttribute("href"));var r=0;return d(u(e,'link[rel=import],link[rel=stylesheet][href][type=import-disable],style:not([type]),link[rel=stylesheet][href]:not([type]),script:not([type]),script[type="application/javascript"],script[type="text/javascript"]'),function(e){i(e),y.u(e,t),e.setAttribute("import-dependency",""),"script"===e.localName&&!e.src&&e.textContent&&(e.setAttribute("src","data:text/javascript;charset=utf-8,"+encodeURIComponent(e.textContent+"\n//# sourceURL="+t+(r?"-"+r:"")+".js\n")),e.textContent="",r++)}),e},a.prototype.m=function(){var e=this;if(!this.b){this.g.disconnect(),this.flatten(document);var t=!1,n=!1,r=function(){n&&t&&(e.loadImports(document),e.b||(e.g.observe(document.head,{childList:!0,subtree:!0}),e.v()))};this.C(function(){n=!0,r()}),this.B(function(){t=!0,r()})}},a.prototype.flatten=function(e){var t=this;d(u(e,"link[rel=import]"),function(e){var n=t.a[e.href];(e.__import=n)&&n.nodeType===Node.DOCUMENT_FRAGMENT_NODE&&(t.a[e.href]=e,e.readyState="loading",e.__import=e,t.flatten(n),e.appendChild(n))})},a.prototype.B=function(e){var t=u(document,"script[import-dependency]"),n=t.length;!function r(o){if(o<n){var c=t[o],a=document.createElement("script");c.removeAttribute("import-dependency"),d(c.attributes,function(e){return a.setAttribute(e.name,e.value)}),m=a,c.parentNode.replaceChild(a,c),i(a,function(){m=null,r(o+1)})}else e()}(0)},a.prototype.C=function(e){var t=u(document,"style[import-dependency],link[rel=stylesheet][import-dependency]"),r=t.length;if(r){var o=E&&!!document.querySelector("link[rel=stylesheet][href][type=import-disable]");d(t,function(t){if(i(t,function(){t.removeAttribute("import-dependency"),0==--r&&e()}),o&&t.parentNode!==document.head){var c=document.createElement(t.localName);for(c.__appliedElement=t,c.setAttribute("type","import-placeholder"),t.parentNode.insertBefore(c,t.nextSibling),c=n(t);c&&n(c);)c=n(c);c.parentNode!==document.head&&(c=null),document.head.insertBefore(t,c),t.removeAttribute("type")}})}else e()},a.prototype.v=function(){var e=this;d(u(document,"link[rel=import]"),function(t){return e.j(t)},!0)},a.prototype.j=function(e){e.__loaded||(e.__loaded=!0,e.import&&(e.import.readyState="complete"),e.dispatchEvent(t(e.import?"load":"error",{bubbles:!1,cancelable:!1,detail:void 0})))},a.prototype.w=function(e){var t=this;d(e,function(e){return d(e.addedNodes,function(e){e&&e.nodeType===Node.ELEMENT_NODE&&(c(e)?t.l(e):t.loadImports(e))})})};var 
g=null;if(s)d(u(document,"link[rel=import]"),function(e){e.import&&"loading"===e.import.readyState||(e.__loaded=!0)}),l=function(e){c(e=e.target)&&(e.__loaded=!0)},document.addEventListener("load",l,!0),document.addEventListener("error",l,!0);else{var _=Object.getOwnPropertyDescriptor(Node.prototype,"baseURI");Object.defineProperty((!_||_.configurable?Node:Element).prototype,"baseURI",{get:function(){var e=c(this)?this:n(this);return e?e.href:_&&_.get?_.get.call(this):(document.querySelector("base")||window.location).href},configurable:!0,enumerable:!0}),Object.defineProperty(HTMLLinkElement.prototype,"import",{get:function(){return this.__import||null},configurable:!0,enumerable:!0}),r(function(){g=new a})}o(function(){return document.dispatchEvent(t("HTMLImportsLoaded",{cancelable:!0,bubbles:!0,detail:void 0}))}),e.useNative=s,e.whenReady=o,e.importForElement=n,e.loadImports=function(e){g&&g.loadImports(e)}}(window.HTMLImports=window.HTMLImports||{})},251:function(e,t,n){"use strict";function r(e){window.HTMLImports?HTMLImports.whenReady(e):e()}n.r(t),n.d(t,"importHref",function(){return o}),n.d(t,"importHrefPromise",function(){return i}),n(138);var o=function(e,t,n,o){var i=document.head.querySelector('link[href="'+e+'"][import-href]');i||((i=document.createElement("link")).rel="import",i.href=e,i.setAttribute("import-href","")),o&&i.setAttribute("async","");var c=function(){i.removeEventListener("load",a),i.removeEventListener("error",u)},a=function(e){c(),i.__dynamicImportLoaded=!0,t&&r(function(){t(e)})},u=function(e){c(),i.parentNode&&i.parentNode.removeChild(i),n&&r(function(){n(e)})};return i.addEventListener("load",a),i.addEventListener("error",u),null==i.parentNode?document.head.appendChild(i):i.__dynamicImportLoaded&&i.dispatchEvent(new Event("load")),i},i=function(e){return new Promise(function(t,n){return o(e,t,n)})}}}]); //# sourceMappingURL=870fcc2320e7d91600a1.chunk.js.map
PypiClean
/eureka-opensource-1.0.3.tar.gz/eureka-opensource-1.0.3/static/yui-3.4.1/event-base-ie/event-base-ie-debug.js
(function() { var stateChangeListener, GLOBAL_ENV = YUI.Env, config = YUI.config, doc = config.doc, docElement = doc && doc.documentElement, EVENT_NAME = 'onreadystatechange', pollInterval = config.pollInterval || 40; if (docElement.doScroll && !GLOBAL_ENV._ieready) { GLOBAL_ENV._ieready = function() { GLOBAL_ENV._ready(); }; /*! DOMReady: based on work by: Dean Edwards/John Resig/Matthias Miller/Diego Perini */ // Internet Explorer: use the doScroll() method on the root element. // This isolates what appears to be a safe moment to manipulate the // DOM prior to when the document's readyState suggests it is safe to do so. if (self !== self.top) { stateChangeListener = function() { if (doc.readyState == 'complete') { GLOBAL_ENV.remove(doc, EVENT_NAME, stateChangeListener); GLOBAL_ENV.ieready(); } }; GLOBAL_ENV.add(doc, EVENT_NAME, stateChangeListener); } else { GLOBAL_ENV._dri = setInterval(function() { try { docElement.doScroll('left'); clearInterval(GLOBAL_ENV._dri); GLOBAL_ENV._dri = null; GLOBAL_ENV._ieready(); } catch (domNotReady) { } }, pollInterval); } } })(); YUI.add('event-base-ie', function(Y) { /* * Custom event engine, DOM event listener abstraction layer, synthetic DOM * events. * @module event * @submodule event-base */ function IEEventFacade() { // IEEventFacade.superclass.constructor.apply(this, arguments); Y.DOM2EventFacade.apply(this, arguments); } /* * (intentially left out of API docs) * Alternate Facade implementation that is based on Object.defineProperty, which * is partially supported in IE8. Properties that involve setup work are * deferred to temporary getters using the static _define method. */ function IELazyFacade(e) { var proxy = Y.config.doc.createEventObject(e), proto = IELazyFacade.prototype; // TODO: necessary? proxy.hasOwnProperty = function () { return true; }; proxy.init = proto.init; proxy.halt = proto.halt; proxy.preventDefault = proto.preventDefault; proxy.stopPropagation = proto.stopPropagation; proxy.stopImmediatePropagation = proto.stopImmediatePropagation; Y.DOM2EventFacade.apply(proxy, arguments); return proxy; } var imp = Y.config.doc && Y.config.doc.implementation, useLazyFacade = Y.config.lazyEventFacade, buttonMap = { 0: 1, // left click 4: 2, // middle click 2: 3 // right click }, relatedTargetMap = { mouseout: 'toElement', mouseover: 'fromElement' }, resolve = Y.DOM2EventFacade.resolve, proto = { init: function() { IEEventFacade.superclass.init.apply(this, arguments); var e = this._event, x, y, d, b, de, t; this.target = resolve(e.srcElement); if (('clientX' in e) && (!x) && (0 !== x)) { x = e.clientX; y = e.clientY; d = Y.config.doc; b = d.body; de = d.documentElement; x += (de.scrollLeft || (b && b.scrollLeft) || 0); y += (de.scrollTop || (b && b.scrollTop) || 0); this.pageX = x; this.pageY = y; } if (e.type == "mouseout") { t = e.toElement; } else if (e.type == "mouseover") { t = e.fromElement; } // fallback to t.relatedTarget to support simulated events. // IE doesn't support setting toElement or fromElement on generic // events, so Y.Event.simulate sets relatedTarget instead. this.relatedTarget = resolve(t || e.relatedTarget); // which should contain the unicode key code if this is a key event // if (e.charCode) { // this.which = e.charCode; // } // for click events, which is normalized for which mouse button was // clicked. 
if (e.button !== undefined) { this.which = this.button = buttonMap[e.button] || e.button; } }, stopPropagation: function() { this._event.cancelBubble = true; this._wrapper.stopped = 1; this.stopped = 1; }, stopImmediatePropagation: function() { this.stopPropagation(); this._wrapper.stopped = 2; this.stopped = 2; }, preventDefault: function(returnValue) { this._event.returnValue = returnValue || false; this._wrapper.prevented = 1; this.prevented = 1; } }; Y.extend(IEEventFacade, Y.DOM2EventFacade, proto); Y.extend(IELazyFacade, Y.DOM2EventFacade, proto); IELazyFacade.prototype.init = function () { var e = this._event, overrides = this._wrapper.overrides, define = IELazyFacade._define, lazyProperties = IELazyFacade._lazyProperties, prop; this.altKey = e.altKey; this.ctrlKey = e.ctrlKey; this.metaKey = e.metaKey; this.shiftKey = e.shiftKey; this.type = (overrides && overrides.type) || e.type; this.clientX = e.clientX; this.clientY = e.clientY; for (prop in lazyProperties) { if (lazyProperties.hasOwnProperty(prop)) { define(this, prop, lazyProperties[prop]); } } if (this._touch) { this._touch(e, this._currentTarget, this._wrapper); } }; IELazyFacade._lazyProperties = { charCode: function () { var e = this._event; return e.keyCode || e.charCode; }, keyCode: function () { return this.charCode; }, button: function () { var e = this._event; return (e.button !== undefined) ? (buttonMap[e.button] || e.button) : (e.which || e.charCode || this.charCode); }, which: function () { return this.button; }, target: function () { return resolve(this._event.srcElement); }, relatedTarget: function () { var e = this._event, targetProp = relatedTargetMap[e.type] || 'relatedTarget'; // fallback to t.relatedTarget to support simulated events. // IE doesn't support setting toElement or fromElement on generic // events, so Y.Event.simulate sets relatedTarget instead. return resolve(e[targetProp] || e.relatedTarget); }, currentTarget: function () { return resolve(this._currentTarget); }, wheelDelta: function () { var e = this._event; if (e.type === "mousewheel" || e.type === "DOMMouseScroll") { return (e.detail) ? (e.detail * -1) : // wheelDelta between -80 and 80 result in -1 or 1 Math.round(e.wheelDelta / 80) || ((e.wheelDelta < 0) ? -1 : 1); } }, pageX: function () { var e = this._event, val = e.pageX, doc, bodyScroll, docScroll; if (val === undefined) { doc = Y.config.doc; bodyScroll = doc.body && doc.body.scrollLeft; docScroll = doc.documentElement.scrollLeft; val = e.clientX + (docScroll || bodyScroll || 0); } return val; }, pageY: function () { var e = this._event, val = e.pageY, doc, bodyScroll, docScroll; if (val === undefined) { doc = Y.config.doc; bodyScroll = doc.body && doc.body.scrollTop; docScroll = doc.documentElement.scrollTop; val = e.clientY + (docScroll || bodyScroll || 0); } return val; } }; /** * Wrapper function for Object.defineProperty that creates a property whose * value will be calulated only when asked for. After calculating the value, * the getter wll be removed, so it will behave as a normal property beyond that * point. A setter is also assigned so assigning to the property will clear * the getter, so foo.prop = 'a'; foo.prop; won't trigger the getter, * overwriting value 'a'. * * Used only by the DOMEventFacades used by IE8 when the YUI configuration * <code>lazyEventFacade</code> is set to true. 
* * @method _define * @param o {DOMObject} A DOM object to add the property to * @param prop {String} The name of the new property * @param valueFn {Function} The function that will return the initial, default * value for the property. * @static * @private */ IELazyFacade._define = function (o, prop, valueFn) { function val(v) { var ret = (arguments.length) ? v : valueFn.call(this); delete o[prop]; Object.defineProperty(o, prop, { value: ret, configurable: true, writable: true }); return ret; } Object.defineProperty(o, prop, { get: val, set: val, configurable: true }); }; if (imp && (!imp.hasFeature('Events', '2.0'))) { if (useLazyFacade) { // Make sure we can use the lazy facade logic try { Object.defineProperty(Y.config.doc.createEventObject(), 'z', {}); } catch (e) { useLazyFacade = false; } } Y.DOMEventFacade = (useLazyFacade) ? IELazyFacade : IEEventFacade; } }, '3.4.1' ,{after:['event-base'], requires:['node-base']});
PypiClean
/sphinxcontrib-autoprogram-0.1.8.tar.gz/sphinxcontrib-autoprogram-0.1.8/doc/changelog.rst
Changelog ========= Version 0.1.8 ------------- Released on February 11, 2023. - Test against Python 3.10, 3.11. Version 0.1.7 ------------- Released on February 10, 2021. - Publish to PyPI via GitHub Actions. Version 0.1.6 ------------- Released on February 10, 2021. - Dropped support for Python 2 and PyPy - Declare this extension safe for parallel reading - Migrate to GitHub Actions for CI [:issue:`28`, :pull:`32` by Langston Barrett] - Test against recent versions of Sphinx [:issue:`33`, :pull:`32` by Langston Barrett] - Format source code with Black [:issue:`30`, :pull:`32` by Langston Barrett] - Add documentation to the ``sdist`` [:issue:`26`, :pull:`32` by Langston Barrett] - Fixed unwanted ``<blockquote>`` tags in multi-line command descriptions that are indented to match surrounding code. [:pull:`21` by dgw] Version 0.1.5 ------------- Released on May 15, 2018. - New ``:groups:`` option to render argument groups. [by Lukas Atkinson] Version 0.1.4 ------------- Released on February 27, 2018. - Fixed a :rst:dir:`.. autoprogram::` bug that raised :exc:`AttributeError` during build without the ``:no_usage_codeblock:`` option on Python 2. [:bbissue:`168`, :bbissue:`169`] - Fixed an issue with Sphinx 1.7, which removed ``sphinx.util.compat``. [:issue:`1`, :pull:`2` by Zach Riggle] Version 0.1.3 ------------- Released on October 7, 2016. - Fixed a bug where descriptions using :class:`~argparse.RawTextHelpFormatter` were incorrectly formatted. [:bbpull:`123` by Aaron Meurer] - Fixed a crash when the metavar is a tuple (i.e. for ``nargs > 1``). [:bbpull:`112` by Alex Honeywell] - Fixed usage string for subcommands (subcommands were previously showing the top-level command usage). [:bbpull:`112` by Alex Honeywell] - Added :ref:`new options <autoprogram-options>` to the :rst:dir:`.. autoprogram::` directive: [:bbpull:`112` by Alex Honeywell] - ``maxdepth`` - ``no_usage_codeblock`` - ``start_command`` - ``strip_usage`` - Suppressed arguments (using the :const:`argparse.SUPPRESS` flag) are now ignored. [:bbissue:`166`] Version 0.1.2 ------------- Released on August 18, 2015. - Fixed crash with empty fields. [:bbissue:`110`] - Fixed :exc:`ImportError` with non-module Python scripts (i.e. files not ending with :file:`.py`). [:bbpull:`101` by Matteo Bachetti] Version 0.1.1 ------------- Released on April 22, 2014. - Omit metavars of ``store_const``/``store_true``/``store_false`` options. - Sort subcommands in alphabetical order on Python 2.6, which doesn't have :class:`collections.OrderedDict`. Version 0.1.0 ------------- Released on March 2, 2014. The first release.
PypiClean
/backend-library-createquestions-0.1.0.tar.gz/backend-library-createquestions-0.1.0/question_builder/data/question_type_repository/question_getter.py
from collections import defaultdict from .. import config from ..config import QuestionMode from ..domain.data_converters import to_verb_question from ..domain.lemma_data import LemmaData from neo4j import DirectDriver from neo4j import BoltStatementResult from typing import List, Dict class QuestionGetter: """ Handles all the db operations related to user content management """ def __init__(self, driver: DirectDriver, mode: str): """ Initializes the neo4j db """ self._driver = driver self.mode = mode self.mode_function_registry = { QuestionMode.N_QUESTIONS_MODE: self._get_n_questions, QuestionMode.N_QUESTIONS_PER_WORD_MODE: self._get_n_questions_per_word, } def get(self, user_id: str, lemma_list: List[str], n_questions: int, n_questions_per_word: int) -> Dict[str, List[LemmaData]]: with self._driver.session() as session: lemma_information = self._get_lemma_information( session, user_id, lemma_list ) lemma_to_questions = defaultdict(list) for res in session.read_transaction( self.mode_function_registry[self.mode], user_id, list(lemma_information.keys()), n_questions, n_questions_per_word, ): content_rel_json = res[config.CONTENT_KEY] target_lemma = content_rel_json[config.LEMMA_KEY] content_rel_json[config.VERBGAMES_ITEMS_KEY][ config.LEMMA_CONJUGATIONS_KEY ] = lemma_information[target_lemma][config.LEMMA_CONJUGATIONS_KEY] verb_question = to_verb_question(content_rel_json) level = lemma_information[target_lemma][config.LEVEL_KEY] mastered = lemma_information[target_lemma][config.MASTERED_KEY] lemma_data = LemmaData(verb_question, level, mastered) lemma_to_questions[target_lemma].append(lemma_data) return lemma_to_questions def _get_lemma_information(self, session, user_id: str, lemma_list: List[str]) -> Dict: lemma_dict = {} for res in session.read_transaction(self._get_lemma_data, user_id, lemma_list): target_lemma = res[config.LEMMA_KEY] mastered = res[config.MASTERED_KEY] level = res[config.LEVEL_KEY] lemma_conjugations = res[config.LEMMA_CONJUGATIONS_KEY] lemma_dict[target_lemma] = { config.LEVEL_KEY: level, config.MASTERED_KEY: mastered, config.LEMMA_CONJUGATIONS_KEY: lemma_conjugations, } return lemma_dict @classmethod def _get_n_questions( cls, tx, user_id: str, lemma_list: List[str], n_questions: int, n_questions_per_word: int ) -> BoltStatementResult: return tx.run( cls.query_n_data_questions, user_id=user_id, lemma_list=lemma_list, n_questions=n_questions, ) @classmethod def _get_n_questions_per_word( cls, tx, user_id: str, lemma_list: List[str], n_questions: int, n_questions_per_word: int ) -> BoltStatementResult: return tx.run( cls.query_n_data_questions_per_word, user_id=user_id, lemma_list=lemma_list, n_questions_per_word=n_questions_per_word, ) @classmethod def _get_lemma_data( cls, tx, user_id: str, lemma_list: List ) -> BoltStatementResult: return tx.run(cls.query_lemma_data, user_id=user_id, lemma_list=lemma_list)
PypiClean